hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5a62009ad2b82b577462a38161332570b8a7e124 | 4,314 | py | Python | tests/integration/tools/test_zoom_in_tool.py | karen-poon/bokeh | d0ecb0c9089fc0838ca36371814430af51901708 | [
"BSD-3-Clause"
] | null | null | null | tests/integration/tools/test_zoom_in_tool.py | karen-poon/bokeh | d0ecb0c9089fc0838ca36371814430af51901708 | [
"BSD-3-Clause"
] | null | null | null | tests/integration/tools/test_zoom_in_tool.py | karen-poon/bokeh | d0ecb0c9089fc0838ca36371814430af51901708 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from bokeh._testing.plugins.project import SinglePlotPage
from bokeh._testing.util.selenium import RECORD
from bokeh.events import RangesUpdate
from bokeh.models import (
ColumnDataSource,
CustomAction,
CustomJS,
Plot,
Range1d,
Rect,
ZoomInTool,
)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
# Load the project-level pytest plugin so its fixtures (e.g. the
# single_plot_page fixture used by the tests below) are available here.
pytest_plugins = (
    "bokeh._testing.plugins.project",
)
def _make_plot():
    """Build a 400x400 test plot carrying a ZoomInTool plus a CustomAction
    whose callback records the current x/y range bounds into the page results.
    """
    data = ColumnDataSource(dict(x=[1, 2], y=[1, 1]))
    fig = Plot(height=400, width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
    fig.add_glyph(data, Rect(x='x', y='y', width=0.9, height=0.9))
    fig.add_tools(ZoomInTool())
    # Concatenate the RECORD snippets; only the last one is flagged final.
    record_js = "".join([
        RECORD("xrstart", "p.x_range.start", final=False),
        RECORD("xrend", "p.x_range.end", final=False),
        RECORD("yrstart", "p.y_range.start", final=False),
        RECORD("yrend", "p.y_range.end"),
    ])
    fig.add_tools(CustomAction(callback=CustomJS(args=dict(p=fig), code=record_js)))
    fig.toolbar_sticky = False
    return fig
@pytest.mark.selenium
class Test_ZoomInTool:
    """Browser-driven (Selenium) integration tests for the zoom-in tool."""
    def test_deselected_by_default(self, single_plot_page: SinglePlotPage) -> None:
        """The zoom-in toolbar button must start inactive."""
        plot = _make_plot()
        page = single_plot_page(plot)
        button = page.get_toolbar_button('zoom-in')
        assert 'active' not in button.get_attribute('class')
        assert page.has_no_console_errors()
    def test_clicking_zooms_in(self, single_plot_page: SinglePlotPage) -> None:
        """Each click must shrink both ranges strictly inside the previous bounds."""
        plot = _make_plot()
        page = single_plot_page(plot)
        button = page.get_toolbar_button('zoom-in')
        button.click()
        # The CustomAction records the current range bounds into page.results.
        page.click_custom_action()
        first = page.results
        # After one click, ranges are strictly inside the initial [0, 1] span.
        assert first['xrstart'] > 0
        assert first['xrend'] < 1
        assert first['yrstart'] > 0
        assert first['yrend'] < 1
        button = page.get_toolbar_button('zoom-in')
        button.click()
        page.click_custom_action()
        second = page.results
        # A second click narrows the ranges again, relative to the first zoom.
        assert second['xrstart'] > first['xrstart']
        assert second['xrend'] < first['xrend']
        assert second['yrstart'] > first['yrstart']
        assert second['yrend'] < first['yrend']
        assert page.has_no_console_errors()
    def test_ranges_udpate(self, single_plot_page: SinglePlotPage) -> None:
        """Zooming in must fire a RangesUpdate event carrying the new bounds."""
        # NOTE(review): "udpate" is a typo for "update"; kept as-is because the
        # method name is how pytest discovers and reports this test.
        source = ColumnDataSource(dict(x=[1, 2], y=[1, 1]))
        plot = Plot(height=400, width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
        plot.add_glyph(source, Rect(x='x', y='y', width=0.9, height=0.9))
        plot.add_tools(ZoomInTool())
        code = RECORD("event_name", "cb_obj.event_name", final=False) + \
               RECORD("x0", "cb_obj.x0", final=False) + \
               RECORD("x1", "cb_obj.x1", final=False) + \
               RECORD("y0", "cb_obj.y0", final=False) + \
               RECORD("y1", "cb_obj.y1")
        # Record the event payload rather than the plot ranges this time.
        plot.js_on_event(RangesUpdate, CustomJS(code=code))
        plot.add_tools(CustomAction(callback=CustomJS(code="")))
        plot.toolbar_sticky = False
        page = single_plot_page(plot)
        button = page.get_toolbar_button('zoom-in')
        button.click()
        page.click_custom_action()
        results = page.results
        assert results['event_name'] == "rangesupdate"
        # The reported window must be strictly inside the initial [0, 1] ranges.
        assert results['x0'] > 0
        assert results['x1'] < 1
        assert results['y0'] > 0
        assert results['y1'] < 1
        assert page.has_no_console_errors()
| 34.238095 | 102 | 0.541725 |
2c3e8616e1e60372adf57c7af481a68b4fb5a39c | 5,122 | py | Python | tests/keras/layers/pooling_test.py | kalyc/keras-apache-mxnet | 5497ebd50a45ccc446b8944ebbe11fb7721a5533 | [
"MIT"
] | 300 | 2018-04-04T05:01:21.000Z | 2022-02-25T18:56:04.000Z | tests/keras/layers/pooling_test.py | kalyc/keras-apache-mxnet | 5497ebd50a45ccc446b8944ebbe11fb7721a5533 | [
"MIT"
] | 163 | 2018-04-03T17:41:22.000Z | 2021-09-03T16:44:04.000Z | tests/keras/layers/pooling_test.py | kalyc/keras-apache-mxnet | 5497ebd50a45ccc446b8944ebbe11fb7721a5533 | [
"MIT"
] | 72 | 2018-04-21T06:42:30.000Z | 2021-12-26T06:02:42.000Z | import numpy as np
import pytest
from keras.utils.test_utils import layer_test
from keras.layers import pooling
from keras.layers import Masking
from keras.layers import convolutional
from keras.models import Sequential
@pytest.mark.parametrize(
    'padding,stride,data_format',
    [(pad, st, fmt)
     for pad in ['valid', 'same']
     for st in [1, 2]
     for fmt in ['channels_first', 'channels_last']
     if not (pad == 'same' and st == 1)]
)
def test_maxpooling_1d(padding, stride, data_format):
    """MaxPooling1D builds and runs for every padding/stride/data_format combo."""
    pool_kwargs = {
        'strides': stride,
        'padding': padding,
        'data_format': data_format,
    }
    layer_test(convolutional.MaxPooling1D,
               kwargs=pool_kwargs,
               input_shape=(3, 5, 4))
@pytest.mark.parametrize(
    'strides',
    [(1, 1), (2, 3)]
)
def test_maxpooling_2d(strides):
    """MaxPooling2D builds and runs with a 3x3 pool under each stride setting."""
    layer_test(convolutional.MaxPooling2D,
               kwargs={'strides': strides,
                       'padding': 'valid',
                       'pool_size': (3, 3)},
               input_shape=(3, 5, 6, 4))
@pytest.mark.parametrize(
    'strides,data_format,input_shape',
    [(2, None, (3, 11, 12, 10, 4)),
     (3, 'channels_first', (3, 4, 11, 12, 10))]
)
def test_maxpooling_3d(strides, data_format, input_shape):
    """MaxPooling3D builds and runs in the default (channels-last) and
    channels-first layouts; the input shape is permuted to match."""
    pool_size = (3, 3, 3)
    layer_test(convolutional.MaxPooling3D,
               kwargs={'strides': strides,
                       'padding': 'valid',
                       'data_format': data_format,
                       'pool_size': pool_size},
               input_shape=input_shape)
@pytest.mark.parametrize(
    'padding,stride,data_format',
    [(pad, st, fmt)
     for pad in ['valid', 'same']
     for st in [1, 2]
     for fmt in ['channels_first', 'channels_last']
     if not (pad == 'same' and st == 1)]
)
def test_averagepooling_1d(padding, stride, data_format):
    """AveragePooling1D builds and runs for every padding/stride/data_format combo."""
    pool_kwargs = {
        'strides': stride,
        'padding': padding,
        'data_format': data_format,
    }
    layer_test(convolutional.AveragePooling1D,
               kwargs=pool_kwargs,
               input_shape=(3, 5, 4))
@pytest.mark.parametrize(
    'strides,padding,data_format,input_shape',
    [((2, 2), 'same', None, (3, 5, 6, 4)),
     ((2, 2), 'valid', None, (3, 5, 6, 4)),
     ((1, 1), 'valid', 'channels_first', (3, 4, 5, 6))]
)
def test_averagepooling_2d(strides, padding, data_format, input_shape):
    """AveragePooling2D builds and runs with a 2x2 pool across both padding
    modes and both data formats."""
    layer_test(convolutional.AveragePooling2D,
               kwargs={'strides': strides,
                       'padding': padding,
                       'pool_size': (2, 2),
                       'data_format': data_format},
               input_shape=input_shape)
@pytest.mark.parametrize(
    'strides,data_format,input_shape',
    [(2, None, (3, 11, 12, 10, 4)),
     (3, 'channels_first', (3, 4, 11, 12, 10))]
)
def test_averagepooling_3d(strides, data_format, input_shape):
    """AveragePooling3D builds and runs in the default (channels-last) and
    channels-first layouts; the input shape is permuted to match."""
    layer_test(convolutional.AveragePooling3D,
               kwargs={'strides': strides,
                       'padding': 'valid',
                       'data_format': data_format,
                       'pool_size': (3, 3, 3)},
               input_shape=input_shape)
@pytest.mark.parametrize(
    'data_format,pooling_class',
    [(fmt, cls)
     for fmt in ['channels_first', 'channels_last']
     for cls in [pooling.GlobalMaxPooling1D,
                 pooling.GlobalAveragePooling1D]]
)
def test_globalpooling_1d(data_format, pooling_class):
    """Each 1D global pooling layer builds and runs in both data formats."""
    layer_test(pooling_class,
               kwargs={'data_format': data_format},
               input_shape=(3, 4, 5))
def test_globalpooling_1d_supports_masking():
    """GlobalAveragePooling1D must average only the unmasked timesteps."""
    # Test GlobalAveragePooling1D supports masking
    model = Sequential()
    model.add(Masking(mask_value=0., input_shape=(3, 4)))
    model.add(pooling.GlobalAveragePooling1D())
    model.compile(loss='mae', optimizer='adam')
    model_input = np.random.randint(low=1, high=5, size=(2, 3, 4))
    # Zero (= mask_value) every timestep of sample 0 except the first, so
    # the Masking layer masks those steps out.
    model_input[0, 1:, :] = 0
    output = model.predict(model_input)
    # With steps 1..2 masked, sample 0's pooled output must equal its sole
    # unmasked timestep exactly.
    assert np.array_equal(output[0], model_input[0, 0, :])
@pytest.mark.parametrize(
    'data_format,pooling_class',
    [(data_format, pooling_class)
     for data_format in ['channels_first', 'channels_last']
     for pooling_class in [pooling.GlobalMaxPooling2D,
                           pooling.GlobalAveragePooling2D]]
)
def test_globalpooling_2d(data_format, pooling_class):
    """Each 2D global pooling layer builds and runs in both data formats."""
    layer_test(pooling_class,
               kwargs={'data_format': data_format},
               input_shape=(3, 4, 5, 6))
@pytest.mark.parametrize(
    'data_format,pooling_class',
    [(fmt, cls)
     for fmt in ['channels_first', 'channels_last']
     for cls in [pooling.GlobalMaxPooling3D,
                 pooling.GlobalAveragePooling3D]]
)
def test_globalpooling_3d(data_format, pooling_class):
    """Each 3D global pooling layer builds and runs in both data formats."""
    layer_test(pooling_class,
               kwargs={'data_format': data_format},
               input_shape=(3, 4, 3, 4, 3))
# Allow running this test module directly: `python pooling_test.py`.
if __name__ == '__main__':
    pytest.main([__file__])
| 32.833333 | 71 | 0.611089 |
a4908b2121809177e435d403900e1ccb8d0846e6 | 13,542 | py | Python | tests/delete_regress/tests.py | webjunkie/django | 5dbca13f3baa2e1bafd77e84a80ad6d8a074712e | [
"BSD-3-Clause"
] | 166 | 2015-01-07T08:23:17.000Z | 2022-02-23T00:09:44.000Z | tests/delete_regress/tests.py | webjunkie/django | 5dbca13f3baa2e1bafd77e84a80ad6d8a074712e | [
"BSD-3-Clause"
] | 107 | 2015-01-19T22:11:09.000Z | 2021-09-18T19:29:44.000Z | tests/delete_regress/tests.py | webjunkie/django | 5dbca13f3baa2e1bafd77e84a80ad6d8a074712e | [
"BSD-3-Clause"
] | 89 | 2015-01-08T19:52:16.000Z | 2021-12-17T11:26:53.000Z | from __future__ import absolute_import
import datetime
from django.conf import settings
from django.db import transaction, DEFAULT_DB_ALIAS, models
from django.db.utils import ConnectionHandler
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import (Book, Award, AwardNote, Person, Child, Toy, PlayedWith,
PlayedWithNote, Email, Researcher, Food, Eaten, Policy, Version, Location,
Item, Image, File, Photo, FooFile, FooImage, FooPhoto, FooFileProxy, Login,
OrgUnit, OrderedPerson, House)
# Can't run this test under SQLite, because you can't
# get two connections to an in-memory database.
class DeleteLockingTest(TransactionTestCase):
    """Concurrency regression tests: deletes issued on two separate DB
    connections must not deadlock each other (refs #9479)."""
    available_apps = ['delete_regress']
    def setUp(self):
        # Create a second connection to the default database
        new_connections = ConnectionHandler(settings.DATABASES)
        self.conn2 = new_connections[DEFAULT_DB_ALIAS]
        # Put both DB connections into managed transaction mode
        transaction.enter_transaction_management()
        self.conn2.enter_transaction_management()
    def tearDown(self):
        # Close down the second connection.
        transaction.leave_transaction_management()
        self.conn2.abort()
        self.conn2.close()
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_concurrent_delete(self):
        "Deletes on concurrent transactions don't collide and lock the database. Regression for #9479"
        # Create some dummy data
        b1 = Book(id=1, pagecount=100)
        b2 = Book(id=2, pagecount=200)
        b3 = Book(id=3, pagecount=300)
        b1.save()
        b2.save()
        b3.save()
        transaction.commit()
        self.assertEqual(3, Book.objects.count())
        # Delete something using connection 2.
        cursor2 = self.conn2.cursor()
        cursor2.execute('DELETE from delete_regress_book WHERE id=1')
        self.conn2._commit()
        # Now perform a queryset delete that covers the object
        # deleted in connection 2. This causes an infinite loop
        # under MySQL InnoDB unless we keep track of already
        # deleted objects.
        Book.objects.filter(pagecount__lt=250).delete()
        transaction.commit()
        self.assertEqual(1, Book.objects.count())
        transaction.commit()
class DeleteCascadeTests(TestCase):
    """Cascade deletion through generic relations, explicit M2M through
    models, and chains of FK dependencies."""
    def test_generic_relation_cascade(self):
        """
        Django cascades deletes through generic-related objects to their
        reverse relations.
        """
        person = Person.objects.create(name='Nelson Mandela')
        award = Award.objects.create(name='Nobel', content_object=person)
        # The note only needs to exist; no local binding required.
        AwardNote.objects.create(note='a peace prize', award=award)
        self.assertEqual(AwardNote.objects.count(), 1)
        person.delete()
        self.assertEqual(Award.objects.count(), 0)
        # first two asserts are just sanity checks, this is the kicker:
        self.assertEqual(AwardNote.objects.count(), 0)
    def test_fk_to_m2m_through(self):
        """
        If an M2M relationship has an explicitly-specified through model, and
        some other model has an FK to that through model, deletion is cascaded
        from one of the participants in the M2M, to the through model, to its
        related model.
        """
        juan = Child.objects.create(name='Juan')
        paints = Toy.objects.create(name='Paints')
        played = PlayedWith.objects.create(child=juan, toy=paints,
                                           date=datetime.date.today())
        PlayedWithNote.objects.create(played=played,
                                      note='the next Jackson Pollock')
        self.assertEqual(PlayedWithNote.objects.count(), 1)
        paints.delete()
        self.assertEqual(PlayedWith.objects.count(), 0)
        # first two asserts just sanity checks, this is the kicker:
        self.assertEqual(PlayedWithNote.objects.count(), 0)
    def test_15776(self):
        """Deleting a Policy cascades through Version to Location and Item
        without raising. Regression for #15776."""
        policy = Policy.objects.create(pk=1, policy_number="1234")
        version = Version.objects.create(policy=policy)
        location = Location.objects.create(version=version)
        Item.objects.create(version=version, location=location)
        policy.delete()
class DeleteCascadeTransactionTests(TransactionTestCase):
    """Cascade-deletion cases that need real transactions (auto-created M2M
    tables on inherited models, and FKs targeting a non-PK field)."""
    available_apps = ['delete_regress']
    def test_inheritance(self):
        """
        Auto-created many-to-many through tables referencing a parent model are
        correctly found by the delete cascade when a child of that parent is
        deleted.
        Refs #14896.
        """
        r = Researcher.objects.create()
        email = Email.objects.create(
            label="office-email", email_address="carl@science.edu"
        )
        r.contacts.add(email)
        email.delete()
    def test_to_field(self):
        """
        Cascade deletion works with ForeignKey.to_field set to non-PK.
        """
        apple = Food.objects.create(name="apple")
        eaten = Eaten.objects.create(food=apple, meal="lunch")
        apple.delete()
        # Deleting the target of the to_field FK removes both rows.
        self.assertFalse(Food.objects.exists())
        self.assertFalse(Eaten.objects.exists())
class LargeDeleteTests(TestCase):
    """Deletion must work when the object count exceeds the deletion
    collector's chunk size (refs #13309)."""
    def test_large_deletes(self):
        "Regression for #13309 -- if the number of objects > chunk size, deletion still occurs"
        for x in range(300):
            # No local binding needed; the rows only have to exist.
            Book.objects.create(pagecount=x + 100)
        # attach a signal to make sure we will not fast-delete
        def noop(*args, **kwargs):
            pass
        models.signals.post_delete.connect(noop, sender=Book)
        Book.objects.all().delete()
        models.signals.post_delete.disconnect(noop, sender=Book)
        self.assertEqual(Book.objects.count(), 0)
class ProxyDeleteTest(TestCase):
    """
    Tests on_delete behavior for proxy models.
    See #16128.
    """
    def create_image(self):
        """Return an Image referenced by both a FooImage and a FooFile."""
        # Create an Image
        test_image = Image()
        test_image.save()
        foo_image = FooImage(my_image=test_image)
        foo_image.save()
        # Get the Image instance as a File
        test_file = File.objects.get(pk=test_image.pk)
        foo_file = FooFile(my_file=test_file)
        foo_file.save()
        return test_image
    def test_delete_proxy(self):
        """
        Deleting the *proxy* instance bubbles through to its non-proxy and
        *all* referring objects are deleted.
        """
        self.create_image()
        Image.objects.all().delete()
        # An Image deletion == File deletion
        self.assertEqual(len(Image.objects.all()), 0)
        self.assertEqual(len(File.objects.all()), 0)
        # The Image deletion cascaded and *all* references to it are deleted.
        self.assertEqual(len(FooImage.objects.all()), 0)
        self.assertEqual(len(FooFile.objects.all()), 0)
    def test_delete_proxy_of_proxy(self):
        """
        Deleting a proxy-of-proxy instance should bubble through to its proxy
        and non-proxy parents, deleting *all* referring objects.
        """
        test_image = self.create_image()
        # Get the Image as a Photo
        test_photo = Photo.objects.get(pk=test_image.pk)
        foo_photo = FooPhoto(my_photo=test_photo)
        foo_photo.save()
        Photo.objects.all().delete()
        # A Photo deletion == Image deletion == File deletion
        self.assertEqual(len(Photo.objects.all()), 0)
        self.assertEqual(len(Image.objects.all()), 0)
        self.assertEqual(len(File.objects.all()), 0)
        # The Photo deletion should have cascaded and deleted *all*
        # references to it.
        self.assertEqual(len(FooPhoto.objects.all()), 0)
        self.assertEqual(len(FooFile.objects.all()), 0)
        self.assertEqual(len(FooImage.objects.all()), 0)
    def test_delete_concrete_parent(self):
        """
        Deleting an instance of a concrete model should also delete objects
        referencing its proxy subclass.
        """
        self.create_image()
        File.objects.all().delete()
        # A File deletion == Image deletion
        self.assertEqual(len(File.objects.all()), 0)
        self.assertEqual(len(Image.objects.all()), 0)
        # The File deletion should have cascaded and deleted *all* references
        # to it.
        self.assertEqual(len(FooFile.objects.all()), 0)
        self.assertEqual(len(FooImage.objects.all()), 0)
    def test_delete_proxy_pair(self):
        """
        If a pair of proxy models are linked by an FK from one concrete parent
        to the other, deleting one proxy model cascade-deletes the other, and
        the deletion happens in the right order (not triggering an
        IntegrityError on databases unable to defer integrity checks).
        Refs #17918.
        """
        # Create an Image (proxy of File) and FooFileProxy (proxy of FooFile,
        # which has an FK to File)
        image = Image.objects.create()
        as_file = File.objects.get(pk=image.pk)
        FooFileProxy.objects.create(my_file=as_file)
        Image.objects.all().delete()
        self.assertEqual(len(FooFileProxy.objects.all()), 0)
    def test_19187_values(self):
        # .delete() on a .values()/.values_list() queryset must raise TypeError.
        with self.assertRaises(TypeError):
            Image.objects.values().delete()
        with self.assertRaises(TypeError):
            Image.objects.values_list().delete()
class Ticket19102Tests(TestCase):
    """
    Test different queries which alter the SELECT clause of the query. We
    also must be using a subquery for the deletion (that is, the original
    query has a join in it). The deletion should be done as "fast-path"
    deletion (that is, just one query for the .delete() call).
    Note that .values() is not tested here on purpose. .values().delete()
    doesn't work for non fast-path deletes at all.
    """
    def setUp(self):
        # Two org units with one login each; every test deletes l1, keeps l2.
        self.o1 = OrgUnit.objects.create(name='o1')
        self.o2 = OrgUnit.objects.create(name='o2')
        self.l1 = Login.objects.create(description='l1', orgunit=self.o1)
        self.l2 = Login.objects.create(description='l2', orgunit=self.o2)
    @skipUnlessDBFeature("update_can_self_select")
    def test_ticket_19102_annotate(self):
        # One query total: the annotation must not block fast-path deletion.
        with self.assertNumQueries(1):
            Login.objects.order_by('description').filter(
                orgunit__name__isnull=False
            ).annotate(
                n=models.Count('description')
            ).filter(
                n=1, pk=self.l1.pk
            ).delete()
        self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
        self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
    @skipUnlessDBFeature("update_can_self_select")
    def test_ticket_19102_extra(self):
        # One query total: .extra() select columns must not block fast-path deletion.
        with self.assertNumQueries(1):
            Login.objects.order_by('description').filter(
                orgunit__name__isnull=False
            ).extra(
                select={'extraf':'1'}
            ).filter(
                pk=self.l1.pk
            ).delete()
        self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
        self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
    @skipUnlessDBFeature("update_can_self_select")
    @skipUnlessDBFeature('can_distinct_on_fields')
    def test_ticket_19102_distinct_on(self):
        # Both Login objs should have same description so that only the one
        # having smaller PK will be deleted.
        Login.objects.update(description='description')
        with self.assertNumQueries(1):
            Login.objects.distinct('description').order_by('pk').filter(
                orgunit__name__isnull=False
            ).delete()
        # Assumed that l1 which is created first has smaller PK.
        self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
        self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
    @skipUnlessDBFeature("update_can_self_select")
    def test_ticket_19102_select_related(self):
        # One query total: select_related() must not block fast-path deletion.
        with self.assertNumQueries(1):
            Login.objects.filter(
                pk=self.l1.pk
            ).filter(
                orgunit__name__isnull=False
            ).order_by(
                'description'
            ).select_related('orgunit').delete()
        self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
        self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
    @skipUnlessDBFeature("update_can_self_select")
    def test_ticket_19102_defer(self):
        # One query total: .only() deferral must not block fast-path deletion.
        with self.assertNumQueries(1):
            Login.objects.filter(
                pk=self.l1.pk
            ).filter(
                orgunit__name__isnull=False
            ).order_by(
                'description'
            ).only('id').delete()
        self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
        self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
class OrderedDeleteTests(TestCase):
    def test_meta_ordered_delete(self):
        """Deletion subqueries must not inherit the model's Meta.ordering."""
        # When a subquery is performed by deletion code, the subquery must be
        # cleared of all ordering. There was a bug that caused _meta ordering
        # to be used. Refs #19720.
        h = House.objects.create(address='Foo')
        OrderedPerson.objects.create(name='Jack', lives_in=h)
        OrderedPerson.objects.create(name='Bob', lives_in=h)
        OrderedPerson.objects.filter(lives_in__address='Foo').delete()
        self.assertEqual(OrderedPerson.objects.count(), 0)
8f0225557f5fd36f7a83d4b6c2fcda0e47312656 | 1,128 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/models/virtual_machine_health_status.py | pjquirk/azure-sdk-for-python | cbf02ec4f177b96eae1dbbba87c34c2c93880150 | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/models/virtual_machine_health_status.py | pjquirk/azure-sdk-for-python | cbf02ec4f177b96eae1dbbba87c34c2c93880150 | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/models/virtual_machine_health_status.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineHealthStatus(Model):
    """The health status of the VM.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar status: The health status information for the VM.
    :vartype status: ~azure.mgmt.compute.v2019_03_01.models.InstanceViewStatus
    """
    # 'status' is read-only: msrest skips it when serializing a request body.
    _validation = {
        'status': {'readonly': True},
    }
    # Maps the Python attribute to its wire-format key and msrest type.
    _attribute_map = {
        'status': {'key': 'status', 'type': 'InstanceViewStatus'},
    }
    def __init__(self, **kwargs):
        super(VirtualMachineHealthStatus, self).__init__(**kwargs)
        # Server-populated; always None on a client-constructed instance.
        self.status = None
| 31.333333 | 78 | 0.603723 |
c05107a951cd71a03696cd3dc8c566d4b55d6b42 | 3,359 | py | Python | app/app/settings.py | mabdullahadeel/django-docker-basic | 697ca83be6a435ba65b8903cf0f9db01938fd828 | [
"MIT"
] | null | null | null | app/app/settings.py | mabdullahadeel/django-docker-basic | 697ca83be6a435ba65b8903cf0f9db01938fd828 | [
"MIT"
] | null | null | null | app/app/settings.py | mabdullahadeel/django-docker-basic | 697ca83be6a435ba65b8903cf0f9db01938fd828 | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the fallback default (with its "chagne" typo) is a dev-only
# placeholder; SECRET_KEY must be provided via the environment in production.
SECRET_KEY = os.environ.get('SECRET_KEY', 'chagneMeAsSoonAsPossible')
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG is read from the environment as "0"/"1"; a non-integer value would
# raise ValueError at import time.
DEBUG = bool(int(os.environ.get('DEBUG', 0)))
ALLOWED_HOSTS = []
# ALLOWED_HOSTS is supplied as a comma-separated list, e.g. "a.com,b.com".
ALLOWED_HOSTS_ENV = os.environ.get('ALLOWED_HOSTS')
if ALLOWED_HOSTS_ENV:
    ALLOWED_HOSTS.extend(ALLOWED_HOSTS_ENV.split(','))
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'hello_world',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# NOTE(review): unlike MEDIA_URL, STATIC_URL lacks a trailing slash —
# presumably it should be '/static/static/'; confirm against the web server
# and staticfiles configuration.
STATIC_URL = '/static/static'
MEDIA_URL = '/static/media/'
# Collected static files and uploaded media live under /vol/web.
STATIC_ROOT = '/vol/web/static'
MEDIA_ROOT = '/vol/web/media' | 26.242188 | 91 | 0.699911 |
028c9c7de78dda1bc70db1045e021e5d1f844d92 | 15,778 | py | Python | electricitylci/upstream_dict.py | gschivley/ElectricityLCI | 1c1c1b69705d3ffab1e1e844aaf7379e4f51198e | [
"CC0-1.0"
] | 1 | 2019-04-15T18:11:16.000Z | 2019-04-15T18:11:16.000Z | electricitylci/upstream_dict.py | gschivley/ElectricityLCI | 1c1c1b69705d3ffab1e1e844aaf7379e4f51198e | [
"CC0-1.0"
] | 3 | 2019-05-07T19:04:22.000Z | 2019-09-30T21:29:59.000Z | electricitylci/upstream_dict.py | gschivley/ElectricityLCI | 1c1c1b69705d3ffab1e1e844aaf7379e4f51198e | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
import pandas as pd
from electricitylci.globals import output_dir, data_dir
from electricitylci.coal_upstream import (
coal_type_codes,
mine_type_codes,
basin_codes,
)
from electricitylci import write_process_dicts_to_jsonld
import logging
module_logger=logging.getLogger("upstream_dict.py")
def _unit(unt):
ar = dict()
ar["internalId"] = ""
ar["@type"] = "Unit"
ar["name"] = unt
return ar
def _process_table_creation_gen(process_name, exchanges_list, fuel_type):
fuel_category_dict = {
"COAL": "21: Mining, Quarrying, and Oil and Gas Extraction/2121: Coal Mining",
"GAS": "21: Mining, Quarrying, and Oil and Gas Extraction/2111: Oil and Gas Extraction",
"OIL": "21: Mining, Quarrying, and Oil and Gas Extraction/2111: Oil and Gas Extraction",
"NUCLEAR": "21: Mining, Quarrying, and Oil and Gas Extraction/2122: Metal Ore Mining",
"GEOTHERMAL": "22: Utilities/2211: Electric Power Generation Transmission and Distribuion",
"WIND": "22: Utilities/2211: Electric Power Generation Transmission and Distribuion",
"SOLAR": "22: Utilities/2211: Electric Power Generation Transmission and Distribuion",
"CONSTRUCTION":"23: Construction/2371: Utility System Construction",
}
ar = dict()
ar["@type"] = "Process"
ar["allocationFactors"] = ""
ar["defaultAllocationMethod"] = ""
ar["exchanges"] = exchanges_list
ar["location"] = "" # location(region)
ar["parameters"] = ""
# ar['processDocumentation']=process_doc_creation();
ar["processType"] = "UNIT_PROCESS"
ar["name"] = process_name
ar["category"] = fuel_category_dict[fuel_type]
ar["description"] = "Fuel produced in stated region"
return ar
def _exchange_table_creation_ref(fuel_type):
    """Create the quantitative-reference exchange for an upstream fuel process.

    Parameters
    ----------
    fuel_type : str
        One of "COAL", "GAS", "OIL", "Coal transport", "NUCLEAR",
        "GEOTHERMAL", "SOLAR", "WIND", or "CONSTRUCTION".

    Returns
    -------
    dict
        openLCA-schema exchange dict flagged as the process's quantitative
        reference, with amount 1 in the fuel-appropriate unit.
    """
    natural_gas_flow = {
        "flowType": "PRODUCT_FLOW",
        "flowProperties": "",
        "name": "natural gas, through transmission",
        "id": "",
        "category": "21: Mining, Quarrying, and Oil and Gas Extraction",
    }
    coal_flow = {
        "flowType": "PRODUCT_FLOW",
        "flowProperties": "",
        "name": "coal, through cleaning",
        "id": "",
        "category": "21: Mining, Quarrying, and Oil and Gas Extraction",
    }
    petroleum_flow = {
        "flowType": "PRODUCT_FLOW",
        "flowProperties": "",
        "name": "petroleum fuel, through transportation",
        "id": "",
        "category": "21: Mining, Quarrying, and Oil and Gas Extraction",
    }
    transport_flow = {
        "flowType": "PRODUCT_FLOW",
        "flowProperties": "",
        "name": "coal, transported",
        "id": "",
        "category": "21: Mining, Quarrying, and Oil and Gas Extraction",
    }
    nuclear_flow = {
        "flowType": "PRODUCT_FLOW",
        "flowProperties": "",
        "name": "nuclear fuel, through transportation",
        "id": "",
        "category": "21: Mining, Quarrying, and Oil and Gas Extraction",
    }
    construction_flow = {
        "flowType": "PRODUCT_FLOW",
        "flowProperties": "",
        "name": "power plant construction",
        "id": "",
        "category": "23: Construction",
    }
    # BUGFIX: these three flows were commented out while the GEOTHERMAL,
    # SOLAR, and WIND branches below still referenced them, raising
    # NameError for those fuel types. Restored.
    geothermal_flow = {
        "flowType": "PRODUCT_FLOW",
        "flowProperties": "",
        "name": "geothermal, upstream and plant",
        "id": "",
        "category": "22: Utilities",
    }
    solar_flow = {
        "flowType": "PRODUCT_FLOW",
        "flowProperties": "",
        "name": "solar facility construction and operations",
        "id": "",
        "category": "22: Utilities",
    }
    wind_flow = {
        "flowType": "PRODUCT_FLOW",
        "flowProperties": "",
        "name": "wind farm construction and operations",
        "id": "",
        "category": "22: Utilities",
    }
    ar = dict()
    ar["internalId"] = ""
    ar["@type"] = "Exchange"
    ar["avoidedProduct"] = False
    # Pick the reference flow and unit for the given fuel type.
    if fuel_type == "COAL":
        ar["flow"] = coal_flow
        ar["unit"] = _unit("sh tn")
        ar["amount"] = 1.0
    elif fuel_type == "GAS":
        ar["flow"] = natural_gas_flow
        ar["unit"] = _unit("MJ")
        ar["amount"] = 1
    elif fuel_type == "OIL":
        ar["flow"] = petroleum_flow
        ar["unit"] = _unit("MJ")
        ar["amount"] = 1
    elif fuel_type == "Coal transport":
        ar["flow"] = transport_flow
        ar["unit"] = _unit("kg*km")
        ar["amount"] = 1
    elif fuel_type == "NUCLEAR":
        ar["flow"] = nuclear_flow
        ar["unit"] = _unit("MWh")
        ar["amount"] = 1
    elif fuel_type == "GEOTHERMAL":
        ar["flow"] = geothermal_flow
        ar["unit"] = _unit("MWh")
        ar["amount"] = 1
    elif fuel_type == "SOLAR":
        ar["flow"] = solar_flow
        ar["unit"] = _unit("Item(s)")
        ar["amount"] = 1
    elif fuel_type == "WIND":
        ar["flow"] = wind_flow
        ar["unit"] = _unit("Item(s)")
        ar["amount"] = 1
    elif fuel_type == "CONSTRUCTION":
        ar["flow"] = construction_flow
        ar["unit"] = _unit("Item(s)")
        ar["amount"] = 1
    ar["flowProperty"] = ""
    ar["input"] = False
    ar["quantitativeReference"] = True
    ar["baseUncertainty"] = ""
    ar["provider"] = ""
    ar["amountFormula"] = ""
    return ar
def _flow_table_creation(data):
ar = dict()
ar["flowType"] = "ELEMENTARY_FLOW"
ar["flowProperties"] = ""
ar["name"] = data["FlowName"][
0:255
] # cutoff name at length 255 if greater than that
ar["id"] = data["FlowUUID"]
comp = str(data["Compartment"])
if (ar["flowType"] == "ELEMENTARY_FLOW") & (comp != ""):
ar["category"] = "Elementary flows/" + "emission" + "/" + comp
elif (ar["flowType"] == "PRODUCT_FLOW") & (comp != ""):
ar["category"] = comp
elif ar["flowType"] == "WASTE_FLOW":
ar["category"] = "Waste flows/"
else:
ar[
"category"
] = "22: Utilities/2211: Electric Power Generation, Transmission and Distribution"
return ar
def _exchange_table_creation_output(data):
    """Build an openLCA output-exchange dictionary for one emission row.

    Parameters
    ----------
    data : pandas.Series or mapping
        Row with the flow columns used by ``_flow_table_creation`` plus an
        "emission_factor" value (mass emitted per unit of reference flow).

    Returns
    -------
    dict
        openLCA-schema exchange entry (always a non-reference output in kg).
    """
    ar = dict()
    ar["internalId"] = ""
    ar["@type"] = "Exchange"
    ar["avoidedProduct"] = False
    ar["flow"] = _flow_table_creation(data)
    ar["flowProperty"] = ""
    ar["input"] = False
    ar["quantitativeReference"] = False
    ar["baseUncertainty"] = ""
    ar["provider"] = ""
    ar["amount"] = data["emission_factor"]
    ar["amountFormula"] = ""
    ar["unit"] = _unit("kg")
    ar["pedigreeUncertainty"] = ""
    # NOTE(review): data-quality ("dqEntry"), uncertainty and comment fields
    # were previously populated from reliability/temporal/geographic scores;
    # restore from version control if that metadata becomes available again.
    # Removed: `if type(ar) == "DataFrame": print(data)` — it compared a type
    # object to the string "DataFrame", so it was always False (dead code).
    return ar
def olcaschema_genupstream_processes(merged):
    """
    Generate olca-schema dictionaries for upstream processes for the inventory
    provided in the given dataframe.

    Parameters
    ----------
    merged: dataframe
        Dataframe containing the inventory for upstream processes used by
        electricity generation. Expected columns include "FuelCategory",
        "stage_code", "FlowName", "FlowUUID", "Compartment", "plant_id",
        "FlowAmount" and "quantity".

    Returns
    ----------
    dictionary
        Dictionary containing all of the unit processes to be written to
        JSON-LD for import to openLCA, keyed by stage code.
    """
    coal_type_codes_inv = dict(map(reversed, coal_type_codes.items()))
    mine_type_codes_inv = dict(map(reversed, mine_type_codes.items()))
    basin_codes_inv = dict(map(reversed, basin_codes.items()))
    coal_transport = [
        "Barge",
        "Lake Vessel",
        "Ocean Vessel",
        "Railroad",
        "Truck",
    ]
    # First keep plant IDs to account for possible emission repeats for the
    # same compartment, which would otherwise lead to erroneously low
    # emission factors.
    merged_summary = merged.groupby(
        [
            "FuelCategory",
            "stage_code",
            "FlowName",
            "FlowUUID",
            "Compartment",
            "plant_id",
        ],
        as_index=False,
    ).agg({"FlowAmount": "sum", "quantity": "mean"})
    # Select the value columns with a LIST of labels: tuple-style selection
    # on a GroupBy (previously `["quantity", "FlowAmount"]` after the call)
    # was deprecated and is removed in pandas 2.x.
    merged_summary = merged_summary.groupby(
        ["FuelCategory", "stage_code", "FlowName", "FlowUUID", "Compartment"],
        as_index=False,
    )[["quantity", "FlowAmount"]].sum()
    merged_summary["emission_factor"] = (
        merged_summary["FlowAmount"] / merged_summary["quantity"]
    )
    merged_summary.dropna(subset=["emission_factor"], inplace=True)
    upstream_list = list(merged_summary["stage_code"].unique())
    merged_summary["FlowDirection"] = "output"
    upstream_process_dict = dict()
    for upstream in upstream_list:
        module_logger.info(f"Building dictionary for {upstream}")
        exchanges_list = list()
        upstream_filter = merged_summary["stage_code"] == upstream
        merged_summary_filter = merged_summary.loc[upstream_filter, :].copy()
        merged_summary_filter.drop_duplicates(
            subset=["FlowName", "Compartment", "FlowAmount"], inplace=True
        )
        merged_summary_filter.dropna(subset=["FlowName"], inplace=True)
        # Drop rows whose flows could not be mapped ("[no match]").
        garbage = merged_summary_filter.loc[
            merged_summary_filter["FlowName"] == "[no match]", :
        ].index
        merged_summary_filter.drop(garbage, inplace=True)
        ra = merged_summary_filter.apply(
            _exchange_table_creation_output, axis=1
        ).tolist()
        exchanges_list.extend(ra)
        first_row = min(merged_summary_filter.index)
        fuel_type = merged_summary_filter.loc[first_row, "FuelCategory"]
        stage_code = merged_summary_filter.loc[first_row, "stage_code"]
        # Build a human-readable process name and append the reference
        # (quantitative reference) exchange for the fuel type.
        if fuel_type == "COAL" and stage_code not in coal_transport:
            # Coal extraction stage codes look like "<basin>-<coal>-<mine>".
            split_name = stage_code.split("-")
            combined_name = (
                "coal extraction and processing - "
                + basin_codes_inv[split_name[0]]
                + ", "
                + coal_type_codes_inv[split_name[1]]
                + ", "
                + mine_type_codes_inv[split_name[2]]
            )
            exchanges_list.append(_exchange_table_creation_ref(fuel_type))
        elif fuel_type == "COAL" and stage_code in coal_transport:
            combined_name = "coal transport - " + stage_code
            exchanges_list.append(
                _exchange_table_creation_ref("Coal transport")
            )
        elif fuel_type == "GAS":
            combined_name = (
                "natural gas extraction and processing - " + stage_code
            )
            exchanges_list.append(_exchange_table_creation_ref(fuel_type))
        elif fuel_type == "OIL":
            # Petroleum stage codes look like "<product>_<PADD region>".
            split_name = stage_code.split("_")
            combined_name = (
                "petroleum extraction and processing - " + split_name[0] + " "
                f"PADD {split_name[1]}"
            )
            exchanges_list.append(_exchange_table_creation_ref(fuel_type))
        elif fuel_type == "NUCLEAR":
            combined_name = (
                "nuclear fuel extraction, processing, and transport"
            )
            exchanges_list.append(_exchange_table_creation_ref(fuel_type))
        elif fuel_type == "GEOTHERMAL":
            combined_name = f"geothermal upstream and operation - {stage_code}"
            exchanges_list.append(_exchange_table_creation_ref(fuel_type))
        elif fuel_type == "SOLAR":
            combined_name = f"solar photovoltaic upstream and operation - {stage_code}"
            exchanges_list.append(_exchange_table_creation_ref(fuel_type))
        elif fuel_type == "WIND":
            combined_name = f"wind upstream and operation - {stage_code}"
            exchanges_list.append(_exchange_table_creation_ref(fuel_type))
        elif fuel_type == "CONSTRUCTION":
            combined_name = f"power plant construction - {stage_code}"
            exchanges_list.append(_exchange_table_creation_ref(fuel_type))
        else:
            # Fallback so an unrecognized fuel category cannot leave
            # combined_name unbound (previously raised NameError below).
            combined_name = f"{str(fuel_type).lower()} - {stage_code}"
            exchanges_list.append(_exchange_table_creation_ref(fuel_type))
        process_name = f"{combined_name}"
        final = _process_table_creation_gen(
            process_name, exchanges_list, fuel_type
        )
        upstream_process_dict[stage_code] = final
    return upstream_process_dict
if __name__ == "__main__":
    # Build the combined upstream inventory for one data year, export it as
    # CSV, then convert it to openLCA JSON-LD unit processes.
    import electricitylci.coal_upstream as coal
    import electricitylci.natural_gas_upstream as ng
    import electricitylci.petroleum_upstream as petro
    import electricitylci.nuclear_upstream as nuke
    from combinator import concat_map_upstream_databases

    data_year = 2016
    upstream_frames = [
        coal.generate_upstream_coal(data_year),
        ng.generate_upstream_ng(data_year),
        petro.generate_petroleum_upstream(data_year),
        nuke.generate_upstream_nuc(data_year),
    ]
    merged = concat_map_upstream_databases(*upstream_frames)
    merged.to_csv(f"{output_dir}/total_upstream_{data_year}.csv")
    upstream_process_dict = olcaschema_genupstream_processes(merged)
    upstream_olca_processes = write_process_dicts_to_jsonld(
        upstream_process_dict
    )
| 37.927885 | 128 | 0.597541 |
0711def3e684b17d5b73a833321f95c0b3ddefaf | 5,186 | py | Python | main.py | afrafaris72/BelajarGIS | 0166cd6d34537560e693b341d9861e1ef5347afb | [
"MIT"
] | null | null | null | main.py | afrafaris72/BelajarGIS | 0166cd6d34537560e693b341d9861e1ef5347afb | [
"MIT"
] | null | null | null | main.py | afrafaris72/BelajarGIS | 0166cd6d34537560e693b341d9861e1ef5347afb | [
"MIT"
] | null | null | null | import ketiga
import kedua
import pertama
# Floor 1 ("pertama"): register every ground-floor room/landmark and close
# the map. Each entry is (method name, display label); the original
# author attributions are preserved next to their entries.
pertama = pertama.Pertama()
_FIRST_FLOOR_ENTRIES = [
    ("baak", "BAAK"),                        # Harun Ar - Rasyid - 1174027
    ("bauk", "BAUK"),                        # Harun Ar - Rasyid - 1174027
    ("ruangan111", "Ruangan 111"),           # Kadek Diva Krishna Murti - 1174006
    ("ruangan113", "Ruangan 113"),           # Nico Ekklesia Sembiring - 1174096
    ("wcBL", "WC Belakang Kiri"),            # Kadek Diva Krishna Murti - 1174006
    ("tanggaBL", "Tangga Belakang Kiri"),    # Kadek Diva Krishna Murti - 1174006
    ("gudangBL", "Gudang Belakang Kiri"),    # Kadek Diva Krishna Murti - 1174006
    ("haloPos", "Halo Pos 161"),             # Kadek Diva Krishna Murti - 1174006
    ("ruang117", "Ruang 117"),               # Dwi Yulianingsih - 1174009
    ("ruang115", "ruang 115"),               # Dwi Septiani Tsaniyah - 1174003
    ("ruang103", "ruang 103"),               # Habib Abdul R - 1174002
    ("ruang107", "RUANG 107"),               # Felix Lase - 1174026
    ("ruang108", "RUANG 108"),               # Felix Lase - 1174026
    ("gudangL", "GUDANG L"),                 # Evietania Charis Sujadi - 1174051
    ("tanggaL", "TANGGA L"),                 # Evietania Charis Sujadi - 1174051
    ("ruang116", "RUANG 116"),               # Muhammad Dzihan Al-Banna - 1174095
    ("gudangR", "GUDANG R"),                 # Arjun Yuda Firwanda - 1174008
    ("tanggaR", "TANGGA R"),                 # Arjun Yuda Firwanda - 1174008
    ("ruang101", "RUANG 101"),               # Dezha Aidil Martha - 1174025
    ("ruang102", "RUANG 102"),               # Damara Bendikta - 1174012
    ("ruang104", "RUANG 104"),               # Oniwaldus Bere Mali - 1174005
    ("ruang105", "RUANG 105"),               # Srirahayu - 1174015
    ("ruang106", "RUANG 106"),               # Muhammad Tomy Nur Maulidy - 1174031
    ("ruang114", "RUANG 114"),               # Choirul anam - 117404
    ("ruang112", "RUANG 112"),               # Muh. Rifky Prananda - 1174017
    ("ruang109", "RUANG 109"),               # Muhammad Fahmi - 1174021
    ("wcBL2", "WC Belakang Kanan"),          # Muhammad Fahmi - 1174021
    ("tanggaBL2", "Tangga Belakang Kanan"),  # Muhammad Fahmi - 1174021
    ("gudangBL2", "Gudang Belakang Kanan"),  # Muhammad Fahmi - 1174021
]
for _method_name, _label in _FIRST_FLOOR_ENTRIES:
    getattr(pertama, _method_name)(_label)
pertama.close()
# Kelas B
# Floor 2 ("kedua", Kelas B): register every second-floor room/landmark.
# Entries are (method name, display label); author attributions preserved.
kedua = kedua.Kedua()
_SECOND_FLOOR_ENTRIES = [
    ("tanggaBawahKiri", "Tangga Bawah Kiri"),      # Luthfi Muhammad Nabil - 1174035
    ("tanggaBawahKanan", "Tangga Bawah Kanan"),    # Luthfi Muhammad Nabil - 1174035
    ("tanggaAtasKanan", "Tangga Atas Kanan"),      # Luthfi Muhammad Nabil - 1174035
    ("tanggaAtasKiri", "Tangga Atas Kiri"),        # Luthfi Muhammad Nabil - 1174035
    ("tamanKosongTengah", "Taman Kosong Tengah"),  # Luthfi Muhammad Nabil - 1174035
    ("R213", "Ruangan_213"),                       # Hagan Rowlenstino - 1174040
    ("IRC", "Ruangan_IRC"),                        # Hagan Rowlenstino - 1174040
    ("RLabBisnis", "Ruangan_Lab_Bisnis"),          # Hagan Rowlenstino - 1174040
    ("RLabComprehensive", "Ruangan_Labkom_Comprehensive"),  # Hagan Rowlenstino - 1174040
    ("ruangan205", "Ruangan 205"),                 # Irvan Rizkiansyah - 1174043
    ("ruangan206", "Ruangan 206"),                 # Irvan Rizkiansyah - 1174043
    ("ruangan207", "Ruangan 207"),                 # Irvan Rizkiansyah - 1174043
    ("ruangan201", "Ruangan 201"),                 # Liyana Majdah Rahma - 1174039
    ("ruangan202", "Ruangan 202"),                 # Liyana Majdah Rahma - 1174039
    ("RServer", "Ruangan Server"),                 # Alit Fajar Kurniawan - 1174057
    ("LabLogistik", "Laboratorium Logistik"),      # Alit Fajar Kurniawan - 1174057
    ("ruang208", "Ruangan 208"),                   # Kevin Natanael Nainggolan - 1174059
    ("ruang209", "Ruangan 209"),                   # Kevin Natanael Nainggolan - 1174059
    ("ruang210", "Ruangan 210"),                   # Kevin Natanael Nainggolan - 1174059
    ("ruangan219", "Ruangan 219"),                 # Dika Sukma Pradana - 1174050
    ("ruangan220", "Ruangan 220"),                 # Dika Sukma Pradana - 1174050
    ("toiletdosen", "Toilet Dosen"),               # Faisal Najib Abdullah - 1174042
    ("toiletcowo", "Toilet Cowo"),                 # Faisal Najib Abdullah - 1174042
    ("prodiak", "Prodi AK"),                       # Faisal Najib Abdullah - 1174042
    ("ruangan203", "Ruangan 203"),                 # Rangga Putra Ramdhani - 1174056
    ("ruangan204", "Ruangan 204"),                 # Rangga Putra Ramdhani - 1174056
    ("ruangan221", "Ruangan 221"),                 # Ichsan Hizman Hardy - 1174034
    ("ruangan222", "Ruangan 222"),                 # Ichsan Hizman Hardy - 1174034
    ("ruangan211", "Ruangan 211"),                 # Muhammad Afra Faris - 1174041
    ("ruangan212", "Ruangan 212"),                 # Muhammad Afra Faris - 1174041
]
for _method_name, _label in _SECOND_FLOOR_ENTRIES:
    getattr(kedua, _method_name)(_label)
# Do not remove this close() call ("Close Jangan Dihapus").
kedua.close()
# KelasC
# Floor 3 ("ketiga", Kelas C): register every third-floor room/landmark.
# Entries are (method name, display label); author attributions preserved.
ketiga = ketiga.ketiga()
_THIRD_FLOOR_ENTRIES = [
    ("tanggaD2", "Tangga Depan 2"),     # Ilham Muhammad Ariq - 1174087
    ("r301", "Ruangan 301"),            # Ilham Muhammad Ariq - 1174087
    ("r302", "Ruangan 302"),            # Alvan Alvanzah - 1174077
    ("r304", "Ruangan 304"),            # Advent Nopele Sihite - 1174089
    ("r303", "Ruangan 303"),            # Difa
    ("r307", "Ruangan 307"),            # Muhammad Reza Syachrani - 1174084
    ("r308", "Ruangan 308"),            # Muhammad Reza Syachrani - 1174084
    ("r305", "Ruangan 305"),            # Kaka Kamaludin - 1174067
    ("r306", "Ruangan 306"),            # Kaka Kamaludin - 1174067
    ("r309", "Ruangan 309"),            # Arrizal Furqona Gifary - 1174070
    ("r310", "Ruangan 310"),            # Fanny Shafira - 1174069
    ("rwccewek2", "WC Cewek 2"),        # Chandra Kirana Poetra - 1174079
    ("rwccewek3", "WC Cewek 3"),        # Chandra Kirana Poetra - 1174079
    ("tanggaB2", "Tangga Belakang 2"),  # Mochamad Arifqi Ramadhan - 1174074
    ("r311", "Ruangan 311"),            # Handi Hermawan - 1174080
    ("r312", "Ruangan 312"),            # Bakti Qilan Mufid - 1174083
    ("tanggaB1", "Tangga Belakang 1"),  # Bakti Qilan Mufid - 1174083
    ("rwccewek1", "WC Cewek 1"),        # Ainul Filiani - 1174073
    ("rwccowok", "WC Cowok"),           # Aulyardha Anindita - 1174054
    ("rteknisi", "Ruangan Teknisi"),    # Nurul Izza Hamka - 1174062
    ("r314", "Ruangan 314"),            # Tia Nur Candida - 1174086
    ("r315", "Ruangan 315"),            # D.Irga B. Naufal Fakhri - 1174066
    ("r316", "Ruangan 316"),            # D.Irga B. Naufal Fakhri - 1174066
    ("r319", "Ruangan 319"),            # Muhammad Abdul Gani Wijaya - 1174071
    ("r320", "Ruangan 320"),            # Muhammad Abdul Gani Wijaya - 1174071
    ("r321", "Ruangan 321"),            # Alfadian Owen - 1174091
    ("center", "Center"),               # Alfadian Owen - 1174091
]
for _method_name, _label in _THIRD_FLOOR_ENTRIES:
    getattr(ketiga, _method_name)(_label)
ketiga.close()
52ffb1d740b8016dedbb200eab19a9f76d9f6cf3 | 884 | py | Python | src/pytorch_metric_learning/losses/cosface_loss.py | wconnell/pytorch-metric-learning | 1affee7c77bb5d6d4ee559bad62b910a21b39d48 | [
"MIT"
] | 1 | 2021-05-30T14:59:42.000Z | 2021-05-30T14:59:42.000Z | src/pytorch_metric_learning/losses/cosface_loss.py | umitkacar/pytorch-metric-learning | bf2b7675b7b80e5762b75428d51e4ab0a861e710 | [
"MIT"
] | null | null | null | src/pytorch_metric_learning/losses/cosface_loss.py | umitkacar/pytorch-metric-learning | bf2b7675b7b80e5762b75428d51e4ab0a861e710 | [
"MIT"
] | null | null | null | from .large_margin_softmax_loss import LargeMarginSoftmaxLoss
import numpy as np
import torch
class CosFaceLoss(LargeMarginSoftmaxLoss):
    """CosFace: additive cosine-margin softmax loss.

    Implementation of https://arxiv.org/pdf/1801.07698.pdf
    """

    def __init__(self, *args, margin=0.35, scale=64, **kwargs):
        super().__init__(*args, margin=margin, scale=scale, **kwargs)

    def init_margin(self):
        # CosFace applies its margin additively in cosine space, so no
        # precomputed margin terms are required here.
        pass

    def cast_types(self, dtype, device):
        weights = self.W.data
        self.W.data = weights.to(device).type(dtype)

    def modify_cosine_of_target_classes(self, cosine_of_target_classes, *args):
        """Subtract the additive margin from the target-class cosines."""
        if self.collect_stats:
            # Angle statistics are for monitoring only; keep them out of
            # the autograd graph.
            with torch.no_grad():
                self.get_angles(cosine_of_target_classes)
        return cosine_of_target_classes - self.margin

    def scale_logits(self, logits, *_):
        """Multiply the logits by the fixed scale factor."""
        return logits * self.scale
| 34 | 96 | 0.669683 |
c89968a883c037d2b8abdb5c460ae75060befcaf | 15,028 | py | Python | pay-api/src/pay_api/services/fas/routing_slip.py | bcgov/sbc-pay | 142872e61109f097ee7f926375c3139421f38849 | [
"Apache-2.0"
] | 4 | 2020-03-23T21:37:02.000Z | 2021-06-15T11:25:22.000Z | pay-api/src/pay_api/services/fas/routing_slip.py | bcgov/sbc-pay | 142872e61109f097ee7f926375c3139421f38849 | [
"Apache-2.0"
] | 757 | 2019-05-02T17:53:52.000Z | 2022-03-31T22:42:01.000Z | pay-api/src/pay_api/services/fas/routing_slip.py | bcgov/sbc-pay | 142872e61109f097ee7f926375c3139421f38849 | [
"Apache-2.0"
] | 39 | 2019-01-30T20:05:36.000Z | 2022-03-24T15:07:54.000Z | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service to manage routing slip operations."""
from __future__ import annotations
from datetime import datetime
from decimal import Decimal
from typing import Dict, List
from flask import current_app
from pay_api.exceptions import BusinessException
from pay_api.models import CfsAccount as CfsAccountModel
from pay_api.models import Payment as PaymentModel
from pay_api.models import PaymentAccount as PaymentAccountModel
from pay_api.models import RoutingSlip as RoutingSlipModel
from pay_api.models import RoutingSlipSchema
from pay_api.services.cfs_service import CFSService
from pay_api.services.oauth_service import OAuthService
from pay_api.utils.enums import (
AuthHeaderType, CfsAccountStatus, ContentType, PatchActions, PaymentMethod, PaymentStatus, PaymentSystem,
RoutingSlipStatus)
from pay_api.utils.errors import Error
from pay_api.utils.user_context import user_context
from pay_api.utils.util import get_local_time, string_to_date
class RoutingSlip:  # pylint: disable=too-many-instance-attributes, too-many-public-methods
    """Service to manage Routing slip related operations."""

    def __init__(self):
        """Initialize the service."""
        # Backing DAO is created lazily via the `_dao` property.
        self.__dao = None
        self._id: int = None
        self._number: str = None
        self._payment_account_id: int = None
        self._status_code: str = None
        self._total: Decimal = None
        self._remaining_amount: Decimal = None

    @property
    def _dao(self):
        # Lazily create the backing model so an empty service object can be
        # built without touching the database.
        if not self.__dao:
            self.__dao = RoutingSlipModel()
        return self.__dao

    @_dao.setter
    def _dao(self, value):
        # Adopt an existing model and mirror its columns onto the service
        # attributes (each setter below also writes back through the dao).
        self.__dao: RoutingSlipModel = value
        self.id: int = self._dao.id
        self.number: str = self._dao.number
        self.status_code: str = self._dao.status_code
        self.payment_account_id: int = self._dao.payment_account_id
        self.total: Decimal = self._dao.total
        self.remaining_amount: Decimal = self._dao.remaining_amount

    @property
    def id(self):
        """Return the _id."""
        return self._id

    @id.setter
    def id(self, value: int):
        """Set the id."""
        self._id = value
        self._dao.id = value

    @property
    def number(self):
        """Return the number."""
        return self._number

    @number.setter
    def number(self, value: str):
        """Set the number."""
        self._number = value
        self._dao.number = value

    @property
    def status_code(self):
        """Return the status_code."""
        return self._status_code

    @status_code.setter
    def status_code(self, value: str):
        """Set the status_code."""
        self._status_code = value
        self._dao.status_code = value

    @property
    def payment_account_id(self):
        """Return the payment_account_id."""
        return self._payment_account_id

    @payment_account_id.setter
    def payment_account_id(self, value: int):
        """Set the payment_account_id."""
        self._payment_account_id = value
        self._dao.payment_account_id = value

    @property
    def total(self):
        """Return the total."""
        return self._total

    @total.setter
    def total(self, value: Decimal):
        """Set the total."""
        self._total = value
        self._dao.total = value

    @property
    def remaining_amount(self):
        """Return the remaining_amount."""
        return self._remaining_amount

    @remaining_amount.setter
    def remaining_amount(self, value: Decimal):
        """Set the amount."""
        self._remaining_amount = value
        self._dao.remaining_amount = value

    def commit(self):
        """Save the information to the DB."""
        return self._dao.commit()

    def flush(self):
        """Save the information to the DB."""
        return self._dao.flush()

    def rollback(self):
        """Rollback."""
        return self._dao.rollback()

    def save(self):
        """Save the information to the DB."""
        return self._dao.save()

    def asdict(self) -> Dict[str, any]:
        """Return the routing slip as a python dict."""
        routing_slip_schema = RoutingSlipSchema()
        d = routing_slip_schema.dump(self._dao)
        return d

    @classmethod
    def search(cls, search_filter: Dict, page: int, limit: int, return_all: bool = False):
        """Search for routing slips matching the filter; returns a paged dict."""
        max_no_records: int = 0
        # With an empty filter, cap the result set at the configured default
        # so an unfiltered search cannot return the whole table.
        if not bool(search_filter) or not any(search_filter.values()):
            max_no_records = current_app.config.get('ROUTING_SLIP_DEFAULT_TOTAL')
        routing_slips, total = RoutingSlipModel.search(search_filter, page, limit, return_all,
                                                       max_no_records)
        data = {
            'total': total,
            'page': page,
            'limit': limit,
            'items': RoutingSlipSchema().dump(routing_slips, many=True)
        }
        return data

    @classmethod
    @user_context
    def create_daily_reports(cls, date: str, **kwargs):
        """Create and return daily report for the day provided.

        Returns a tuple of (PDF response from the report API, report name).
        """
        # Fetch every routing slip created on the given day.
        routing_slips: List[RoutingSlipModel] = RoutingSlipModel.search(
            dict(
                dateFilter=dict(
                    endDate=date,
                    startDate=date,
                    target='created_on'
                )
            ),
            page=1, limit=0, return_all=True
        )[0]

        total: float = 0
        no_of_cash: int = 0
        no_of_cheque: int = 0
        total_cash_usd: float = 0
        total_cheque_usd: float = 0
        total_cash_cad: float = 0
        total_cheque_cad: float = 0
        # TODO Only CAD supported now, so just add up the total.
        for routing_slip in routing_slips:
            total += float(routing_slip.total)
            if routing_slip.payment_account.payment_method == PaymentMethod.CASH.value:
                no_of_cash += 1
                # TODO check if the payment is CAD or USD.
                total_cash_cad += float(routing_slip.total)
            else:
                no_of_cheque += 1
                total_cheque_cad += float(routing_slip.total)

        report_dict = dict(
            templateName='routing_slip_report',
            reportName=f'Routing-Slip-Daily-Report-{date}',
            templateVars=dict(
                day=date,
                reportDay=str(get_local_time(datetime.now())),
                total=total,
                numberOfCashReceipts=no_of_cash,
                numberOfChequeReceipts=no_of_cheque,
                totalCashInUsd=total_cash_usd,
                totalChequeInUsd=total_cheque_usd,
                totalCashInCad=total_cash_cad,
                totalChequeInCad=total_cheque_cad
            )
        )

        pdf_response = OAuthService.post(current_app.config.get('REPORT_API_BASE_URL'),
                                         kwargs['user'].bearer_token, AuthHeaderType.BEARER,
                                         ContentType.JSON, report_dict)
        return pdf_response, report_dict.get('reportName')

    @classmethod
    def find_by_number(cls, rs_number: str) -> Dict[str, any]:
        """Find by routing slip number; returns None when not found."""
        routing_slip_dict: Dict[str, any] = None
        routing_slip: RoutingSlipModel = RoutingSlipModel.find_by_number(rs_number)
        if routing_slip:
            routing_slip_schema = RoutingSlipSchema()
            routing_slip_dict = routing_slip_schema.dump(routing_slip)
        return routing_slip_dict

    @classmethod
    def get_links(cls, rs_number: str) -> Dict[str, any]:
        """Find dependents/links of a routing slips."""
        links: Dict[str, any] = None
        routing_slip: RoutingSlipModel = RoutingSlipModel.find_by_number(rs_number)
        if routing_slip:
            routing_slip_schema = RoutingSlipSchema()
            children = RoutingSlipModel.find_children(rs_number)
            links = {
                'parent': routing_slip_schema.dump(routing_slip.parent),
                'children': routing_slip_schema.dump(children, many=True)
            }
        return links

    @classmethod
    @user_context
    def create(cls, request_json: Dict[str, any], **kwargs):
        """Create a routing slip with its CFS account, receipt and payments."""
        # 1. Create customer profile in CFS and store it in payment_account and cfs_accounts
        # 2. Create receipt in CFS
        # 3. Create routing slip and payment records.

        # Validate if Routing slip number is unique.
        rs_number = request_json.get('number')
        if cls.find_by_number(rs_number):
            raise BusinessException(Error.FAS_INVALID_ROUTING_SLIP_NUMBER)

        payment_methods: List[str] = [payment.get('paymentMethod') for payment in request_json.get('payments')]
        # all the payment should have the same payment method
        if len(set(payment_methods)) != 1:
            raise BusinessException(Error.FAS_INVALID_PAYMENT_METHOD)

        cfs_account_details: Dict[str, any] = CFSService.create_cfs_account(
            name=rs_number,  # TODO Sending RS number as name of party
            contact_info={}
        )

        pay_account: PaymentAccountModel = PaymentAccountModel(
            name=request_json.get('paymentAccount').get('accountName'),
            payment_method=payment_methods[0],
        ).flush()

        cfs_account: CfsAccountModel = CfsAccountModel(
            account_id=pay_account.id,
            cfs_account=cfs_account_details.get('account_number'),
            cfs_party=cfs_account_details.get('party_number'),
            cfs_site=cfs_account_details.get('site_number'),
            status=CfsAccountStatus.ACTIVE.value
        ).flush()

        total = sum(float(payment.get('paidAmount')) for payment in request_json.get('payments'))

        # Create receipt in CFS for the payment.
        # TODO Create a receipt for the total or for one each ?
        CFSService.create_cfs_receipt(cfs_account=cfs_account,
                                      rcpt_number=rs_number,
                                      rcpt_date=request_json.get('routingSlipDate'),
                                      amount=total,
                                      payment_method=payment_methods[0])

        # Create a routing slip record.
        routing_slip: RoutingSlipModel = RoutingSlipModel(
            number=rs_number,
            payment_account_id=pay_account.id,
            status=RoutingSlipStatus.ACTIVE.value,
            total=total,
            remaining_amount=total,
            routing_slip_date=string_to_date(request_json.get('routingSlipDate'))
        ).flush()

        # One payment record per incoming payment entry.
        for payment in request_json.get('payments'):
            PaymentModel(
                payment_system_code=PaymentSystem.FAS.value,
                payment_account_id=pay_account.id,
                payment_method_code=payment.get('paymentMethod'),
                payment_status_code=PaymentStatus.COMPLETED.value,
                receipt_number=rs_number,
                cheque_receipt_number=payment.get('chequeReceiptNumber'),
                is_routing_slip=True,
                paid_amount=payment.get('paidAmount'),
                payment_date=string_to_date(payment.get('paymentDate')) if payment.get('paymentDate') else None,
                created_by=kwargs['user'].user_name
            ).flush()

        # Commit once at the end so the whole creation is atomic.
        routing_slip.commit()
        return cls.find_by_number(rs_number)

    @classmethod
    def do_link(cls, rs_number: str, parent_rs_number: str) -> Dict[str, any]:
        """Link routing slip to parent routing slip."""
        routing_slip: RoutingSlipModel = RoutingSlipModel.find_by_number(rs_number)
        parent_routing_slip: RoutingSlipModel = RoutingSlipModel.find_by_number(parent_rs_number)
        if routing_slip is None or parent_routing_slip is None:
            raise BusinessException(Error.FAS_INVALID_ROUTING_SLIP_NUMBER)

        # do validations if its linkable
        RoutingSlip._validate_linking(routing_slip=routing_slip, parent_rs_slip=parent_routing_slip)

        routing_slip.parent_number = parent_routing_slip.number
        routing_slip.status = RoutingSlipStatus.LINKED.value

        # transfer the amount to parent.
        # we keep the total amount as such and transfer only the remaining amount.
        parent_routing_slip.remaining_amount += routing_slip.remaining_amount
        routing_slip.remaining_amount = 0

        routing_slip.commit()
        return cls.find_by_number(rs_number)

    @classmethod
    def update(cls, rs_number: str, action: str, request_json: Dict[str, any]) -> Dict[str, any]:
        """Update routing slip (currently only status updates via PATCH)."""
        if (patch_action := PatchActions.from_value(action)) is None:
            raise BusinessException(Error.PATCH_INVALID_ACTION)

        routing_slip: RoutingSlipModel = RoutingSlipModel.find_by_number(rs_number)
        if routing_slip is None:
            raise BusinessException(Error.FAS_INVALID_ROUTING_SLIP_NUMBER)

        if patch_action == PatchActions.UPDATE_STATUS:
            routing_slip.status = request_json.get('status')

        routing_slip.save()
        return cls.find_by_number(rs_number)

    @staticmethod
    def _validate_linking(routing_slip: RoutingSlipModel, parent_rs_slip: RoutingSlipModel) -> None:
        """Validate the linking.

        1). child already has a parent/already linked.
        2). its already a parent.
        3). parent_rs_slip has a parent.ie parent_rs_slip shouldn't already be linked
        4). one of them has transactions
        """
        if RoutingSlip._is_linked_already(routing_slip):
            raise BusinessException(Error.RS_ALREADY_LINKED)

        children = RoutingSlipModel.find_children(routing_slip.number)
        if children and len(children) > 0:
            raise BusinessException(Error.RS_ALREADY_A_PARENT)

        if RoutingSlip._is_linked_already(parent_rs_slip):
            raise BusinessException(Error.RS_PARENT_ALREADY_LINKED)

        # prevent self linking
        if routing_slip.number == parent_rs_slip.number:
            raise BusinessException(Error.RS_CANT_LINK_TO_SAME)

        # has one of these has pending
        if routing_slip.invoices:
            raise BusinessException(Error.RS_CHILD_HAS_TRANSACTIONS)

    @staticmethod
    def _is_linked_already(routing_slip: RoutingSlipModel):
        """Find if the rs is already linked (has a parent or LINKED status)."""
        return routing_slip.parent or routing_slip.status == RoutingSlipStatus.LINKED.value
| 37.949495 | 112 | 0.646061 |
ea2965c2c068b1b82137de3e4e1b55a657e1bdd8 | 8,510 | py | Python | 2021/src/Pre-processing/Isotropic/Noise/src/test/main.py | tansyab1/PhD-project | 15f170c1976e58697454cd992687d808d1b2284a | [
"MIT"
] | null | null | null | 2021/src/Pre-processing/Isotropic/Noise/src/test/main.py | tansyab1/PhD-project | 15f170c1976e58697454cd992687d808d1b2284a | [
"MIT"
] | null | null | null | 2021/src/Pre-processing/Isotropic/Noise/src/test/main.py | tansyab1/PhD-project | 15f170c1976e58697454cd992687d808d1b2284a | [
"MIT"
] | null | null | null | import argparse
import cv2
import numpy as np
import tensorflow as tf
import dataset_reader
import samples_plt
from CNN_denoiser import CNN_denoiser
def _resize_stack(raw_images, img_width, img_height):
    """Resize a stack of images; returns array of shape (n, img_height, img_width)."""
    # cv2.resize takes dsize=(width, height) but RETURNS an array of shape
    # (height, width), so the buffer must be (n, img_height, img_width).
    # The previous buffers were (n, img_width, img_height) / (n, img_width,
    # img_width), which broke whenever width != height.
    resized = np.zeros((raw_images.shape[0], img_height, img_width))
    for i in range(raw_images.shape[0]):
        resized[i] = cv2.resize(raw_images[i], dsize=(img_width, img_height),
                                interpolation=cv2.INTER_CUBIC)
    return resized


def load_datasets(img_width=64, img_height=64):
    """Load and resize the mini-MIAS and dental X-ray datasets.

    Parameters
    ----------
    img_width, img_height : int
        Target size for every image.

    Returns
    -------
    tuple of np.ndarray
        (mias_images, dental_images), each of shape (n, img_height, img_width).
    """
    # NOTE(review): loaders for the DX and covid datasets were previously
    # present but commented out; restore from version control if needed.
    mias_images = _resize_stack(dataset_reader.read_mini_mias(),
                                img_width, img_height)
    dental_images = _resize_stack(dataset_reader.read_dental(),
                                  img_width, img_height)
    return mias_images, dental_images
def add_noise(pure, pure_test, prop=None, std=None, mean=None):
    """Add Gaussian noise to the train and test image arrays.

    Parameters
    ----------
    pure, pure_test : np.ndarray
        Clean train/test images.
    prop, std, mean : float, optional
        Noise proportion, standard deviation and mean. Each defaults to the
        module-level setting (noise_prop / noise_std / noise_mean), so
        existing two-argument calls behave exactly as before; passing them
        explicitly also makes the function usable when imported (the module
        globals are only assigned under __main__).

    Returns
    -------
    tuple of np.ndarray
        (noisy_input, noisy_input_test) with the same shapes as the inputs.
    """
    prop = noise_prop if prop is None else prop
    std = noise_std if std is None else std
    mean = noise_mean if mean is None else mean
    noise = np.random.normal(mean, std, pure.shape)
    noise_test = np.random.normal(mean, std, pure_test.shape)
    noisy_input = pure + prop * noise
    noisy_input_test = pure_test + prop * noise_test
    return noisy_input, noisy_input_test
class Range(object):
    """Inclusive numeric interval, usable as an argparse ``choices`` entry.

    argparse validates a value by testing ``value == choice``; implementing
    __eq__ as interval containment lets one Range object stand in for the
    whole [start, end] interval.
    """

    def __init__(self, start, end):
        self.start = start
        self.end = end

    def __eq__(self, other):
        # Inclusive containment test: start <= other <= end.
        return self.start <= other <= self.end

    def __repr__(self):
        # Shown by argparse in "invalid choice" error messages; the default
        # object repr would be unreadable there.
        return f"[{self.start}, {self.end}]"
if __name__ == "__main__":
    # --- Hardware setup ---
    # NOTE(review): gpus[0] raises IndexError on machines without a GPU --
    # confirm a GPU is required for this script.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    tf.config.experimental.set_memory_growth(gpus[0], True)
    # --- Defaults for image size, training, and noise (overridable via CLI) ---
    img_width, img_height = 64, 64
    batch_size = 10
    nu_epochs = 50
    validation_split = 0
    train_split = 0.9
    verbosity = 1
    noise_prop = 0.1
    noise_std = 0.5
    noise_mean = 0
    number_of_samples = 4
    shuffle_test_set = False
    # --- Command-line interface ---
    parser = argparse.ArgumentParser(description='Image Denoiser')
    parser.add_argument("-load", "--load", help="Path of dataset to load [default = DX and MIAS are loaded]", type=str)
    parser.add_argument("-size", "--size", help="Image size 64x64 or 128x128 [choices = 128, 64] [default = 64]",
                        type=int,
                        choices=[128, 64])
    parser.add_argument("-p", "--proportion", help="Gaussian noise proportion [default = 0.1]", type=float)
    parser.add_argument("-std", "--sdeviation", help="Gaussian noise standard deviation [default = 0.01]", type=float)
    parser.add_argument("-m", "--mean", help="Gaussian noise mean [default = 0]", type=float)
    parser.add_argument("-s", "--samples", help="Number of samples [default = 4]", type=int)
    parser.add_argument("-shuffle", "--shuffle", help="Shuffle test set", action="store_true")
    parser.add_argument("-tsplit", "--trainsplit", help="Train split [0-1] [default = 0.9]", type=float,
                        choices=[Range(0.0, 1.0)])
    parser.add_argument("-epoch", "--epoch", help="Number of epochs [default = 50]", type=int)
    parser.add_argument("-batch", "--batch", help="Batch size [default = 10]", type=int)
    parser.add_argument("-vsplit", "--validationsplit", help="Validation split [0-1] [default = 0.1]", type=float,
                        choices=[Range(0.0, 1.0)])
    parser.add_argument("-save", "--save", help="Save test set samples", action="store_true")
    parser.add_argument("-plot", "--plot", help="Plot model loss", action="store_true")
    args = parser.parse_args()
    # Override the defaults with any CLI-provided values.
    # NOTE(review): truthiness checks mean explicit zeros (e.g. "-m 0") are
    # ignored and fall back to the defaults -- confirm this is intended.
    if args.proportion:
        noise_prop = args.proportion
    if args.sdeviation:
        noise_std = args.sdeviation
    if args.mean:
        noise_mean = args.mean
    if args.samples:
        number_of_samples = args.samples
    if args.epoch:
        nu_epochs = args.epoch
    if args.batch:
        batch_size = args.batch
    if args.validationsplit:
        validation_split = args.validationsplit
    if args.trainsplit:
        train_split = args.trainsplit
    if args.shuffle:
        shuffle_test_set = True
    if args.size:
        img_width = args.size
        img_height = args.size
    # --- Dataset loading and train/test split ---
    print("[LOG] Loading datasets...")
    if args.load:
        print("[LOG] Loading data set from [{0}]".format(args.load))
        data_images = dataset_reader.read_dataset(args.load, img_width, img_height)
        input_train, input_test = CNN_denoiser.train_test_split1(data_images, train_split=train_split,
                                                                 shuffle_test_set=shuffle_test_set, img_width=img_width,
                                                                 img_height=img_height) # Split 1 set to train and test sets
    else:
        print("Loading default datasets, MIAS and DX")
        mias_images, dx_images = load_datasets(img_width, img_height) # Load mias and DX datasets
        input_train, input_test = CNN_denoiser.train_test_split(mias_images, dx_images, train_split=train_split,
                                                                shuffle_test_set=shuffle_test_set, img_width=img_width,
                                                                img_height=img_height) # Split both sets to train and test sets
    print(
        "[LOG] Load completed\n" + "[LOG] Image size {0}x{1}".format(img_width,
                                                                     img_height) + "\n[LOG] Splitting datasets with [{0}] train set size\n[LOG] Shuffle test set: {1}".format(
            train_split, shuffle_test_set))
    # input_train, input_test = CNN_denoiser.train_test_split2(dx_images, train_split=train_split, shuffle_test_set=shuffle_test_set) # Split 1 set to train and test sets
    # input_train, input_test = train_test_split3(mias_images, dx_images, dental_images, shuffle_test_set=True)
    # Parse numbers as floats
    input_train = input_train.astype('float32')
    input_test = input_test.astype('float32')
    # Normalize data
    # Pixel values are scaled from [0, 255] into [0, 1].
    input_train = input_train / 255
    input_test = input_test / 255
    print("[LOG] Adding Gaussian noise to train and test sets...\nNoise Proportion: {0}\nMean: {1}\nStandard "
          "Deviation: {2}".format(noise_prop, noise_mean, noise_std))
    # Add gaussian noise
    # The clean arrays are kept as training targets; the noisy copies are the inputs.
    pure = input_train
    pure_test = input_test
    noisy_input, noisy_input_test = add_noise(pure, pure_test) # Add Gaussian noise to train and test sets
    print("[LOG] Initializing model...\nEPOCHS: {0}\nBatch size: {1}\nValidation split: {2}".format(nu_epochs,
                                                                                                    batch_size,
                                                                                                    validation_split))
    # Create the model
    cnn_denoiser = CNN_denoiser(batch_size=batch_size, nu_epochs=nu_epochs, validation_split=validation_split,
                                img_height=img_height, img_width=img_width)
    # --- Training: learn to map noisy inputs back to the clean images ---
    print("[LOG] Training and evaluating model...")
    cnn_denoiser.train(noisy_input, pure, verbosity=verbosity)
    # cnn_denoiser.evaluate(noisy_input_test, pure_test)
    if args.plot:
        cnn_denoiser.model_plots(noise_prop, noise_mean, noise_std)
    # Generate denoised images
    samples = noisy_input_test[:]
    print("[LOG] Training and model evaluation completed\n[LOG] Denoising images test set...")
    denoised_images = cnn_denoiser.predict(samples)
    # --- Plot (and optionally save) noisy / denoised / clean sample triplets ---
    print("[LOG] Image denoising completed\n[LOG] Plotting denoised samples")
    samples_plt.plot_samples((noise_prop, noise_std, noise_mean), noisy_input_test, denoised_images, pure_test,
                             number_of_samples, img_width=img_width, img_height=img_height)
    if args.save:
        samples_plt.save_samples((noise_prop, noise_std, noise_mean), noisy_input_test, denoised_images, pure_test,
                                 img_width=img_width, img_height=img_height)
| 50.35503 | 174 | 0.636193 |
30a282b3889f623a27c8fd7fef702aade67e132d | 6,264 | py | Python | server/galaxyls/tests/unit/test_context.py | blankenberg/galaxy-language-server | 5b56e56a16bf2d93a0cab1aced564bf1fa3f4f3b | [
"Apache-2.0"
] | null | null | null | server/galaxyls/tests/unit/test_context.py | blankenberg/galaxy-language-server | 5b56e56a16bf2d93a0cab1aced564bf1fa3f4f3b | [
"Apache-2.0"
] | null | null | null | server/galaxyls/tests/unit/test_context.py | blankenberg/galaxy-language-server | 5b56e56a16bf2d93a0cab1aced564bf1fa3f4f3b | [
"Apache-2.0"
] | null | null | null | from typing import List, Optional
import pytest
from pygls.workspace import Position
from pytest_mock import MockerFixture
from ...services.context import XmlContext, XmlContextService, XsdNode, XsdTree
from ...services.xml.nodes import XmlAttribute, XmlAttributeKey, XmlAttributeValue, XmlElement
from ...services.xml.types import NodeType
from .utils import TestUtils
# [root]
# ├── [child]
# │ └── [subchild]
# └── [sibling]
@pytest.fixture()
def fake_xsd_tree(mocker: MockerFixture) -> XsdTree:
    """Build the four-node XSD tree pictured in the comment above.

    Layout: "root" has two children ("child" and "sibling"); "child" has one
    child of its own ("subchild"). The underlying XSD elements are mocks.
    """
    root = XsdNode(name="root", element=mocker.Mock())
    child = XsdNode(name="child", parent=root, element=mocker.Mock())
    XsdNode(name="sibling", parent=root, element=mocker.Mock())
    XsdNode(name="subchild", parent=child, element=mocker.Mock())
    return XsdTree(root)
class TestXmlContextClass:
    """Unit tests for the XmlContext value object."""
    def test_init_sets_properties(self, fake_xsd_tree: XsdTree) -> None:
        """The constructor should store all given values unchanged."""
        expected_xsd_element = fake_xsd_tree.root
        exepected_token = XmlElement()
        expected_line_content = "test"
        expected_position = Position()
        context = XmlContext(
            expected_xsd_element, exepected_token, line_text=expected_line_content, position=expected_position
        )
        assert context.token == exepected_token
        assert context.xsd_element == expected_xsd_element
        assert context.line_text == expected_line_content
        assert context.position == expected_position
        assert not context.is_empty
    def test_context_with_tag_token_type_returns_is_tag(self, fake_xsd_tree: XsdTree) -> None:
        """An XmlElement token makes only is_tag true."""
        context = XmlContext(fake_xsd_tree.root, XmlElement())
        assert context.is_tag
        assert not context.is_attribute_key
        assert not context.is_attribute_value
    def test_context_with_attr_key_token_type_returns_is_attr_key(self, fake_xsd_tree: XsdTree) -> None:
        """An XmlAttributeKey token makes only is_attribute_key true."""
        fake_attr = XmlAttribute("attr", 0, 0, XmlElement())
        context = XmlContext(fake_xsd_tree.root, XmlAttributeKey("attr", 0, 0, fake_attr))
        assert not context.is_tag
        assert context.is_attribute_key
        assert not context.is_attribute_value
    def test_context_with_attr_value_token_type_returns_is_attr_value(self, fake_xsd_tree: XsdTree) -> None:
        """An XmlAttributeValue token makes only is_attribute_value true."""
        fake_attr = XmlAttribute("attr", 0, 0, XmlElement())
        context = XmlContext(fake_xsd_tree.root, XmlAttributeValue("val", 0, 0, fake_attr))
        assert not context.is_tag
        assert not context.is_attribute_key
        assert context.is_attribute_value
class TestXmlContextServiceClass:
    """Unit tests for XmlContextService context resolution."""
    def test_init_sets_properties(self, mocker: MockerFixture) -> None:
        """The constructor should store the given XSD tree."""
        expected = mocker.Mock()
        service = XmlContextService(expected)
        assert service.xsd_tree
    def test_get_xml_context_returns_empty_document_context(self, mocker: MockerFixture) -> None:
        """An empty document should yield an empty context."""
        empty_xml_content = ""
        position = Position()
        xsd_tree_mock = mocker.Mock()
        service = XmlContextService(xsd_tree_mock)
        context = service.get_xml_context(TestUtils.to_document(empty_xml_content), position)
        assert context.is_empty
    # Each row: XML source with "^" marking the cursor, then the expected
    # token name, token node type, matched XSD node name, and element stack.
    @pytest.mark.parametrize(
        "source_with_mark, expected_token_name, expected_node_type, expected_xsd_node_name, expected_stack",
        [
            ("<root>^", "root", NodeType.ELEMENT, "root", ["root"]),
            ("<root> ^", None, NodeType.CONTENT, "root", ["root"]),
            ("^<root><child", "root", NodeType.ELEMENT, "root", ["root"]),
            ("<root>^<child", "child", NodeType.ELEMENT, "child", ["root", "child"]),
            ("<root>^ <child", None, NodeType.CONTENT, "root", ["root"]),
            ("<root><^child", "child", NodeType.ELEMENT, "child", ["root", "child"]),
            ("<root><child^", "child", NodeType.ELEMENT, "child", ["root", "child"]),
            ("<root><child ^", "child", NodeType.ELEMENT, "child", ["root", "child"]),
            ('<root ^ attr="4"><child ', "root", NodeType.ELEMENT, "root", ["root"]),
            ('<root ^attr="4"><child ', "attr", NodeType.ATTRIBUTE_KEY, "root", ["root"]),
            ('<root attr^="4"><child ', "attr", NodeType.ATTRIBUTE_KEY, "root", ["root"]),
            ('<root attr=^"4"><child ', None, NodeType.ATTRIBUTE_VALUE, "root", ["root"]),
            ('<root attr="4"^><child ', None, NodeType.ATTRIBUTE_VALUE, "root", ["root"]),
            ('<root attr="4" ^><child ', "root", NodeType.ELEMENT, "root", ["root"]),
            ('<root attr="4"><^child ', "child", NodeType.ELEMENT, "child", ["root", "child"]),
            ('<root attr="4">\n<child/^><other', "child", NodeType.ELEMENT, "child", ["root", "child"]),
            ('<root attr="4">\n<child/>^<other', "other", NodeType.ELEMENT, "root", ["root", "other"]),
            ('<root attr="4">\n<child/><^other', "other", NodeType.ELEMENT, "root", ["root", "other"]),
            ('<root attr="4">\n<child/><^sibling', "sibling", NodeType.ELEMENT, "sibling", ["root", "sibling"]),
            ('<root attr="4">\n    <^ \n<child', None, NodeType.ELEMENT, "root", ["root"]),
            ('<root attr="4">\n    < \n<^child', "child", NodeType.ELEMENT, "child", ["root", "child"]),
            ("<root><child></child>^", "root", NodeType.ELEMENT, "root", ["root"]),
        ],
    )
    def test_get_xml_context_returns_context_with_expected_node(
        self,
        fake_xsd_tree: XsdTree,
        source_with_mark: str,
        expected_token_name: Optional[str],
        expected_node_type: NodeType,
        expected_xsd_node_name: XsdNode,
        expected_stack: List[str],
    ) -> None:
        """Resolving the context at the marked cursor yields the expected node info."""
        service = XmlContextService(fake_xsd_tree)
        position, source = TestUtils.extract_mark_from_source("^", source_with_mark)
        document = TestUtils.to_document(source)
        print(fake_xsd_tree.render())
        print(f"Test context at position [line={position.line}, char={position.character}]")
        print(f"Document:\n{document.source}")
        context = service.get_xml_context(document, position)
        assert context
        assert context.token
        assert context.token.name == expected_token_name
        assert context.token.node_type == expected_node_type
        assert context.xsd_element.name == expected_xsd_node_name
        assert context.stack == expected_stack
| 46.058824 | 112 | 0.646392 |
37596be79d321e633a44729dc88b84e9ab4c9600 | 1,495 | py | Python | miniworld/model/singletons/Singletons.py | miniworld-project/miniworld_core | c591bad232b78eae99e8f55cb1b907c1e228484b | [
"MIT"
] | 5 | 2019-05-11T14:57:15.000Z | 2021-07-05T00:35:25.000Z | miniworld/model/singletons/Singletons.py | miniworld-project/miniworld_core | c591bad232b78eae99e8f55cb1b907c1e228484b | [
"MIT"
] | 27 | 2017-03-17T07:11:02.000Z | 2019-05-26T23:36:56.000Z | miniworld/model/singletons/Singletons.py | miniworld-project/miniworld_core | c591bad232b78eae99e8f55cb1b907c1e228484b | [
"MIT"
] | 6 | 2017-05-03T12:11:33.000Z | 2020-04-03T11:44:27.000Z | __author__ = 'Nils Schmidt'
# TODO: #54,#55: adjust doc
class Singletons:
    """
    Container for every global singleton instance used by the application.

    The `SingletonInit` module is responsible for filling these slots in;
    keeping the wiring there prevents cyclic imports. A singleton whose
    state must be cleared when a new simulation starts should register
    itself via `reset_simulation_scenario_state` and implement the
    `Resettable` interface.

    Attributes
    ----------
    event_system : EventSystem
    """

    # Every slot starts out as None and is assigned later by SingletonInit.
    _SINGLETON_NAMES = (
        "network_manager",
        "shell_helper",
        "spatial_singleton",
        "simulation_manager",
        "network_backend",
        "event_system",
        "simulation_state_gc",
        "simulation_errors",
        "protocol",
        "zeromq_server",
        "node_distribution_strategy",
        "qemu_process_singletons",
    )

    def __init__(self):
        super(Singletons, self).__init__()
        for singleton_name in self._SINGLETON_NAMES:
            setattr(self, singleton_name, None)
# TODO: #54,#55: EXTRACT CLASS
#################################################
# Singleton reference
#################################################
# Shared module-level instance: other modules import this object rather
# than constructing their own Singletons().
singletons = Singletons()
if __name__ == '__main__':
    # Ad-hoc smoke test of the reset mechanism.
    class Foo():
        def reset_simulation_state(self):
            print("reset")
    # NOTE(review): simulation_state_gc is initialised to None above and is
    # only populated elsewhere (SingletonInit), so running this module
    # standalone presumably raises AttributeError here -- confirm.
    singletons.simulation_state_gc.add_singleton_with_simulation_scenario_state_(Foo())
    singletons.simulation_state_gc.reset_simulation_scenario_state()
| 29.9 | 102 | 0.64214 |
23a03efe86d66a3c85da63c1b91cb7fe5c8eb98c | 2,982 | py | Python | TellTheBell.py | ThatAuskeGuy/Taco-Bell-Survey | 5cb1cec11eb765fb496ae62591c5d05601ee4d80 | [
"MIT"
] | null | null | null | TellTheBell.py | ThatAuskeGuy/Taco-Bell-Survey | 5cb1cec11eb765fb496ae62591c5d05601ee4d80 | [
"MIT"
] | null | null | null | TellTheBell.py | ThatAuskeGuy/Taco-Bell-Survey | 5cb1cec11eb765fb496ae62591c5d05601ee4d80 | [
"MIT"
] | null | null | null | """ Taco Bell Survey Automation """
import random
import selenium
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import Select
# Get input from user from receipt
# The name is split into [first, last]; date and time are split into their
# individual components for the survey's separate form fields.
name = input("First and Last Name (Joe Mama): ").split()
phone = input("Phone Number (ex. 555-555-5555): ")
store = input("Store Number: ")
date_of_visit = input("MM/DD/YYYY: ").split("/")
time_of_visit = input("HH:MM:AM/PM: ").split(":")
# all definitions for program
def next_button():
    """Click the survey page's "Next" button to advance to the next page."""
    advance_button = driver.find_element_by_xpath('//*[@id="NextButton"]')
    advance_button.click()
def questions():
    """Answer every rating question on the current survey page.

    Rating rows all have ids containing "FNSR"; depending on the page
    layout the clickable rating option lives under one of three different
    sub-paths, which are tried in order for each row.

    Fixed: the try/except previously wrapped the whole loop, so the first
    row whose primary xpath was missing aborted the iteration and left the
    remaining questions unanswered (and the fallback only retried the row
    that failed). Each row is now handled independently.
    """
    ids = driver.find_elements_by_xpath('//*[contains(@id, "FNSR")]')
    for ii in ids:
        attr = ii.get_attribute('id')
        try:
            driver.find_element_by_xpath(('//*[@id="{}"]/td[3]/span').format(attr)).click()
        except NoSuchElementException:
            try:
                driver.find_element_by_xpath(('//*[@id="{}"]/div[3]/div/div[2]').format(attr)).click()
            except NoSuchElementException:
                driver.find_element_by_xpath(('//*[@id="{}"]/div[2]/div/div[1]/span/span').format(attr)).click()
def textbox():
    """Type one of several canned neutral comments into the free-text field."""
    canned_comments = [
        'I was just not overly satisfied by my experience',
        'I just found it to be satisfactory',
        'It was just a quick trip to get some food. I wasn\'t looking for an excellent experience.',
    ]
    comment_field = driver.find_element_by_xpath('//*[starts-with(@id, "S")]')
    comment_field.send_keys(random.choice(canned_comments))
# Open Browser
driver = webdriver.Chrome("../chromedriver")
driver.get("http://www.tellthebell.com")
# Input receipt info on page
driver.find_element_by_xpath('//*[@id="CN1"]').send_keys(store)
# Date and time parts in the order the six "Input*" form fields expect.
receipt_items = [date_of_visit[0], date_of_visit[1], date_of_visit[2], time_of_visit[0], time_of_visit[1], time_of_visit[2]]
ids = driver.find_elements_by_xpath(('//*[starts-with(@id, "Input")]'))
n = 0
# Fill each receipt field in document order with the matching item above.
for ii in ids:
    attr = ii.get_attribute('id')
    driver.find_element_by_xpath(('//*[@id="{}"]').format(attr)).send_keys(receipt_items[n])
    n+=1
next_button()
""" Survey Questionaire """
sweepstakes = False
# Keep answering survey pages until the sweepstakes entry form appears.
# Each iteration tries, in order: the sweepstakes opt-in radio button, the
# free-text comment box, then a page of rating questions.
while sweepstakes != True:
    try:
        driver.find_element_by_xpath('//*[@id="FNSR046000"]/td[2]/span').click()
        sweepstakes = True
        next_button()
        # Fill first name, last name, and phone into the entry form fields.
        sweepstakes_items = [name[0], name[1], phone]
        ids = driver.find_elements_by_xpath('//*[starts-with(@id, "S")]')
        n = 0
        for ii in ids:
            attr = ii.get_attribute('id')
            driver.find_element_by_xpath(('//*[@id="{}"]').format(attr)).send_keys(sweepstakes_items[n])
            n+=1
        next_button()
        driver.quit()
    except NoSuchElementException:
        try:
            textbox()
            next_button()
        except NoSuchElementException:
            try:
                questions()
                next_button()
            except:
                # NOTE(review): bare except silently ends the loop on ANY
                # error, not just missing elements -- confirm intended.
                break
| 35.927711 | 199 | 0.630449 |
7bc8f9869d3cda6cd2464a43d8c8db7ed3eb7dbd | 790 | py | Python | distribution/translation.py | ASL-19/outline-distribution | 6ef595d59c0d7d9416cd1d3c0fb263ba83830126 | [
"Apache-2.0"
] | 5 | 2020-12-21T21:19:28.000Z | 2022-03-28T00:53:45.000Z | distribution/translation.py | ASL-19/outline-distribution | 6ef595d59c0d7d9416cd1d3c0fb263ba83830126 | [
"Apache-2.0"
] | 5 | 2021-01-07T02:32:47.000Z | 2022-03-28T14:28:52.000Z | distribution/translation.py | ASL-19/outline-distribution | 6ef595d59c0d7d9416cd1d3c0fb263ba83830126 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 ASL19 Organization
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modeltranslation.translator import register, TranslationOptions
from distribution.models import Issue
@register(Issue)
class IssueTranslationOptions(TranslationOptions):
    # django-modeltranslation: register per-language translation fields for
    # Issue.description (one description_<lang> column per configured language).
    fields = ('description',)
| 35.909091 | 74 | 0.781013 |
1622b51fb3359252725dd26697f7ed305212cb84 | 4,015 | py | Python | eoxserver/resources/coverages/management/commands/browse.py | kalxas/eoxserver | 8073447d926f3833923bde7b7061e8a1658dee06 | [
"OML"
] | 25 | 2015-08-10T19:34:34.000Z | 2021-02-05T08:28:01.000Z | eoxserver/resources/coverages/management/commands/browse.py | kalxas/eoxserver | 8073447d926f3833923bde7b7061e8a1658dee06 | [
"OML"
] | 153 | 2015-01-20T08:35:49.000Z | 2022-03-16T11:00:56.000Z | eoxserver/resources/coverages/management/commands/browse.py | kalxas/eoxserver | 8073447d926f3833923bde7b7061e8a1658dee06 | [
"OML"
] | 10 | 2015-01-23T15:48:30.000Z | 2021-01-21T15:41:18.000Z | # ------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
# ------------------------------------------------------------------------------
# Copyright (C) 2017 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ------------------------------------------------------------------------------
from django.core.management.base import CommandError, BaseCommand
from django.db import transaction
from eoxserver.resources.coverages import models
from eoxserver.resources.coverages.management.commands import (
CommandOutputMixIn, SubParserMixIn
)
from eoxserver.resources.coverages.registration.browse import BrowseRegistrator
class Command(CommandOutputMixIn, SubParserMixIn, BaseCommand):
    """ Command to manage browses. This command uses sub-commands for the
        specific tasks: register, generate, deregister
    """
    def add_arguments(self, parser):
        """ Declare the sub-commands and their CLI arguments. """
        register_parser = self.add_subparser(parser, 'register')
        generate_parser = self.add_subparser(parser, 'generate')
        deregister_parser = self.add_subparser(parser, 'deregister')
        # Every sub-command takes the product identifier as a positional.
        for parser in [register_parser, generate_parser, deregister_parser]:
            parser.add_argument(
                'identifier', nargs=1, help='The associated product identifier'
            )
        register_parser.add_argument(
            'location', nargs='+',
            help="The storage location of the browse."
        )
        register_parser.add_argument(
            '--type', '--browse-type', '-t', dest='type_name', default=None,
            help='The name of the browse type to associate the browse with.'
        )
    @transaction.atomic
    def handle(self, subcommand, identifier, *args, **kwargs):
        """ Dispatch sub-commands: register, generate, deregister.
            Runs inside a single database transaction.
        """
        # nargs=1 delivers the identifier as a single-element list.
        identifier = identifier[0]
        if subcommand == "register":
            self.handle_register(identifier, *args, **kwargs)
        elif subcommand == "generate":
            self.handle_generate(identifier, *args, **kwargs)
        elif subcommand == "deregister":
            self.handle_deregister(identifier, *args, **kwargs)
    def handle_register(self, identifier, location, type_name, **kwargs):
        """ Handle the registration of an existing browse.
            `location` is a list of storage location components (nargs='+').
        """
        BrowseRegistrator().register(
            product_identifier=identifier,
            location=location,
            type_name=type_name
        )
        print(
            'Successfully registered browse (%r) for product %r'
            % (type_name, identifier)
        )
    def handle_generate(self, identifier, **kwargs):
        """ Handle the generation of a new browse image. Not implemented yet.
        """
        raise NotImplementedError
    def handle_deregister(self, identifier, **kwargs):
        """ Handle the deregistration a browse image. Not implemented yet.
        """
        raise NotImplementedError
| 41.822917 | 80 | 0.648817 |
e432a976138e6bff013b57b0556f0168be9d7ae0 | 3,331 | py | Python | custom_components/bwt_perla/config_flow.py | dkarv/bwt_perla | 885472b3484349448f00d44678cd7c14132f862f | [
"MIT"
] | 9 | 2021-06-19T13:26:45.000Z | 2022-02-19T19:27:16.000Z | custom_components/bwt_perla/config_flow.py | dkarv/bwt_perla | 885472b3484349448f00d44678cd7c14132f862f | [
"MIT"
] | 6 | 2021-09-29T07:03:04.000Z | 2022-02-24T16:26:37.000Z | custom_components/bwt_perla/config_flow.py | dkarv/bwt_perla | 885472b3484349448f00d44678cd7c14132f862f | [
"MIT"
] | 4 | 2021-08-17T20:41:08.000Z | 2021-12-28T22:26:33.000Z | import logging
import voluptuous as vol
import traceback
from homeassistant import config_entries
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from .api import BWTPerlaApi
from .const import (
CONF_CODE,
CONF_HOST,
CONF_SYNC_INTERVAL,
DEFAULT_SYNC_INTERVAL,
DOMAIN,
PLATFORMS,
)
_LOGGER = logging.getLogger(__name__)
class BWTPerlaFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Config flow that sets up the BWT Perla integration from the UI."""
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
    def __init__(self):
        # Form errors to display, keyed by field name (or "base").
        self._errors = {}
    async def async_step_user(self, user_input=None):
        """Handle the initial step: ask for host/code and create the entry."""
        self._errors = {}
        # Only one configured device is supported at a time.
        if self._async_current_entries():
            return self.async_abort(reason="single_instance_allowed")
        if user_input is not None:
            valid = await self._test_credentials(
                user_input[CONF_HOST], user_input[CONF_CODE]
            )
            if valid:
                return self.async_create_entry(
                    title=user_input[CONF_HOST], data=user_input
                )
            else:
                self._errors["base"] = "auth"
                return await self._show_config_form(user_input)
        return await self._show_config_form(user_input)
    async def _show_config_form(self, user_input): # pylint: disable=unused-argument
        """Show the host/code entry form (with any accumulated errors)."""
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {vol.Required(CONF_HOST): str, vol.Required(CONF_CODE): str}
            ),
            errors=self._errors,
        )
    async def _test_credentials(self, host, code):
        """Return True when a test API call with the credentials succeeds."""
        try:
            api = BWTPerlaApi(host, code)
            # get_data is blocking, so run it in the executor.
            await self.hass.async_add_executor_job(api.get_data)
            return True
        except Exception as ex: # pylint: disable=broad-except
            # Any failure (network, auth, parsing) is treated as bad credentials.
            _LOGGER.error(
                f"{DOMAIN} Exception in login : %s - traceback: %s",
                ex,
                traceback.format_exc(),
            )
            return False
    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Return the options flow handler for an existing entry."""
        return BWTPerlaOptionsFlowHandler(config_entry)
class BWTPerlaOptionsFlowHandler(config_entries.OptionsFlow):
    """Options flow for changing the polling interval of an existing entry."""
    def __init__(self, config_entry):
        self.config_entry = config_entry
        # Work on a mutable copy of the current options.
        self.options = dict(config_entry.options)
    async def async_step_init(self, user_input=None): # pylint: disable=unused-argument
        """Entry point of the options flow; delegates to the user step."""
        return await self.async_step_user()
    async def async_step_user(self, user_input=None):
        """Show the sync-interval form and persist the submitted value."""
        if user_input is not None:
            self.options.update(user_input)
            return await self._update_options()
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {
                    vol.Required(
                        CONF_SYNC_INTERVAL,
                        default=self.options.get(
                            CONF_SYNC_INTERVAL, DEFAULT_SYNC_INTERVAL
                        ),
                    ): vol.All(vol.Coerce(int))
                }
            ),
        )
    async def _update_options(self):
        """Create the options entry with the collected values."""
        # NOTE(review): the title reads CONF_SYNC_INTERVAL from
        # config_entry.data, but the interval is stored in options, so this
        # lookup presumably yields None -- confirm what the title should be.
        return self.async_create_entry(
            title=self.config_entry.data.get(CONF_SYNC_INTERVAL), data=self.options
        )
| 30.009009 | 88 | 0.61363 |
6a207ac9d8a64829ac27a308db39096085cb5f17 | 1,978 | py | Python | mayan/apps/permissions/tests/test_events.py | darrenflexxu/Mayan-EDMS | 6707365bfacd137e625ddc1b990168012246fa07 | [
"Apache-2.0"
] | null | null | null | mayan/apps/permissions/tests/test_events.py | darrenflexxu/Mayan-EDMS | 6707365bfacd137e625ddc1b990168012246fa07 | [
"Apache-2.0"
] | 5 | 2021-03-19T22:56:45.000Z | 2022-03-12T00:08:43.000Z | mayan/apps/permissions/tests/test_events.py | Sumit-Kumar-Jha/mayan | 5b7ddeccf080b9e41cc1074c70e27dfe447be19f | [
"Apache-2.0"
] | 1 | 2020-07-29T21:03:27.000Z | 2020-07-29T21:03:27.000Z | from __future__ import unicode_literals
from actstream.models import Action
from mayan.apps.common.tests.base import GenericViewTestCase
from ..events import event_role_created, event_role_edited
from ..permissions import permission_role_create, permission_role_edit
from .mixins import RoleTestMixin, RoleViewTestMixin
class RoleEventsTestCase(RoleTestMixin, RoleViewTestMixin, GenericViewTestCase):
    """Verify that role create/edit views emit the expected actstream events."""
    def test_role_created_event_no_permissions(self):
        """Without the create permission the view is forbidden and no event fires."""
        Action.objects.all().delete()
        response = self._request_test_role_create_view()
        self.assertEqual(response.status_code, 403)
        self.assertEqual(Action.objects.count(), 0)
    def test_role_created_event_with_permissions(self):
        """With the create permission a role_created event targets the new role."""
        Action.objects.all().delete()
        self.grant_permission(permission=permission_role_create)
        response = self._request_test_role_create_view()
        self.assertEqual(response.status_code, 302)
        event = Action.objects.first()
        self.assertEqual(event.verb, event_role_created.id)
        self.assertEqual(event.target, self.test_role)
        self.assertEqual(event.actor, self._test_case_user)
    def test_role_edited_event_no_permissions(self):
        """Without edit access the view 404s and no event fires."""
        self._create_test_role()
        Action.objects.all().delete()
        response = self._request_test_role_edit_view()
        self.assertEqual(response.status_code, 404)
        self.assertEqual(Action.objects.count(), 0)
    def test_role_edited_event_with_access(self):
        """With edit access a role_edited event targets the edited role."""
        self._create_test_role()
        Action.objects.all().delete()
        self.grant_access(
            obj=self.test_role, permission=permission_role_edit
        )
        response = self._request_test_role_edit_view()
        self.assertEqual(response.status_code, 302)
        event = Action.objects.first()
        self.assertEqual(event.verb, event_role_edited.id)
        self.assertEqual(event.target, self.test_role)
        self.assertEqual(event.actor, self._test_case_user)
| 34.701754 | 80 | 0.734075 |
30179d1606b1f659ef1e815eb63a14f7fc43a757 | 2,928 | py | Python | utilities/dump-save-file.py | jhartz/gradefast | 0390eb605c56bfc8a02cfb34340fa4dbd52b4c44 | [
"MIT"
] | 5 | 2017-09-10T22:11:01.000Z | 2018-08-28T23:08:27.000Z | utilities/dump-save-file.py | jhartz/gradefast | 0390eb605c56bfc8a02cfb34340fa4dbd52b4c44 | [
"MIT"
] | 1 | 2018-09-06T12:53:45.000Z | 2018-09-11T15:37:26.000Z | utilities/dump-save-file.py | jhartz/gradefast | 0390eb605c56bfc8a02cfb34340fa4dbd52b4c44 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Utility script to dump the contents of a GradeFast save file.
Licensed under the MIT License. For more, see the LICENSE file.
Author: Jake Hartz <jake@hartz.io>
"""
import argparse
import json
import os
import pickle
import shelve
import sqlite3
import sys
from collections import OrderedDict
# Make sure we can access the GradeFast classes, for unpickling
sys.path.insert(1, os.path.join(os.path.dirname(__file__), ".."))
class DumpJSONEncoder(json.JSONEncoder):
    """JSON encoder that can serialise arbitrary objects for inspection.

    Objects JSON cannot handle natively are rendered from their
    ``__slots__`` (preferred) or ``__dict__``; anything else falls back to
    its ``repr()`` string.
    """

    def default(self, o):
        try:
            slot_names = o.__slots__
        except AttributeError:
            pass
        else:
            return {name: getattr(o, name) for name in slot_names}
        try:
            return vars(o)
        except TypeError:
            # No __dict__ either: fall back to the debug representation.
            return repr(o)
def dump_json(o):
    """Pretty-print *o* as JSON on stdout (2-space indent, repr fallback)."""
    encoder = DumpJSONEncoder(indent=2)
    for piece in encoder.iterencode(o):
        sys.stdout.write(piece)
def dump_yaml(o):
    """Dump *o* to stdout as YAML, round-tripping through JSON first."""
    import yaml
    # FIXME: "encode to json --> decode from json --> encode to yaml" is hacky A.F.
    # The JSON round-trip reuses DumpJSONEncoder, so arbitrary objects are
    # flattened to plain dicts/lists/strings before YAML serialisation.
    yaml.dump(json.loads(DumpJSONEncoder().encode(o)), stream=sys.stdout)
def dump_tagged_yaml(o):
    """Dump *o* to stdout as YAML directly, without the JSON round-trip.

    NOTE(review): presumably this emits python-tagged YAML for custom
    objects; plain ``yaml.dump`` raises for unrepresentable types --
    confirm the save data is always representable.
    """
    import yaml
    yaml.dump(o, stream=sys.stdout)
def get_sqlite_data(filename):
    """Load a sqlite-format GradeFast save file.

    Returns an OrderedDict mapping namespace -> OrderedDict of key ->
    unpickled value. The SQL ORDER BY makes both levels deterministic
    (sorted by namespace, then by key).
    """
    connection = sqlite3.connect(filename)
    try:
        result = OrderedDict()
        cursor = connection.cursor()
        cursor.execute("SELECT namespace, data_key, data_value FROM gradefast "
                       "ORDER BY namespace, data_key")
        for namespace, data_key, blob in cursor:
            # Namespaces/keys are stored as text; values as pickled blobs.
            result.setdefault(str(namespace), OrderedDict())[str(data_key)] = pickle.loads(blob)
        return result
    finally:
        connection.close()
def get_shelve_data(filename):
    """Load a legacy shelve-format GradeFast save file.

    Returns an OrderedDict of key -> value, with keys in sorted order so
    the dump output is deterministic.
    """
    with shelve.open(filename, flag="c", protocol=4) as shelf:
        return OrderedDict((key, shelf[key]) for key in sorted(shelf.keys()))
def main():
    """Parse CLI arguments, read the save file, and dump it to stdout."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--output", choices=["json", "yaml", "tagged-yaml"], default="json",
                        help="The output format")
    parser.add_argument("--format", choices=["sqlite", "legacy"],
                        help="The GradeFast save file format")
    parser.add_argument("save_file", metavar="save-file",
                        help="The path to a GradeFast save file")
    args = parser.parse_args()
    # Read the save data with the requested backend; --format is effectively
    # required since any other value just prints a message and exits.
    if args.format == "sqlite":
        d = get_sqlite_data(args.save_file)
    elif args.format == "legacy":
        d = get_shelve_data(args.save_file)
    else:
        print("Please specify a format")
        return
    # Emit in the requested output format (JSON is the default).
    if args.output == "yaml":
        dump_yaml(d)
    elif args.output == "tagged-yaml":
        dump_tagged_yaml(d)
    else:
        dump_json(d)
    # Trailing newline so the shell prompt starts on a fresh line.
    sys.stdout.write("\n")
if __name__ == "__main__":
    main()
| 26.618182 | 95 | 0.615437 |
b2771cccec24c57721a531c19ac06e0c4d02a997 | 768 | py | Python | dffml/config/json.py | iamandeepsandhu/dffml | 16b3234f7850f73163de61e21741ac5816cbf531 | [
"MIT"
] | 1 | 2020-02-07T07:33:15.000Z | 2020-02-07T07:33:15.000Z | dffml/config/json.py | sourabhyadav999/dffml | beda53caf483f4d010d97795f13527b4b573fa37 | [
"MIT"
] | null | null | null | dffml/config/json.py | sourabhyadav999/dffml | beda53caf483f4d010d97795f13527b4b573fa37 | [
"MIT"
] | null | null | null | import json
from typing import Dict
from ..util.entrypoint import entrypoint
from ..util.cli.arg import Arg
from ..base import BaseConfig
from .config import BaseConfigLoaderContext, BaseConfigLoader
class JSONConfigLoaderContext(BaseConfigLoaderContext):
    """Config loader context that (de)serialises configs as JSON."""

    async def loadb(self, resource: bytes) -> Dict:
        """Decode a UTF-8 JSON byte string into a dict."""
        text = resource.decode()
        return json.loads(text)

    async def dumpb(self, resource: Dict) -> bytes:
        """Encode *resource* as pretty-printed JSON bytes with sorted keys."""
        text = json.dumps(resource, sort_keys=True, indent=4)
        return text.encode()
@entrypoint("json")
class JSONConfigLoader(BaseConfigLoader):
    """Config loader registered under the "json" entrypoint."""
    CONTEXT = JSONConfigLoaderContext
    @classmethod
    def args(cls, args, *above) -> Dict[str, Arg]:
        # No extra CLI arguments: pass the arg mapping through unchanged.
        return args
    @classmethod
    def config(cls, config, *above) -> BaseConfig:
        # This loader needs no configuration of its own.
        return BaseConfig()
| 26.482759 | 70 | 0.721354 |
f69c0c9863dbb269baa9ce903bc65a495e68e7b2 | 14,055 | py | Python | simple_tensor/networks/inception_v4.py | fatchur/simple_tensor | ebc66d46d54fcfb65ef104978a6feca0a156b9b3 | [
"MIT"
] | 21 | 2019-03-08T16:02:46.000Z | 2022-02-09T03:31:26.000Z | simple_tensor/networks/inception_v4.py | fatchur/simple_tensor | ebc66d46d54fcfb65ef104978a6feca0a156b9b3 | [
"MIT"
] | 3 | 2020-02-04T08:43:20.000Z | 2020-10-20T13:52:16.000Z | simple_tensor/networks/inception_v4.py | fatchur/simple_tensor | ebc66d46d54fcfb65ef104978a6feca0a156b9b3 | [
"MIT"
] | 5 | 2019-11-30T03:40:04.000Z | 2021-12-26T07:01:53.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the Inception V4 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
#import inception_utils
slim = tf.contrib.slim
def block_inception_a(inputs, scope=None, reuse=None):
    """Inception-A block for the Inception v4 network.

    Runs four parallel towers over *inputs* and concatenates their outputs
    along the channel axis. Spatial size is preserved (stride 1, SAME).
    """
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockInceptionA', [inputs], reuse=reuse):
            # Tower 0: plain 1x1 projection.
            with tf.variable_scope('Branch_0'):
                tower_a = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1')
            # Tower 1: 1x1 bottleneck then a 3x3 conv.
            with tf.variable_scope('Branch_1'):
                tower_b = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
                tower_b = slim.conv2d(tower_b, 96, [3, 3], scope='Conv2d_0b_3x3')
            # Tower 2: 1x1 bottleneck then two stacked 3x3 convs.
            with tf.variable_scope('Branch_2'):
                tower_c = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
                tower_c = slim.conv2d(tower_c, 96, [3, 3], scope='Conv2d_0b_3x3')
                tower_c = slim.conv2d(tower_c, 96, [3, 3], scope='Conv2d_0c_3x3')
            # Tower 3: 3x3 average pool then a 1x1 projection.
            with tf.variable_scope('Branch_3'):
                tower_d = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
                tower_d = slim.conv2d(tower_d, 96, [1, 1], scope='Conv2d_0b_1x1')
            return tf.concat(axis=3, values=[tower_a, tower_b, tower_c, tower_d])
def block_reduction_a(inputs, scope=None, reuse=None):
    """Reduction-A block for the Inception v4 network.

    Downsamples spatially by concatenating a strided 3x3 conv tower, a
    three-conv tower ending in a strided conv, and a strided max pool.
    """
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):
            # Tower 0: single strided 3x3 convolution.
            with tf.variable_scope('Branch_0'):
                tower_a = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID',
                                      scope='Conv2d_1a_3x3')
            # Tower 1: 1x1 -> 3x3 -> strided 3x3.
            with tf.variable_scope('Branch_1'):
                tower_b = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
                tower_b = slim.conv2d(tower_b, 224, [3, 3], scope='Conv2d_0b_3x3')
                tower_b = slim.conv2d(tower_b, 256, [3, 3], stride=2,
                                      padding='VALID', scope='Conv2d_1a_3x3')
            # Tower 2: strided max pool (keeps the input channel count).
            with tf.variable_scope('Branch_2'):
                tower_c = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
                                          scope='MaxPool_1a_3x3')
            return tf.concat(axis=3, values=[tower_a, tower_b, tower_c])
def block_inception_b(inputs, scope=None, reuse=None):
    """Inception-B block for the Inception v4 network.

    Uses factorized 7x7 convolutions (1x7 / 7x1 pairs) in four parallel
    towers whose outputs are concatenated along the channel axis.
    """
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockInceptionB', [inputs], reuse=reuse):
            # Tower 0: plain 1x1 projection.
            with tf.variable_scope('Branch_0'):
                tower_a = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
            # Tower 1: 1x1 then one factorized 7x7 (1x7 followed by 7x1).
            with tf.variable_scope('Branch_1'):
                tower_b = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
                tower_b = slim.conv2d(tower_b, 224, [1, 7], scope='Conv2d_0b_1x7')
                tower_b = slim.conv2d(tower_b, 256, [7, 1], scope='Conv2d_0c_7x1')
            # Tower 2: 1x1 then two factorized 7x7 convolutions.
            with tf.variable_scope('Branch_2'):
                tower_c = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
                tower_c = slim.conv2d(tower_c, 192, [7, 1], scope='Conv2d_0b_7x1')
                tower_c = slim.conv2d(tower_c, 224, [1, 7], scope='Conv2d_0c_1x7')
                tower_c = slim.conv2d(tower_c, 224, [7, 1], scope='Conv2d_0d_7x1')
                tower_c = slim.conv2d(tower_c, 256, [1, 7], scope='Conv2d_0e_1x7')
            # Tower 3: 3x3 average pool then a 1x1 projection.
            with tf.variable_scope('Branch_3'):
                tower_d = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
                tower_d = slim.conv2d(tower_d, 128, [1, 1], scope='Conv2d_0b_1x1')
            return tf.concat(axis=3, values=[tower_a, tower_b, tower_c, tower_d])
def block_reduction_b(inputs, scope=None, reuse=None):
    """Reduction-B block for the Inception v4 network.

    Downsamples spatially via two strided convolution towers plus a strided
    max pool, concatenated along the channel axis.
    """
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse):
            # Tower 0: 1x1 bottleneck then a strided 3x3 conv.
            with tf.variable_scope('Branch_0'):
                tower_a = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
                tower_a = slim.conv2d(tower_a, 192, [3, 3], stride=2,
                                      padding='VALID', scope='Conv2d_1a_3x3')
            # Tower 1: 1x1 then a factorized 7x7, ending in a strided 3x3.
            with tf.variable_scope('Branch_1'):
                tower_b = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
                tower_b = slim.conv2d(tower_b, 256, [1, 7], scope='Conv2d_0b_1x7')
                tower_b = slim.conv2d(tower_b, 320, [7, 1], scope='Conv2d_0c_7x1')
                tower_b = slim.conv2d(tower_b, 320, [3, 3], stride=2,
                                      padding='VALID', scope='Conv2d_1a_3x3')
            # Tower 2: strided max pool (keeps the input channel count).
            with tf.variable_scope('Branch_2'):
                tower_c = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
                                          scope='MaxPool_1a_3x3')
            return tf.concat(axis=3, values=[tower_a, tower_b, tower_c])
def block_inception_c(inputs, scope=None, reuse=None):
    """Inception-C block for the Inception v4 network.

    Two of the four towers split into parallel 1x3 and 3x1 convolutions
    whose results are merged on the channel axis before the final concat.
    """
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
            # Tower 0: plain 1x1 projection.
            with tf.variable_scope('Branch_0'):
                tower_a = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
            # Tower 1: 1x1, then a parallel 1x3 / 3x1 split merged on channels.
            with tf.variable_scope('Branch_1'):
                tower_b = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
                tower_b = tf.concat(axis=3, values=[
                    slim.conv2d(tower_b, 256, [1, 3], scope='Conv2d_0b_1x3'),
                    slim.conv2d(tower_b, 256, [3, 1], scope='Conv2d_0c_3x1')])
            # Tower 2: 1x1 -> 3x1 -> 1x3, then the same 1x3 / 3x1 split.
            with tf.variable_scope('Branch_2'):
                tower_c = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
                tower_c = slim.conv2d(tower_c, 448, [3, 1], scope='Conv2d_0b_3x1')
                tower_c = slim.conv2d(tower_c, 512, [1, 3], scope='Conv2d_0c_1x3')
                tower_c = tf.concat(axis=3, values=[
                    slim.conv2d(tower_c, 256, [1, 3], scope='Conv2d_0d_1x3'),
                    slim.conv2d(tower_c, 256, [3, 1], scope='Conv2d_0e_3x1')])
            # Tower 3: 3x3 average pool then a 1x1 projection.
            with tf.variable_scope('Branch_3'):
                tower_d = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
                tower_d = slim.conv2d(tower_d, 256, [1, 1], scope='Conv2d_0b_1x1')
            return tf.concat(axis=3, values=[tower_a, tower_b, tower_c, tower_d])
def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
    """Creates the Inception V4 network up to the given final endpoint.

    Args:
      inputs: a 4-D tensor of size [batch_size, height, width, 3].
      final_endpoint: specifies the endpoint to construct the network up to.
        It can be one of [ 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
        'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
        'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',
        'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c',
        'Mixed_7d']
      scope: Optional variable_scope.

    Returns:
      logits: the logits outputs of the model.
      end_points: the set of end_points from the inception model.

    Raises:
      ValueError: if final_endpoint is not set to one of the predefined values.
    """
    end_points = {}

    def add_and_check_final(name, net):
        # Record every endpoint; return True (triggering an early return in
        # the caller) once the requested final endpoint has been built.
        end_points[name] = net
        return name == final_endpoint

    with tf.variable_scope(scope, 'InceptionV4', [inputs]):
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1, padding='SAME'):
            # Shape comments below track the feature map for a 299x299x3 input.
            # 299 x 299 x 3
            net = slim.conv2d(inputs, 32, [3, 3], stride=2,
                              padding='VALID', scope='Conv2d_1a_3x3')
            if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points
            # 149 x 149 x 32
            net = slim.conv2d(net, 32, [3, 3], padding='VALID',
                              scope='Conv2d_2a_3x3')
            if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points
            # 147 x 147 x 32
            net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3')
            if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
            # 147 x 147 x 64
            # Stem mixed block: parallel strided pool and conv.
            with tf.variable_scope('Mixed_3a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                               scope='MaxPool_0a_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID',
                                           scope='Conv2d_0a_3x3')
                net = tf.concat(axis=3, values=[branch_0, branch_1])
                if add_and_check_final('Mixed_3a', net): return net, end_points
            # 73 x 73 x 160
            # Stem mixed block: plain vs factorized-7x7 conv towers.
            with tf.variable_scope('Mixed_4a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID',
                                           scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1')
                    branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID',
                                           scope='Conv2d_1a_3x3')
                net = tf.concat(axis=3, values=[branch_0, branch_1])
                if add_and_check_final('Mixed_4a', net): return net, end_points
            # 71 x 71 x 192
            # Stem mixed block: strided conv vs strided pool.
            with tf.variable_scope('Mixed_5a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [3, 3], stride=2, padding='VALID',
                                           scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                               scope='MaxPool_1a_3x3')
                net = tf.concat(axis=3, values=[branch_0, branch_1])
                if add_and_check_final('Mixed_5a', net): return net, end_points
            # 35 x 35 x 384
            # 4 x Inception-A blocks (scopes Mixed_5b .. Mixed_5e)
            for idx in range(4):
                block_scope = 'Mixed_5' + chr(ord('b') + idx)
                net = block_inception_a(net, block_scope)
                if add_and_check_final(block_scope, net): return net, end_points
            # 35 x 35 x 384
            # Reduction-A block
            net = block_reduction_a(net, 'Mixed_6a')
            if add_and_check_final('Mixed_6a', net): return net, end_points
            # 17 x 17 x 1024
            # 7 x Inception-B blocks (scopes Mixed_6b .. Mixed_6h)
            for idx in range(7):
                block_scope = 'Mixed_6' + chr(ord('b') + idx)
                net = block_inception_b(net, block_scope)
                if add_and_check_final(block_scope, net): return net, end_points
            # 17 x 17 x 1024
            # Reduction-B block
            net = block_reduction_b(net, 'Mixed_7a')
            if add_and_check_final('Mixed_7a', net): return net, end_points
            # 8 x 8 x 1536
            # 3 x Inception-C blocks (scopes Mixed_7b .. Mixed_7d)
            for idx in range(3):
                block_scope = 'Mixed_7' + chr(ord('b') + idx)
                net = block_inception_c(net, block_scope)
                if add_and_check_final(block_scope, net): return net, end_points
    # Only reached when final_endpoint matched none of the names above.
    raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v4(inputs, num_classes=1001,
                 final_endpoint='Mixed_7d',
                 is_training=True,
                 dropout_keep_prob=0.8,
                 reuse=None,
                 scope='InceptionV4',
                 create_aux_logits=True):
    """Creates the Inception V4 model.

    NOTE(review): in this variant the classification head has been removed —
    the body only builds the base network up to ``final_endpoint`` — so
    ``num_classes``, ``dropout_keep_prob`` and ``create_aux_logits`` are
    accepted for signature compatibility but never used.

    Args:
      inputs: a 4-D tensor of size [batch_size, height, width, 3].
      num_classes: number of predicted classes. If 0 or None, the logits layer
        is omitted and the input features to the logits layer (before dropout)
        are returned instead. (Unused here; see NOTE above.)
      final_endpoint: name of the last endpoint to build; forwarded to
        inception_v4_base.
      is_training: whether is training or not (controls batch norm / dropout
        via the arg_scope).
      dropout_keep_prob: float, the fraction to keep before final layer.
        (Unused here; see NOTE above.)
      reuse: whether or not the network and its variables should be reused. To be
        able to reuse 'scope' must be given.
      scope: Optional variable_scope.
      create_aux_logits: Whether to include the auxiliary logits.
        (Unused here; see NOTE above.)

    Returns:
      net: the feature tensor produced at ``final_endpoint``.
      end_points: the set of end_points from the inception model.
    """
    end_points = {}
    with tf.variable_scope(scope, 'InceptionV4', [inputs], reuse=reuse) as scope:
        # is_training toggles the training behaviour of every batch_norm and
        # dropout layer created inside the base network.
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=is_training):
            net, end_points = inception_v4_base(inputs, final_endpoint=final_endpoint, scope=scope)
    return net, end_points
| 49.664311 | 93 | 0.639701 |
74cfd14e406d0823d3c2432cad41a4a8e497ad7b | 415 | py | Python | pymf6/tests/functional/scenarios/data/base_data_b.py | hydrocomputing/pymf6 | 0f9e2716c7881b17e1030bca895fb0410d4586bb | [
"MIT"
] | 4 | 2019-07-04T12:20:25.000Z | 2020-06-30T02:56:07.000Z | pymf6/tests/functional/scenarios/data/base_data_b.py | hydrocomputing/pymf6 | 0f9e2716c7881b17e1030bca895fb0410d4586bb | [
"MIT"
] | 2 | 2020-02-13T18:34:03.000Z | 2020-06-02T15:27:53.000Z | pymf6/tests/functional/scenarios/data/base_data_b.py | hydrocomputing/pymf6 | 0f9e2716c7881b17e1030bca895fb0410d4586bb | [
"MIT"
] | null | null | null | """Data for the base model
"""
from copy import deepcopy
from .base_data_a import data
# Start from an independent copy of scenario A's data so mutations made in
# this module do not leak back into base_data_a.
data = deepcopy(data)
# Add two wells. 'rates' is one rate per entry, all zero here (scenarios
# override them); 'coords' presumably are (layer, row, column) grid indices
# — TODO confirm ordering against the model code that consumes them.
data['wel'] = {
    'abs': {
        'name': 'Abstraction Well',
        'coords': (0, 49, 100),
        'rates': [0, 0, 0]
    },
    'inj': {
        'name': 'Injection Well',
        'coords': (1, 49, 200),
        'rates': [0, 0, 0]
    },
}
| 18.863636 | 39 | 0.412048 |
c9b5b3fcc4437519d42407e733e0fb8c8b3bb825 | 159 | py | Python | nostradamus/apps/extractor/routing.py | exactpro/nostradamus | 80df847a012374ad2b702cc9f9c9cb46c1153ee7 | [
"Apache-2.0"
] | 25 | 2019-12-18T05:32:41.000Z | 2022-03-23T12:16:49.000Z | nostradamus/apps/extractor/routing.py | Exactpro/nostradamus | 80df847a012374ad2b702cc9f9c9cb46c1153ee7 | [
"Apache-2.0"
] | 12 | 2018-12-24T14:56:50.000Z | 2019-11-29T16:53:49.000Z | nostradamus/apps/extractor/routing.py | exactpro/nostradamus | 80df847a012374ad2b702cc9f9c9cb46c1153ee7 | [
"Apache-2.0"
] | 7 | 2019-12-18T05:32:43.000Z | 2021-08-18T05:27:04.000Z | from django.urls import path
from apps.extractor.consumers import ExtractorConsumer
# WebSocket routing table: connections to "extractor_listener/" are handled
# by ExtractorConsumer.
websocket_urlpatterns = [path("extractor_listener/", ExtractorConsumer)]
| 26.5 | 72 | 0.836478 |
fac12e03d1c78113dfed84fc2a68d3ee4da489ed | 190 | py | Python | chargebee/models/download.py | Uszczi/chargebee-python | f98a749961631aefd88e7d530f35cabd422d924b | [
"MIT"
] | null | null | null | chargebee/models/download.py | Uszczi/chargebee-python | f98a749961631aefd88e7d530f35cabd422d924b | [
"MIT"
] | null | null | null | chargebee/models/download.py | Uszczi/chargebee-python | f98a749961631aefd88e7d530f35cabd422d924b | [
"MIT"
] | null | null | null | import json
from chargebee.model import Model
from chargebee import request
from chargebee import APIError
class Download(Model):
    """Chargebee ``download`` resource model.

    A plain data container; all behaviour comes from the Model base class.
    """

    # Attribute names exposed from the API payload for this resource.
    fields = ["download_url", "valid_till", "mime_type"]
| 19 | 56 | 0.773684 |
19acf13e33249e19de8dcd066c690bfab2c215e2 | 3,177 | py | Python | collectors/StatsCollector.py | sapcc/vrops-exporter | c342f319e1f69590fa514ba07c783e586aeb223a | [
"Apache-2.0"
] | 18 | 2019-10-24T03:36:50.000Z | 2022-01-22T20:39:42.000Z | collectors/StatsCollector.py | richardtief/vrops-exporter | 7e7543f5561f85aab1828e4c9d6fada4b687639f | [
"Apache-2.0"
] | 55 | 2019-10-16T09:51:36.000Z | 2022-03-28T11:46:08.000Z | collectors/StatsCollector.py | richardtief/vrops-exporter | 7e7543f5561f85aab1828e4c9d6fada4b687639f | [
"Apache-2.0"
] | 19 | 2019-10-15T14:07:27.000Z | 2022-02-17T21:41:14.000Z | from BaseCollector import BaseCollector
import logging
import re
logger = logging.getLogger('vrops-exporter')
class StatsCollector(BaseCollector):
    """Base class for collectors that fetch the latest vROps stat values for
    a set of resources and expose them as gauge metrics.

    Subclasses must supply the resource UUIDs to query and the label values
    for each resource via the two abstract methods below.
    """

    def get_resource_uuids(self):
        """Return the vROps resource UUIDs to query. Abstract."""
        raise NotImplementedError("Please Implement this method")

    def get_labels(self, resource_id: str, project_ids: list):
        """Return the metric label values for one resource. Abstract."""
        raise NotImplementedError("Please Implement this method")

    def collect(self):
        """Fetch latest stats for all resources and yield metrics.

        Yields, in order: an API response-code metric, an API response-time
        metric, one gauge per configured statkey, and finally gauges created
        on the fly for statkeys the API returned but the config did not list.
        """
        logger.info(f'{self.name} starts with collecting the metrics')
        # Without a token for this target we cannot query the API at all.
        token = self.get_target_tokens()
        token = token.setdefault(self.target, '')
        if not token:
            logger.warning(f'skipping {self.target} in {self.name}, no token')
            return
        uuids = self.get_resource_uuids()
        if not uuids:
            logger.warning(f'skipping {self.target} in {self.name}, no resources')
            return
        metrics = self.generate_metrics(label_names=self.label_names)
        project_ids = self.get_project_ids_by_target() if self.project_ids else []
        values, api_responding, response_time = self.vrops.get_latest_stats_multiple(self.target,
                                                                                     token,
                                                                                     uuids,
                                                                                     [m for m in metrics],
                                                                                     self.name)
        # Always expose API health, even when no stat values came back.
        yield self.create_api_response_code_metric(self.name, api_responding)
        yield self.create_api_response_time_metric(self.name, response_time)
        if not values:
            logger.warning(f'No values in the response for {self.name}. API code: {api_responding}')
            return
        values_received = set()
        no_match_in_config = list()
        for resource in values:
            resource_id = resource.get('resourceId')
            labels = self.get_labels(resource_id, project_ids)
            if not labels:
                # Resource could not be labelled; skip its values entirely.
                continue
            for value_entry in resource.get('stat-list', {}).get('stat', []):
                statkey = value_entry.get('statKey', {}).get('key')
                # Normalisation of keys retrieved from API (e.g. cpu:102|usage_average -> cpu|usage_average)
                norm_statkey = re.sub("[^a-zA-Z|_ -]+", "", statkey)
                values_received.add(norm_statkey)
                metric_data = value_entry.get('data', [0])[0]
                if norm_statkey in metrics:
                    metrics[norm_statkey]['gauge'].add_metric(labels=labels, value=metric_data)
                else:
                    # Statkey not declared in the config; keep the raw key so
                    # a metric can be generated from the API data below.
                    no_match_in_config.append([statkey, metric_data, labels])
        # no match in config, bring into the right format
        created_metrics = self.generate_metrics_enriched_by_api(no_match_in_config, label_names=self.label_names)
        for metric in metrics:
            yield metrics[metric]['gauge']
        for metric in created_metrics:
            logger.info(f'Created metrics enriched by API in {self.name}: {created_metrics[metric].name}')
            yield created_metrics[metric]
3b107bbf0fe262ee4a51c1c8c213f87eafceecfe | 2,475 | py | Python | tests/hikari/test_presences.py | IkBenOlie5/hikari | 09502f05427ad92b05103bd1a56533296a593755 | [
"MIT"
] | 1 | 2021-09-05T18:15:24.000Z | 2021-09-05T18:15:24.000Z | tests/hikari/test_presences.py | IkBenOlie5/hikari | 09502f05427ad92b05103bd1a56533296a593755 | [
"MIT"
] | 34 | 2021-10-01T17:08:11.000Z | 2022-03-29T02:21:07.000Z | tests/hikari/test_presences.py | IkBenOlie5/hikari | 09502f05427ad92b05103bd1a56533296a593755 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021 davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import mock
import pytest
from hikari import presences
from hikari import snowflakes
from hikari.impl import bot
@pytest.fixture()
def mock_app():
    # Strictly-specced stand-in for the bot application: attribute access
    # outside BotApp's interface raises instead of silently succeeding.
    return mock.Mock(spec_set=bot.BotApp)
def test_Activity_str_operator():
    # str() of an Activity should surface the activity's name.
    activity = presences.Activity(name="something", type=presences.ActivityType(1))
    assert str(activity) == "something"
class TestMemberPresence:
    """Tests for the MemberPresence model's REST-delegating helpers."""

    @pytest.fixture()
    def model(self, mock_app):
        # Presence fixture with fixed user (432) and guild (234) snowflakes;
        # the asserts below rely on these exact ids.
        return presences.MemberPresence(
            app=mock_app,
            user_id=snowflakes.Snowflake(432),
            guild_id=snowflakes.Snowflake(234),
            visible_status=presences.Status.ONLINE,
            activities=mock.Mock(presences.RichActivity),
            client_status=mock.Mock(presences.ClientStatus),
        )

    @pytest.mark.asyncio()
    async def test_fetch_user(self, model):
        # fetch_user should delegate to the REST client with the user id.
        model.app.rest.fetch_user = mock.AsyncMock()
        assert await model.fetch_user() is model.app.rest.fetch_user.return_value
        model.app.rest.fetch_user.assert_awaited_once_with(432)

    @pytest.mark.asyncio()
    async def test_fetch_member(self, model):
        # fetch_member should delegate with (guild_id, user_id).
        model.app.rest.fetch_member = mock.AsyncMock()
        assert await model.fetch_member() is model.app.rest.fetch_member.return_value
        model.app.rest.fetch_member.assert_awaited_once_with(234, 432)
| 37.5 | 85 | 0.736162 |
d837e51010feb074f7e1ce6cde0fd2fde654364b | 16,916 | py | Python | var/spack/repos/builtin/packages/ascent/package.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-06-27T13:26:50.000Z | 2019-07-01T16:24:54.000Z | var/spack/repos/builtin/packages/ascent/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 17 | 2018-09-20T18:32:50.000Z | 2019-12-04T16:58:12.000Z | var/spack/repos/builtin/packages/ascent/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import sys
import os
import socket
import glob
import shutil
import llnl.util.tty as tty
from os import environ as env
def cmake_cache_entry(name, value, vtype=None):
    """Return a CMake ``set(... CACHE ...)`` line for a 'host-config' file.

    When *vtype* is not given it is inferred from the value: the literal
    strings "ON"/"OFF" produce a BOOL entry, anything else a PATH entry.
    """
    if vtype is None:
        vtype = "BOOL" if value in ("ON", "OFF") else "PATH"
    return 'set({0} "{1}" CACHE {2} "")\n\n'.format(name, value, vtype)
class Ascent(Package):
    """Ascent is an open source many-core capable lightweight in situ
    visualization and analysis infrastructure for multi-physics HPC
    simulations."""

    homepage = "https://github.com/Alpine-DAV/ascent"
    git = "https://github.com/Alpine-DAV/ascent.git"

    maintainers = ['cyrush']

    # Only a development version is available; it tracks the upstream
    # 'develop' branch and pulls in git submodules.
    version('develop',
            branch='develop',
            submodules=True)

    ###########################################################################
    # package variants
    ###########################################################################
    variant("shared", default=True, description="Build Ascent as shared libs")
    variant('test', default=True, description='Enable Ascent unit tests')
    variant("mpi", default=True, description="Build Ascent MPI Support")

    # variants for language support
    variant("python", default=True, description="Build Ascent Python support")
    variant("fortran", default=True, description="Build Ascent Fortran support")

    # variants for runtime features
    variant("vtkh", default=True,
            description="Build VTK-h filter and rendering support")
    # OpenMP defaults to off on macOS, on everywhere else.
    variant("openmp", default=(sys.platform != 'darwin'),
            description="build openmp support")
    variant("cuda", default=False, description="Build cuda support")
    variant("mfem", default=False, description="Build MFEM filter support")
    variant("adios", default=False, description="Build Adios filter support")

    # variants for dev-tools (docs, etc)
    variant("doc", default=False, description="Build Conduit's documentation")

    ###########################################################################
    # package dependencies
    ###########################################################################
    depends_on("cmake@3.9.2:3.9.999", type='build')
    # Conduit flavor mirrors the python/shared variants.
    depends_on("conduit~python", when="~python")
    depends_on("conduit+python", when="+python+shared")
    depends_on("conduit~shared~python", when="~shared")

    #######################
    # Python
    #######################
    # we need a shared version of python b/c linking with static python lib
    # causes duplicate state issues when running compiled python modules.
    depends_on("python+shared", when="+python+shared")
    extends("python", when="+python+shared")
    depends_on("py-numpy", when="+python+shared", type=('build', 'run'))

    #######################
    # MPI
    #######################
    depends_on("mpi", when="+mpi")
    depends_on("py-mpi4py", when="+mpi+python+shared")

    #############################
    # TPLs for Runtime Features
    #############################
    # VTK-h combinations mirror the shared/openmp/cuda variants above.
    depends_on("vtkh@develop", when="+vtkh")
    depends_on("vtkh@develop~openmp", when="+vtkh~openmp")
    depends_on("vtkh@develop+cuda+openmp", when="+vtkh+cuda+openmp")
    depends_on("vtkh@develop+cuda~openmp", when="+vtkh+cuda~openmp")
    depends_on("vtkh@develop~shared", when="~shared+vtkh")
    depends_on("vtkh@develop~shared~openmp", when="~shared+vtkh~openmp")
    depends_on("vtkh@develop~shared+cuda", when="~shared+vtkh+cuda")
    depends_on("vtkh@develop~shared+cuda~openmp", when="~shared+vtkh+cuda~openmp")

    # mfem
    depends_on("mfem+shared+mpi+conduit", when="+shared+mfem+mpi")
    depends_on("mfem~shared+mpi+conduit", when="~shared+mfem+mpi")
    depends_on("mfem+shared~mpi+conduit", when="+shared+mfem~mpi")
    depends_on("mfem~shared~mpi+conduit", when="~shared+mfem~mpi")

    depends_on("adios", when="+adios")

    #######################
    # Documentation related
    #######################
    depends_on("py-sphinx", when="+python+doc", type='build')
    def setup_environment(self, spack_env, run_env):
        # Make CTest print full output for failing tests when the build
        # runs `make test` during install.
        spack_env.set('CTEST_OUTPUT_ON_FAILURE', '1')
    def install(self, spec, prefix):
        """
        Build and install Ascent.

        Generates a CMake 'host-config' cache file from the spec, configures
        an out-of-source build in ``spack-build``, builds, optionally runs
        unit tests, installs, and keeps a copy of the host-config file in the
        prefix for provenance.
        """
        with working_dir('spack-build', create=True):
            py_site_pkgs_dir = None
            if "+python" in spec:
                py_site_pkgs_dir = site_packages_dir
            host_cfg_fname = self.create_host_config(spec,
                                                     prefix,
                                                     py_site_pkgs_dir)
            cmake_args = []
            # if we have a static build, we need to avoid any of
            # spack's default cmake settings related to rpaths
            # (see: https://github.com/LLNL/spack/issues/2658)
            if "+shared" in spec:
                cmake_args.extend(std_cmake_args)
            else:
                for arg in std_cmake_args:
                    if arg.count("RPATH") == 0:
                        cmake_args.append(arg)
            # Seed CMake's cache from the generated host-config file.
            cmake_args.extend(["-C", host_cfg_fname, "../src"])
            print("Configuring Ascent...")
            cmake(*cmake_args)
            print("Building Ascent...")
            make()
            # run unit tests if requested
            if "+test" in spec and self.run_tests:
                print("Running Ascent Unit Tests...")
                make("test")
            print("Installing Ascent...")
            make("install")
            # install copy of host config for provenance
            install(host_cfg_fname, prefix)
    @run_after('install')
    @on_package_attributes(run_tests=True)
    def check_install(self):
        """
        Check the spack install of Ascent by building and running the
        installed 'using-with-cmake' and 'using-with-make' examples.
        """
        print("Checking Ascent installation...")
        spec = self.spec
        install_prefix = spec.prefix
        example_src_dir = join_path(install_prefix,
                                    "examples",
                                    "ascent",
                                    "using-with-cmake")
        print("Checking using-with-cmake example...")
        # Build the CMake example in a scratch directory, pointing it at the
        # installed Ascent and its dependency prefixes, then run it.
        with working_dir("check-ascent-using-with-cmake-example",
                         create=True):
            cmake_args = ["-DASCENT_DIR={0}".format(install_prefix),
                          "-DCONDUIT_DIR={0}".format(spec['conduit'].prefix),
                          "-DVTKM_DIR={0}".format(spec['vtkm'].prefix),
                          "-DVTKH_DIR={0}".format(spec['vtkh'].prefix),
                          example_src_dir]
            cmake(*cmake_args)
            make()
            example = Executable('./ascent_render_example')
            example()
        print("Checking using-with-make example...")
        example_src_dir = join_path(install_prefix,
                                    "examples",
                                    "ascent",
                                    "using-with-make")
        example_files = glob.glob(join_path(example_src_dir, "*"))
        # The Makefile example builds in place, so copy its sources into a
        # scratch directory first.
        with working_dir("check-ascent-using-with-make-example",
                         create=True):
            for example_file in example_files:
                shutil.copy(example_file, ".")
            make("ASCENT_DIR={0}".format(install_prefix))
            example = Executable('./ascent_render_example')
            example()
def create_host_config(self, spec, prefix, py_site_pkgs_dir=None):
"""
This method creates a 'host-config' file that specifies
all of the options used to configure and build ascent.
For more details about 'host-config' files see:
http://ascent.readthedocs.io/en/latest/BuildingAscent.html
Note:
The `py_site_pkgs_dir` arg exists to allow a package that
subclasses this package provide a specific site packages
dir when calling this function. `py_site_pkgs_dir` should
be an absolute path or `None`.
This is necessary because the spack `site_packages_dir`
var will not exist in the base class. For more details
on this issue see: https://github.com/spack/spack/issues/6261
"""
#######################
# Compiler Info
#######################
c_compiler = env["SPACK_CC"]
cpp_compiler = env["SPACK_CXX"]
f_compiler = None
if self.compiler.fc:
# even if this is set, it may not exist so do one more sanity check
f_compiler = which(env["SPACK_FC"])
#######################################################################
# By directly fetching the names of the actual compilers we appear
# to doing something evil here, but this is necessary to create a
# 'host config' file that works outside of the spack install env.
#######################################################################
sys_type = spec.architecture
# if on llnl systems, we can use the SYS_TYPE
if "SYS_TYPE" in env:
sys_type = env["SYS_TYPE"]
##############################################
# Find and record what CMake is used
##############################################
if "+cmake" in spec:
cmake_exe = spec['cmake'].command.path
else:
cmake_exe = which("cmake")
if cmake_exe is None:
msg = 'failed to find CMake (and cmake variant is off)'
raise RuntimeError(msg)
cmake_exe = cmake_exe.path
host_cfg_fname = "%s-%s-%s-ascent.cmake" % (socket.gethostname(),
sys_type,
spec.compiler)
cfg = open(host_cfg_fname, "w")
cfg.write("##################################\n")
cfg.write("# spack generated host-config\n")
cfg.write("##################################\n")
cfg.write("# {0}-{1}\n".format(sys_type, spec.compiler))
cfg.write("##################################\n\n")
# Include path to cmake for reference
cfg.write("# cmake from spack \n")
cfg.write("# cmake executable path: %s\n\n" % cmake_exe)
#######################
# Compiler Settings
#######################
cfg.write("#######\n")
cfg.write("# using %s compiler spec\n" % spec.compiler)
cfg.write("#######\n\n")
cfg.write("# c compiler used by spack\n")
cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", c_compiler))
cfg.write("# cpp compiler used by spack\n")
cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", cpp_compiler))
cfg.write("# fortran compiler used by spack\n")
if "+fortran" in spec and f_compiler is not None:
cfg.write(cmake_cache_entry("ENABLE_FORTRAN", "ON"))
cfg.write(cmake_cache_entry("CMAKE_Fortran_COMPILER",
f_compiler.path))
else:
cfg.write("# no fortran compiler found\n\n")
cfg.write(cmake_cache_entry("ENABLE_FORTRAN", "OFF"))
# shared vs static libs
if "+shared" in spec:
cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "ON"))
else:
cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "OFF"))
#######################
# Unit Tests
#######################
if "+test" in spec:
cfg.write(cmake_cache_entry("ENABLE_TESTS", "ON"))
else:
cfg.write(cmake_cache_entry("ENABLE_TESTS", "OFF"))
#######################################################################
# Core Dependencies
#######################################################################
#######################
# Conduit
#######################
cfg.write("# conduit from spack \n")
cfg.write(cmake_cache_entry("CONDUIT_DIR", spec['conduit'].prefix))
#######################################################################
# Optional Dependencies
#######################################################################
#######################
# Python
#######################
cfg.write("# Python Support\n")
if "+python" in spec and "+shared" in spec:
cfg.write("# Enable python module builds\n")
cfg.write(cmake_cache_entry("ENABLE_PYTHON", "ON"))
cfg.write("# python from spack \n")
cfg.write(cmake_cache_entry("PYTHON_EXECUTABLE",
spec['python'].command.path))
# only set dest python site packages dir if passed
if py_site_pkgs_dir:
cfg.write(cmake_cache_entry("PYTHON_MODULE_INSTALL_PREFIX",
py_site_pkgs_dir))
else:
cfg.write(cmake_cache_entry("ENABLE_PYTHON", "OFF"))
if "+doc" in spec and "+python" in spec:
cfg.write(cmake_cache_entry("ENABLE_DOCS", "ON"))
cfg.write("# sphinx from spack \n")
sphinx_build_exe = join_path(spec['py-sphinx'].prefix.bin,
"sphinx-build")
cfg.write(cmake_cache_entry("SPHINX_EXECUTABLE", sphinx_build_exe))
else:
cfg.write(cmake_cache_entry("ENABLE_DOCS", "OFF"))
#######################
# MPI
#######################
cfg.write("# MPI Support\n")
if "+mpi" in spec:
cfg.write(cmake_cache_entry("ENABLE_MPI", "ON"))
cfg.write(cmake_cache_entry("MPI_C_COMPILER", spec['mpi'].mpicc))
cfg.write(cmake_cache_entry("MPI_CXX_COMPILER",
spec['mpi'].mpicxx))
cfg.write(cmake_cache_entry("MPI_Fortran_COMPILER",
spec['mpi'].mpifc))
mpiexe_bin = join_path(spec['mpi'].prefix.bin, 'mpiexec')
if os.path.isfile(mpiexe_bin):
# starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
# vs the older versions which expect MPIEXEC
if self.spec["cmake"].satisfies('@3.10:'):
cfg.write(cmake_cache_entry("MPIEXEC_EXECUTABLE",
mpiexe_bin))
else:
cfg.write(cmake_cache_entry("MPIEXEC",
mpiexe_bin))
else:
cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF"))
#######################
# CUDA
#######################
cfg.write("# CUDA Support\n")
if "+cuda" in spec:
cfg.write(cmake_cache_entry("ENABLE_CUDA", "ON"))
else:
cfg.write(cmake_cache_entry("ENABLE_CUDA", "OFF"))
if "+openmp" in spec:
cfg.write(cmake_cache_entry("ENABLE_OPENMP", "ON"))
else:
cfg.write(cmake_cache_entry("ENABLE_OPENMP", "OFF"))
#######################
# VTK-h (and deps)
#######################
cfg.write("# vtk-h support \n")
if "+vtkh" in spec:
cfg.write("# vtk-m from spack\n")
cfg.write(cmake_cache_entry("VTKM_DIR", spec['vtkm'].prefix))
cfg.write("# vtk-h from spack\n")
cfg.write(cmake_cache_entry("VTKH_DIR", spec['vtkh'].prefix))
else:
cfg.write("# vtk-h not built by spack \n")
#######################
# MFEM
#######################
if "+mfem" in spec:
cfg.write("# mfem from spack \n")
cfg.write(cmake_cache_entry("MFEM_DIR", spec['mfem'].prefix))
else:
cfg.write("# mfem not built by spack \n")
#######################
# Adios
#######################
cfg.write("# adios support\n")
if "+adios" in spec:
cfg.write(cmake_cache_entry("ADIOS_DIR", spec['adios'].prefix))
else:
cfg.write("# adios not built by spack \n")
cfg.write("##################################\n")
cfg.write("# end spack generated host-config\n")
cfg.write("##################################\n")
cfg.close()
host_cfg_fname = os.path.abspath(host_cfg_fname)
tty.info("spack generated conduit host-config file: " + host_cfg_fname)
return host_cfg_fname
| 38.887356 | 82 | 0.50668 |
8e6863c18bfba564bf496b9bfceb3203d34b7834 | 939 | py | Python | setup.py | rafaellott/gcp-commons-utils | 933341918ed42dfce8bff28596fda108e752e1d3 | [
"MIT"
] | null | null | null | setup.py | rafaellott/gcp-commons-utils | 933341918ed42dfce8bff28596fda108e752e1d3 | [
"MIT"
] | null | null | null | setup.py | rafaellott/gcp-commons-utils | 933341918ed42dfce8bff28596fda108e752e1d3 | [
"MIT"
] | null | null | null | import os.path
from setuptools import setup, find_packages
# The directory containing this file
HERE = os.path.abspath(os.path.dirname(__file__))
# The text of the README file; reused verbatim as the PyPI long description.
with open(os.path.join(HERE, "README.md")) as fid:
    README = fid.read()
# This call to setup() does all the work
setup(
    name="gcp-commons-utils",
    version="1.0.0",
    description="Utils to be used along Google Cloud Platform components",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/thiagowig/gcp-commons-utils",
    author="Thiago Fonseca",
    author_email="dev.thiago@gmail.com",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
    ],
    # Auto-discover all packages below this directory.
    packages=find_packages(),
    include_package_data=False,
    # Runtime dependency on the Firestore client library.
    install_requires=["google-cloud-firestore"],
)
| 30.290323 | 74 | 0.686901 |
7acd0b3c95e071da9c06de5a0c05be1698b44d3c | 9,272 | py | Python | scripts/setup_eclipse.py | amitpatra/nomulus | 48e5a4423c6843ee4c040c22f34cd62df5733413 | [
"Apache-2.0"
] | 1 | 2020-11-24T08:28:30.000Z | 2020-11-24T08:28:30.000Z | scripts/setup_eclipse.py | amitpatra/nomulus | 48e5a4423c6843ee4c040c22f34cd62df5733413 | [
"Apache-2.0"
] | null | null | null | scripts/setup_eclipse.py | amitpatra/nomulus | 48e5a4423c6843ee4c040c22f34cd62df5733413 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright 2016 The Nomulus Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for generating eclipse .project and .classpath files."""
import os
import subprocess
import sys
def bazel_info(key):
    """Invokes the bazel info subcommand.
    Invokes bazel info on the command line, parses the output and returns it
    Args:
      key: The argument that is passed to bazel info. See
        http://bazel.io/docs/bazel-user-manual.html#info for example values.
    Returns:
      The output of the bazel info invocation as a string. If multiple lines
      are returned by bazel info, only the first line is returned.
    Raises:
      subprocess.CalledProcessError: bazel exited with a non-zero status.
    """
    # check_output waits for the child process (the old Popen-based code
    # never called wait(), leaking a zombie) and raises on failure instead
    # of silently parsing garbage. universal_newlines makes the output a
    # str on Python 3; the old ``iter(stdout.readline, "")`` loop never
    # terminated there because readline yields bytes, not "".
    output = subprocess.check_output(["bazel", "info", key],
                                     universal_newlines=True)
    # bazel info prints one value per line; only the first is wanted.
    return output.splitlines()[0].strip()
def classpath_entry_xml(kind, path):
    """Render a single eclipse <classpathentry/> element.
    Args:
      kind: Kind of classpath entry.
        Example values are 'lib', 'src', and 'con'
      path: Absolute or relative path to the referenced resource.
        Paths that are not absolute are relative to the project root.
    Returns:
      xml classpath entry element with the specified kind and path.
    """
    return '<classpathentry kind="%s" path="%s"/>' % (kind, path)
def classpath_xml(entries):
    """Produces the xml for an eclipse classpath file.
    Args:
      entries: list of dictionaries in the form of:
        {
          "kind": (str),
          "path": (str)
        }
    Returns:
      Contents of the eclipse .classpath file.
    """
    rendered = []
    for entry in entries:
        rendered.append(" " + classpath_entry_xml(**entry))
    body = "\n".join(rendered)
    header = '<?xml version="1.0" encoding="UTF-8"?>\n'
    return header + "<classpath>\n" + body + "\n</classpath>"
def build_classpath():
    """Builds eclipse classpath file.
    Generates an eclipse .classpath file that references all of the
    project source folders, the autogenerated sources under
    bazel-genfiles, and the single jar of external binary dependencies.
    Returns:
      Contents of the eclipse .classpath file.
    """
    # source folder for autogenerated files must reference
    # symlinked bazel-genfiles folder inside of the project.
    genfiles_root = bazel_info("bazel-genfiles")
    deps_jar = ("%s/java/google/"
                "registry/eclipse/eclipse_deps.jar" % genfiles_root)
    entries = [
        {"kind": "con", "path": "org.eclipse.jdt.launching.JRE_CONTAINER"},
    ]
    for source_folder in ("java", "javatests", "bazel-genfiles/java"):
        entries.append({"kind": "src", "path": source_folder})
    entries.append({"kind": "lib", "path": deps_jar})
    entries.append({"kind": "output", "path": "bin"})
    return classpath_xml(entries)
def build_project(project_name):
    """Builds eclipse project file.
    Fills a fixed .project template with a configurable project name.
    Args:
      project_name: Name of the eclipse project. When importing the project
        into an eclipse workspace, this is the name that will be shown.
    Returns:
      Contents of the eclipse .project file.
    """
    project_xml = """<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>{project_name}</name>
<comment>
</comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.jdt.core.javanature</nature>
<nature>org.python.pydev.pythonNature</nature>
</natures>
</projectDescription>"""
    return project_xml.format(project_name=project_name)
def factorypath_entry_xml(kind, entry_id):
    """Render a single eclipse <factorypathentry/> element.
    Args:
      kind: Kind of factorypath entry.
        Example values are 'PLUGIN', 'WKSPJAR'
      entry_id: Unique identifier for the factorypath entry
    Returns:
      xml factorypath entry element with the specified kind and id.
    """
    return ('<factorypathentry kind="%s" id="%s" '
            'enabled="true" runInBatchMode="false"/>' % (kind, entry_id))
def factorypath_xml(entries):
    """Produces the xml for an eclipse factorypath file.
    Args:
      entries: list of dictionaries in the form of:
        {
          "kind": (str),
          "entry_id": (str)
        }
    Returns:
      Contents of the eclipse .factorypath file.
    """
    rendered = []
    for entry in entries:
        rendered.append(" " + factorypath_entry_xml(**entry))
    body = "\n".join(rendered)
    return "<factorypath>\n" + body + "\n</factorypath>"
def build_factorypath():
    """Builds eclipse factorypath file.
    Generates an eclipse .factorypath file that links to the jar containing
    all required annotation processors for the project.
    Returns:
      Contents of the eclipse .factorypath file.
    """
    jar_relpath = ("java/google/registry/eclipse"
                   "/annotation_processors_ide_deploy.jar")
    annotations_jar = os.path.join(bazel_info("bazel-bin"), jar_relpath)
    return factorypath_xml([
        {
            "kind": "PLUGIN",
            "entry_id": "org.eclipse.jst.ws.annotations.core",
        },
        {
            "kind": "EXTJAR",
            "entry_id": annotations_jar,
        },
    ])
def build_dependencies():
    """Builds dependencies for producing eclipse project files.
    Runs bazel build for the entire project, then builds a single jar with
    all binary compile-time dependencies and a jar with all annotation
    processors.
    Raises:
      subprocess.CalledProcessError: A bazel build failed
    """
    target_groups = (
        # Build entire project first.
        ["//java/google/registry/...",
         "//javatests/google/registry/..."],
        # A giant jar of all compile-time dependencies of the project.
        ["//java/google/registry/eclipse:eclipse_deps"],
        # A jar with all annotation processors.
        ["//java/google/registry/eclipse"
         ":annotation_processors_ide_deploy.jar"],
    )
    for targets in target_groups:
        subprocess.check_call(["bazel", "build"] + targets)
def main():
    """Builds eclipse project files.
    Before building the eclipse files, a working bazel build is required.
    After building the eclipse dependencies jar and the tests, eclipse
    project files (.classpath, .project, .factorypath and JDT/APT prefs)
    are written into the bazel workspace root.
    The optional first command-line argument overrides the eclipse
    project name (default: "domain-registry").
    """
    build_dependencies()
    workspace_directory = bazel_info("workspace")
    classpath = build_classpath()
    with open(os.path.join(workspace_directory, ".classpath"),
              "w") as classpath_file:
        classpath_file.write(classpath)
    if len(sys.argv) > 1:
        project_name = sys.argv[1]
    else:
        project_name = "domain-registry"
    project = build_project(project_name)
    with open(os.path.join(workspace_directory, ".project"),
              "w") as project_file:
        project_file.write(project)
    factorypath = build_factorypath()
    with open(os.path.join(workspace_directory, ".factorypath"),
              "w") as factorypath_file:
        factorypath_file.write(factorypath)
    # Bug fix: .settings must live in the workspace root. The old code
    # created ".settings" relative to the current working directory, so
    # the prefs writes below failed when the script was run from outside
    # the workspace.
    settings_directory = os.path.join(workspace_directory, ".settings")
    if not os.path.exists(settings_directory):
        os.makedirs(settings_directory)
    # XXX: Avoid wiping out existing settings from org.eclipse.jdt.core.prefs
    with open(os.path.join(workspace_directory,
                           ".settings",
                           "org.eclipse.jdt.core.prefs"), "w") as prefs_file:
        prefs_file.write("\n".join([
            "eclipse.preferences.version=1",
            "org.eclipse.jdt.core.compiler.processAnnotations=enabled",
        ]))
    with open(os.path.join(workspace_directory,
                           ".settings",
                           "org.eclipse.jdt.apt.core.prefs"),
              "w") as prefs_file:
        prefs_file.write("\n".join([
            "eclipse.preferences.version=1",
            "org.eclipse.jdt.apt.aptEnabled=true",
            "org.eclipse.jdt.apt.genSrcDir=autogenerated",
            "org.eclipse.jdt.apt.reconcileEnabled=true",
        ]))
# Script entry point: regenerate the eclipse project files.
if __name__ == "__main__":
    main()
| 31.753425 | 79 | 0.620255 |
3899decf8f0f3979efdbfde4bbc071fb5b2d1573 | 4,207 | py | Python | proxmox_hetzner_autoconfigure/util/util.py | johnknott/proxmox-hetzner-autoconfigure | 5b4d613abf3c8a7e07e2f51d687786d557d5602b | [
"MIT"
] | 12 | 2020-11-15T15:45:24.000Z | 2022-02-20T03:51:07.000Z | proxmox_hetzner_autoconfigure/util/util.py | johnknott/proxmox-hetzner-autoconfigure | 5b4d613abf3c8a7e07e2f51d687786d557d5602b | [
"MIT"
] | 1 | 2021-02-17T14:12:46.000Z | 2021-09-04T08:42:22.000Z | proxmox_hetzner_autoconfigure/util/util.py | johnknott/proxmox-hetzner-autoconfigure | 5b4d613abf3c8a7e07e2f51d687786d557d5602b | [
"MIT"
] | 9 | 2020-07-31T17:51:19.000Z | 2022-03-12T13:40:27.000Z | """Collection of utility methods"""
import os
import re
import platform
from ipaddress import IPv4Network, IPv4Address
from dialog import Dialog
from jinja2 import Environment, FileSystemLoader
# Loose validation patterns for user input collected via dialog prompts.
IP_REGEX = r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b"
CIDR_REGEX = r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\/\d{1,2}\b"
EMAIL_REGEX = r"[^@]+@[^@]+\.[^@]+"
DOMAIN_REGEX = r".*\.\w+"
NOT_EMPTY = r"^(?!\s*$).+"
# dialog(1) "\Z" inline color escapes (require colors=True on the widget).
# NOTE(review): in dialog(1), \Z1/\Z2/\Z3 are red/green/yellow — so
# ANSI_GREEN (\Z3) likely renders yellow and ANSI_WHITE (\Z2) green;
# confirm against the dialog man page before relying on the names.
ANSI_RED = r"\Zb\Z1"
ANSI_GREEN = r"\Zb\Z3"
ANSI_WHITE = r"\Zb\Z2"
ANSI_RESET = r"\Zn"
# Shared dialog instance used by all input helpers below.
dialog = Dialog(dialog="dialog", autowidgetsize=True)
# Mutable bag of values shared between configurators when building scripts.
shared_globals = {}
def is_proxmox_machine():
    """Is this a Linux machine with Proxmox installed?

    Returns a truthy value (the path of ``pveversion``) when Proxmox is
    present, otherwise a falsy value.
    """
    if platform.system() != "Linux":
        return False
    return os.popen("which pveversion").read().strip()
def main_ip():
    """Returns the detected main IP of this machine"""
    command = "hostname -i | awk '{print $1}'"
    return os.popen(command).read().strip()
def gateway_ip():
    """Returns the detected gateway IP of this machine"""
    command = "ip route | grep default | awk '{print $3}'"
    return os.popen(command).read().strip()
def render_template(file, template_name, binds):
    """Renders a jinja2 template (located next to *file*) to a string.

    ``binds`` may be a mapping or a namedtuple (anything exposing
    ``_asdict``); namedtuples are converted before rendering.
    """
    template_dir = os.path.dirname(os.path.realpath(file))
    loader = FileSystemLoader(template_dir)
    template = Environment(loader=loader).get_template(
        template_name + ".jinja2")
    mapping = binds._asdict() if hasattr(binds, "_asdict") else binds
    return template.render(mapping)
def wrap_as_heredoc(content, filename):
    """Wraps a string in a heredoc and adds code to write it to a file"""
    heredoc_binds = {"content": content, "filename": filename}
    return render_template(__file__, "heredoc", heredoc_binds)
def build_script(configurators):
    """
    Loops over configurators, calling gather_input on them and then renders
    the script sections in a template.
    """
    sections = []
    # For each configurator: prompt the user, then generate its script
    # section. Keeping both steps inside one loop preserves the original
    # per-configurator interleaving of dialog prompts and generation.
    for configurator in configurators:
        cfg = configurator.Config()
        dialog.set_background_title(cfg.description)
        answers = cfg.gather_input()
        sections.append({
            "name": cfg.description,
            "content": cfg.generate_script(answers),
        })
    return render_template(
        __file__, "install",
        {"sections": sections, "shared_globals": shared_globals})
def input_regex(message, regex, regex_fail, **kwargs):
    """Prompt (via dialog) until the input matches *regex*.

    On a failed match the prompt is repeated with *regex_fail* shown in
    red above *message*. Returns the accepted string, or None if the
    user cancels the dialog.
    """
    kwargs["colors"] = True
    answer = ""
    error = ""
    while not re.match(regex, answer):
        if error:
            prompt = f"{message}\n{ANSI_RED}{error}{ANSI_RESET}"
        else:
            prompt = message
        code, answer = dialog.inputbox(prompt, **kwargs)
        error = regex_fail
        if code != "ok":
            return None
    return answer
def input_network(message, **kwargs):
    """Prompt (via dialog) for an IPv4 network in CIDR notation.

    Re-prompts with the last validation error shown in red until the value
    parses; returns the normalized network string, or None on cancel.
    """
    net_addr = error = ""
    # NOTE(review): kwargs is always a dict here, so this line is a no-op.
    kwargs = kwargs if kwargs else {}
    kwargs["colors"] = True
    while True:
        try:
            # Show the previous validation error (in red) above the prompt.
            message_with_error = f"{message}\n{ANSI_RED}{error}{ANSI_RESET}" if error else message
            code, net_addr = dialog.inputbox(message_with_error, **kwargs)
            if code != "ok":
                # User cancelled or pressed escape.
                return None
            if not re.match(CIDR_REGEX, net_addr):
                raise Exception("Please enter in the format x.x.x.x/x")
            return str(IPv4Network(net_addr))
        except Exception as err:
            # Broad on purpose: catches the format Exception above as well
            # as ValueError from IPv4Network; the message becomes the next
            # prompt's error text.
            error = str(err)
def input_ip(message, **kwargs):
    """Prompt (via dialog) for a single IPv4 address.

    Re-prompts with the last validation error shown in red until the value
    parses; returns the normalized address string, or None on cancel.
    """
    ip_addr = error = ""
    # NOTE(review): kwargs is always a dict here, so this line is a no-op.
    kwargs = kwargs if kwargs else {}
    kwargs["colors"] = True
    while True:
        try:
            # Show the previous validation error (in red) above the prompt.
            message_with_error = f"{message}\n{ANSI_RED}{error}{ANSI_RESET}" if error else message
            code, ip_addr = dialog.inputbox(message_with_error, **kwargs)
            if code != "ok":
                # User cancelled or pressed escape.
                return None
            return str(IPv4Address(ip_addr))
        except Exception as err:
            # Broad on purpose: ValueError from IPv4Address is recycled as
            # the next prompt's error text.
            error = str(err)
| 31.871212 | 105 | 0.646304 |
9256873ececcd924252e4f328da56628572ea505 | 25,614 | py | Python | test/test_cli_options.py | Josef-Friedrich/audiorename.py | d95a02ac7176d8bc7010b74ce2d34fe6bedb91bd | [
"MIT"
] | null | null | null | test/test_cli_options.py | Josef-Friedrich/audiorename.py | d95a02ac7176d8bc7010b74ce2d34fe6bedb91bd | [
"MIT"
] | null | null | null | test/test_cli_options.py | Josef-Friedrich/audiorename.py | d95a02ac7176d8bc7010b74ce2d34fe6bedb91bd | [
"MIT"
] | null | null | null | """Test all command line options."""
import unittest
import audiorename
import os
import shutil
import tempfile
import helper
# --best-format
# --delete
# --backup
# --backup-folder
class TestBestFormat(unittest.TestCase):
    """Exercise --best-format together with --delete / --backup /
    --backup-folder: when source and target collide, the lower-quality
    file must be deleted or backed up, regardless of move order."""
    def setUp(self):
        # Fresh target dir plus one high- and one low-quality copy of the
        # same track so moves collide on the fixed 'test-file' name.
        self.target = tempfile.mkdtemp()
        self.high_quality = self.get_quality('flac.flac')
        self.low_quality = self.get_quality('mp3_320.mp3')
        self.backup_cwd = os.path.join(os.getcwd(), '_audiorename_backups')
        self.backup_folder = tempfile.mkdtemp()
        self.backup_args = ('--backup', '--backup-folder',
                            self.backup_folder)
    def tearDown(self):
        # Default backup dir may or may not have been created.
        try:
            shutil.rmtree(self.backup_cwd)
        except OSError:
            pass
    def move(self, source, *args):
        # All moves rename to the same 'test-file' basename on purpose.
        audiorename.execute('--best-format', '--one-line', '--target',
                            self.target, '--format', 'test-file',
                            source, *args)
    def backup_path(self, file_name):
        return os.path.join(self.backup_cwd, file_name)
    def get_quality(self, filename):
        return helper.copy_to_tmp('quality', filename)
    def test_delete_source(self):
        # High-quality file arrives first; the low-quality source is deleted.
        with helper.Capturing() as output:
            self.move(self.high_quality, '--delete')
            self.move(self.low_quality, '--delete')
        self.assertTrue('Delete […]' + self.low_quality in
                        helper.join(output))
        self.assertFalse(os.path.exists(self.high_quality))
        self.assertFalse(os.path.exists(self.low_quality))
    def test_delete_target(self):
        # Low-quality file arrives first; the already-moved target is deleted.
        with helper.Capturing() as output:
            self.move(self.low_quality, '--delete')
            self.move(self.high_quality, '--delete')
        self.assertTrue('Delete […]test-file.mp3' in
                        helper.join(output))
        self.assertFalse(os.path.exists(self.high_quality))
        self.assertFalse(os.path.exists(self.low_quality))
    def test_backup_source(self):
        # Low-quality source is backed up into the default backup dir.
        with helper.Capturing() as output:
            self.move(self.high_quality, '--backup')
            self.move(self.low_quality, '--backup')
        self.assertTrue('Backup […]' + self.low_quality in
                        helper.join(output))
        self.assertFalse(os.path.exists(self.high_quality))
        self.assertFalse(os.path.exists(self.low_quality))
        backup_path = self.backup_path(os.path.basename(self.low_quality))
        self.assertTrue(os.path.exists(backup_path))
        os.remove(backup_path)
    def test_backup_target(self):
        # Low-quality target is backed up when the better file arrives.
        with helper.Capturing() as output:
            self.move(self.low_quality, '--backup')
            self.move(self.high_quality, '--backup')
        self.assertTrue('Backup […]test-file.mp3' in
                        helper.join(output))
        self.assertFalse(os.path.exists(self.high_quality))
        self.assertFalse(os.path.exists(self.low_quality))
        backup_path = self.backup_path('test-file.mp3')
        self.assertTrue(os.path.exists(backup_path))
        os.remove(backup_path)
    def test_backup_folder_source(self):
        # Same as test_backup_source but with an explicit --backup-folder.
        with helper.Capturing() as output:
            self.move(self.high_quality, *self.backup_args)
            self.move(self.low_quality, *self.backup_args)
        self.assertTrue('Backup […]' + self.low_quality in
                        helper.join(output))
        self.assertFalse(os.path.exists(self.high_quality))
        self.assertFalse(os.path.exists(self.low_quality))
        backup_file = os.path.join(self.backup_folder,
                                   os.path.basename(self.low_quality))
        self.assertTrue(os.path.exists(backup_file))
        os.remove(backup_file)
    def test_backup_folder_target(self):
        # Same as test_backup_target but with an explicit --backup-folder.
        with helper.Capturing() as output:
            self.move(self.low_quality, *self.backup_args)
            self.move(self.high_quality, *self.backup_args)
        self.assertTrue('Backup […]test-file.mp3' in
                        helper.join(output))
        self.assertFalse(os.path.exists(self.high_quality))
        self.assertFalse(os.path.exists(self.low_quality))
        backup_file = os.path.join(self.backup_folder,
                                   os.path.basename('test-file.mp3'))
        self.assertTrue(os.path.exists(backup_file))
        os.remove(backup_file)
# --classical
class TestClassical(unittest.TestCase):
    """Dry-run renames with --classical and assert the resulting
    composer/work/performer path scheme for several classical albums.
    The short class attributes (d, e, p, ...) are reusable path pieces."""
    def assertDryRun(self, folder, track, test):
        # Compare the dry-run target path for one test track against `test`.
        self.assertEqual(helper.dry_run([
            '--classical',
            helper.get_testfile('classical', folder, track)
        ]), test)
    # Debussy: composer dir, work dir with performer suffix.
    d = '/d/Debussy_Claude/'
    e = 'Estampes-L-100_[Jean-Claude-Pennetier]'
    p = 'Pour-le-piano-L-95_[Jean-Claude-Pennetier]'
    def test_debussy_01(self):
        self.assertDryRun(
            'Debussy_Estampes-etc', '01.mp3',
            self.d + self.e + '/01_Pagodes.mp3'
        )
    def test_debussy_02(self):
        self.assertDryRun(
            'Debussy_Estampes-etc', '02.mp3',
            self.d + self.e + '/02_Soiree-dans-Grenade.mp3'
        )
    def test_debussy_03(self):
        self.assertDryRun(
            'Debussy_Estampes-etc', '03.mp3',
            self.d + self.e + '/03_Jardins-sous-la-pluie.mp3'
        )
    def test_debussy_04(self):
        self.assertDryRun(
            'Debussy_Estampes-etc', '04.mp3',
            self.d + self.p + '/04_Prelude.mp3'
        )
    # Mozart horn concertos: titles carry a short title hash suffix.
    m = '/m/Mozart_Wolfgang-Amadeus/'
    mp1 = '[OrpChaOrc]'
    mp2 = '[OrpChaOrc]'
    h1 = 'Concerto-for-French-Horn-no-1-in-D-major-K_' + mp1
    h2 = 'Concerto-for-Horn-no-2-in-E-flat-major-K-417_' + mp2
    def test_mozart_01(self):
        self.assertDryRun(
            'Mozart_Horn-concertos', '01.mp3',
            self.m + self.h1 + '/01_I-Allegro_fa140702.mp3'
        )
    def test_mozart_02(self):
        self.assertDryRun(
            'Mozart_Horn-concertos', '02.mp3',
            self.m + self.h1 + '/02_II-Rondo-Allegro_a897e98e.mp3'
        )
    def test_mozart_03(self):
        self.assertDryRun(
            'Mozart_Horn-concertos', '03.mp3',
            self.m + self.h2 + '/03_I-Allegro_d557146b.mp3'
        )
    def test_mozart_04(self):
        self.assertDryRun(
            'Mozart_Horn-concertos', '04.mp3',
            self.m + self.h2 + '/04_II-Andante_001c2df3.mp3'
        )
    # Schubert: song cycle with performer pair in the work dir.
    s = '/s/Schubert_Franz/'
    w = 'Die-Winterreise-op-89-D-911_[Fischer-Dieskau-Moore]/'
    def test_schubert_01(self):
        self.assertDryRun(
            'Schubert_Winterreise', '01.mp3',
            self.s + self.w + '01_Gute-Nacht_311cb6a3.mp3'
        )
    def test_schubert_02(self):
        self.assertDryRun(
            'Schubert_Winterreise', '02.mp3',
            self.s + self.w + '02_Die-Wetterfahne_5b9644f0.mp3'
        )
    def test_schubert_03(self):
        self.assertDryRun(
            'Schubert_Winterreise', '03.mp3',
            self.s + self.w + '03_Gefrorne-Traenen_4b78f893.mp3'
        )
    def test_schubert_04(self):
        self.assertDryRun(
            'Schubert_Winterreise', '04.mp3',
            self.s + self.w + '04_Erstarrung_63bc8e2a.mp3'
        )
    # Tchaikovsky: multi-disc album, tracks prefixed with disc number.
    t = '/t/Tchaikovsky_Pyotr-Ilyich/'
    lake = 'Swan-Lake-op-20_[Svetlanov-StaAcaSym]/'
    def test_tschaikowski_01(self):
        self.assertDryRun(
            'Tschaikowski_Swan-Lake', '1-01.mp3',
            self.t + self.lake + '1-01_Introduction-Moderato-assai-'
            'Allegro-ma-non-troppo-Tempo-I_3f6fc6b3.mp3'
        )
    def test_tschaikowski_02(self):
        self.assertDryRun(
            'Tschaikowski_Swan-Lake', '1-02.mp3',
            self.t + self.lake + '1-02_Act-I-no-1-Scene-Allegro-giusto_'
            '29413f6c.mp3'
        )
    def test_tschaikowski_03(self):
        self.assertDryRun(
            'Tschaikowski_Swan-Lake', '1-03.mp3',
            self.t + self.lake + '1-03_Act-I-no-2-Valse-Tempo-di-valse_'
            '5303b318.mp3'
        )
    def test_tschaikowski_04(self):
        self.assertDryRun(
            'Tschaikowski_Swan-Lake', '1-04.mp3',
            self.t + self.lake + '1-04_Act-I-no-3-Scene-Allegro-moderato_'
            '4d5781a4.mp3'
        )
    # Wagner: opera with act/scene titles in the track names.
    wr = '/w/Wagner_Richard/'
    mn = 'Die-Meistersinger-von-Nuernberg_[Karajan-StaDre]/'
    def test_wagner_01(self):
        self.assertDryRun(
            'Wagner_Meistersinger', '01.mp3',
            self.wr + self.mn + '1-01_Vorspiel_313c5f00.mp3'
        )
    def test_wagner_02(self):
        self.assertDryRun(
            'Wagner_Meistersinger', '02.mp3',
            self.wr + self.mn + '1-02_Akt-I-Szene-I-Da-zu-dir-der-Heiland-'
            'kam-Gemeinde_cdd9f298.mp3'
        )
    def test_wagner_03(self):
        self.assertDryRun(
            'Wagner_Meistersinger', '03.mp3',
            self.wr + self.mn + '1-03_Akt-I-Szene-I-Verweilt-Ein-Wort-'
            'Walther-Eva-Magdalene_adab7b8c.mp3'
        )
    def test_wagner_04(self):
        self.assertDryRun(
            'Wagner_Meistersinger', '04.mp3',
            self.wr + self.mn + '1-04_Akt-I-Szene-I-Da-bin-ich-David-'
            'Magdalene-Walther-Eva_f3f0231f.mp3'
        )
# --copy
class TestBasicCopy(unittest.TestCase):
    """--copy must keep the source file and create the renamed copy
    under the current working directory."""
    def setUp(self):
        # Copy both fixtures with --copy (sources must survive).
        self.tmp_album = helper.copy_to_tmp('files', 'album.mp3')
        with helper.Capturing():
            audiorename.execute('--copy', self.tmp_album)
        self.tmp_compilation = helper.copy_to_tmp('files', 'compilation.mp3')
        with helper.Capturing():
            audiorename.execute('--copy', self.tmp_compilation)
    def test_album(self):
        # Source still there, renamed copy exists at the expected path.
        self.assertTrue(helper.is_file(self.tmp_album))
        self.assertTrue(
            os.path.isfile(helper.dir_cwd + helper.path_album)
        )
    def test_compilation(self):
        self.assertTrue(os.path.isfile(self.tmp_compilation))
        self.assertTrue(
            os.path.isfile(
                helper.dir_cwd + helper.path_compilation
            )
        )
    def tearDown(self):
        # Remove the directories the copies were created in.
        shutil.rmtree(helper.dir_cwd + '/_compilations/')
        shutil.rmtree(helper.dir_cwd + '/t/')
# --debug
class TestDebug(unittest.TestCase):
    """Exercise the --debug flag, which dumps the meta fields of a file."""
    def test_debug(self):
        track = helper.get_testfile('files', 'album.mp3')
        with helper.Capturing() as captured:
            audiorename.execute('--debug', track)
        # The dump must contain the combined year field of the fixture.
        self.assertIn('ar_combined_year : 2001', str(captured))
# --delete
class TestDeleteExisting(unittest.TestCase):
    """--delete must remove a source whose identical target already
    exists: the first file is moved, the duplicate is deleted."""
    def test_delete(self):
        # Two identical copies of the same fixture.
        tmp1 = helper.copy_to_tmp('files', 'album.mp3')
        tmp2 = helper.copy_to_tmp('files', 'album.mp3')
        target = tempfile.mkdtemp()
        self.assertTrue(os.path.isfile(tmp1))
        self.assertTrue(os.path.isfile(tmp2))
        # First run: normal move into the target directory.
        with helper.Capturing() as output1:
            audiorename.execute(
                '--delete',
                '--target',
                target,
                tmp1
            )
        self.assertTrue('Move' in helper.join(output1))
        self.assertFalse(os.path.isfile(tmp1))
        self.assertTrue(os.path.isfile(tmp2))
        # Second run: target already exists, so the duplicate is deleted.
        with helper.Capturing() as output2:
            audiorename.execute(
                '--delete',
                '--target',
                target,
                tmp2
            )
        self.assertTrue('Delete' in helper.join(output2))
        self.assertFalse(os.path.isfile(tmp1))
        self.assertFalse(os.path.isfile(tmp2))
# --dry-run
class TestDryRun(unittest.TestCase):
    """--dry-run must announce the move in the output but leave the
    filesystem untouched."""
    def setUp(self):
        # Run a dry-run over both fixtures, keeping the captured output.
        self.tmp_album = helper.copy_to_tmp('files', 'album.mp3')
        with helper.Capturing() as self.output_album:
            audiorename.execute('--dry-run', self.tmp_album)
        self.tmp_compilation = helper.copy_to_tmp('files', 'compilation.mp3')
        with helper.Capturing() as self.output_compilation:
            audiorename.execute('--dry-run', self.tmp_compilation)
    def test_output_album(self):
        self.assertTrue(helper.has(self.output_album, 'Dry run'))
        self.assertTrue(helper.has(self.output_album, self.tmp_album))
    def test_output_compilation(self):
        self.assertTrue(helper.has(self.output_compilation, 'Dry run'))
        self.assertTrue(
            helper.has(self.output_compilation, self.tmp_compilation)
        )
    def test_album(self):
        # Source untouched, no target file created.
        self.assertTrue(helper.is_file(self.tmp_album))
        self.assertFalse(
            os.path.isfile(helper.dir_cwd + helper.path_album)
        )
    def test_compilation(self):
        self.assertTrue(helper.is_file(self.tmp_compilation))
        self.assertFalse(
            os.path.isfile(
                helper.dir_cwd + helper.path_compilation
            )
        )
# --enrich-metadata
class TestEnrichMetadata(unittest.TestCase):
    """--enrich-metadata must fill in composer and MusicBrainz work
    fields from the online API (skipped when the API is unreachable)."""
    @unittest.skipIf(helper.SKIP_API_CALLS, 'Disable if API not available')
    def test_pass(self):
        tmp = helper.copy_to_tmp('classical', 'without_work.mp3')
        from audiorename.meta import Meta
        # Strip the composer fields so enrichment has something to restore.
        orig = Meta(tmp)
        orig.composer = None
        orig.composer_sort = None
        orig.save()
        # Re-read and verify the fixture really starts empty.
        orig = Meta(tmp)
        self.assertEqual(orig.composer_sort, None)
        self.assertEqual(orig.composer, None)
        self.assertEqual(orig.mb_workhierarchy_ids, None)
        self.assertEqual(orig.mb_workid, None)
        self.assertEqual(orig.work_hierarchy, None)
        self.assertEqual(orig.work, None)
        with helper.Capturing() as output:
            audiorename.execute('--enrich-metadata', '--no-rename', tmp)
        self.assertTrue('Enrich metadata' in helper.join(output))
        # All fields must now be populated from MusicBrainz.
        enriched = Meta(tmp)
        self.assertEqual(enriched.composer_sort, 'Wagner, Richard')
        self.assertEqual(enriched.composer, 'Richard Wagner')
        self.assertEqual(enriched.mb_workhierarchy_ids,
                         '4d644732-9876-4b0d-9c2c-b6a738d6530e/'
                         '6b198406-4fbf-3d61-82db-0b7ef195a7fe')
        self.assertEqual(enriched.mb_workid,
                         '6b198406-4fbf-3d61-82db-0b7ef195a7fe')
        self.assertEqual(enriched.work_hierarchy,
                         'Die Meistersinger von Nürnberg, WWV 96 -> '
                         'Die Meistersinger von Nürnberg, WWV 96: '
                         'Vorspiel')
        self.assertEqual(enriched.work,
                         'Die Meistersinger von Nürnberg, WWV 96: '
                         'Vorspiel')
# --field-skip
class TestSkipIfEmpty(unittest.TestCase):
    """--field-skip must skip files that are missing the given field
    ('No field') and process files that do have it."""
    def setUp(self):
        # 'lol' is not a real field, so the album file is skipped.
        with helper.Capturing() as self.album:
            audiorename.execute(
                '--field-skip',
                'lol',
                helper.copy_to_tmp('files', 'album.mp3')
            )
        # 'album' exists on the compilation fixture, so it proceeds
        # (as a dry run, -d, with compilation dir -c).
        with helper.Capturing() as self.compilation:
            audiorename.execute(
                '--field-skip',
                'album',
                '-d',
                '-c',
                '/tmp/c',
                helper.copy_to_tmp('files', 'compilation.mp3')
            )
    def test_album(self):
        self.assertTrue(helper.has(self.album, 'No field'))
    def test_compilation(self):
        self.assertTrue(helper.has(self.compilation, 'Dry run'))
# --classical_format string
class TestClassicalFormat(unittest.TestCase):
    """Tests for the ``--format-classical`` path template (with ``--classical``)."""

    def assertDryRun(self, folder, track, test):
        # Render the classical path template against a fixture file in a
        # dry run and compare the resulting relative target path.
        self.assertEqual(helper.dry_run([
            '--classical', '--format-classical', '$ar_combined_composer/'
            '${ar_combined_disctrack}_%shorten{$ar_classical_title,64}',
            helper.get_testfile('classical', folder, track)
        ]), test)

    def test_debussy_01(self):
        self.assertDryRun(
            'Debussy_Estampes-etc', '01.mp3',
            '/Debussy_Claude/01_Pagodes.mp3'
        )
# --classical_format string
class TestGenreClassical(unittest.TestCase):
    """Tests for ``--genre-classical``: files whose genre matches the given
    comma-separated list are formatted with the classical template."""

    def assertDryRun(self, folder, track, test):
        # Same template as TestClassicalFormat, but triggered via the genre
        # match instead of the global --classical switch.
        self.assertEqual(helper.dry_run([
            '--genre-classical', 'classical,', '--format-classical',
            '$ar_combined_composer/'
            '${ar_combined_disctrack}_%shorten{$ar_classical_title,64}',
            helper.get_testfile('classical', folder, track)
        ]), test)

    def test_debussy_01(self):
        self.assertDryRun(
            'Debussy_Estampes-etc', '01.mp3',
            '/Debussy_Claude/01_Pagodes.mp3'
        )
# --format
class TestCustomFormats(unittest.TestCase):
    """``--format`` and ``--compilation`` accept custom path templates."""

    def setUp(self):
        with helper.Capturing():
            audiorename.execute(
                '--format', 'tmp/$title - $artist',
                helper.copy_to_tmp('files', 'album.mp3'),
            )
        with helper.Capturing():
            audiorename.execute(
                '--compilation', 'tmp/comp_$title - $artist',
                helper.copy_to_tmp('files', 'compilation.mp3'),
            )

    def test_format(self):
        """The album file lands at the path the custom template describes."""
        expected = helper.dir_cwd + '/tmp/full - the artist.mp3'
        self.assertTrue(os.path.isfile(expected))

    def test_compilation(self):
        """The compilation file lands at the compilation-template path."""
        expected = helper.dir_cwd + '/tmp/comp_full - the artist.mp3'
        self.assertTrue(os.path.isfile(expected))

    def tearDown(self):
        shutil.rmtree(helper.dir_cwd + '/tmp/')
# --job-info
class TestJobInfo(unittest.TestCase):
    """Tests for the ``--job-info`` summary printed before a run."""

    def get_job_info(self, *args):
        """Run a dry ``--job-info`` pass and return the captured output as one string."""
        with helper.Capturing() as captured:
            audiorename.execute(
                '--dry-run', '--job-info',
                helper.get_testfile('mixed_formats'),
                *args,
            )
        return '\n'.join(captured)

    def test_dry_run(self):
        output = self.get_job_info()
        self.assertIn('Versions: ', output)
        self.assertIn('audiorename=', output)
        self.assertIn('phrydy=', output)
        self.assertIn('tmep=', output)
        self.assertIn('Source: ', output)
        self.assertIn('Target: ', output)
        # No backup was requested, so no backup folder is reported.
        self.assertNotIn('Backup folder: ', output)

    def test_verbose(self):
        output = self.get_job_info('--verbose')
        self.assertIn('Default: ', output)
        self.assertIn('Compilation: ', output)
        self.assertIn('Soundtrack: ', output)

    def test_backup(self):
        self.assertIn('_audiorename_backups', self.get_job_info('--backup'))

    def test_backup_folder(self):
        output = self.get_job_info('--backup', '--backup-folder', '/tmp')
        self.assertIn('Backup folder: /tmp', output)
# --mb-track-listing
class TestMbTrackListing(unittest.TestCase):
    """Tests for ``--mb-track-listing`` (MusicBrainz style track listing).

    NOTE(review): the listing numbers come from the module-global
    ``audiorename.audiofile.counter``, so these tests are order sensitive;
    test_debussy resets the counter explicitly.
    """

    def mb_track_listing(self, folder, track):
        # Run the listing for a single fixture file and return the first
        # captured output line.
        with helper.Capturing() as output:
            audiorename.execute(
                '--mb-track-listing',
                helper.get_testfile('classical', folder, track)
            )
        return output[0]

    def test_debussy(self):
        # Reset the global listing counter so numbering starts at 1.
        audiorename.audiofile.counter = 0
        result = self.mb_track_listing('Debussy_Estampes-etc', '01.mp3')
        self.assertEqual(result,
                         '1. Estampes/Images/Pour le Piano: Estampes: '
                         'Pagodes (0:00)')

    def test_schubert(self):
        self.assertEqual(self.mb_track_listing('Schubert_Winterreise',
                                               '01.mp3'),
                         '1. Winterreise: Winterreise, D. 911: Gute Nacht '
                         '(0:00)')

    def test_folder(self):
        # Listing a whole folder prints one numbered line per track.
        with helper.Capturing() as output:
            audiorename.execute(
                '--mb-track-listing',
                helper.get_testfile('classical', 'Schubert_Winterreise')
            )
        self.assertEqual(
            output[0],
            '1. Winterreise: Winterreise, D. 911: Gute Nacht (0:00)'
        )
        self.assertEqual(
            output[23],
            '24. Winterreise: Winterreise, D. 911: Der Leiermann (0:00)'
        )
# --soundtrack
class TestSoundtrack(unittest.TestCase):
    """Tests for the ``--soundtrack`` path template against the
    Pulp Fiction fixture album."""

    def assertDryRun(self, folder, track, test):
        # Render the soundtrack template in a dry run and compare the
        # resulting relative target path.
        self.assertEqual(helper.dry_run([
            '--soundtrack',
            '$ar_initial_album/'
            '%shorten{$ar_combined_album}'
            '%ifdef{ar_combined_year,_${ar_combined_year}}/'
            '${ar_combined_disctrack}_${artist}_%shorten{$title}',
            helper.get_testfile('soundtrack', folder, track)
        ]), test)

    def test_pulp_01(self):
        self.assertDryRun(
            'Pulp-Fiction', '01.mp3',
            '/p/Pulp-Fiction_1994/01_[dialogue]_Pumpkin-and-Honey-Bunny.mp3'
        )

    def test_pulp_02(self):
        self.assertDryRun(
            'Pulp-Fiction', '02.mp3',
            '/p/Pulp-Fiction_1994/02_Dick-Dale-and-His-Del-Tones_Misirlou.mp3'
        )

    def test_pulp_03(self):
        self.assertDryRun(
            'Pulp-Fiction', '03.mp3',
            '/p/Pulp-Fiction_1994/03_Kool-The-Gang_Jungle-Boogie.mp3'
        )

    def test_pulp_04(self):
        self.assertDryRun(
            'Pulp-Fiction', '04.mp3',
            '/p/Pulp-Fiction_1994/'
            '04_[dialogue]_Royale-With-Cheese-dialogue.mp3'
        )

    def test_pulp_05(self):
        self.assertDryRun(
            'Pulp-Fiction', '05.mp3',
            '/p/Pulp-Fiction_1994/'
            '05_The-Brothers-Johnson_Strawberry-Letter-23.mp3'
        )

    def test_pulp_06(self):
        self.assertDryRun(
            'Pulp-Fiction', '06.mp3',
            '/p/Pulp-Fiction_1994/'
            '06_[dialogue]_Ezekiel-2517-dialogue-Samuel-L.mp3'
        )

    def test_pulp_07(self):
        self.assertDryRun(
            'Pulp-Fiction', '07.mp3',
            '/p/Pulp-Fiction_1994/07_Al-Green_Lets-Stay-Together.mp3'
        )

    def test_pulp_08(self):
        self.assertDryRun(
            'Pulp-Fiction', '08.mp3',
            '/p/Pulp-Fiction_1994/08_The-Tornadoes_Bustin-Surfboards.mp3'
        )

    def test_pulp_09(self):
        self.assertDryRun(
            'Pulp-Fiction', '09.mp3',
            '/p/Pulp-Fiction_1994/09_The-Centurions_Bullwinkle-Part-II.mp3'
        )

    def test_pulp_10(self):
        self.assertDryRun(
            'Pulp-Fiction', '10.mp3',
            '/p/Pulp-Fiction_1994/'
            '10_Dusty-Springfield_Son-of-a-Preacher-Man.mp3'
        )
# --source-as-target
class TestSourceAsTarget(unittest.TestCase):
    """``--source-as-target`` renames files in place, next to the source."""

    def setUp(self):
        self.tmp_album = helper.copy_to_tmp('files', 'album.mp3')
        self.dir_album = os.path.dirname(self.tmp_album)
        with helper.Capturing():
            audiorename.execute(
                '--source-as-target', '-f', 'a', self.tmp_album
            )
        self.tmp_compilation = helper.copy_to_tmp('files', 'compilation.mp3')
        with helper.Capturing():
            audiorename.execute(
                '--source-as-target', '-c', 'c', self.tmp_compilation
            )

    def test_album(self):
        """The renamed album file sits in the same directory as the source."""
        renamed = self.dir_album + '/a.mp3'
        self.assertTrue(helper.is_file(renamed))
# --stats
class TestStats(unittest.TestCase):
    """``--stats`` prints an execution-time and action-counter summary."""

    def test_dry_run(self):
        with helper.Capturing() as output:
            audiorename.execute(
                '--dry-run', '--stats',
                helper.get_testfile('mixed_formats'),
            )
        joined = helper.join(output)
        self.assertIn('Execution time:', joined)
        self.assertIn('Counter: move=3', joined)

    def test_no_counts(self):
        """An empty source directory yields the "nothing to count" message."""
        tmp_dir = tempfile.mkdtemp()
        with helper.Capturing() as output:
            audiorename.execute('--dry-run', '--stats', tmp_dir)
        self.assertIn('Counter: Nothing to count!', helper.join(output))
        shutil.rmtree(tmp_dir)
# --target
class TestTarget(unittest.TestCase):
    """``--target`` places renamed files below an explicit target directory."""

    def setUp(self):
        self.tmp_dir = tempfile.mkdtemp()
        self.tmp_album = helper.copy_to_tmp('files', 'album.mp3')
        with helper.Capturing():
            audiorename.execute(
                '--target', self.tmp_dir, '-f', 'album', self.tmp_album
            )
        self.tmp_compilation = helper.copy_to_tmp('files', 'compilation.mp3')
        with helper.Capturing():
            audiorename.execute(
                '--target', self.tmp_dir, '-c', 'compilation',
                self.tmp_compilation
            )

    def test_album(self):
        self.assertTrue(helper.is_file(self.tmp_dir + '/album.mp3'))

    def test_compilation(self):
        self.assertTrue(helper.is_file(self.tmp_dir + '/compilation.mp3'))

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)
# --verbose
class TestVerbose(unittest.TestCase):
    """``--verbose`` prints full absolute target paths; without it the
    target prefix is abbreviated away."""

    def test_verbose(self):
        source = helper.copy_to_tmp('files', 'album.mp3')
        target = tempfile.mkdtemp()
        with helper.Capturing() as output:
            audiorename.execute(
                '--copy', '--verbose', '--target', target, source
            )
        # Verbose output contains the full target path, e.g.
        # '[Copy: ] /tmp/.../album.mp3 -> /<target>/t/the album artist/...'
        self.assertIn(target, helper.join(output))

    def test_non_verbose(self):
        source = helper.copy_to_tmp('files', 'album.mp3')
        target = tempfile.mkdtemp()
        with helper.Capturing() as output:
            audiorename.execute(
                '--copy', '--target', target, source
            )
        # Non-verbose output abbreviates the target prefix, e.g.
        # '[Copy: ] ... -> /t/the album artist/the album_2001/4-02_full.mp3'
        self.assertNotIn(target, output[1])
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 32.0175 | 78 | 0.577575 |
ec55979252242f614c0230d8e03ce4b201ab3eb5 | 8,669 | py | Python | herepy/map_image_api.py | mnogoruk/HerePy | 4fb91310d1ca5993f2ce89c9f73461f523c754ec | [
"MIT"
] | null | null | null | herepy/map_image_api.py | mnogoruk/HerePy | 4fb91310d1ca5993f2ce89c9f73461f523c754ec | [
"MIT"
] | null | null | null | herepy/map_image_api.py | mnogoruk/HerePy | 4fb91310d1ca5993f2ce89c9f73461f523c754ec | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import json
import requests
from typing import List, Optional
from herepy.here_api import HEREApi
from herepy.utils import Utils
from herepy import MapImageResourceType, MapImageFormatType
from herepy.error import HEREError, InvalidRequestError, UnauthorizedError
class MapImageApi(HEREApi):
    """A python interface into the HERE Map Image API"""

    def __init__(self, api_key: str = None, timeout: int = None):
        """Returns a MapImageApi instance.
        Args:
          api_key (str):
            API key taken from HERE Developer Portal.
          timeout (int):
            Timeout limit for requests.
        """
        super(MapImageApi, self).__init__(api_key, timeout)
        self._base_url = "https://image.maps.ls.hereapi.com/mia/1.6/mapview"

    def __get_error_from_response(self, json_data):
        """Translate an error payload from the service into a HEREError subclass.

        Uses ``.get`` lookups so a malformed payload cannot raise a
        KeyError/NameError while we are building the error object.
        """
        error_description = json_data.get("error_description", "")
        if "error" in json_data:
            if json_data["error"] == "Unauthorized":
                return UnauthorizedError(error_description)
        error_type = json_data.get("Type")
        error_message = json_data.get(
            "Message",
            error_description + ", error occured on " + sys._getframe(1).f_code.co_name,
        )
        if error_type == "Invalid Request":
            return InvalidRequestError(error_message)
        else:
            return HEREError(error_message)

    def get_mapimage(
        self,
        top_left: List[float] = None,
        bottom_right: List[float] = None,
        coordinates: List[float] = None,
        city_name: Optional[str] = None,
        country_name: Optional[str] = None,
        center: List[float] = None,
        encoded_geo_coordinate: str = None,
        encoded_geo_center_coordinate: str = None,
        image_format: MapImageFormatType = MapImageFormatType.png,
        image_height: Optional[int] = None,
        show_position: bool = False,
        maxhits: int = 1,
        label_language: str = "eng",
        second_label_language: Optional[str] = None,
        house_number: Optional[str] = None,
        zoom: int = 8,
        map_scheme: Optional[int] = None,
        uncertainty: Optional[str] = None,
        nodot: Optional[bool] = None,
    ):
        """Retrieves the map image with given parameters.
        Args:
          top_left (List[float]):
            List contains latitude and longitude in order for the bounding box parameter.
            Note: If poi or poix are given, then this parameter is ignored.
            Note: If this parameter is provided, it ignores tx, tx.xy, ctr, ectr.
            Note: If this parameter is provided then the geo search parameters are ignored, such as co.
          bottom_right (List[float]):
            List contains latitude and longitude in order for the bounding box parameter.
            Note: If poi or poix are given, then this parameter is ignored.
            Note: If this parameter is provided, it ignores tx, tx.xy, ctr, ectr.
            Note: If this parameter is provided then the geo search parameters are ignored, such as co.
          coordinates (List[float]):
            List contains latitude and longitude in order.
          city_name (Optional[str]):
            City name for address based search. UTF-8 encoded and URL-encoded.
          country_name (Optional[str]):
            Country name for address based search. UTF-8 encoded and URL-encoded.
          center (List[float]):
            Map center point geo coordinate. If the position is on the border of the map, the dot might be cropped.
          encoded_geo_coordinate (str):
            Encoded equivalent of position geo coordinate parameter c. Parameter c is ignored if this parameter is specified.
          encoded_geo_center_coordinate (str):
            Encoded equivalent of map center point geo coordinate parameter ctr. Parameter ctr is ignored if this parameter is present.
          image_format (MapImageFormatType):
            Image format. It is possible to request the map image.
          image_height (Optional[int]):
            Result image height in pixels, maximum 2048. Height and width parameter can be provided independently,
            i.e. there is no need to enter both to resize the image.
          show_position (bool):
            Flag for showing address or position information box inside the map image
            (if address is available or position is allowed to be shown).
            Note: If geo search parameters such as co are provided, then the information shown
            is related to those parameter's values, if valid.
          maxhits (int):
            Maximum number of search results to return. Applies only when some kind of search
            is performed which can return multiple results. Set to 1 to show directly the first
            matching result without any results listing.
          label_language (str):
            Map label language. Specifies the language to be used to display descriptions of details inside the map image.
            If the parameter is not provided, the default language depends on the highest prioritized locale of the
            client's Accept-Language HTTP header which is currently supported.
            If no map language based on HTTP header can be determined, the server configured default is used.
            Note: Some MARC three-letter language codes are supported, please check https://developer.here.com/documentation/map-image/dev_guide/topics/resource-map.html
            for more details.
          second_label_language (Optional[str]):
            Second language to be used, only for dual labelling, therefore a ml language must also be present Map label language.
            Note: Some MARC three-letter language codes are supported, please check https://developer.here.com/documentation/map-image/dev_guide/topics/resource-map.html
            for more details.
          house_number (Optional[str]):
            House number on the street for address based search.
          zoom (int):
            Zoom level for the map image.
          map_scheme (Optional[int]):
            Determines the map scheme to use for the map image.
          uncertainty (Optional[str]):
            The parameter u specifies position uncertainty, which is shown as a filled circle around a
            location defined in terms of its latitude and longitude. The value of the parameter u indicates
            the radius of the circle representing uncertainty. In this case, the radius is set to 5 myriameters,
            which is 50000 meters.
          nodot (Optional[bool]):
            If provided map image will be without dots.
        Returns:
          Map image as bytes.
        Raises:
          HEREError
        """
        data = {
            "z": zoom,
            "apiKey": self._api_key,
        }
        if top_left and bottom_right:
            data["bbox"] = str.format(
                "{0},{1};{2},{3}",
                top_left[0],
                top_left[1],
                bottom_right[0],
                bottom_right[1],
            )
        if coordinates:
            data["c"] = str.format("{0},{1}", coordinates[0], coordinates[1])
        if city_name:
            data["ci"] = city_name
        if country_name:
            data["co"] = country_name
        if center:
            data["ctr"] = str.format("{0},{1}", center[0], center[1])
        if encoded_geo_coordinate:
            data["e"] = encoded_geo_coordinate
        if encoded_geo_center_coordinate:
            data["ectr"] = encoded_geo_center_coordinate
        if map_scheme:
            data["t"] = map_scheme
        if uncertainty:
            data["u"] = uncertainty
        if nodot:
            # Value-less flag parameter; presumably Utils.build_url renders a
            # bare "nodot" key -- TODO confirm against Utils.build_url.
            data["nodot"] = None
        if image_height:
            data["h"] = image_height
        if house_number:
            data["n"] = house_number
        # Use the public Enum accessor instead of the private ``_value_``.
        data["f"] = image_format.value
        # NOTE(review): this serializes the bool as "True"/"False" in the URL;
        # confirm the service accepts that spelling (vs. 0/1).
        data["i"] = show_position
        data["maxhits"] = maxhits
        data["ml"] = label_language
        if second_label_language:
            data["ml2"] = second_label_language
        url = Utils.build_url(self._base_url, extra_params=data)
        response = requests.get(url, timeout=self._timeout)
        if isinstance(response.content, bytes):
            try:
                json_data = json.loads(response.content.decode("utf8"))
                if "error" in json_data:
                    raise self.__get_error_from_response(json_data)
            except (UnicodeDecodeError, json.JSONDecodeError):
                # The body is not a JSON error payload, so it must be the raw
                # image bytes. (Previously only UnicodeDecodeError was caught;
                # a UTF-8-decodable non-JSON body crashed with an uncaught
                # json.JSONDecodeError.)
                print("Map image downloaded")
        return response.content
| 46.859459 | 169 | 0.626139 |
537a242af14ce39ccb8187adcc972fa34df8da77 | 19,190 | py | Python | tests/jobs/test_local_task_job.py | wegamekinglc/incubator-airflow | fc174635b0729253a86e8c877d6d8551a815a2cb | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-07-30T17:14:05.000Z | 2021-08-03T13:51:25.000Z | tests/jobs/test_local_task_job.py | wegamekinglc/incubator-airflow | fc174635b0729253a86e8c877d6d8551a815a2cb | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4 | 2021-06-28T20:57:42.000Z | 2022-02-26T02:11:11.000Z | tests/jobs/test_local_task_job.py | wegamekinglc/incubator-airflow | fc174635b0729253a86e8c877d6d8551a815a2cb | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-03-03T01:44:08.000Z | 2021-03-03T01:44:08.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import multiprocessing
import os
import time
import unittest
import uuid
from multiprocessing import Lock, Value
from unittest import mock
from unittest.mock import patch
import pytest
from airflow import settings
from airflow.exceptions import AirflowException, AirflowFailException
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models.dag import DAG
from airflow.models.dagbag import DagBag
from airflow.models.taskinstance import TaskInstance
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.db import clear_db_jobs, clear_db_runs
from tests.test_utils.mock_executor import MockExecutor
# Fixed execution date shared by all dag runs in this module.
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
# Folder containing the fixture DAG files; the env var is set by the test harness.
TEST_DAG_FOLDER = os.environ['AIRFLOW__CORE__DAGS_FOLDER']
class TestLocalTaskJob(unittest.TestCase):
    """Behavioral tests for LocalTaskJob: heartbeating, double-trigger
    protection, and success/failure callback handling (several tests spawn
    real subprocesses and are timing sensitive)."""

    def setUp(self):
        clear_db_jobs()
        clear_db_runs()
        # Patch out the base-job heartbeat sleep so tests run fast; a test
        # that needs real timing restores it via
        # ``self.mock_base_job_sleep.side_effect = time.sleep``.
        patcher = patch('airflow.jobs.base_job.sleep')
        self.addCleanup(patcher.stop)
        self.mock_base_job_sleep = patcher.start()

    def tearDown(self) -> None:
        clear_db_jobs()
        clear_db_runs()

    def test_localtaskjob_essential_attr(self):
        """
        Check whether essential attributes
        of LocalTaskJob can be assigned with
        proper values without intervention
        """
        dag = DAG(
            'test_localtaskjob_essential_attr', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}
        )
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag.clear()
        dr = dag.create_dagrun(
            run_id="test", state=State.SUCCESS, execution_date=DEFAULT_DATE, start_date=DEFAULT_DATE
        )
        ti = dr.get_task_instance(task_id=op1.task_id)
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
        check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
        assert all(check_result_1)
        check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
        assert all(check_result_2)

    @patch('os.getpid')
    def test_localtaskjob_heartbeat(self, mock_pid):
        # The heartbeat callback must raise when the recorded hostname or pid
        # does not match the running process.
        session = settings.Session()
        dag = DAG('test_localtaskjob_heartbeat', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag.clear()
        dr = dag.create_dagrun(
            run_id="test",
            state=State.SUCCESS,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            session=session,
        )
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.RUNNING
        # Wrong hostname: heartbeat must fail.
        ti.hostname = "blablabla"
        session.commit()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        with pytest.raises(AirflowException):
            job1.heartbeat_callback()  # pylint: disable=no-value-for-parameter
        # Matching hostname and pid: heartbeat passes.
        mock_pid.return_value = 1
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.merge(ti)
        session.commit()
        job1.heartbeat_callback(session=None)
        # Wrong pid: heartbeat must fail again.
        mock_pid.return_value = 2
        with pytest.raises(AirflowException):
            job1.heartbeat_callback()  # pylint: disable=no-value-for-parameter

    def test_heartbeat_failed_fast(self):
        """
        Test that task heartbeat will sleep when it fails fast
        """
        # Restore real sleeping -- this test measures heartbeat spacing.
        self.mock_base_job_sleep.side_effect = time.sleep
        with create_session() as session:
            dagbag = DagBag(
                dag_folder=TEST_DAG_FOLDER,
                include_examples=False,
            )
            dag_id = 'test_heartbeat_failed_fast'
            task_id = 'test_heartbeat_failed_fast_op'
            dag = dagbag.get_dag(dag_id)
            task = dag.get_task(task_id)
            dag.create_dagrun(
                run_id="test_heartbeat_failed_fast_run",
                state=State.RUNNING,
                execution_date=DEFAULT_DATE,
                start_date=DEFAULT_DATE,
                session=session,
            )
            ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
            ti.refresh_from_db()
            ti.state = State.RUNNING
            ti.hostname = get_hostname()
            ti.pid = 1
            session.commit()
            job = LocalTaskJob(task_instance=ti, executor=MockExecutor(do_update=False))
            job.heartrate = 2
            heartbeat_records = []
            job.heartbeat_callback = lambda session: heartbeat_records.append(job.latest_heartbeat)
            job._execute()
            assert len(heartbeat_records) > 2
            for i in range(1, len(heartbeat_records)):
                time1 = heartbeat_records[i - 1]
                time2 = heartbeat_records[i]
                # Assert that difference small enough
                delta = (time2 - time1).total_seconds()
                assert abs(delta - job.heartrate) < 0.05

    @pytest.mark.quarantined
    def test_mark_success_no_kill(self):
        """
        Test that ensures that mark_success in the UI doesn't cause
        the task to fail, and that the task exits
        """
        dagbag = DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_mark_success')
        task = dag.get_task('task1')
        session = settings.Session()
        dag.clear()
        dag.create_dagrun(
            run_id="test",
            state=State.RUNNING,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            session=session,
        )
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
        # Run the job in a separate process so we can flip the TI state
        # "externally" while it runs.
        process = multiprocessing.Process(target=job1.run)
        process.start()
        ti.refresh_from_db()
        # Poll (up to ~5s) until the task instance is actually running.
        for _ in range(0, 50):
            if ti.state == State.RUNNING:
                break
            time.sleep(0.1)
            ti.refresh_from_db()
        assert State.RUNNING == ti.state
        ti.state = State.SUCCESS
        session.merge(ti)
        session.commit()
        process.join(timeout=10)
        assert not process.is_alive()
        ti.refresh_from_db()
        assert State.SUCCESS == ti.state

    def test_localtaskjob_double_trigger(self):
        # A second LocalTaskJob for an already-running TI (same hostname/pid)
        # must not start a task runner.
        dagbag = DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_localtaskjob_double_trigger')
        task = dag.get_task('test_localtaskjob_double_trigger_task')
        session = settings.Session()
        dag.clear()
        dr = dag.create_dagrun(
            run_id="test",
            state=State.SUCCESS,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            session=session,
        )
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.merge(ti)
        session.commit()
        ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti_run.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())
        with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_method:
            job1.run()
            mock_method.assert_not_called()
        # The original run's bookkeeping must be untouched.
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        assert ti.pid == 1
        assert ti.state == State.RUNNING
        session.close()

    @pytest.mark.quarantined
    def test_localtaskjob_maintain_heart_rate(self):
        dagbag = DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_localtaskjob_double_trigger')
        task = dag.get_task('test_localtaskjob_double_trigger_task')
        session = settings.Session()
        dag.clear()
        dag.create_dagrun(
            run_id="test",
            state=State.SUCCESS,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            session=session,
        )
        ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti_run.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())
        # this should make sure we only heartbeat once and exit at the second
        # loop in _execute()
        return_codes = [None, 0]

        def multi_return_code():
            return return_codes.pop(0)

        time_start = time.time()
        with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_start:
            with patch.object(StandardTaskRunner, 'return_code') as mock_ret_code:
                mock_ret_code.side_effect = multi_return_code
                job1.run()
                assert mock_start.call_count == 1
                assert mock_ret_code.call_count == 2
        time_end = time.time()
        assert self.mock_base_job_sleep.call_count == 1
        assert job1.state == State.SUCCESS
        # Consider we have patched sleep call, it should not be sleeping to
        # keep up with the heart rate in other unpatched places
        #
        # We already make sure patched sleep call is only called once
        assert time_end - time_start < job1.heartrate
        session.close()

    def test_mark_failure_on_failure_callback(self):
        """
        Test that ensures that mark_failure in the UI fails
        the task, and executes on_failure_callback
        """
        # use shared memory value so we can properly track value change even if
        # it's been updated across processes.
        failure_callback_called = Value('i', 0)
        task_terminated_externally = Value('i', 1)

        def check_failure(context):
            with failure_callback_called.get_lock():
                failure_callback_called.value += 1
            assert context['dag_run'].dag_id == 'test_mark_failure'
            assert context['exception'] == "task marked as failed externally"

        def task_function(ti):
            # Flip this TI to FAILED from "outside" while it is running.
            with create_session() as session:
                assert State.RUNNING == ti.state
                ti.log.info("Marking TI as failed 'externally'")
                ti.state = State.FAILED
                session.merge(ti)
                session.commit()
            time.sleep(10)
            # This should not happen -- the state change should be noticed and the task should get killed
            with task_terminated_externally.get_lock():
                task_terminated_externally.value = 0

        with DAG(dag_id='test_mark_failure', start_date=DEFAULT_DATE) as dag:
            task = PythonOperator(
                task_id='test_state_succeeded1',
                python_callable=task_function,
                on_failure_callback=check_failure,
            )
        dag.clear()
        with create_session() as session:
            dag.create_dagrun(
                run_id="test",
                state=State.RUNNING,
                execution_date=DEFAULT_DATE,
                start_date=DEFAULT_DATE,
                session=session,
            )
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        with timeout(30):
            # This should be _much_ shorter to run.
            # If you change this limit, make the timeout in the callable above bigger
            job1.run()
        ti.refresh_from_db()
        assert ti.state == State.FAILED
        assert failure_callback_called.value == 1
        assert task_terminated_externally.value == 1

    @patch('airflow.utils.process_utils.subprocess.check_call')
    @patch.object(StandardTaskRunner, 'return_code')
    def test_failure_callback_only_called_once(self, mock_return_code, _check_call):
        """
        Test that ensures that when a task exits with failure by itself,
        failure callback is only called once
        """
        # use shared memory value so we can properly track value change even if
        # it's been updated across processes.
        failure_callback_called = Value('i', 0)
        callback_count_lock = Lock()

        def failure_callback(context):
            with callback_count_lock:
                failure_callback_called.value += 1
            assert context['dag_run'].dag_id == 'test_failure_callback_race'
            assert isinstance(context['exception'], AirflowFailException)

        def task_function(ti):
            raise AirflowFailException()

        dag = DAG(dag_id='test_failure_callback_race', start_date=DEFAULT_DATE)
        task = PythonOperator(
            task_id='test_exit_on_failure',
            python_callable=task_function,
            on_failure_callback=failure_callback,
            dag=dag,
        )
        dag.clear()
        with create_session() as session:
            dag.create_dagrun(
                run_id="test",
                state=State.RUNNING,
                execution_date=DEFAULT_DATE,
                start_date=DEFAULT_DATE,
                session=session,
            )
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        # Simulate race condition where job1 heartbeat ran right after task
        # state got set to failed by ti.handle_failure but before task process
        # fully exits. See _execute loop in airflow/jobs/local_task_job.py.
        # In this case, we have:
        #  * task_runner.return_code() is None
        #  * ti.state == State.Failed
        #
        # We also need to set return_code to a valid int after job1.terminating
        # is set to True so _execute loop won't loop forever.
        def dummy_return_code(*args, **kwargs):
            return None if not job1.terminating else -9

        mock_return_code.side_effect = dummy_return_code
        with timeout(10):
            # This should be _much_ shorter to run.
            # If you change this limit, make the timeout in the callable above bigger
            job1.run()
        ti.refresh_from_db()
        assert ti.state == State.FAILED  # task exits with failure state
        assert failure_callback_called.value == 1

    def test_mark_success_on_success_callback(self):
        """
        Test that ensures that when a task is marked success in the UI
        on_success_callback gets executed
        """
        # use shared memory value so we can properly track value change even if
        # it's been updated across processes.
        success_callback_called = Value('i', 0)
        task_terminated_externally = Value('i', 1)
        shared_mem_lock = Lock()

        def success_callback(context):
            with shared_mem_lock:
                success_callback_called.value += 1
            assert context['dag_run'].dag_id == 'test_mark_success'

        dag = DAG(dag_id='test_mark_success', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})

        def task_function(ti):  # pylint: disable=unused-argument
            time.sleep(60)
            # This should not happen -- the state change should be noticed and the task should get killed
            with shared_mem_lock:
                task_terminated_externally.value = 0

        task = PythonOperator(
            task_id='test_state_succeeded1',
            python_callable=task_function,
            on_success_callback=success_callback,
            dag=dag,
        )
        session = settings.Session()
        dag.clear()
        dag.create_dagrun(
            run_id="test",
            state=State.RUNNING,
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            session=session,
        )
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        job1.task_runner = StandardTaskRunner(job1)
        # Drop pooled DB connections before forking the job process.
        settings.engine.dispose()
        process = multiprocessing.Process(target=job1.run)
        process.start()
        # Poll (up to ~5s) until the task instance is actually running.
        for _ in range(0, 25):
            ti.refresh_from_db()
            if ti.state == State.RUNNING:
                break
            time.sleep(0.2)
        assert ti.state == State.RUNNING
        ti.state = State.SUCCESS
        session.merge(ti)
        session.commit()
        process.join(timeout=10)
        assert success_callback_called.value == 1
        assert task_terminated_externally.value == 1
        assert not process.is_alive()
@pytest.fixture()
def clean_db_helper():
    """Yield to the test, then wipe the job and dag-run tables."""
    yield
    clear_db_jobs()
    clear_db_runs()
@pytest.mark.usefixtures("clean_db_helper")
class TestLocalTaskJobPerformance:
    """Query-count regression tests for LocalTaskJob."""

    @pytest.mark.parametrize("return_codes", [[0], 9 * [None] + [0]])  # type: ignore
    @mock.patch("airflow.jobs.local_task_job.get_task_runner")
    def test_number_of_queries_single_loop(self, mock_get_task_runner, return_codes):
        """A LocalTaskJob run must stay within the expected DB query budget.

        The task runner is mocked; ``return_codes`` simulates a task that
        finishes immediately vs. one that needs several heartbeat loops.
        """
        unique_prefix = str(uuid.uuid4())
        dag = DAG(dag_id=f'{unique_prefix}_test_number_of_queries', start_date=DEFAULT_DATE)
        task = DummyOperator(task_id='test_state_succeeded1', dag=dag)
        dag.clear()
        dag.create_dagrun(run_id=unique_prefix, state=State.NONE)
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        # Bug fix: the Mock attribute is ``side_effect`` (singular). The old
        # code assigned ``side_effects``, which just created an unused
        # attribute, so the mocked return_code never cycled through
        # ``return_codes`` and the parametrization had no effect.
        mock_get_task_runner.return_value.return_code.side_effect = return_codes
        job = LocalTaskJob(task_instance=ti, executor=MockExecutor())
        with assert_queries_count(13):
            job.run()
| 36.552381 | 107 | 0.640594 |
b0955ed3adbc400bc6577378127da21167c9c583 | 867 | py | Python | injector/AFDProxyInjector.py | batteryshark/AfdProxy | 2a80a969a4caa197c3c8825e40ea8e8f0b855396 | [
"MIT"
] | 22 | 2019-07-04T14:13:36.000Z | 2020-10-14T12:27:26.000Z | injector/AFDProxyInjector.py | batteryshark/AfdProxy | 2a80a969a4caa197c3c8825e40ea8e8f0b855396 | [
"MIT"
] | null | null | null | injector/AFDProxyInjector.py | batteryshark/AfdProxy | 2a80a969a4caa197c3c8825e40ea8e8f0b855396 | [
"MIT"
] | 10 | 2019-12-24T05:05:31.000Z | 2020-04-23T22:21:45.000Z | # Redirection of AFD Sockets via Process Monitoring / Injection
import os
import time
from ProcessMonitor import ProcessMonitor
from ContextHijack import inject
# Get Our Socket Library Paths
base_path = os.path.dirname(os.path.abspath(__file__))
AFDPROXY_32_PATH = os.path.join(base_path, "deps", "afdproxy32.dll")
AFDPROXY_64_PATH = os.path.join(base_path, "deps", "afdproxy64.dll")
# Write our Injection Callback
def inject_afdproxy(proc_info):
    """Process-monitor callback: inject the AFD proxy DLL into a new process.
    Both the 32- and 64-bit DLL paths are handed to ``inject`` along with the
    target PID; presumably ``inject`` picks the DLL matching the process
    bitness (the logic lives in ContextHijack, not shown here).
    """
    inject([AFDPROXY_32_PATH, AFDPROXY_64_PATH, proc_info['pid']])
class SocketRedirector(object):
    """Watches for target processes and injects the AFD proxy into each one."""
    def __init__(self, targets, child_aware=False, full_paths=False):
        # ProcessMonitor calls ``inject_afdproxy`` for every matching process;
        # ``child_aware`` and ``full_paths`` are forwarded as-is (their
        # semantics are defined by ProcessMonitor).
        self.ctx = ProcessMonitor(targets, inject_afdproxy, child_aware, full_paths)
    def __del__(self):
        # Tear down the monitor when the redirector is garbage-collected.
        del self.ctx
if __name__ == "__main__":
    # Demo: redirect sockets of every notepad.exe (child-aware) for 20 seconds.
    sr = SocketRedirector(["notepad.exe"], True, False)
    time.sleep(20)
    del sr
| 28.9 | 84 | 0.748558 |
85038ffb9a98a95270c95ef454a5ecac8e3233b8 | 3,437 | py | Python | tests/test_plugin_helper.py | jupyterhub/nbgitpuller-downloader-plugins | 646857507fb21a6f2a7222bdd7610d7184492e69 | [
"MIT"
] | null | null | null | tests/test_plugin_helper.py | jupyterhub/nbgitpuller-downloader-plugins | 646857507fb21a6f2a7222bdd7610d7184492e69 | [
"MIT"
] | 3 | 2021-12-21T01:55:15.000Z | 2022-01-19T14:06:32.000Z | tests/test_plugin_helper.py | jupyterhub/nbgitpuller-downloader-plugins | 646857507fb21a6f2a7222bdd7610d7184492e69 | [
"MIT"
] | 1 | 2021-12-21T00:00:00.000Z | 2021-12-21T00:00:00.000Z | import os
import pytest
import shutil
import nbgitpuller_downloader_plugins_util.plugin_helper as ph
from aioresponses import aioresponses
test_files_dir = os.getcwd() + "/tests/test_files"
archive_base = "/tmp/test_files"
repo_parent_dir = "/tmp/fake/"
temp_download_repo = "/tmp/download/"
temp_archive_download = "/tmp/archive_download/"
provider = "dropbox_test"
url = "http://test/this/repo"
CACHED_ORIGIN_NON_GIT_REPO = ".nbgitpuller/targets/"
origin_repo = f"{repo_parent_dir}{CACHED_ORIGIN_NON_GIT_REPO}{provider}/{url}/"
repo_zip = 'file://' + archive_base + ".zip"
repo_tgz = 'file://' + archive_base + ".tar.gz"
@pytest.fixture
async def test_configuration():
    """Create zip/tar.gz archives of the test files plus scratch directories,
    then remove everything after the test finishes."""
    shutil.make_archive(archive_base, 'zip', test_files_dir)
    shutil.make_archive(archive_base, 'gztar', test_files_dir)
    os.makedirs(temp_archive_download, exist_ok=True)
    os.makedirs(repo_parent_dir, exist_ok=True)
    os.makedirs(temp_download_repo, exist_ok=True)
    yield "test finishing"
    # teardown: delete the archives and every temp directory created above
    os.remove(archive_base + ".zip")
    os.remove(archive_base + ".tar.gz")
    if os.path.isfile(temp_archive_download + "downloaded.zip"):
        os.remove(temp_archive_download + "downloaded.zip")
    shutil.rmtree(repo_parent_dir)
    shutil.rmtree(temp_download_repo)
    shutil.rmtree(temp_archive_download)
def test_extract_file_extension():
    """The helper should pull the final extension out of an archive URL."""
    archive_url = "https://example.org/master/materials-sp20-external.tgz"
    extension = ph.extract_file_extension(archive_url)
    assert "tgz" in extension
@pytest.mark.asyncio
async def test_initialize_local_repo(test_configuration):
    """initialize_local_repo should create a bare git repo and report progress."""
    yield_str = ""
    # drain the async generator, collecting every progress line it reports
    async for line in ph.initialize_local_repo(origin_repo):
        yield_str += line
    assert "init --bare" in yield_str
    assert os.path.isdir(origin_repo)
@pytest.mark.asyncio
async def test_clone_local_origin_repo(test_configuration):
    """Cloning the freshly initialized origin should produce a .git dir."""
    # set up a bare origin repo first; its progress output is irrelevant here
    async for line in ph.initialize_local_repo(origin_repo):
        pass
    yield_str = ""
    async for line in ph.clone_local_origin_repo(origin_repo, temp_download_repo):
        yield_str += line
    assert "Cloning into" in yield_str
    assert os.path.isdir(temp_download_repo + ".git")
@pytest.mark.asyncio
async def test_execute_unarchive(test_configuration):
    """Unzipping the fixture archive should materialize its files on disk."""
    yield_str = ""
    async for line in ph.execute_unarchive("zip", archive_base + ".zip", temp_download_repo):
        yield_str += line
    assert os.path.isfile("/tmp/download/test.txt")
@pytest.mark.asyncio
async def test_push_to_local_origin(test_configuration):
    """After init + clone + unzip, pushing should create a new branch upstream."""
    # full pipeline setup; intermediate progress output is ignored
    async for line in ph.initialize_local_repo(origin_repo):
        pass
    async for line in ph.clone_local_origin_repo(origin_repo, temp_download_repo):
        pass
    async for line in ph.execute_unarchive("zip", archive_base + ".zip", temp_download_repo):
        pass
    yield_str = ""
    async for line in ph.push_to_local_origin(temp_download_repo):
        yield_str += line
    # git reports "[new branch]" on the first push to a bare origin
    assert "[new branch]" in yield_str
@pytest.mark.asyncio
async def test_download_archive(test_configuration):
    """download_archive should stream a (mocked) HTTP response to disk."""
    args = {}
    args["repo"] = "http://example.org/mocked-download-url"
    with aioresponses() as mocked:
        # serve canned bytes instead of touching the network
        mocked.get(args["repo"], status=200, body=b'Pretend you are zip file being downloaded')
        yield_str = ""
        async for line in ph.download_archive(args["repo"], temp_archive_download + "downloaded.zip"):
            yield_str += line
        assert 'Downloading archive' in yield_str
        assert os.path.isfile(temp_archive_download + "downloaded.zip")
| 33.696078 | 102 | 0.731161 |
9b40b5c13c0f83ea813196c2f996b233409318ed | 3,690 | py | Python | aiozk/recipes/allocator.py | micro-fan/aiozk | 976a208b9cd648820c7edfa93e92082c6b1f4757 | [
"MIT"
] | 22 | 2019-08-21T11:39:21.000Z | 2021-08-18T11:21:23.000Z | aiozk/recipes/allocator.py | micro-fan/aiozk | 976a208b9cd648820c7edfa93e92082c6b1f4757 | [
"MIT"
] | 44 | 2019-08-12T08:39:57.000Z | 2021-10-08T19:38:46.000Z | aiozk/recipes/allocator.py | tipsi/aiozk | 976a208b9cd648820c7edfa93e92082c6b1f4757 | [
"MIT"
] | 15 | 2017-01-13T12:19:54.000Z | 2019-06-10T22:38:28.000Z | import asyncio
import collections
import itertools
import json
from .data_watcher import DataWatcher
from .party import Party
from .lock import Lock
from .recipe import Recipe
class Allocator(Recipe):
    """Distributes a shared set of items among the members of a party.

    Every participant joins a party under ``base_path`` and watches the
    JSON-encoded item set stored at that znode.  Whenever membership or the
    item set changes, ``allocator_fn`` recomputes the full assignment; this
    instance's own share is exposed via :attr:`allocation`.
    """

    sub_recipes = {
        "party": (Party, ["member_path", "name"]),
        "lock": (Lock, ["lock_path"]),
        "data_watcher": DataWatcher,
    }

    def __init__(self, base_path, name, allocator_fn=None):
        """Create an allocator.

        :param base_path: znode under which the party, lock and data live
        :param name: this member's name within the party
        :param allocator_fn: callable ``(members, items) -> {member: set}``;
            defaults to :func:`round_robin`
        """
        self.name = name
        super().__init__(base_path)
        if allocator_fn is None:
            allocator_fn = round_robin
        self.allocator_fn = allocator_fn
        self.active = False
        # full_allocation: member name -> set of items; full_set: all items.
        self.full_allocation = collections.defaultdict(set)
        self.full_set = set()

    @property
    def lock_path(self):
        return self.base_path + "/lock"

    @property
    def member_path(self):
        return self.base_path + "/members"

    @property
    def allocation(self):
        """The subset of items currently assigned to this member."""
        return self.full_allocation[self.name]

    def validate(self, new_allocation):
        """Assert that *new_allocation* is an exact partition of the full set."""
        as_list = []
        for subset in new_allocation.values():
            as_list.extend(list(subset))
        # make sure there are no duplicates among the subsets
        assert len(as_list) == len(set(as_list)), (
            "duplicate items found in allocation: %s" % self.full_allocation
        )
        # make sure there's no mismatch beween the full set and allocations
        assert len(self.full_set.symmetric_difference(set(as_list))) == 0, (
            "mismatch between full set and allocation: %s vs %s" % (
                self.full_set, self.full_allocation
            )
        )

    async def start(self):
        """Join the party and begin watching membership and data changes."""
        self.active = True
        await self.ensure_path()
        await self.party.join()
        self.data_watcher.add_callback(self.base_path, self.handle_data_change)
        asyncio.create_task(self.monitor_member_changes())

    async def add(self, new_item):
        """Add *new_item* to the shared item set and publish the change."""
        # Bug fix: ``set.add`` mutates in place and returns None, so the
        # original ``self.full_set.copy().add(new_item)`` propagated None.
        # It also awaited the nonexistent ``update_set``; the updating
        # coroutine is ``update``.
        new_set = self.full_set.copy()
        new_set.add(new_item)
        await self.update(new_set)

    async def remove(self, new_item):
        """Remove *new_item* from the shared item set and publish the change."""
        # Same fixes as ``add``: ``set.remove`` returns None and the
        # updater is ``update``, not ``update_set``.
        new_set = self.full_set.copy()
        new_set.remove(new_item)
        await self.update(new_set)

    async def update(self, new_items):
        """Replace the shared item set with *new_items* (stored as JSON)."""
        new_items = set(new_items)
        data = json.dumps(list(new_items))
        with (await self.lock.acquire()):
            await self.client.set_data(self.base_path, data=data)

    def monitor_member_changes(self):
        # NOTE(review): old-style generator-based coroutine; ``start`` wraps
        # it in ``asyncio.create_task`` — confirm this still runs on modern
        # asyncio, which requires a native coroutine for create_task.
        while self.active:
            yield self.party.wait_for_change()
            if not self.active:
                break
            self.allocate()

    def handle_data_change(self, new_set_data):
        """Data-watcher callback: re-allocate when the stored set changes."""
        if new_set_data is None:
            return
        new_set_data = set(json.loads(new_set_data))
        if new_set_data == self.full_set:
            return  # no-op update; avoid a spurious re-allocation
        self.full_set = new_set_data
        self.allocate()

    def allocate(self):
        """Recompute and validate the full allocation for current members."""
        new_allocation = self.allocator_fn(self.party.members, self.full_set)
        self.validate(new_allocation)
        self.full_allocation = new_allocation

    async def stop(self):
        """Leave the party and stop watching for changes."""
        await self.party.leave()
        self.data_watcher.remove_callback(
            self.base_path, self.handle_data_change
        )
def round_robin(members, items):
    """
    Default allocator with a round robin approach.

    Members are cycled over in order, each receiving one item at a time
    until no items remain.  This assumes roughly equal capacity per member
    and aims for item counts as even as possible.
    """
    allocation = collections.defaultdict(set)
    member_list = list(members)
    if member_list:
        for position, item in enumerate(items):
            owner = member_list[position % len(member_list)]
            allocation[owner].add(item)
    return allocation
b908df1e39f0bf722671ef39bee447aea42b3c3f | 1,355 | py | Python | trieste/models/gpflow/config.py | vishalbelsare/trieste | d25b66becd0cac47574b0f8e80bc28301aa0bf98 | [
"Apache-2.0"
] | 1 | 2021-10-02T19:53:48.000Z | 2021-10-02T19:53:48.000Z | trieste/models/gpflow/config.py | vishalbelsare/trieste | d25b66becd0cac47574b0f8e80bc28301aa0bf98 | [
"Apache-2.0"
] | null | null | null | trieste/models/gpflow/config.py | vishalbelsare/trieste | d25b66becd0cac47574b0f8e80bc28301aa0bf98 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Any, Dict, Type
from gpflow.models import GPR, SGPR, SVGP, VGP
from ..config import ModelRegistry
from ..interfaces import TrainableProbabilisticModel
from .models import GaussianProcessRegression, SparseVariational, VariationalGaussianProcess
# Here we list all the GPflow models currently supported by model interfaces
# and optimizers, and register them for usage with ModelConfig.
_SUPPORTED_MODELS: Dict[Type[Any], Type[TrainableProbabilisticModel]] = {
    GPR: GaussianProcessRegression,
    SGPR: GaussianProcessRegression,  # sparse GPR shares the dense wrapper
    VGP: VariationalGaussianProcess,
    SVGP: SparseVariational,
}
# Register each GPflow model type with its Trieste wrapper at import time.
for model_type, wrapper in _SUPPORTED_MODELS.items():
    ModelRegistry.register_model(model_type, wrapper)
| 38.714286 | 92 | 0.791882 |
ea5879446b2334c9ee659fd280780513b1222a0e | 411 | py | Python | services/sound.py | BinderEq/space-english | 9d9fa281c945812099a728ae01c8ba0df128142e | [
"BSD-3-Clause"
] | null | null | null | services/sound.py | BinderEq/space-english | 9d9fa281c945812099a728ae01c8ba0df128142e | [
"BSD-3-Clause"
] | null | null | null | services/sound.py | BinderEq/space-english | 9d9fa281c945812099a728ae01c8ba0df128142e | [
"BSD-3-Clause"
] | null | null | null | class Sound:
EXPL = 0
TAKE = 1
SHOOT = 2
def __init__(self, pygame):
pygame.mixer.music.set_volume(0.8)
self.sounds = []
self.sounds.append(pygame.mixer.Sound("sound/explosion.mp3"))
self.sounds.append(pygame.mixer.Sound("sound/take.mp3"))
self.sounds.append(pygame.mixer.Sound("sound/shoot.mp3"))
def play(self, num):
self.sounds[num].play()
| 31.615385 | 69 | 0.618005 |
b424cbbe17d5c4ac3e8b1e61976170b57442c8ab | 10,307 | py | Python | downstream/TextSGC/build_graph.py | GOOGLE-M/SGC | 78ad8d02b80808302e38559e2d0f430f66a809bd | [
"MIT"
] | null | null | null | downstream/TextSGC/build_graph.py | GOOGLE-M/SGC | 78ad8d02b80808302e38559e2d0f430f66a809bd | [
"MIT"
] | null | null | null | downstream/TextSGC/build_graph.py | GOOGLE-M/SGC | 78ad8d02b80808302e38559e2d0f430f66a809bd | [
"MIT"
] | null | null | null | import argparse
import itertools
import pickle as pkl
import random
from collections import Counter
from math import log
import numpy as np
import scipy.sparse as sp
from tqdm import tqdm
# Command-line interface: dataset name and embedding dimensionality.
parser = argparse.ArgumentParser(description='Build Document Graph')
parser.add_argument('--dataset', type=str, default='20ng',
                    choices=['20ng', 'R8', 'R52', 'ohsumed', 'mr', 'yelp', 'ag_news'],
                    help='dataset name')
parser.add_argument('--embedding_dim', type=int, default=300,
                    help='word and document embedding size.')
args = parser.parse_args()
# build corpus
dataset = args.dataset
word_embeddings_dim = args.embedding_dim
word_vector_map = {}  # TODO: modify this to use embedding
# Containers filled while parsing data/<dataset>.txt below.
doc_name_list = []
train_val_ids = []
test_ids = []
label_names = set()
train_val_labels = []
test_labels = []
with open('data/' + dataset + '.txt', 'r') as f:
    lines = f.readlines()
    # First pass: record every document name and split ids into
    # train/test according to the name field.
    for id, line in enumerate(lines):
        doc_name_list.append(line.strip())
        _, data_name, data_label = line.strip().split("\t")
        if data_name.find('test') != -1:
            test_ids.append(id)
        elif data_name.find('train') != -1:
            train_val_ids.append(id)
        label_names.add(data_label)
    # Freeze the label vocabulary and map each name to an integer index.
    label_names = list(label_names)
    label_names_to_index = {name: i for i, name in enumerate(label_names)}
    # Second pass: record the integer label for each train/test document.
    for id, line in enumerate(lines):
        _, data_name, data_label_name = line.strip().split("\t")
        if data_name.find('test') != -1:
            test_labels.append(label_names_to_index[data_label_name])
        elif data_name.find('train') != -1:
            train_val_labels.append(label_names_to_index[data_label_name])
with open('data/corpus/' + dataset + '_labels.txt', 'w') as f:
    f.write('\n'.join(label_names))
print("Loaded labels and indices")
# Get document content, after removed words
doc_content_list = []
with open('data/corpus/' + dataset + '.clean.txt', 'r') as f:
    lines = f.readlines()
    doc_content_list = [l.strip() for l in lines]
print("Loaded document content")
# Build vocab
word_freq = Counter()
progress_bar = tqdm(doc_content_list)
progress_bar.set_postfix_str("building vocabulary")
for doc_words in progress_bar:
    words = doc_words.split()
    word_freq.update(words)
# Vocabulary sorted by descending frequency (Counter.most_common order).
vocab, _ = zip(*word_freq.most_common())
# put words after documents
# Word ids start right after the document ids, so documents and words
# share a single node-id space in the graph.
word_id_map = dict(zip(vocab, np.array(range(len(vocab))) + len(train_val_ids + test_ids)))
vocab_size = len(vocab)
with open('data/corpus/' + dataset + '_vocab.txt', 'w') as f:
    vocab_str = '\n'.join(vocab)
    f.write(vocab_str)
# split training and validation
idx = list(range(len(train_val_labels)))
random.shuffle(idx)
train_val_ids = [train_val_ids[i] for i in idx]
train_val_labels = [train_val_labels[i] for i in idx]
# Shuffle the test split as well (ids and labels stay aligned via idx).
idx = list(range(len(test_labels)))
random.shuffle(idx)
test_ids = [test_ids[i] for i in idx]
test_labels = [test_labels[i] for i in idx]
# Hold out 10% of the training documents for validation.
train_val_size = len(train_val_ids)
val_size = int(0.1 * train_val_size)
train_size = train_val_size - val_size
train_ids, val_ids = train_val_ids[:train_size], train_val_ids[train_size:]
train_labels, val_labels = train_val_labels[:train_size], train_val_labels[train_size:]
# Construct feature vectors
def average_word_vec(doc_id, doc_content_list, word_to_vector):
    """Return the mean embedding (length ``word_embeddings_dim``) of a document.

    Words missing from *word_to_vector* contribute a zero vector; the sum is
    divided by the total word count of the document.
    """
    doc_vec = np.zeros(word_embeddings_dim)
    words = doc_content_list[doc_id].split()
    for word in words:
        # Bug fix: the original looked words up in the module-level
        # ``word_vector_map`` instead of the ``word_to_vector`` parameter,
        # silently ignoring the argument (callers do pass word_vector_map,
        # so behaviour is unchanged for them).
        if word in word_to_vector:
            doc_vec = doc_vec + np.array(word_to_vector[word])
    if words:
        # Guard against empty documents: the original divided by zero and
        # produced an all-NaN vector with a runtime warning.
        doc_vec /= len(words)
    return doc_vec
def construct_feature_label_matrix(doc_ids, doc_content_list, word_vector_map):
    """Build a sparse feature matrix (mean word vectors) and one-hot labels.

    NOTE(review): this helper is dead code (see the commented-out calls
    below) and is currently broken: it references the undefined names
    ``real_train_size`` and ``label_list``, and builds ``y`` from the global
    ``train_labels`` regardless of which ``doc_ids`` were passed in.
    """
    row_x = []
    col_x = []
    data_x = []
    for i, doc_id in enumerate(doc_ids):
        doc_vec = average_word_vec(doc_id, doc_content_list, word_vector_map)
        for j in range(word_embeddings_dim):
            row_x.append(i)
            col_x.append(j)
            data_x.append(doc_vec[j])
    x = sp.csr_matrix((data_x, (row_x, col_x)), shape=(
        real_train_size, word_embeddings_dim))
    y = []
    for label in train_labels:
        one_hot = [0 for l in range(len(label_list))]
        one_hot[label] = 1
        y.append(one_hot)
    y = np.array(y)
    return x, y
# not used
# train_x, train_y = construct_feature_label_matrix(train_ids, doc_content_list, word_vector_map)
# val_x, val_y = construct_feature_label_matrix(val_ids, doc_content_list, word_vector_map)
# test_x, test_y = construct_feature_label_matrix(test_ids, doc_content_list, word_vector_map)
print("Finish building feature vectors")
# Creating word and word edges
def create_window(seq, n=2):
    """Yield every contiguous length-*n* tuple of *seq* (a sliding window).

    Adapted from the itertools examples in the Python documentation
    (https://docs.python.org/release/2.3.5/lib/itertools-example.html).
    Sequences shorter than *n* yield nothing.
    """
    it = iter(seq)
    window = tuple(itertools.islice(it, n))
    if len(window) < n:
        return
    yield window
    for item in it:
        window = window[1:] + (item,)
        yield window
# word co-occurence with context windows
def construct_context_windows(ids, doc_words_list, window_size=20):
    """Return all co-occurrence context windows over the given documents.

    Documents no longer than *window_size* contribute one window containing
    all of their words; longer documents contribute one window per position.
    """
    windows = []
    for doc_id in ids:
        # Bug fix: the original indexed the module-level ``doc_content_list``
        # (ignoring the ``doc_words_list`` parameter) and shadowed the
        # builtin ``id`` as its loop variable.  Callers pass doc_content_list
        # anyway, so behaviour is unchanged for them.
        words = doc_words_list[doc_id].split()
        if len(words) <= window_size:
            windows.append(words)
        else:
            windows += list(create_window(words, window_size))
    return windows
def count_word_window_freq(windows):
    """Count, for every word, how many context windows contain it."""
    freq = Counter()
    bar = tqdm(windows)
    bar.set_postfix_str("constructing context window")
    for window in bar:
        # Deduplicate within the window: a window counts each word once.
        freq.update(set(window))
    return freq
def count_word_pair_count(windows):
    """Count ordered co-occurring word pairs within each context window."""
    pair_freq = Counter()
    bar = tqdm(windows)
    bar.set_postfix_str("counting word pair frequency")
    for window in bar:
        # Ordered pairs: (a, b) and (b, a) are counted separately.
        pair_freq.update(itertools.permutations(window, 2))
    return pair_freq
def build_word_word_graph(num_window, word_id_map, word_window_freq, word_pair_count):
    """Build word-word edges weighted by pointwise mutual information (PMI).

    Pairs whose PMI is non-positive are dropped.  Returns parallel
    (row, col, weight) lists of node ids and edge weights.
    """
    row = []
    col = []
    weight = []
    # pmi as weights
    for (word_i, word_j), count in word_pair_count.items():
        freq_i = word_window_freq[word_i]
        freq_j = word_window_freq[word_j]
        pmi = log((1.0 * count / num_window) /
                  (1.0 * freq_i * freq_j / (num_window * num_window)))
        if pmi <= 0:
            continue
        row.append(word_id_map[word_i])
        col.append(word_id_map[word_j])
        weight.append(pmi)
    return row, col, weight
def calc_word_doc_freq(ids, doc_content_list):
    """Return a Counter mapping each word to the number of documents
    (among *ids*) containing it at least once.

    (Removed the unused local ``word_doc_list`` present in the original.)
    """
    word_doc_freq = Counter()
    for doc_id in ids:
        # Deduplicate per document so a word is counted once per doc.
        words = set(doc_content_list[doc_id].split())
        word_doc_freq.update(words)
    return word_doc_freq
def calc_doc_word_freq(ids, doc_content_list):
    """Count (doc_id, word_id) occurrences over the given documents.

    Relies on the module-level ``word_id_map`` to translate tokens to ids.
    """
    freq = Counter()
    for doc_id in ids:
        tokens = doc_content_list[doc_id].split()
        # One count per token occurrence (not deduplicated per document).
        freq.update((doc_id, word_id_map[token]) for token in tokens)
    return freq
def build_doc_word_graph(ids, doc_words_list, doc_word_freq, word_doc_freq, phase='B'):
    """Build doc→word (phase 'B') or word→doc (phase 'C') TF-IDF edges.

    Uses the module-level ``word_id_map`` to translate words into node ids.
    Returns parallel (row, col, weight) lists.
    """
    row = []
    col = []
    weight = []
    for doc_id in ids:
        # One edge per distinct word in the document.
        for word in set(doc_words_list[doc_id].split()):
            word_id = word_id_map[word]
            tf = doc_word_freq[(doc_id, word_id)]
            idf = log(1.0 * len(ids) /
                      word_doc_freq[word])
            edge_weight = tf * idf
            if phase == "B":
                row.append(doc_id)
                col.append(word_id)
            elif phase == "C":
                row.append(word_id)
                col.append(doc_id)
            else:
                raise ValueError("wrong phase")
            weight.append(edge_weight)
    return row, col, weight
def concat_graph(*args):
    """Merge several (row, col, weight) triples into a single triple."""
    rows, cols, weights = zip(*args)
    row = [entry for part in rows for entry in part]
    col = [entry for part in cols for entry in part]
    weight = [entry for part in weights for entry in part]
    return row, col, weight
def export_graph(graph, node_size, phase=""):
    """Pickle *graph* as a node_size x node_size scipy CSR adjacency matrix.

    The output path embeds the module-level ``dataset`` name and, when
    non-empty, the *phase* tag: ``data/ind.<dataset>[.<phase>].adj``.
    """
    row, col, weight = graph
    adj = sp.csr_matrix(
        (weight, (row, col)), shape=(node_size, node_size))
    suffix = "" if phase == "" else ".{}".format(phase)
    path = "data/ind.{}{}.adj".format(dataset, suffix)
    with open(path, 'wb') as f:
        pkl.dump(adj, f)
# ---- driver: build the full graph over all documents + vocabulary ----
ids = train_val_ids + test_ids
windows = construct_context_windows(ids, doc_content_list)
word_window_freq = count_word_window_freq(windows)
word_pair_count = count_word_pair_count(windows)
# D: word-word PMI edges.
D = build_word_word_graph(len(windows), word_id_map, word_window_freq, word_pair_count)
doc_word_freq = calc_doc_word_freq(ids, doc_content_list)
word_doc_freq = calc_word_doc_freq(ids, doc_content_list)
# B: doc->word TF-IDF edges; C: the word->doc transpose.
B = build_doc_word_graph(ids, doc_content_list, doc_word_freq, word_doc_freq, phase="B")
C = build_doc_word_graph(ids, doc_content_list, doc_word_freq, word_doc_freq, phase="C")
node_size = len(vocab) + len(train_val_ids) + len(test_ids)
# Export the adjacency-matrix variants used by TextSGC.
export_graph(concat_graph(B, C, D), node_size, phase="BCD")
export_graph(concat_graph(B, C), node_size, phase="BC")
export_graph(concat_graph(B, D), node_size, phase="BD")
export_graph(B, node_size, phase="B")
# dump objects
# Dump ids (.x) and labels (.y) for each split.  ``with`` replaces the
# original's explicit open/close pairs, guaranteeing the files are closed
# even if pickling raises.
for split, split_ids, split_labels in (
        ("train", train_ids, train_labels),
        ("val", val_ids, val_labels),
        ("test", test_ids, test_labels)):
    with open("data/ind.{}.{}.x".format(dataset, split), 'wb') as f:
        pkl.dump(split_ids, f)
    with open("data/ind.{}.{}.y".format(dataset, split), 'wb') as f:
        pkl.dump(split_labels, f)
b8e46c09e6c8417542ff54c0a7f7f94371f91495 | 784 | py | Python | tests/data_sources/optical_flow/test_optical_flow_model.py | JanEbbing/nowcasting_dataset | f907054a457987e6f6dbb13bfb65fc5359c6f680 | [
"MIT"
] | 15 | 2021-07-24T09:54:13.000Z | 2022-02-01T10:14:28.000Z | tests/data_sources/optical_flow/test_optical_flow_model.py | JanEbbing/nowcasting_dataset | f907054a457987e6f6dbb13bfb65fc5359c6f680 | [
"MIT"
] | 455 | 2021-06-11T10:37:49.000Z | 2022-03-24T14:51:47.000Z | tests/data_sources/optical_flow/test_optical_flow_model.py | JanEbbing/nowcasting_dataset | f907054a457987e6f6dbb13bfb65fc5359c6f680 | [
"MIT"
] | 10 | 2021-08-09T16:17:57.000Z | 2022-03-23T00:19:17.000Z | """Test Optical Flow model."""
import os
import tempfile
import numpy as np
import pytest
from nowcasting_dataset.data_sources.fake.batch import optical_flow_fake
from nowcasting_dataset.data_sources.optical_flow.optical_flow_model import OpticalFlow
def test_optical_flow_init():  # noqa: D103
    # Smoke test: the fake-batch factory should construct without raising.
    _ = optical_flow_fake()
def test_optical_flow_validation():  # noqa: D103
    # A freshly faked batch passes model validation...
    sat = optical_flow_fake()
    OpticalFlow.model_validation(sat)
    # ...but injecting a NaN must make validation raise.
    sat.data[0, 0] = np.nan
    with pytest.raises(Exception):
        OpticalFlow.model_validation(sat)
def test_optical_flow_save():  # noqa: D103
    # Saving batch 0 should create <dir>/opticalflow/000000.nc on disk.
    with tempfile.TemporaryDirectory() as dirpath:
        optical_flow_fake().save_netcdf(path=dirpath, batch_i=0)
        assert os.path.exists(f"{dirpath}/opticalflow/000000.nc")
| 24.5 | 87 | 0.751276 |
0ae03516bafbb729bd8b13f84caf8ac143aeeed6 | 7,007 | py | Python | hybridbackend/tensorflow/data/parquet_dataset_v1.py | alibaba/HybridBackend | 498f74038fbc3be4ab1de6a8c3c2ef99f39af5e3 | [
"Apache-2.0"
] | 38 | 2021-12-01T06:54:36.000Z | 2022-03-23T11:23:21.000Z | hybridbackend/tensorflow/data/parquet_dataset_v1.py | alibaba/HybridBackend | 498f74038fbc3be4ab1de6a8c3c2ef99f39af5e3 | [
"Apache-2.0"
] | 15 | 2021-12-01T09:15:26.000Z | 2022-03-28T02:49:21.000Z | hybridbackend/tensorflow/data/parquet_dataset_v1.py | alibaba/HybridBackend | 498f74038fbc3be4ab1de6a8c3c2ef99f39af5e3 | [
"Apache-2.0"
] | 8 | 2021-12-02T01:16:14.000Z | 2022-01-28T04:51:16.000Z | # Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
r'''Dataset that reads Parquet files.
This class is compatible with TensorFlow 1.12.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.util import nest
from hybridbackend.tensorflow.data.parquet import parquet_fields
from hybridbackend.tensorflow.data.parquet import parquet_filenames_and_fields
from hybridbackend.tensorflow.pywrap import _ops
class _ParquetDatasetV1(dataset_ops.Dataset):
  r'''A Parquet Dataset that reads batches from parquet files.

  Internal single-file dataset: wraps the native `parquet_tabular_dataset`
  op for exactly one parquet file.  The public entry point below fans out
  over multiple files and composes instances of this class.
  '''
  def __init__(
      self, filename, batch_size, fields,
      partition_count=1,
      partition_index=0,
      drop_remainder=False):
    r'''Create a `ParquetDataset`.
    Args:
      filename: A 0-D `tf.string` tensor containing one filename.
      batch_size: Maxium number of samples in an output batch.
      fields: List of DataFrame fields.
      partition_count: (Optional.) Count of row group partitions.
      partition_index: (Optional.) Index of row group partitions.
      drop_remainder: (Optional.) If True, only keep batches with exactly
        `batch_size` samples.
    '''
    self._filename = ops.convert_to_tensor(
        filename, dtype=dtypes.string, name='filename')
    self._batch_size = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int64, name='batch_size')
    self._fields = fields
    # Per-field output structure keyed by field name, as tf.data expects.
    self._output_classes = {f.name: f.output_classes for f in self._fields}
    self._output_types = {f.name: f.output_types for f in self._fields}
    self._output_shapes = {f.name: f.output_shapes for f in self._fields}
    # Flattened views of the field metadata, handed to the C++ kernel below.
    self._field_names = nest.flatten({f.name: f.name for f in self._fields})
    self._field_dtypes = nest.flatten({f.name: f.dtype for f in self._fields})
    self._field_ragged_ranks = nest.flatten(
        {f.name: f.ragged_rank for f in self._fields})
    self._field_shapes = nest.flatten({f.name: f.shape for f in self._fields})
    self._partition_count = partition_count
    self._partition_index = partition_index
    self._drop_remainder = drop_remainder
    super().__init__()
  def _as_variant_tensor(self):
    # Build the dataset variant by invoking the native parquet reader op.
    return _ops.parquet_tabular_dataset(
        self._filename,
        self._batch_size,
        field_names=self._field_names,
        field_dtypes=self._field_dtypes,
        field_ragged_ranks=self._field_ragged_ranks,
        field_shapes=self._field_shapes,
        partition_count=self._partition_count,
        partition_index=self._partition_index,
        drop_remainder=self._drop_remainder)
  def _inputs(self):
    # Leaf dataset: no upstream input datasets.
    return []
  @property
  def output_classes(self):
    return self._output_classes
  @property
  def output_types(self):
    return self._output_types
  @property
  def output_shapes(self):
    return self._output_shapes
class ParquetDatasetV1(dataset_ops.Dataset):
  r'''A Parquet Dataset that reads batches from parquet files.

  Composes one `_ParquetDatasetV1` per input file, reading files either
  sequentially (`flat_map`) or in parallel (`interleave`).
  '''
  VERSION = 2001
  @classmethod
  def read_schema(cls, filename, fields=None, lower=False):
    r'''Read schema from a parquet file.
    Args:
      filename: Path of the parquet file.
      fields: Existing field definitions or field names.
      lower: Convert field name to lower case if not found.
    Returns:
      Field definition list.
    '''
    return parquet_fields(filename, fields, lower=lower)
  def __init__(
      self, filenames,
      batch_size=1,
      fields=None,
      partition_count=1,
      partition_index=0,
      drop_remainder=False,
      num_parallel_reads=None,
      num_sequential_reads=1):
    r'''Create a `ParquetDataset`.
    Args:
      filenames: A 0-D or 1-D `tf.string` tensor containing one or more
        filenames.
      batch_size: (Optional.) Maxium number of samples in an output batch.
      fields: (Optional.) List of DataFrame fields.
      partition_count: (Optional.) Count of row group partitions.
      partition_index: (Optional.) Index of row group partitions.
      drop_remainder: (Optional.) If True, only keep batches with exactly
        `batch_size` samples.
      num_parallel_reads: (Optional.) A `tf.int64` scalar representing the
        number of files to read in parallel. Defaults to reading files
        sequentially.
      num_sequential_reads: (Optional.) A `tf.int64` scalar representing the
        number of batches to read in sequential. Defaults to 1.
    '''
    filenames, self._fields = parquet_filenames_and_fields(filenames, fields)
    self._partition_count = partition_count
    self._partition_index = partition_index
    self._drop_remainder = drop_remainder
    # Factory producing a single-file dataset for each filename tensor.
    def _create_dataset(f):
      f = ops.convert_to_tensor(f, dtypes.string, name='filename')
      return _ParquetDatasetV1(
          f, batch_size,
          fields=self._fields,
          partition_count=self._partition_count,
          partition_index=self._partition_index,
          drop_remainder=self._drop_remainder)
    self._impl = self._build_dataset(
        _create_dataset, filenames, num_parallel_reads, num_sequential_reads)
    super().__init__()
  @property
  def fields(self):
    # DataFrame field definitions resolved from the first parquet file.
    return self._fields
  @property
  def partition_count(self):
    return self._partition_count
  @property
  def partition_index(self):
    return self._partition_index
  @property
  def drop_remainder(self):
    return self._drop_remainder
  # The methods/properties below delegate to the composed multi-file dataset.
  def _as_variant_tensor(self):
    return self._impl._as_variant_tensor()  # pylint: disable=protected-access
  def _inputs(self):
    return self._impl._inputs()  # pylint: disable=protected-access
  @property
  def output_shapes(self):
    return self._impl.output_shapes
  @property
  def output_types(self):
    return self._impl.output_types
  @property
  def output_classes(self):
    return self._impl.output_classes
  def _build_dataset(
      self, dataset_creator, filenames,
      num_parallel_reads=None, num_sequential_reads=1):
    r'''Internal method to create a `ParquetDataset`.
    '''
    # Sequential reads use flat_map; parallel reads interleave files,
    # taking `num_sequential_reads` batches from each file per turn.
    if num_parallel_reads is None:
      return filenames.flat_map(dataset_creator)
    return filenames.interleave(
        dataset_creator,
        cycle_length=num_parallel_reads if num_parallel_reads > 0 else 1,
        block_length=num_sequential_reads,
        num_parallel_calls=num_parallel_reads)
| 33.526316 | 79 | 0.720565 |
3caf400f0fe73470fa0384a4edbc0120e059cb63 | 4,699 | py | Python | Project/KNN_weighted_and_unweighted_7x7.py | TOBEKNOWNABBAS/AI106394 | 51aa967ab63f9cc7fc64f7b9017d23f70bd5cfe7 | [
"MIT"
] | null | null | null | Project/KNN_weighted_and_unweighted_7x7.py | TOBEKNOWNABBAS/AI106394 | 51aa967ab63f9cc7fc64f7b9017d23f70bd5cfe7 | [
"MIT"
] | null | null | null | Project/KNN_weighted_and_unweighted_7x7.py | TOBEKNOWNABBAS/AI106394 | 51aa967ab63f9cc7fc64f7b9017d23f70bd5cfe7 | [
"MIT"
] | null | null | null | import numpy as np
import sklearn as sk
import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.metrics import classification_report, accuracy_score
import math
#function to perform convolution
def convolve2D(image, filter):
    """Valid-mode 2-D convolution (cross-correlation) of *image* with *filter*.

    Assumes a square filter of odd size; the output shrinks by
    ``2 * (filter_size // 2)`` on each axis.  Generalized to take the input
    size from ``image.shape`` instead of the original hard-coded 28, so any
    square image works; behaviour is unchanged for 28x28 MNIST digits.
    """
    fX, fY = filter.shape  # Get filter dimensions
    fNby2 = (fX//2)
    n = image.shape[0]  # was hard-coded to 28 in the original
    nn = n - (fNby2 *2)  # new dimension of the reduced image size
    newImage = np.zeros((nn,nn))  # empty new 2D image
    for i in range(0,nn):
        for j in range(0,nn):
            # NOTE(review): dividing by 25 looks like a leftover 5x5
            # normalizer (a 7x7 filter has 49 taps) — kept as-is to preserve
            # existing outputs; confirm before changing.
            newImage[i][j] = np.sum(image[i:i+fX, j:j+fY]*filter)//25
    return newImage
#Read Data from CSV
train = pd.read_csv("train.csv")
X = train.drop('label',axis=1)  # pixel columns only
Y = train['label']  # digit labels
# print(X)
#Create Filter for convolution
# 7x7 all-ones (box) filter: unweighted local averaging.
filter = np.array([[1,1,1,1,1,1,1],
                   [1,1,1,1,1,1,1],
                   [1,1,1,1,1,1,1],
                   [1,1,1,1,1,1,1],
                   [1,1,1,1,1,1,1],
                   [1,1,1,1,1,1,1],
                   [1,1,1,1,1,1,1]])
#convert from dataframe to numpy array
X = X.to_numpy()
print(X.shape)
#new array with reduced number of features to store the small size images
# 28x28 convolved with 7x7 (valid mode) -> 22x22 = 484 features per image.
sX = np.empty((0,484), int)
# img = X[6]
ss = 500 #subset size for dry runs change to 42000 to run on whole data
#Perform convolve on all images
for img in X[0:ss,:]:
    img2D = np.reshape(img, (28,28))
    # print(img2D.shape)
    # print(img2D)
    nImg = convolve2D(img2D,filter)
    # print(nImg.shape)
    # print(nImg)
    nImg1D = np.reshape(nImg, (-1,484))
    # print(nImg.shape)
    sX = np.append(sX, nImg1D, axis=0)
Y = Y.to_numpy()
sY = Y[0:ss]  # labels matching the convolved subset
# print(sY)
print(sY.shape)
print(sX.shape)
# Hold out 20% of the convolved images for evaluation.
sXTrain, sXTest, yTrain, yTest = train_test_split(sX,sY,test_size=0.2,random_state=0)
print(sXTest.shape,", ",yTest.shape)
print(sXTrain.shape,", ",yTrain.shape)
# Unweighted-filter features into a 7-nearest-neighbour classifier.
classifier = KNeighborsClassifier(n_neighbors=7,p=2,metric='euclidean')
# Bug fix: the original read ``classifier.(sXTrain,yTrain)`` — a syntax
# error that made the whole script unrunnable; the intended call is
# ``fit`` (cf. the identical second pipeline below).
classifier.fit(sXTrain,yTrain)
Y_pred = classifier.predict(sXTest)
print(classification_report(yTest,Y_pred))
print(accuracy_score(yTest,Y_pred))
import numpy as np
import sklearn as sk
import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, accuracy_score
import math
#function to perform convolution
def convolve2D(image, filter):
    """Apply a "valid" 2D convolution of `filter` over a square `image`.

    The output shrinks by the filter radius on each side: an n x n image
    convolved with an fX x fY filter yields an (n - 2*(fX//2)) square array.
    (`filter` shadows the builtin of the same name; kept for compatibility
    with existing callers.)
    """
    fX, fY = filter.shape  # filter dimensions
    fNby2 = fX // 2        # filter radius (assumes an odd-sized filter)
    # Generalized: derive the size from the input instead of hard-coding 28,
    # so any square image works (28x28 inputs behave exactly as before).
    n = image.shape[0]
    nn = n - fNby2 * 2  # dimension of the reduced output image
    newImage = np.zeros((nn, nn))
    for i in range(nn):
        for j in range(nn):
            # NOTE(review): the divisor 25 looks tuned for a 5x5 filter even
            # though callers pass 7x7 filters -- kept to preserve output.
            newImage[i][j] = np.sum(image[i:i + fX, j:j + fY] * filter) // 25
    return newImage
# ---- Load training data ----
train = pd.read_csv("train.csv")
X = train.drop('label', axis=1)
Y = train['label']

# 7x7 pyramid-weighted filter (heavier towards the centre) for convolution
filter = np.array([[1, 1, 1, 1, 1, 1, 1],
                   [1, 2, 2, 2, 2, 2, 1],
                   [1, 2, 3, 3, 3, 2, 1],
                   [1, 2, 3, 4, 3, 2, 1],
                   [1, 2, 3, 3, 3, 2, 1],
                   [1, 2, 2, 2, 2, 2, 1],
                   [1, 1, 1, 1, 1, 1, 1]])

# Convert from dataframe to numpy array
X = X.to_numpy()
print(X.shape)

# Reduced-feature array: each 28x28 image shrinks to 22x22 = 484 features
sX = np.empty((0, 484), int)
ss = 500  # subset size for dry runs; change to 42000 to run on the whole data

# Perform the convolution on every image of the subset
for img in X[0:ss, :]:
    img2D = np.reshape(img, (28, 28))
    nImg = convolve2D(img2D, filter)
    nImg1D = np.reshape(nImg, (-1, 484))
    sX = np.append(sX, nImg1D, axis=0)

Y = Y.to_numpy()
sY = Y[0:ss]
print(sY.shape)
print(sX.shape)

sXTrain, sXTest, yTrain, yTest = train_test_split(sX, sY, test_size=0.2, random_state=0)
print(sXTest.shape, ", ", yTest.shape)
print(sXTrain.shape, ", ", yTrain.shape)

# 7-nearest-neighbours classifier with Euclidean distance (p=2)
classifier = KNeighborsClassifier(n_neighbors=7, p=2, metric='euclidean')
classifier.fit(sXTrain, yTrain)
Y_pred = classifier.predict(sXTest)
print(classification_report(yTest, Y_pred))
print(accuracy_score(yTest, Y_pred))
| 28.652439 | 86 | 0.679081 |
45ad0787c7446ecafa8fb41d5e424b8af4f2ea15 | 1,647 | py | Python | dcf/compounding.py | sonntagsgesicht/dcf | 538e20ac5d4fe4762ee2762029e83443251dcfd6 | [
"Apache-2.0"
] | 16 | 2019-09-02T17:09:48.000Z | 2021-12-18T21:25:16.000Z | dcf/compounding.py | sonntagsgesicht/dcf | 538e20ac5d4fe4762ee2762029e83443251dcfd6 | [
"Apache-2.0"
] | null | null | null | dcf/compounding.py | sonntagsgesicht/dcf | 538e20ac5d4fe4762ee2762029e83443251dcfd6 | [
"Apache-2.0"
] | 4 | 2020-03-12T22:49:47.000Z | 2021-11-24T13:12:43.000Z | # -*- coding: utf-8 -*-
# dcf
# ---
# A Python library for generating discounted cashflows.
#
# Author: sonntagsgesicht, based on a fork of Deutsche Postbank [pbrisk]
# Version: 0.5, copyright Sunday, 21 November 2021
# Website: https://github.com/sonntagsgesicht/dcf
# License: Apache License 2.0 (see LICENSE file)
import math
def simple_compounding(rate_value, maturity_value):
    """Discount factor under simple (linear) compounding: 1 / (1 + r*t)."""
    return 1.0 / (1.0 + rate_value * maturity_value)
def simple_rate(df, period_fraction):
    """Invert simple compounding: the rate r such that 1/(1 + r*t) == df."""
    return (1.0 / df - 1.0) / period_fraction
def continuous_compounding(rate_value, maturity_value):
    """Discount factor under continuous compounding: exp(-r*t)."""
    return math.exp(-1.0 * rate_value * maturity_value)
def continuous_rate(df, period_fraction):
    """Invert continuous compounding: the rate r such that exp(-r*t) == df.

    Raises:
        ValueError: if `df` is zero (no finite rate exists). This matches
            the exception type `math.log(0.0)` would raise anyway; the
            original had a dead `if not df: pass` branch here.
    """
    if not df:
        raise ValueError("discount factor must be non-zero")
    return -math.log(df) / period_fraction
def periodic_compounding(rate_value, maturity_value, period_value):
    """Discount factor with `period_value` compounding periods per year."""
    return math.pow(1.0 + float(rate_value) / period_value, -period_value * maturity_value)
def periodic_rate(df, period_fraction, frequency):
    """Invert periodic compounding: the rate r with `frequency` periods/year."""
    return (math.pow(df, -1.0 / (period_fraction * frequency)) - 1.0) * frequency
def annually_compounding(rate_value, maturity_value):
    """Discount factor with 1 compounding period per year."""
    return periodic_compounding(rate_value, maturity_value, 1)


def semi_compounding(rate_value, maturity_value):
    """Discount factor with 2 compounding periods per year."""
    return periodic_compounding(rate_value, maturity_value, 2)


def quarterly_compounding(rate_value, maturity_value):
    """Discount factor with 4 compounding periods per year."""
    return periodic_compounding(rate_value, maturity_value, 4)


def monthly_compounding(rate_value, maturity_value):
    """Discount factor with 12 compounding periods per year."""
    return periodic_compounding(rate_value, maturity_value, 12)


def daily_compounding(rate_value, maturity_value):
    """Discount factor with 365 compounding periods per year."""
    return periodic_compounding(rate_value, maturity_value, 365)
| 27.45 | 91 | 0.757134 |
9a750e2c7ebf57902dc990ad92c309c637711bf0 | 719 | py | Python | source/mit_experiments/search_oneproc.py | jaesikchoi/gpss-research | 2a64958a018f1668f7b8eedf33c4076a63af7868 | [
"MIT"
] | 151 | 2015-01-09T19:25:05.000Z | 2022-01-05T02:05:52.000Z | source/mit_experiments/search_oneproc.py | jaesikchoi/gpss-research | 2a64958a018f1668f7b8eedf33c4076a63af7868 | [
"MIT"
] | 1 | 2016-08-04T13:12:51.000Z | 2016-08-04T13:12:51.000Z | source/mit_experiments/search_oneproc.py | jaesikchoi/gpss-research | 2a64958a018f1668f7b8eedf33c4076a63af7868 | [
"MIT"
] | 59 | 2015-02-04T19:13:58.000Z | 2021-07-28T23:36:09.000Z | import os
import base
import config
import mit_job_controller as mjc
class Scheduler:
    """Sequentially evaluates candidate kernels via the MIT job controller."""

    def evaluate_kernels(self, kernels, X, y):
        """Score each kernel on (X, y) and return the scored kernels in order."""
        scored_kernels = []
        for i, k in enumerate(kernels):
            # Parenthesized form works under both Python 2 and Python 3
            # (the original used a Python-2-only print statement).
            print('Evaluating %d of %d...' % (i + 1, len(kernels)))
            sk = mjc.evaluate_kernel(k, X, y)
            scored_kernels.append(sk)
        return scored_kernels
def run(data_name, max_depth=3, params=None):
    """Load `data_name` and perform a kernel search up to `max_depth`.

    Falls back to the default SearchParams when `params` is None.
    """
    # Make sure the scratch directory for intermediate job files exists.
    if not os.path.exists(config.TEMP_PATH):
        os.mkdir(config.TEMP_PATH)
    X, y = base.load_data(data_name)
    scheduler = Scheduler()
    if params is None:
        params = base.SearchParams.default()
    base.perform_search(X, y, scheduler, max_depth, params, verbose=True)
| 26.62963 | 73 | 0.649513 |
c3c41db2f56ae8085f3d86ff6647fedca539d403 | 2,578 | py | Python | sleekxmpp/plugins/xep_0257/stanza.py | E-Tahta/sleekxmpp | ed067c9412835c5fe44bf203936262bcec09ced4 | [
"BSD-3-Clause"
] | 499 | 2015-01-04T21:45:16.000Z | 2022-02-14T13:04:08.000Z | sleekxmpp/plugins/xep_0257/stanza.py | numanturle/SleekXMPP | 1aeefd88accf45947c6376e9fac3abae9cbba8aa | [
"BSD-3-Clause"
] | 159 | 2015-01-02T19:09:47.000Z | 2020-02-12T08:29:54.000Z | sleekxmpp/plugins/xep_0257/stanza.py | numanturle/SleekXMPP | 1aeefd88accf45947c6376e9fac3abae9cbba8aa | [
"BSD-3-Clause"
] | 209 | 2015-01-07T16:23:16.000Z | 2022-01-26T13:02:20.000Z | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.xmlstream import ElementBase, ET, register_stanza_plugin
class Certs(ElementBase):
    # Container element holding the collection of CertItem entries
    # (namespace urn:xmpp:saslcert:1).
    name = 'items'
    namespace = 'urn:xmpp:saslcert:1'
    plugin_attrib = 'sasl_certs'
    interfaces = set()
class CertItem(ElementBase):
    """A single SASL certificate entry, listing the resources that use it."""

    name = 'item'
    namespace = 'urn:xmpp:saslcert:1'
    plugin_attrib = 'item'
    plugin_multi_attrib = 'items'
    interfaces = set(['name', 'x509cert', 'users'])
    sub_interfaces = set(['name', 'x509cert'])

    def get_users(self):
        """Return the set of resource names found under the <users/> child."""
        xpath = '{%s}users/{%s}resource' % (self.namespace, self.namespace)
        return set(node.text for node in self.xml.findall(xpath))

    def set_users(self, values):
        """Append a <resource/> child under <users/> for each given name."""
        container = self.xml.find('{%s}users' % self.namespace)
        if container is None:
            container = ET.Element('{%s}users' % self.namespace)
            self.xml.append(container)
        for value in values:
            node = ET.Element('{%s}resource' % self.namespace)
            node.text = value
            container.append(node)

    def del_users(self):
        """Remove the <users/> element entirely, if present."""
        container = self.xml.find('{%s}users' % self.namespace)
        if container is not None:
            self.xml.remove(container)
class AppendCert(ElementBase):
    """Request stanza for appending a new SASL certificate."""

    name = 'append'
    namespace = 'urn:xmpp:saslcert:1'
    plugin_attrib = 'sasl_cert_append'
    interfaces = set(['name', 'x509cert', 'cert_management'])
    sub_interfaces = set(['name', 'x509cert'])

    def get_cert_management(self):
        """True unless a <no-cert-management/> child is present."""
        return self.xml.find('{%s}no-cert-management' % self.namespace) is None

    def set_cert_management(self, value):
        """Toggle the <no-cert-management/> marker (present == disallowed)."""
        self.del_cert_management()
        if not value:
            marker = ET.Element('{%s}no-cert-management' % self.namespace)
            self.xml.append(marker)

    def del_cert_management(self):
        """Remove the <no-cert-management/> marker if present."""
        marker = self.xml.find('{%s}no-cert-management' % self.namespace)
        if marker is not None:
            self.xml.remove(marker)
class DisableCert(ElementBase):
    # Request stanza asking the server to disable the named certificate.
    name = 'disable'
    namespace = 'urn:xmpp:saslcert:1'
    plugin_attrib = 'sasl_cert_disable'
    interfaces = set(['name'])
    sub_interfaces = interfaces
class RevokeCert(ElementBase):
    # Request stanza asking the server to revoke the named certificate.
    name = 'revoke'
    namespace = 'urn:xmpp:saslcert:1'
    plugin_attrib = 'sasl_cert_revoke'
    interfaces = set(['name'])
    sub_interfaces = interfaces
register_stanza_plugin(Certs, CertItem, iterable=True)
| 29.295455 | 74 | 0.636928 |
82d286a6508db77c3461dac2e61db0a361f28192 | 1,976 | py | Python | create_integ_test_docker_images.py | laurenyu/sagemaker-tensorflow-extensions | 14d15e7a292926a69aa590cad3fd2c9d68bd7df5 | [
"Apache-2.0"
] | 43 | 2018-07-18T20:07:11.000Z | 2022-02-19T20:38:58.000Z | create_integ_test_docker_images.py | laurenyu/sagemaker-tensorflow-extensions | 14d15e7a292926a69aa590cad3fd2c9d68bd7df5 | [
"Apache-2.0"
] | 91 | 2018-07-28T17:55:08.000Z | 2022-02-23T22:09:14.000Z | create_integ_test_docker_images.py | laurenyu/sagemaker-tensorflow-extensions | 14d15e7a292926a69aa590cad3fd2c9d68bd7df5 | [
"Apache-2.0"
] | 53 | 2018-07-18T20:07:15.000Z | 2022-02-07T22:47:06.000Z | from __future__ import absolute_import
import argparse
import base64
import subprocess
import docker
import boto3
import botocore
import glob
import sys
# Pinned TensorFlow version, AWS region and ECR repository for test images.
TF_VERSION = "1.15.2"
REGION = "us-west-2"
REPOSITORY_NAME = "sagemaker-tensorflow-extensions-test"


if __name__ == '__main__':
    # Optional positional arg selects the device flavour of the image.
    parser = argparse.ArgumentParser()
    parser.add_argument('device', nargs='?', default='cpu')
    args = parser.parse_args()

    client = docker.from_env()
    # Fetch short-lived ECR credentials; the token is "user:password" base64.
    ecr_client = boto3.client('ecr', region_name=REGION)
    token = ecr_client.get_authorization_token()
    username, password = base64.b64decode(token['authorizationData'][0]['authorizationToken']).decode().split(':')
    registry = token['authorizationData'][0]['proxyEndpoint']

    # Build the sdist that gets baked into the image.
    subprocess.check_call([sys.executable, 'setup.py', 'sdist'])
    [sdist_path] = glob.glob('dist/sagemaker_tensorflow-{}*'.format(TF_VERSION))

    # Create the repository if needed; tolerate it already existing.
    try:
        ecr_client.create_repository(repositoryName=REPOSITORY_NAME)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'RepositoryAlreadyExistsException':
            pass
        else:
            raise

    python_version = str(sys.version_info[0])
    # [8:] strips the leading "https://" (8 chars) from the proxy endpoint.
    tag = '{}/{}:{}-{}-{}'.format(registry, REPOSITORY_NAME, TF_VERSION, args.device, python_version)[8:]

    # pull existing image for layer cache
    try:
        client.images.pull(tag, auth_config={'username': username, 'password': password})
    except docker.errors.NotFound:
        pass
    client.images.build(
        path='.',
        dockerfile='test/integ/Dockerfile',
        tag=tag,
        cache_from=[tag],
        buildargs={'sagemaker_tensorflow': sdist_path,
                   'device': args.device,
                   'python': '/usr/bin/python3',
                   'tensorflow_version': TF_VERSION,
                   'script': 'test/integ/scripts/estimator_script.py'})
    client.images.push(tag, auth_config={'username': username, 'password': password})
    print(tag)
| 33.491525 | 114 | 0.662955 |
1deed5e79449f94dbe676403b3bb47d724904043 | 4,668 | py | Python | mimic3models/phenotyping/hgru.py | xcgoner/mimic3-benchmarks | bbf4f19824f1ee8fcd32f37297efc17578f9658b | [
"MIT"
] | null | null | null | mimic3models/phenotyping/hgru.py | xcgoner/mimic3-benchmarks | bbf4f19824f1ee8fcd32f37297efc17578f9658b | [
"MIT"
] | null | null | null | mimic3models/phenotyping/hgru.py | xcgoner/mimic3-benchmarks | bbf4f19824f1ee8fcd32f37297efc17578f9658b | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Dense, GRU, Masking, Dropout, Add, Multiply, Concatenate, Lambda
from keras.layers.wrappers import Bidirectional, TimeDistributed
from mimic3models.keras_utils import LastTimestep
from mimic3models.keras_utils import ExtendMask
from mimic3models.phenotyping import utils
class Network(Model):
def __init__(self, dim, batch_norm, dropout, rec_dropout, task,
target_repl=False, deep_supervision=False, num_classes=1,
depth=1, input_dim=76, **kwargs):
print "==> not used params in network class:", kwargs.keys()
self.dim = dim
self.batch_norm = batch_norm
self.dropout = dropout
self.rec_dropout = rec_dropout
self.depth = depth
if task in ['decomp', 'ihm', 'ph']:
final_activation = 'sigmoid'
elif task in ['los']:
if num_classes == 1:
final_activation = 'relu'
else:
final_activation = 'softmax'
else:
return ValueError("Wrong value for task")
# Input layers and masking
X = Input(shape=(None, input_dim), name='X')
inputs = [X]
mX = Masking()(X)
if deep_supervision:
M = Input(shape=(None,), name='M')
inputs.append(M)
# Configurations
is_bidirectional = True
if deep_supervision:
is_bidirectional = False
# Main part of the network
for i in range(depth - 1):
num_units = dim
if is_bidirectional:
num_units = num_units // 2
gru = GRU(units=num_units,
activation='tanh',
return_sequences=True,
recurrent_dropout=rec_dropout,
dropout=dropout)
if is_bidirectional:
mX = Bidirectional(gru)(mX)
else:
mX = gru(mX)
# Output module of the network
return_sequences = (target_repl or deep_supervision)
L_lv1 = GRU(units=dim,
activation='tanh',
return_sequences=True,
dropout=dropout,
recurrent_dropout=rec_dropout)(mX)
L = L_lv1
if dropout > 0:
L = Dropout(dropout)(L)
label_struct = utils.read_hierarchical_labels('../../data/phenotyping/label_list.txt', '../../data/phenotyping/label_struct.json')
# only support 2 levels
num_superclass = len(label_struct.keys())
y_lv1 = {}
y_lv2 = {}
for class_lv1 in label_struct.keys():
y_lv1[class_lv1] = Dense(1, activation=final_activation)(Lambda(lambda x: x[:,-1,:])(L))
L_lv2_gru = GRU(units=dim,
activation='tanh',
return_sequences=return_sequences,
dropout=dropout,
recurrent_dropout=rec_dropout)(L_lv1)
if dropout > 0:
L_lv2_gru = Dropout(dropout)(L_lv2_gru)
y_lv2[class_lv1] = {}
for class_lv2 in label_struct[class_lv1]:
y_lv2[class_lv1][class_lv2] = Dense(1, activation=final_activation)(L_lv2_gru)
label_mapper = {}
for super_label in label_struct.keys():
label_mapper[super_label] = set(label_struct[super_label])
y_final = []
for i in range(25):
if (i in label_mapper[25]) and (i not in label_mapper[26]):
y_final.append(Multiply()([y_lv1[25], y_lv2[25][i]]))
elif (i not in label_mapper[25]) and (i in label_mapper[26]):
y_final.append(Multiply()([y_lv1[26], y_lv2[26][i]]))
elif (i in label_mapper[25]) and (i in label_mapper[26]):
y_final.append(Add()([Multiply()([y_lv1[25], y_lv2[25][i]]), Multiply()([y_lv1[26], y_lv2[26][i]])]))
y_final.append(y_lv1[25])
y_final.append(y_lv1[26])
y = Concatenate()(y_final)
outputs = [y]
return super(Network, self).__init__(inputs=inputs,
outputs=outputs)
def say_name(self):
self.network_class_name = "k_hgru"
return "{}.n{}{}{}{}.dep{}".format(self.network_class_name,
self.dim,
".bn" if self.batch_norm else "",
".d{}".format(self.dropout) if self.dropout > 0 else "",
".rd{}".format(self.rec_dropout) if self.rec_dropout > 0 else "",
self.depth) | 37.344 | 138 | 0.558269 |
429b6d2f2862aa7950e1e39e1a228f0ce9e720f4 | 1,247 | py | Python | crypy/pair/indicators.py | asmodehn/crypy | 351af6588f110612d5207a5fbb29d51bfa7c3268 | [
"MIT"
] | 2 | 2019-01-20T14:15:54.000Z | 2019-07-13T17:20:32.000Z | crypy/pair/indicators.py | asmodehn/crypy | 351af6588f110612d5207a5fbb29d51bfa7c3268 | [
"MIT"
] | 12 | 2019-05-07T09:27:34.000Z | 2019-06-04T12:36:41.000Z | crypy/pair/indicators.py | asmodehn/crypy | 351af6588f110612d5207a5fbb29d51bfa7c3268 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
def HA(df, ohlc=None):
"""
Function to compute Heiken Ashi Candles (HA)
Args :
df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
ohlc: List defining OHLC Column names (default ['Open', 'High', 'Low', 'Close'])
Returns :
df : Pandas DataFrame with new columns added for
Heiken Ashi Close (HA_$ohlc[3])
Heiken Ashi Open (HA_$ohlc[0])
Heiken Ashi High (HA_$ohlc[1])
Heiken Ashi Low (HA_$ohlc[2])
"""
ohlc = ['Open', 'High', 'Low', 'Close'] if ohlc is None else ohlc
ha_open = 'HA_' + ohlc[0]
ha_high = 'HA_' + ohlc[1]
ha_low = 'HA_' + ohlc[2]
ha_close = 'HA_' + ohlc[3]
df[ha_close] = (df[ohlc[0]] + df[ohlc[1]] + df[ohlc[2]] + df[ohlc[3]]) / 4
df[ha_open] = 0.00
for i in range(0, len(df)):
if i == 0:
df[ha_open].iat[i] = (df[ohlc[0]].iat[i] + df[ohlc[3]].iat[i]) / 2
else:
df[ha_open].iat[i] = (df[ha_open].iat[i - 1] + df[ha_close].iat[i - 1]) / 2
df[ha_high] = df[[ha_open, ha_close, ohlc[1]]].max(axis=1)
df[ha_low] = df[[ha_open, ha_close, ohlc[2]]].min(axis=1)
return df
| 30.414634 | 103 | 0.542903 |
967c1687b8d7a6108ddb18eed1ca53c1af0a4d63 | 2,271 | py | Python | tests/validation/test_validate.py | gwnoseworthy/pystac | c87f073bacc82ae5dfb125f74cb29774678dad11 | [
"Apache-2.0"
] | null | null | null | tests/validation/test_validate.py | gwnoseworthy/pystac | c87f073bacc82ae5dfb125f74cb29774678dad11 | [
"Apache-2.0"
] | null | null | null | tests/validation/test_validate.py | gwnoseworthy/pystac | c87f073bacc82ae5dfb125f74cb29774678dad11 | [
"Apache-2.0"
] | null | null | null | import json
import unittest
import jsonschema
import pystac
from pystac.serialization.common_properties import merge_common_properties
from pystac.validation import STACValidationError
from tests.utils import TestCases
class ValidateTest(unittest.TestCase):
def test_validate_current_version(self):
catalog = pystac.read_file(
TestCases.get_path('data-files/catalogs/test-case-1/'
'catalog.json'))
catalog.validate()
collection = pystac.read_file(
TestCases.get_path('data-files/catalogs/test-case-1/'
'/country-1/area-1-1/'
'collection.json'))
collection.validate()
item = pystac.read_file(TestCases.get_path('data-files/item/sample-item.json'))
item.validate()
def test_validate_examples(self):
for example in TestCases.get_examples_info():
stac_version = example['stac_version']
path = example['path']
valid = example['valid']
if stac_version < '0.8':
with open(path) as f:
stac_json = json.load(f)
self.assertTrue(len(pystac.validation.validate_dict(stac_json)) == 0)
else:
with self.subTest(path):
with open(path) as f:
stac_json = json.load(f)
# Check if common properties need to be merged
if stac_version < '1.0':
if example['object_type'] == pystac.STACObjectType.ITEM:
collection_cache = pystac.cache.CollectionCache()
merge_common_properties(stac_json, collection_cache, path)
if valid:
pystac.validation.validate_dict(stac_json)
else:
with self.assertRaises(STACValidationError):
try:
pystac.validation.validate_dict(stac_json)
except STACValidationError as e:
self.assertTrue(isinstance(e.source, jsonschema.ValidationError))
raise e
| 38.491525 | 97 | 0.548217 |
02240d49fd588b2cf7b5d599ab189270bcc656f3 | 3,192 | py | Python | laundry/inspector.py | aniav/python-laundry | 77b6e70f541a949acba74220c770cfa3fcf70079 | [
"MIT"
] | null | null | null | laundry/inspector.py | aniav/python-laundry | 77b6e70f541a949acba74220c770cfa3fcf70079 | [
"MIT"
] | null | null | null | laundry/inspector.py | aniav/python-laundry | 77b6e70f541a949acba74220c770cfa3fcf70079 | [
"MIT"
] | null | null | null | import os
import time
import numpy
from .comparators import (
jaccard_similarity_ratio, levenshtein_similarity_ratio,
sequence_matcher_similarity_ratio, sorensen_similarity_ratio)
LEVENSHTEIN = "Levenshtein"
SEQUENCE_MATCHER = "SequenceMatcher"
SORENSEN = "Sorensen"
JACCARD = "Jaccard"
COMPARATORS = [LEVENSHTEIN, SEQUENCE_MATCHER, SORENSEN, JACCARD]
NUM_DOCS = 1
resources_path = os.path.realpath(os.path.join(os.getcwd(), "resources"))
def get_cleaners():
    """Return the list of cleaner modules to benchmark.

    NOTE(review): `goose` is imported but not in the returned list --
    presumably excluded on purpose; confirm before re-adding it.
    """
    # TODO: make this generic loading all cleaners from the cleaners dir
    from .cleaners import boilerpipe, goose, justext, newspaper, tika
    return [boilerpipe, justext, newspaper, tika]
def get_contents(index):
    """Return the raw HTML fixture stored as ``<index>.html`` in resources."""
    html_path = os.path.join(resources_path, "{}.html".format(index))
    with open(html_path, "r", encoding="utf-8") as handle:
        return handle.read()
def get_expected_content(index):
    """Return the expected cleaned text stored as ``<index>.txt`` in resources."""
    text_path = os.path.join(resources_path, "{}.txt".format(index))
    with open(text_path, "r", encoding="utf-8") as handle:
        return handle.read()
def check_cleaners_quality():
    """Time each cleaner on the fixture documents and print similarity tables.

    For every cleaner module, the fixture HTML is cleaned, the wall-clock time
    recorded, and the result compared against the expected text with four
    similarity measures (one list of scores per comparator per cleaner).
    """
    cleaners = get_cleaners()
    # Keyed by the module's __file__ path, which doubles as the column label.
    timings = {cleaner.__file__ : list() for cleaner in cleaners}
    results = {
        comparator: {cleaner.__file__: list() for cleaner in cleaners}
        for comparator in COMPARATORS}
    for cleaner in cleaners:
        cleaner_name = cleaner.__file__
        print(cleaner_name, end="")
        # NOTE(review): only fixture #1 is exercised; keep this range in sync
        # with NUM_DOCS, which the averages below divide by.
        for i in [1]:
            print(".", end="")
            original_content = get_contents(i)
            expected_content = get_expected_content(i)
            start_time = time.time()
            actual_content = cleaner.clean(original_content)
            timings[cleaner_name].append(time.time() - start_time)
            # An empty result counts as an error: it is timed but excluded
            # from the similarity scores ("E" marks it in the progress line).
            if not actual_content:
                print("E", end="")
                continue
            results[LEVENSHTEIN][cleaner_name].append(
                levenshtein_similarity_ratio(
                    actual_content, expected_content))
            results[SEQUENCE_MATCHER][cleaner_name].append(
                sequence_matcher_similarity_ratio(
                    actual_content, expected_content))
            results[SORENSEN][cleaner_name].append(sorensen_similarity_ratio(
                actual_content, expected_content))
            results[JACCARD][cleaner_name].append(jaccard_similarity_ratio(
                actual_content, expected_content))
    print("") # Newline
    # Fixed-width table: one column per cleaner plus a leading label column.
    row_format = "{:>15}" * (len(cleaners) + 1)
    print(row_format.format("", *map(lambda x: x.__file__, cleaners)))
    # NOTE(review): dividing by NUM_DOCS overstates nothing only while every
    # document yields a score; errored documents shrink the lists but not the
    # divisor -- confirm this is intended.
    time_averages = [sum(timings[e.__file__])/NUM_DOCS for e in cleaners]
    print(row_format.format("AGV TIME", *time_averages))
    print(row_format.format("AGV QUALITY", *map(lambda x: "", cleaners)))
    for name, types in results.items():
        averages = [sum(types[e.__file__])/NUM_DOCS for e in cleaners]
        print(row_format.format(name, *averages))
    print(row_format.format("MEDIAN QUALITY", *map(lambda x: "", cleaners)))
    for name, types in results.items():
        averages = [numpy.median(types[e.__file__]) for e in cleaners]
        print(row_format.format(name, *averages))
if __name__ == "__main__":
check_cleaners_quality() | 35.076923 | 77 | 0.662281 |
5e40cee36eb2e3dbbc38c7b5b5e18aa6317544d4 | 165 | py | Python | ABC/abc101-abc150/abc142/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | ABC/abc101-abc150/abc142/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | ABC/abc101-abc150/abc142/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
def main():
    """Read an integer N from stdin and print ceil(N / 2) / N."""
    from math import ceil
    value = int(input())
    half_rounded_up = ceil(value / 2)
    print(half_rounded_up / value)


if __name__ == '__main__':
    main()
| 12.692308 | 27 | 0.472727 |
91e542d09632ec105d5b415d07298d6f54441f89 | 1,120 | py | Python | plot-class.py | drewlinsley/draw_classify | 88d348598d4d3eb20b534b88d186c2b1622066a4 | [
"MIT"
] | null | null | null | plot-class.py | drewlinsley/draw_classify | 88d348598d4d3eb20b534b88d186c2b1622066a4 | [
"MIT"
] | null | null | null | plot-class.py | drewlinsley/draw_classify | 88d348598d4d3eb20b534b88d186c2b1622066a4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import division, print_function
import logging
import argparse
import numpy as np
import pylab
import matplotlib as mpl
import matplotlib.pyplot as plt
import cPickle as pickle
from pandas import DataFrame
from mpl_toolkits.mplot3d import Axes3D
from blocks.main_loop import MainLoop
from blocks.log.log import TrainingLogBase
# Logging format used for console output.
FORMAT = '[%(asctime)s] %(name)-15s %(message)s'
DATEFMT = "%H:%M:%S"
logging.basicConfig(format=FORMAT, datefmt=DATEFMT, level=logging.INFO)

# Hard-coded path to the pickled training log to visualize.
model_file = 'new_class_test-20160320-145543/new_class_test_log'
with open(model_file,"rb") as f:
    model = pickle.load(f)

# The pickle may hold either a whole MainLoop (take its log) or a bare log.
if isinstance(model, MainLoop):
    log = model.log
elif isinstance(model, TrainingLogBase):
    log = model
else:
    print("Don't know how to handle unpickled %s" % type(model))
    exit(1)

# One row per logged iteration, one column per tracked quantity.
df = DataFrame.from_dict(log, orient='index')
#df = df.iloc[[0]+log.status._epoch_ends]
names = list(df)
# Square-ish grid of subplots large enough to hold every column.
nsp = int(np.ceil(np.sqrt(len(df.columns))))
# Columns 0-2 are skipped; presumably bookkeeping fields -- confirm.
for num in range(3,len(names)):
    plt.subplot(nsp,nsp,num+1)
    plt.plot(df[names[num]])
    plt.title(names[num])
plt.show()
| 21.960784 | 71 | 0.73125 |
4d673057f21756f244da591bf2a0eb748a211387 | 3,979 | py | Python | gdrive_deploy/utilities.py | keni7385/gdrive_deploy | 0df0c3c07783bd3559e02097397bc221928fe369 | [
"MIT"
] | null | null | null | gdrive_deploy/utilities.py | keni7385/gdrive_deploy | 0df0c3c07783bd3559e02097397bc221928fe369 | [
"MIT"
] | 1 | 2021-03-16T13:32:24.000Z | 2021-03-21T07:10:38.000Z | gdrive_deploy/utilities.py | keni7385/gdrive_deploy | 0df0c3c07783bd3559e02097397bc221928fe369 | [
"MIT"
] | null | null | null | from googleapiclient import errors
from googleapiclient.http import MediaFileUpload
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
def get_authenticated(SCOPES, credential_file, service_name='drive',
                      api_version='v3'):
    """Return an authorized Google API service client.

    Loads cached OAuth tokens from ``<credential_file>.token`` and runs the
    browser authorization flow only when no valid credentials are stored.
    """
    # The file token.json stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    token_file = credential_file + '.token'
    store = file.Storage(token_file)
    creds = store.get()
    if not creds or creds.invalid:
        flow = client.flow_from_clientsecrets(credential_file, SCOPES)
        creds = tools.run_flow(flow, store)
    service = build(service_name, api_version, http=creds.authorize(Http()))
    return service
def update_file(service, file_id, new_mime_type,
                new_filename):
    """Replace an existing Drive file's content.

    Args:
      service: Drive API service instance.
      file_id: ID of the file to update.
      new_mime_type: New MIME type for the file.
      new_filename: Filename of the new content to upload.

    Returns:
      Updated file metadata if successful, None otherwise.
    """
    try:
        # File's new content.
        media_body = MediaFileUpload(
            new_filename, mimetype=new_mime_type, resumable=True)
        # Send the request to the API. A missing or inaccessible file
        # surfaces here as an HttpError, so the previous unused
        # `service.files().get(...)` call (which also shadowed the `file`
        # builtin) has been dropped.
        updated_file = service.files().update(
            fileId=file_id,
            media_body=media_body).execute()
        return updated_file
    except errors.HttpError as error:
        print('An error occurred: %s' % error)
        return None
def insert_file(service, name, description, parent_id, mime_type, filename):
    """Insert new file.

    Args:
      service: Drive API service instance.
      name: Name of the file to insert, including the extension.
      description: Description of the file to insert.
      parent_id: Parent folder's ID.
      mime_type: MIME type of the file to insert.
      filename: Filename of the file to insert.

    Returns:
      Inserted file metadata if successful, None otherwise.
    """
    media_body = MediaFileUpload(filename, mimetype=mime_type, resumable=True)
    body = {
        'name': name,
        'description': description,
        'mimeType': mime_type
    }
    # Set the parent folder. Drive API v3 (which this module uses -- note the
    # v3-style 'name' field above and files().create below) expects a plain
    # list of folder IDs; the previous `[{'id': parent_id}]` form is the v2
    # shape and is rejected by v3.
    if parent_id:
        body['parents'] = [parent_id]
    try:
        created = service.files().create(
            body=body,
            media_body=media_body).execute()
        # Uncomment the following line to print the File ID
        # print('File ID: %s' % created['id'])
        return created
    except errors.HttpError as error:
        print('An error occurred: %s' % error)
        return None
def retrieve_all_files(service):
    """Collect every File resource visible to the service, page by page.

    Args:
      service: Drive API service instance.

    Returns:
      List of File resources.
    """
    collected = []
    page_token = None
    while True:
        try:
            request_params = {}
            if page_token:
                request_params['pageToken'] = page_token
            response = service.files().list(**request_params).execute()
            collected.extend(response['files'])
            page_token = response.get('nextPageToken')
            if not page_token:
                break
        except errors.HttpError as error:
            print('An error occurred: %s' % error)
            break
    return collected
def delete_file(service, file_id):
    """Permanently delete a file, skipping the trash.

    Args:
      service: Drive API service instance.
      file_id: ID of the file to delete.
    """
    try:
        service.files().delete(fileId=file_id).execute()
    except errors.HttpError as error:
        # Errors are reported but deliberately not re-raised (best effort).
        print('An error occurred: %s' % error)
| 30.607692 | 79 | 0.630058 |
81caf48127cc85352036adfbb0f3aca90f09cac2 | 754 | py | Python | 56_merge_intervals.py | claytonjwong/leetcode-py | 16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7 | [
"MIT"
] | 1 | 2020-07-15T14:16:23.000Z | 2020-07-15T14:16:23.000Z | 56_merge_intervals.py | claytonjwong/leetcode-py | 16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7 | [
"MIT"
] | null | null | null | 56_merge_intervals.py | claytonjwong/leetcode-py | 16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7 | [
"MIT"
] | null | null | null | #
# 56. Merge Intervals
#
# Q: https://leetcode.com/problems/merge-intervals/
# A: https://leetcode.com/problems/merge-intervals/discuss/940348/Kt-Js-Py3-Cpp-Sort-A-%2B-Track-Overlaps-via-Last-End
#
from typing import List
from functools import cmp_to_key
class Solution:
    def merge(self, A: List[List[int]]) -> List[List[int]]:
        """Sort the intervals and fold overlapping ones into single spans."""
        # Order by start, then end -- same ordering as the original comparator.
        A.sort(key=lambda interval: (interval[0], interval[1]))
        merged = [A[0]]
        for beg, end in A:
            last = merged[-1]
            if beg <= last[1]:
                # Overlaps the most recent span: extend it in place.
                last[1] = max(last[1], end)
            else:
                # Disjoint: start a new span.
                merged.append([beg, end])
        return merged
| 32.782609 | 118 | 0.561008 |
c2b90a85c4a9ec5360fd93beacf62cbfee2e62ac | 478 | py | Python | modules/pmg_qt/syntax/__init__.py | dualword/pymol-open-source | abc307745d7d231af4f77f984ebd64f1b428cef8 | [
"CNRI-Python"
] | 636 | 2018-06-21T20:46:36.000Z | 2022-03-30T13:07:47.000Z | modules/pmg_qt/syntax/__init__.py | dualword/pymol-open-source | abc307745d7d231af4f77f984ebd64f1b428cef8 | [
"CNRI-Python"
] | 218 | 2018-06-25T00:10:59.000Z | 2022-03-23T14:15:48.000Z | modules/pmg_qt/syntax/__init__.py | dualword/pymol-open-source | abc307745d7d231af4f77f984ebd64f1b428cef8 | [
"CNRI-Python"
] | 192 | 2018-06-21T17:33:10.000Z | 2022-03-31T17:53:03.000Z | from pymol.Qt import QtGui
def textformat(color, style=''):
    """Build a QTextCharFormat with the given foreground color.

    Recognized tokens in the space-separated `style` string: 'bold',
    'italic', and 'bg:<color>'; anything else is reported and ignored.
    """
    char_format = QtGui.QTextCharFormat()
    char_format.setForeground(QtGui.QColor(color))
    for token in style.split():
        if token == 'bold':
            char_format.setFontWeight(QtGui.QFont.Bold)
        elif token == 'italic':
            char_format.setFontItalic(True)
        elif token.startswith('bg:'):
            char_format.setBackground(QtGui.QColor(token[3:]))
        else:
            print('unhandled style:', token)
    return char_format
| 25.157895 | 53 | 0.587866 |
bf1ac6f5fe55ce52d8e68418ba232ea941a9c57f | 11,278 | py | Python | theano/tensor/tests/test_blas_c.py | mdda/Theano | 6ca7b2b65000e371f009b617d41bc5a90f022d38 | [
"BSD-3-Clause"
] | null | null | null | theano/tensor/tests/test_blas_c.py | mdda/Theano | 6ca7b2b65000e371f009b617d41bc5a90f022d38 | [
"BSD-3-Clause"
] | null | null | null | theano/tensor/tests/test_blas_c.py | mdda/Theano | 6ca7b2b65000e371f009b617d41bc5a90f022d38 | [
"BSD-3-Clause"
] | null | null | null | import sys
import numpy
from unittest import TestCase
from nose.plugins.skip import SkipTest
import theano
import theano.tensor as tensor
from theano.tensor.blas_c import CGer
from theano.tensor.blas_scipy import ScipyGer
from theano.tensor.blas import Ger
from theano.tensor.blas_c import CGemv
from theano.tensor.blas_scipy import ScipyGer
from theano.tensor.blas import Gemv
from theano.tensor.blas_c import check_force_gemv_init
from theano.tests import unittest_tools
from theano.tests.unittest_tools import TestOptimizationMixin
from theano.tensor.tests.test_blas import BaseGemv, TestBlasStrides
mode_blas_opt = theano.compile.get_default_mode().including(
'BlasOpt', 'specialize', 'InplaceBlasOpt', 'c_blas')
class TestCGer(TestCase, TestOptimizationMixin):
    """Tests for the C implementation of the Ger (rank-1 update) Op."""
    def setUp(self, dtype='float64'):
        # These tests only make sense when Theano links directly to BLAS.
        if theano.config.blas.ldflags == "":
            raise SkipTest("This test is useful only when Theano"
                           " is directly linked to blas.")
        self.dtype = dtype
        self.mode = theano.compile.get_default_mode().including('fast_run')
        self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
        self.a = tensor.tensor(dtype=dtype, broadcastable=())
        self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
        self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
        self.Aval = numpy.ones((2, 3), dtype=dtype)
        self.xval = numpy.asarray([1, 2], dtype=dtype)
        self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)
    def function(self, inputs, outputs):
        # Compile with the fast_run mode selected in setUp.
        return theano.function(inputs, outputs,
                               mode=self.mode,
                               # allow_inplace=True,
                               )
    def run_f(self, f):
        # Exercise both contiguous and reversed (negative-stride) inputs.
        f(self.Aval, self.xval, self.yval)
        f(self.Aval[::-1, ::-1], self.xval, self.yval)
    def b(self, bval):
        # Helper: wrap a Python scalar as a constant tensor of self.dtype.
        return tensor.as_tensor_variable(numpy.asarray(bval, dtype=self.dtype))
    def test_eq(self):
        self.assertTrue(CGer(True) == CGer(True))
        self.assertTrue(CGer(False) == CGer(False))
        self.assertTrue(CGer(False) != CGer(True))
        self.assertTrue(CGer(True) != ScipyGer(True))
        self.assertTrue(CGer(False) != ScipyGer(False))
        self.assertTrue(CGer(True) != Ger(True))
        self.assertTrue(CGer(False) != Ger(False))
        # assert that eq works for non-CGer instances
        self.assertTrue(CGer(False) is not None)
        self.assertTrue(CGer(True) is not None)
    def test_hash(self):
        # Equal Ops must hash equal; destructive flag participates in the hash.
        self.assertTrue(hash(CGer(True)) == hash(CGer(True)))
        self.assertTrue(hash(CGer(False)) == hash(CGer(False)))
        self.assertTrue(hash(CGer(False)) != hash(CGer(True)))
    def test_optimization_pipeline(self):
        f = self.function([self.x, self.y], tensor.outer(self.x, self.y))
        self.assertFunctionContains(f, CGer(destructive=True))
        f(self.xval, self.yval)  # DebugMode tests correctness
    def test_optimization_pipeline_float(self):
        # Same as above but with float32 inputs.
        self.setUp('float32')
        f = self.function([self.x, self.y], tensor.outer(self.x, self.y))
        self.assertFunctionContains(f, CGer(destructive=True))
        f(self.xval, self.yval)  # DebugMode tests correctness
    def test_int_fails(self):
        # Integer dtypes must NOT be optimized to CGer.
        self.setUp('int32')
        f = self.function([self.x, self.y], tensor.outer(self.x, self.y))
        self.assertFunctionContains0(f, CGer(destructive=True))
        self.assertFunctionContains0(f, CGer(destructive=False))
    def test_A_plus_outer(self):
        f = self.function([self.A, self.x, self.y],
                          self.A + tensor.outer(self.x, self.y))
        self.assertFunctionContains(f, CGer(destructive=False))
        self.run_f(f)  # DebugMode tests correctness
    def test_A_plus_scaled_outer(self):
        f = self.function([self.A, self.x, self.y],
                          self.A + 0.1 * tensor.outer(self.x, self.y))
        self.assertFunctionContains(f, CGer(destructive=False))
        self.run_f(f)  # DebugMode tests correctness
class TestCGemv(TestCase, TestOptimizationMixin):
    """Tests of CGemv specifically.

    Generic tests of Gemv-compatibility, including both dtypes are
    done below in TestCGemvFloat32 and TestCGemvFloat64
    """

    def setUp(self, dtype='float64'):
        # CGemv's C code can only run when Theano links directly to a BLAS.
        if theano.config.blas.ldflags == "":
            raise SkipTest("This test is useful only when Theano"
                           " is directly linked to blas.")

        self.dtype = dtype
        self.mode = theano.compile.get_default_mode().including('fast_run')
        # matrix
        self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
        self.Aval = numpy.ones((2, 3), dtype=dtype)

        # vector
        self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
        self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
        self.xval = numpy.asarray([1, 2], dtype=dtype)
        self.yval = numpy.asarray([1.5, 2.7, 3.9], dtype=dtype)

        # scalar
        self.a = tensor.tensor(dtype=dtype, broadcastable=())

    def test_optimizations_vm(self):
        ''' Test vector dot matrix '''
        f = theano.function([self.x, self.A],
                            theano.dot(self.x, self.A),
                            mode=self.mode)

        # Assert that the dot was optimized somehow
        self.assertFunctionContains0(f, tensor.dot)
        self.assertFunctionContains1(
            f,
            CGemv(inplace=True, force_init_beta=True)
        )

        # Assert they produce the same output
        assert numpy.allclose(f(self.xval, self.Aval),
                              numpy.dot(self.xval, self.Aval))
        # Test with negative strides on 2 dims
        assert numpy.allclose(f(self.xval, self.Aval[::-1, ::-1]),
                              numpy.dot(self.xval, self.Aval[::-1, ::-1]))

    def test_optimizations_mv(self):
        ''' Test matrix dot vector '''
        f = theano.function([self.A, self.y],
                            theano.dot(self.A, self.y),
                            mode=self.mode)

        # Assert that the dot was optimized somehow
        self.assertFunctionContains0(f, tensor.dot)
        self.assertFunctionContains1(
            f,
            CGemv(inplace=True, force_init_beta=True)
        )

        # Assert they produce the same output
        assert numpy.allclose(f(self.Aval, self.yval),
                              numpy.dot(self.Aval, self.yval))
        # Test with negative strides on 2 dims
        assert numpy.allclose(f(self.Aval[::-1, ::-1], self.yval),
                              numpy.dot(self.Aval[::-1, ::-1], self.yval))

    def test_force_gemv_init(self):
        # NOTE(review): this only emits a warning on stderr - the "test"
        # itself can never fail; it documents a BLAS quirk for the user.
        if check_force_gemv_init():
            sys.stderr.write(
                "WARNING: The current BLAS requires Theano to initialize"
                + " memory for some GEMV calls which will result in a minor"
                + " degradation in performance for such calls."
            )

    def t_gemv1(self, m_shp):
        ''' test vector2 + dot(matrix, vector1) '''
        rng = numpy.random.RandomState(unittest_tools.fetch_seed())
        v1 = theano.shared(numpy.array(rng.uniform(size=(m_shp[1],)),
                                       dtype='float32'))
        v2_orig = numpy.array(rng.uniform(size=(m_shp[0],)), dtype='float32')
        v2 = theano.shared(v2_orig)
        m = theano.shared(numpy.array(rng.uniform(size=m_shp),
                                      dtype='float32'))

        f = theano.function([], v2 + tensor.dot(m, v1),
                            mode=self.mode)

        # Assert they produce the same output
        assert numpy.allclose(f(),
                              numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
        # The whole graph must collapse into a single non-inplace CGemv.
        topo = [n.op for n in f.maker.fgraph.toposort()]
        assert topo == [CGemv(inplace=False)], topo

        # test the inplace version
        g = theano.function([], [],
                            updates=[(v2, v2 + theano.dot(m, v1))],
                            mode=self.mode)

        # Assert they produce the same output
        g()
        assert numpy.allclose(v2.get_value(),
                              numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
        topo = [n.op for n in g.maker.fgraph.toposort()]
        assert topo == [CGemv(inplace=True)]

        # Do the same tests with a matrix with strides in both dimensions
        m.set_value(
            m.get_value(borrow=True)[::-1, ::-1],
            borrow=True)
        v2.set_value(v2_orig)
        assert numpy.allclose(f(),
                              numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
        g()
        assert numpy.allclose(v2.get_value(),
                              numpy.dot(m.get_value(), v1.get_value()) + v2_orig)

    def test_gemv1(self):
        # Exercise regular and every empty/degenerate shape combination.
        self.t_gemv1((3, 2))
        self.t_gemv1((1, 2))
        self.t_gemv1((0, 2))
        self.t_gemv1((3, 1))
        self.t_gemv1((3, 0))
        self.t_gemv1((1, 0))
        self.t_gemv1((0, 1))
        self.t_gemv1((0, 0))

    def test_gemv_dimensions(self, dtype='float32'):
        alpha = theano.shared(theano._asarray(1.0, dtype=dtype),
                              name='alpha')
        beta = theano.shared(theano._asarray(1.0, dtype=dtype),
                             name='beta')

        z = beta * self.y + alpha * tensor.dot(self.A, self.x)
        f = theano.function([self.A, self.x, self.y], z,
                            mode=self.mode)

        # Matrix value
        A_val = numpy.ones((5, 3), dtype=dtype)
        # Different vector length
        ones_3 = numpy.ones(3, dtype=dtype)
        ones_4 = numpy.ones(4, dtype=dtype)
        ones_5 = numpy.ones(5, dtype=dtype)
        ones_6 = numpy.ones(6, dtype=dtype)

        f(A_val, ones_3, ones_5)
        f(A_val[::-1, ::-1], ones_3, ones_5)
        # Mismatched dimensions must be rejected at call time.
        self.assertRaises(ValueError, f, A_val, ones_4, ones_5)
        self.assertRaises(ValueError, f, A_val, ones_3, ones_6)
        self.assertRaises(ValueError, f, A_val, ones_4, ones_6)

    def test_multiple_inplace(self):
        # Two gemv outputs in one function: each must get its own buffer.
        x = tensor.dmatrix('x')
        y = tensor.dvector('y')
        z = tensor.dvector('z')
        f = theano.function([x, y, z],
                            [tensor.dot(y, x), tensor.dot(z,x)],
                            mode=mode_blas_opt)
        vx = numpy.random.rand(3, 3)
        vy = numpy.random.rand(3)
        vz = numpy.random.rand(3)
        out = f(vx, vy, vz)
        assert numpy.allclose(out[0], numpy.dot(vy, vx))
        assert numpy.allclose(out[1], numpy.dot(vz, vx))
        assert len([n for n in f.maker.fgraph.apply_nodes
                    if isinstance(n.op, tensor.AllocEmpty)]) == 2
class TestCGemvFloat32(TestCase, BaseGemv, TestOptimizationMixin):
    """Generic Gemv-compatibility tests run against CGemv in float32."""
    mode = mode_blas_opt
    dtype = 'float32'
    gemv = CGemv(inplace=False)
    gemv_inplace = CGemv(inplace=True)

    def setUp(self):
        # Requires a direct BLAS link, same as the other CGemv tests.
        if theano.config.blas.ldflags == "":
            raise SkipTest("This test is useful only when Theano"
                           " is directly linked to blas.")
class TestCGemvFloat64(TestCase, BaseGemv, TestOptimizationMixin):
    """Generic Gemv-compatibility tests run against CGemv in float64."""
    mode = mode_blas_opt
    dtype = 'float64'
    gemv = CGemv(inplace=False)
    gemv_inplace = CGemv(inplace=True)

    def setUp(self):
        # Requires a direct BLAS link, same as the other CGemv tests.
        if theano.config.blas.ldflags == "":
            raise SkipTest("This test is useful only when Theano"
                           " is directly linked to blas.")
class TestBlasStridesC(TestBlasStrides):
    """Re-run the generic stride tests under the C-BLAS optimized mode."""
    mode = mode_blas_opt
| 37.344371 | 79 | 0.60321 |
4fc92fe72165dbc7fa17d0298331b24a49eb073c | 552 | py | Python | .history/chapter01/python_05_if_condition_20201128213833.py | KustomApe/nerdape | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | [
"MIT"
] | null | null | null | .history/chapter01/python_05_if_condition_20201128213833.py | KustomApe/nerdape | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | [
"MIT"
] | null | null | null | .history/chapter01/python_05_if_condition_20201128213833.py | KustomApe/nerdape | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | [
"MIT"
] | null | null | null | """[if文について]
もし〜だったら、こうして
"""
# if 条件:
# 実行するブロック
# 条件によって処理を適応したい場合
# 3000kmごとにオイル交換しないといけない
distance = 3403
if distance > 3000:
print('オイル交換時期です')
total = 135241
if total /
# 文字列を比較する/リストを比較する
# if 'abc' == "ABC":
# print('同類です')
# if 'CDE' == 'CDE':
# print('同類です')
# if 'あいうえお' == 'あいうえお':
# print('同類です')
# 文字列を検索する/リストの要素を検索する
# if 'abc' in "ABC":
# print('ヒットしました!')
# if 'ドリフト' in '僕はドリフトが好きです':
# print('ヒットしました!')
# if 'japan' in 'japanese domestic market vehicle':
# print('ヒットしました!')
# else文
# elif文
| 15.333333 | 51 | 0.594203 |
fb59a07d9cb49de1999bfb7904aa25a30ecef2e6 | 2,970 | py | Python | examples/demonstration/tensorflow_asr/augmentations/augments.py | o74589055/tf_asr | 1801a035d15253fd25df4f9541457dd635f6d10d | [
"Apache-2.0"
] | 1 | 2021-03-20T09:21:49.000Z | 2021-03-20T09:21:49.000Z | examples/demonstration/tensorflow_asr/augmentations/augments.py | o74589055/tf_asr | 1801a035d15253fd25df4f9541457dd635f6d10d | [
"Apache-2.0"
] | null | null | null | examples/demonstration/tensorflow_asr/augmentations/augments.py | o74589055/tf_asr | 1801a035d15253fd25df4f9541457dd635f6d10d | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import nlpaug.flow as naf
from .signal_augment import SignalCropping, SignalLoudness, SignalMask, SignalNoise, \
SignalPitch, SignalShift, SignalSpeed, SignalVtlp
from .spec_augment import FreqMasking, TimeMasking, TFFreqMasking, TFTimeMasking
# Registry of nlpaug-backed (numpy) augmentations, keyed by config name.
AUGMENTATIONS = {
    "freq_masking": FreqMasking,
    "time_masking": TimeMasking,
    "noise": SignalNoise,
    "masking": SignalMask,
    "cropping": SignalCropping,
    "loudness": SignalLoudness,
    "pitch": SignalPitch,
    "shift": SignalShift,
    "speed": SignalSpeed,
    "vtlp": SignalVtlp
}

# Registry of TensorFlow-native augmentations usable inside a tf.function.
TFAUGMENTATIONS = {
    "freq_masking": TFFreqMasking,
    "time_masking": TFTimeMasking,
}
class TFAugmentationExecutor:
    """Applies a fixed sequence of TF-native augmentations to a tensor."""

    def __init__(self, augmentations: list):
        self.augmentations = augmentations

    @tf.function
    def augment(self, inputs):
        """Run every augmentation over ``inputs`` in order; return result."""
        augmented = inputs
        for augmentation in self.augmentations:
            augmented = augmentation.augment(augmented)
        return augmented
class Augmentation:
    """Builds augmentation pipelines from a config dict.

    The config may contain two sections: ``before`` (applied to the raw
    signal) and ``after`` (applied to the extracted features).  With
    ``use_tf=True`` the TensorFlow-native augmentations are used so the
    pipeline can run inside a ``tf.function``.
    """

    def __init__(self, config: dict = None, use_tf: bool = False):
        if not config:
            config = {}
        # NOTE: ``pop`` mutates the caller's dict - existing behavior kept.
        if use_tf:
            self.before = self.tf_parse(config.pop("before", {}))
            self.after = self.tf_parse(config.pop("after", {}))
        else:
            self.before = self.parse(config.pop("before", {}))
            self.after = self.parse(config.pop("after", {}))

    @staticmethod
    def parse(config: dict):
        """Build an nlpaug ``Sometimes`` flow from ``config``.

        FIX: the return annotation previously claimed ``-> list`` while a
        ``naf.Sometimes`` flow is actually returned; the misleading
        annotation is removed.

        Each key names an entry in AUGMENTATIONS; its value (if any) is
        passed as keyword arguments to the augmentation constructor.

        Raises:
            KeyError: if a key does not name a known augmentation.
        """
        augmentations = []
        for key, value in config.items():
            au = AUGMENTATIONS.get(key, None)
            if au is None:
                raise KeyError(f"No augmentation named: {key}\n"
                               f"Available augmentations: {AUGMENTATIONS.keys()}")
            aug = au(**value) if value is not None else au()
            augmentations.append(aug)
        return naf.Sometimes(augmentations)

    @staticmethod
    def tf_parse(config: dict):
        """Build a :class:`TFAugmentationExecutor` from ``config``.

        FIX: the return annotation previously claimed ``-> list`` while an
        executor object is actually returned; the misleading annotation is
        removed.

        Raises:
            KeyError: if a key does not name a known TF augmentation.
        """
        augmentations = []
        for key, value in config.items():
            au = TFAUGMENTATIONS.get(key, None)
            if au is None:
                raise KeyError(f"No tf augmentation named: {key}\n"
                               f"Available tf augmentations: {TFAUGMENTATIONS.keys()}")
            aug = au(**value) if value is not None else au()
            augmentations.append(aug)
        return TFAugmentationExecutor(augmentations)
| 34.137931 | 87 | 0.644781 |
ef55012da1ee452a1ec3cc98d7d2170071112d7d | 113,370 | py | Python | octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py | mail2nsrajesh/octavia | 7466016ae982af2a560a94327f9e63a7e7151cc5 | [
"Apache-2.0"
] | 1 | 2019-01-11T06:18:38.000Z | 2019-01-11T06:18:38.000Z | octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py | mail2nsrajesh/octavia | 7466016ae982af2a560a94327f9e63a7e7151cc5 | [
"Apache-2.0"
] | null | null | null | octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py | mail2nsrajesh/octavia | 7466016ae982af2a560a94327f9e63a7e7151cc5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import json
import os
import random
import socket
import stat
import subprocess
import mock
import netifaces
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
import six
from octavia.amphorae.backends.agent import api_server
from octavia.amphorae.backends.agent.api_server import certificate_update
from octavia.amphorae.backends.agent.api_server import server
from octavia.amphorae.backends.agent.api_server import util
from octavia.common import config
from octavia.common import constants as consts
from octavia.common import utils as octavia_utils
from octavia.tests.common import utils as test_utils
import octavia.tests.unit.base as base
# Stand-in stderr payload used when mocking failed subprocess calls.
RANDOM_ERROR = 'random error'
# Canonical success body returned by the amphora agent API.
OK = dict(message='OK')
class TestServerTestCase(base.TestCase):
app = None
def setUp(self):
    """Create Ubuntu- and CentOS-flavored agent test clients."""
    super(TestServerTestCase, self).setUp()
    # The agent tailors init-system handling to the distro reported by
    # platform.linux_distribution, so build one Flask test client per
    # supported distribution.
    with mock.patch('platform.linux_distribution',
                    return_value=['Ubuntu', 'Foo', 'Bar']):
        self.ubuntu_test_server = server.Server()
        self.ubuntu_app = self.ubuntu_test_server.app.test_client()
    with mock.patch('platform.linux_distribution',
                    return_value=['centos', 'Foo', 'Bar']):
        self.centos_test_server = server.Server()
        self.centos_app = self.centos_test_server.app.test_client()
    self.conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF))
    self.conf.config(group="haproxy_amphora", base_path='/var/lib/octavia')
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSTEMD)
def test_ubuntu_haproxy_systemd(self, mock_init_system):
    """haproxy config upload on Ubuntu with systemd init."""
    self._test_haproxy(consts.INIT_SYSTEMD, consts.UBUNTU,
                       mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSTEMD)
def test_centos_haproxy_systemd(self, mock_init_system):
    """haproxy config upload on CentOS with systemd init."""
    self._test_haproxy(consts.INIT_SYSTEMD, consts.CENTOS,
                       mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSVINIT)
def test_ubuntu_haproxy_sysvinit(self, mock_init_system):
    """haproxy config upload on Ubuntu with SysV init."""
    self._test_haproxy(consts.INIT_SYSVINIT, consts.UBUNTU,
                       mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_UPSTART)
def test_ubuntu_haproxy_upstart(self, mock_init_system):
    """haproxy config upload on Ubuntu with upstart init."""
    self._test_haproxy(consts.INIT_UPSTART, consts.UBUNTU,
                       mock_init_system)
@mock.patch('os.path.exists')
@mock.patch('os.makedirs')
@mock.patch('os.rename')
@mock.patch('subprocess.check_output')
def _test_haproxy(self, init_system, distro, mock_init_system,
                  mock_subprocess, mock_rename,
                  mock_makedirs, mock_exists):
    """Exercise PUT .../haproxy (config upload) for one distro/init system.

    Covers: successful upload, write failure, init-script creation,
    haproxy config-check rejection, and an unknown init system.
    """
    self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
    flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC

    mock_exists.return_value = True
    file_name = '/var/lib/octavia/123/haproxy.cfg.new'
    m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open

    # happy case upstart file exists
    with mock.patch('os.open') as mock_open, mock.patch.object(
            os, 'fdopen', m) as mock_fdopen:
        mock_open.return_value = 123
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                     '/listeners/amp_123/123/haproxy',
                                     data='test')
        elif distro == consts.CENTOS:
            rv = self.centos_app.put('/' + api_server.VERSION +
                                     '/listeners/amp_123/123/haproxy',
                                     data='test')
        mode = stat.S_IRUSR | stat.S_IWUSR
        mock_open.assert_called_with(file_name, flags, mode)
        mock_fdopen.assert_called_with(123, 'w')
        self.assertEqual(202, rv.status_code)
        m().write.assert_called_once_with('test')
        # The agent must syntax-check the new config before installing it.
        mock_subprocess.assert_any_call(
            "haproxy -c -L {peer} -f {config_file} -f {haproxy_ug}".format(
                config_file=file_name,
                haproxy_ug=consts.HAPROXY_USER_GROUP_CFG,
                peer=(octavia_utils.
                      base64_sha1_string('amp_123').rstrip('='))).split(),
            stderr=-2)
        mock_rename.assert_called_with(
            '/var/lib/octavia/123/haproxy.cfg.new',
            '/var/lib/octavia/123/haproxy.cfg')

        # The service must be enabled in the distro's init system.
        if init_system == consts.INIT_SYSTEMD:
            mock_subprocess.assert_any_call(
                "systemctl enable haproxy-123".split(),
                stderr=subprocess.STDOUT)
        elif init_system == consts.INIT_SYSVINIT:
            mock_subprocess.assert_any_call(
                "insserv /etc/init.d/haproxy-123".split(),
                stderr=subprocess.STDOUT)
        else:
            self.assertIn(init_system, consts.VALID_INIT_SYSTEMS)

    # exception writing
    m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
    m.side_effect = IOError()  # open crashes
    with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                     '/listeners/amp_123/123/haproxy',
                                     data='test')
        elif distro == consts.CENTOS:
            rv = self.centos_app.put('/' + api_server.VERSION +
                                     '/listeners/amp_123/123/haproxy',
                                     data='test')
        self.assertEqual(500, rv.status_code)

    # check if files get created
    mock_exists.return_value = False
    if init_system == consts.INIT_SYSTEMD:
        init_path = consts.SYSTEMD_DIR + '/haproxy-123.service'
    elif init_system == consts.INIT_UPSTART:
        init_path = consts.UPSTART_DIR + '/haproxy-123.conf'
    elif init_system == consts.INIT_SYSVINIT:
        init_path = consts.SYSVINIT_DIR + '/haproxy-123'
    else:
        self.assertIn(init_system, consts.VALID_INIT_SYSTEMS)
    m = self.useFixture(test_utils.OpenFixture(init_path)).mock_open

    # happy case upstart file exists
    with mock.patch('os.open') as mock_open, mock.patch.object(
            os, 'fdopen', m) as mock_fdopen:
        mock_open.return_value = 123
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                     '/listeners/amp_123/123/haproxy',
                                     data='test')
        elif distro == consts.CENTOS:
            rv = self.centos_app.put('/' + api_server.VERSION +
                                     '/listeners/amp_123/123/haproxy',
                                     data='test')
        self.assertEqual(202, rv.status_code)
        # systemd units are plain files; other init scripts are executable.
        if init_system == consts.INIT_SYSTEMD:
            mode = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP |
                    stat.S_IROTH)
        else:
            mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                    stat.S_IROTH | stat.S_IXOTH)
        mock_open.assert_called_with(init_path, flags, mode)
        mock_fdopen.assert_called_with(123, 'w')
        handle = mock_fdopen()
        handle.write.assert_any_call('test')
        # skip the template stuff
        mock_makedirs.assert_called_with('/var/lib/octavia/123')

    # unhappy case haproxy check fails
    mock_exists.return_value = True
    mock_subprocess.side_effect = [subprocess.CalledProcessError(
        7, 'test', RANDOM_ERROR)]
    with mock.patch('os.open') as mock_open, mock.patch.object(
            os, 'fdopen', m) as mock_fdopen:
        mock_open.return_value = 123
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                     '/listeners/amp_123/123/haproxy',
                                     data='test')
        elif distro == consts.CENTOS:
            rv = self.centos_app.put('/' + api_server.VERSION +
                                     '/listeners/amp_123/123/haproxy',
                                     data='test')
        self.assertEqual(400, rv.status_code)
        self.assertEqual(
            {'message': 'Invalid request', u'details': u'random error'},
            json.loads(rv.data.decode('utf-8')))
        mode = stat.S_IRUSR | stat.S_IWUSR
        mock_open.assert_called_with(file_name, flags, mode)
        mock_fdopen.assert_called_with(123, 'w')
        handle = mock_fdopen()
        handle.write.assert_called_with('test')
        mock_subprocess.assert_called_with(
            "haproxy -c -L {peer} -f {config_file} -f {haproxy_ug}".format(
                config_file=file_name,
                haproxy_ug=consts.HAPROXY_USER_GROUP_CFG,
                peer=(octavia_utils.
                      base64_sha1_string('amp_123').rstrip('='))).split(),
            stderr=-2)
        # A rejected config is kept under a -failed name for debugging.
        mock_rename.assert_called_with(
            '/var/lib/octavia/123/haproxy.cfg.new',
            '/var/lib/octavia/123/haproxy.cfg.new-failed')

    # unhappy path with bogus init system
    mock_init_system.return_value = 'bogus'
    with mock.patch('os.open') as mock_open, mock.patch.object(
            os, 'fdopen', m) as mock_fdopen:
        mock_open.return_value = 123
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                     '/listeners/amp_123/123/haproxy',
                                     data='test')
        elif distro == consts.CENTOS:
            # NOTE(review): this branch reuses ubuntu_app on CENTOS -
            # presumably a copy/paste slip in the original; behavior kept.
            rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                     '/listeners/amp_123/123/haproxy',
                                     data='test')
        self.assertEqual(500, rv.status_code)
def test_ubuntu_start(self):
    """Listener start action on Ubuntu."""
    self._test_start(consts.UBUNTU)
def test_centos_start(self):
    """Listener start action on CentOS."""
    self._test_start(consts.CENTOS)
@mock.patch('os.path.exists')
@mock.patch('octavia.amphorae.backends.agent.api_server.listener.Listener.'
'vrrp_check_script_update')
@mock.patch('subprocess.check_output')
def _test_start(self, distro, mock_subprocess, mock_vrrp, mock_exists):
    """PUT /listeners/123/<action>: unknown action, missing config,
    successful start, and a failing service start."""
    self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])

    # Unknown action -> 400
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                 '/listeners/123/error')
    elif distro == consts.CENTOS:
        rv = self.centos_app.put('/' + api_server.VERSION +
                                 '/listeners/123/error')
    self.assertEqual(400, rv.status_code)
    self.assertEqual(
        {'message': 'Invalid Request',
         'details': 'Unknown action: error', },
        json.loads(rv.data.decode('utf-8')))

    # No haproxy.cfg for the listener -> 404
    mock_exists.return_value = False
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                 '/listeners/123/start')
    elif distro == consts.CENTOS:
        rv = self.centos_app.put('/' + api_server.VERSION +
                                 '/listeners/123/start')
    self.assertEqual(404, rv.status_code)
    self.assertEqual(
        {'message': 'Listener Not Found',
         'details': 'No listener with UUID: 123'},
        json.loads(rv.data.decode('utf-8')))
    mock_exists.assert_called_with('/var/lib/octavia/123/haproxy.cfg')

    # Config present -> service start is issued and 202 returned.
    mock_exists.return_value = True
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                 '/listeners/123/start')
    elif distro == consts.CENTOS:
        rv = self.centos_app.put('/' + api_server.VERSION +
                                 '/listeners/123/start')
    self.assertEqual(202, rv.status_code)
    self.assertEqual(
        {'message': 'OK',
         'details': 'Configuration file is valid\nhaproxy daemon for'
                    ' 123 started'},
        json.loads(rv.data.decode('utf-8')))
    mock_subprocess.assert_called_with(
        ['/usr/sbin/service', 'haproxy-123', 'start'], stderr=-2)

    # Service start fails -> 500 with the subprocess stderr as details.
    mock_exists.return_value = True
    mock_subprocess.side_effect = subprocess.CalledProcessError(
        7, 'test', RANDOM_ERROR)
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                 '/listeners/123/start')
    elif distro == consts.CENTOS:
        rv = self.centos_app.put('/' + api_server.VERSION +
                                 '/listeners/123/start')
    self.assertEqual(500, rv.status_code)
    self.assertEqual(
        {
            'message': 'Error starting haproxy',
            'details': RANDOM_ERROR,
        }, json.loads(rv.data.decode('utf-8')))
    mock_subprocess.assert_called_with(
        ['/usr/sbin/service', 'haproxy-123', 'start'], stderr=-2)
def test_ubuntu_reload(self):
    """Listener reload action on Ubuntu."""
    self._test_reload(consts.UBUNTU)
def test_centos_reload(self):
    """Listener reload action on CentOS."""
    self._test_reload(consts.CENTOS)
@mock.patch('os.path.exists')
@mock.patch('octavia.amphorae.backends.agent.api_server.listener.Listener.'
'vrrp_check_script_update')
@mock.patch('octavia.amphorae.backends.agent.api_server.listener.Listener.'
'_check_haproxy_status')
@mock.patch('subprocess.check_output')
def _test_reload(self, distro, mock_subprocess, mock_haproxy_status,
                 mock_vrrp, mock_exists):
    """PUT /listeners/123/reload: reload when haproxy is running,
    fall back to start when it is not."""
    self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])

    # Process running so reload
    mock_exists.return_value = True
    mock_haproxy_status.return_value = consts.ACTIVE
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                 '/listeners/123/reload')
    elif distro == consts.CENTOS:
        rv = self.centos_app.put('/' + api_server.VERSION +
                                 '/listeners/123/reload')
    self.assertEqual(202, rv.status_code)
    self.assertEqual(
        {'message': 'OK',
         'details': 'Listener 123 reloaded'},
        json.loads(rv.data.decode('utf-8')))
    mock_subprocess.assert_called_with(
        ['/usr/sbin/service', 'haproxy-123', 'reload'], stderr=-2)

    # Process not running so start
    mock_exists.return_value = True
    mock_haproxy_status.return_value = consts.OFFLINE
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                 '/listeners/123/reload')
    elif distro == consts.CENTOS:
        rv = self.centos_app.put('/' + api_server.VERSION +
                                 '/listeners/123/reload')
    self.assertEqual(202, rv.status_code)
    self.assertEqual(
        {'message': 'OK',
         'details': 'Configuration file is valid\nhaproxy daemon for'
                    ' 123 started'},
        json.loads(rv.data.decode('utf-8')))
    mock_subprocess.assert_called_with(
        ['/usr/sbin/service', 'haproxy-123', 'start'], stderr=-2)
def test_ubuntu_info(self):
    """GET /info on Ubuntu."""
    self._test_info(consts.UBUNTU)
def test_centos_info(self):
    """GET /info on CentOS."""
    self._test_info(consts.CENTOS)
@mock.patch('socket.gethostname')
@mock.patch('subprocess.check_output')
def _test_info(self, distro, mock_subbprocess, mock_hostname):
    """GET /info returns the api version, haproxy version and hostname."""
    self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
    mock_hostname.side_effect = ['test-host']
    # Canned dpkg-style package output the agent parses the version from.
    mock_subbprocess.side_effect = [
        b"""Package: haproxy
            Status: install ok installed
            Priority: optional
            Section: net
            Installed-Size: 803
            Maintainer: Ubuntu Developers
            Architecture: amd64
            Version: 9.9.99-9
            """]

    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.get('/' + api_server.VERSION + '/info')
    elif distro == consts.CENTOS:
        rv = self.centos_app.get('/' + api_server.VERSION + '/info')

    self.assertEqual(200, rv.status_code)
    self.assertEqual(dict(
        api_version='0.5',
        haproxy_version='9.9.99-9',
        hostname='test-host'),
        json.loads(rv.data.decode('utf-8')))
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSTEMD)
def test_delete_ubuntu_listener_systemd(self, mock_init_system):
    """Listener delete on Ubuntu with systemd init."""
    self._test_delete_listener(consts.INIT_SYSTEMD, consts.UBUNTU,
                               mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSTEMD)
def test_delete_centos_listener_systemd(self, mock_init_system):
    """Listener delete on CentOS with systemd init."""
    self._test_delete_listener(consts.INIT_SYSTEMD, consts.CENTOS,
                               mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSVINIT)
def test_delete_ubuntu_listener_sysvinit(self, mock_init_system):
    """Listener delete on Ubuntu with SysV init."""
    self._test_delete_listener(consts.INIT_SYSVINIT, consts.UBUNTU,
                               mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_UPSTART)
def test_delete_ubuntu_listener_upstart(self, mock_init_system):
    """Listener delete on Ubuntu with upstart init."""
    self._test_delete_listener(consts.INIT_UPSTART, consts.UBUNTU,
                               mock_init_system)
@mock.patch('os.path.exists')
@mock.patch('subprocess.check_output')
@mock.patch('octavia.amphorae.backends.agent.api_server.util.' +
'get_haproxy_pid')
@mock.patch('shutil.rmtree')
@mock.patch('os.remove')
def _test_delete_listener(self, init_system, distro, mock_init_system,
                          mock_remove, mock_rmtree, mock_pid,
                          mock_check_output, mock_exists):
    """DELETE /listeners/123 across init systems and service states.

    Covers: unknown listener, stopped service with and without an init
    script, running service, and a failing service stop.
    """
    self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])

    # No haproxy.cfg for the listener -> 404
    mock_exists.return_value = False
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.delete('/' + api_server.VERSION +
                                    '/listeners/123')
    elif distro == consts.CENTOS:
        rv = self.centos_app.delete('/' + api_server.VERSION +
                                    '/listeners/123')
    self.assertEqual(404, rv.status_code)
    self.assertEqual(
        {'message': 'Listener Not Found',
         'details': 'No listener with UUID: 123'},
        json.loads(rv.data.decode('utf-8')))
    mock_exists.assert_called_with('/var/lib/octavia/123/haproxy.cfg')

    # service is stopped + no upstart script
    mock_exists.side_effect = [True, False, False]
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.delete('/' + api_server.VERSION +
                                    '/listeners/123')
    elif distro == consts.CENTOS:
        rv = self.centos_app.delete('/' + api_server.VERSION +
                                    '/listeners/123')
    self.assertEqual(200, rv.status_code)
    self.assertEqual({u'message': u'OK'},
                     json.loads(rv.data.decode('utf-8')))
    mock_rmtree.assert_called_with('/var/lib/octavia/123')

    # The agent must check the init-script path for this init system.
    if init_system == consts.INIT_SYSTEMD:
        mock_exists.assert_called_with(consts.SYSTEMD_DIR +
                                       '/haproxy-123.service')
    elif init_system == consts.INIT_UPSTART:
        mock_exists.assert_called_with(consts.UPSTART_DIR +
                                       '/haproxy-123.conf')
    elif init_system == consts.INIT_SYSVINIT:
        mock_exists.assert_called_with(consts.SYSVINIT_DIR +
                                       '/haproxy-123')
    else:
        self.assertIn(init_system, consts.VALID_INIT_SYSTEMS)
    mock_exists.assert_any_call('/var/lib/octavia/123/123.pid')

    # service is stopped + upstart script
    mock_exists.side_effect = [True, False, True]
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.delete('/' + api_server.VERSION +
                                    '/listeners/123')
    elif distro == consts.CENTOS:
        rv = self.centos_app.delete('/' + api_server.VERSION +
                                    '/listeners/123')
    self.assertEqual(200, rv.status_code)
    self.assertEqual({u'message': u'OK'},
                     json.loads(rv.data.decode('utf-8')))

    # The leftover init script must be removed.
    if init_system == consts.INIT_SYSTEMD:
        mock_remove.assert_called_with(consts.SYSTEMD_DIR +
                                       '/haproxy-123.service')
    elif init_system == consts.INIT_UPSTART:
        mock_remove.assert_called_with(consts.UPSTART_DIR +
                                       '/haproxy-123.conf')
    elif init_system == consts.INIT_SYSVINIT:
        mock_remove.assert_called_with(consts.SYSVINIT_DIR +
                                       '/haproxy-123')
    else:
        self.assertIn(init_system, consts.VALID_INIT_SYSTEMS)

    # service is running + upstart script
    mock_exists.side_effect = [True, True, True, True]
    mock_pid.return_value = '456'
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.delete('/' + api_server.VERSION +
                                    '/listeners/123')
    elif distro == consts.CENTOS:
        rv = self.centos_app.delete('/' + api_server.VERSION +
                                    '/listeners/123')
    self.assertEqual(200, rv.status_code)
    self.assertEqual({u'message': u'OK'},
                     json.loads(rv.data.decode('utf-8')))
    mock_pid.assert_called_once_with('123')
    # A running haproxy must be stopped before cleanup.
    mock_check_output.assert_any_call(
        ['/usr/sbin/service', 'haproxy-123', 'stop'], stderr=-2)

    # The service must also be disabled in the init system.
    if init_system == consts.INIT_SYSTEMD:
        mock_check_output.assert_any_call(
            "systemctl disable haproxy-123".split(),
            stderr=subprocess.STDOUT)
    elif init_system == consts.INIT_UPSTART:
        mock_remove.assert_any_call(consts.UPSTART_DIR +
                                    '/haproxy-123.conf')
    elif init_system == consts.INIT_SYSVINIT:
        mock_check_output.assert_any_call(
            "insserv -r /etc/init.d/haproxy-123".split(),
            stderr=subprocess.STDOUT)
    else:
        self.assertIn(init_system, consts.VALID_INIT_SYSTEMS)

    # service is running + stopping fails
    mock_exists.side_effect = [True, True, True]
    mock_check_output.side_effect = subprocess.CalledProcessError(
        7, 'test', RANDOM_ERROR)
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.delete('/' + api_server.VERSION +
                                    '/listeners/123')
    elif distro == consts.CENTOS:
        rv = self.centos_app.delete('/' + api_server.VERSION +
                                    '/listeners/123')
    self.assertEqual(500, rv.status_code)
    self.assertEqual(
        {'details': 'random error', 'message': 'Error stopping haproxy'},
        json.loads(rv.data.decode('utf-8')))
    # that's the last call before exception
    mock_exists.assert_called_with('/proc/456')
def test_ubuntu_get_haproxy(self):
    """GET listener haproxy config on Ubuntu."""
    self._test_get_haproxy(consts.UBUNTU)
def test_centos_get_haproxy(self):
    """GET listener haproxy config on CentOS."""
    self._test_get_haproxy(consts.CENTOS)
@mock.patch('os.path.exists')
def _test_get_haproxy(self, distro, mock_exists):
    """GET /listeners/123/haproxy: 404 when the config is missing,
    otherwise the file body as text/plain."""
    self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])

    CONTENT = "bibble\nbibble"

    # Config file absent -> 404
    mock_exists.side_effect = [False]
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                 '/listeners/123/haproxy')
    elif distro == consts.CENTOS:
        rv = self.centos_app.get('/' + api_server.VERSION +
                                 '/listeners/123/haproxy')
    self.assertEqual(404, rv.status_code)

    # Config file present -> contents returned verbatim.
    mock_exists.side_effect = [True]

    path = util.config_path('123')
    self.useFixture(test_utils.OpenFixture(path, CONTENT))

    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                 '/listeners/123/haproxy')
    elif distro == consts.CENTOS:
        rv = self.centos_app.get('/' + api_server.VERSION +
                                 '/listeners/123/haproxy')
    self.assertEqual(200, rv.status_code)
    self.assertEqual(six.b(CONTENT), rv.data)
    self.assertEqual('text/plain; charset=utf-8',
                     rv.headers['Content-Type'].lower())
def test_ubuntu_get_all_listeners(self):
    """GET /listeners on Ubuntu."""
    self._test_get_all_listeners(consts.UBUNTU)
def test_get_all_listeners(self):
    """GET /listeners on CentOS."""
    self._test_get_all_listeners(consts.CENTOS)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
            'get_listeners')
@mock.patch('octavia.amphorae.backends.agent.api_server.listener.Listener.'
            '_check_listener_status')
@mock.patch('octavia.amphorae.backends.agent.api_server.listener.Listener.'
            '_parse_haproxy_file')
def _test_get_all_listeners(self, distro, mock_parse, mock_status,
                            mock_listener):
    """GET /listeners: empty set, a single ACTIVE listener, and a mixed
    ACTIVE/ERROR pair (ERROR entries report an empty type)."""
    self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
    app = self.ubuntu_app if distro == consts.UBUNTU else self.centos_app
    url = '/' + api_server.VERSION + '/listeners'

    # No listeners at all -> empty JSON list.
    mock_listener.side_effect = [[]]
    rv = app.get(url)
    self.assertEqual(200, rv.status_code)
    self.assertFalse(json.loads(rv.data.decode('utf-8')))

    # One ACTIVE listener: its parsed haproxy mode becomes 'type'.
    mock_listener.side_effect = [['123']]
    mock_parse.side_effect = [{'mode': 'test'}]
    mock_status.side_effect = [consts.ACTIVE]
    rv = app.get(url)
    self.assertEqual(200, rv.status_code)
    self.assertEqual(
        [{'status': consts.ACTIVE, 'type': 'test', 'uuid': '123'}],
        json.loads(rv.data.decode('utf-8')))

    # Two listeners, one ACTIVE and one ERROR; the ERROR entry's mode
    # is suppressed (empty 'type').
    mock_listener.side_effect = [['123', '456']]
    mock_parse.side_effect = [{'mode': 'test'}, {'mode': 'http'}]
    mock_status.side_effect = [consts.ACTIVE, consts.ERROR]
    rv = app.get(url)
    self.assertEqual(200, rv.status_code)
    self.assertEqual(
        [{'status': consts.ACTIVE, 'type': 'test', 'uuid': '123'},
         {'status': consts.ERROR, 'type': '', 'uuid': '456'}],
        json.loads(rv.data.decode('utf-8')))
def test_ubuntu_get_listener(self):
    """Fetch a single listener's details on Ubuntu."""
    self._test_get_listener(consts.UBUNTU)
def test_centos_get_listener(self):
    """Fetch a single listener's details on CentOS."""
    self._test_get_listener(consts.CENTOS)
@mock.patch('octavia.amphorae.backends.agent.api_server.listener.Listener.'
            '_check_listener_status')
@mock.patch('octavia.amphorae.backends.agent.api_server.listener.Listener.'
            '_parse_haproxy_file')
@mock.patch('octavia.amphorae.backends.utils.haproxy_query.HAProxyQuery')
@mock.patch('os.path.exists')
def _test_get_listener(self, distro, mock_exists, mock_query, mock_parse,
                       mock_status):
    """GET /listeners/<id> for one distro.

    Covers: listener missing (404 with details), listener present but
    not ACTIVE (type suppressed, no pools), and listener ACTIVE (type
    reported and pool status included from the haproxy stats query).
    """
    self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
    # Listener not found
    mock_exists.side_effect = [False]
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                 '/listeners/123')
    elif distro == consts.CENTOS:
        rv = self.centos_app.get('/' + api_server.VERSION +
                                 '/listeners/123')
    self.assertEqual(404, rv.status_code)
    self.assertEqual(
        {'message': 'Listener Not Found',
         'details': 'No listener with UUID: 123'},
        json.loads(rv.data.decode('utf-8')))
    # Listener not ACTIVE: the parsed mode is not reported (type is '')
    mock_parse.side_effect = [dict(mode='test')]
    mock_status.side_effect = [consts.ERROR]
    mock_exists.side_effect = [True]
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                 '/listeners/123')
    elif distro == consts.CENTOS:
        rv = self.centos_app.get('/' + api_server.VERSION +
                                 '/listeners/123')
    self.assertEqual(200, rv.status_code)
    self.assertEqual(dict(
        status=consts.ERROR,
        type='',
        uuid='123'), json.loads(rv.data.decode('utf-8')))
    # Listener ACTIVE: pool status is gathered via the stats socket
    mock_parse.side_effect = [dict(mode='test', stats_socket='blah')]
    mock_status.side_effect = [consts.ACTIVE]
    mock_exists.side_effect = [True]
    mock_pool = mock.Mock()
    mock_query.side_effect = [mock_pool]
    mock_pool.get_pool_status.side_effect = [
        {'tcp-servers': {
            'status': 'DOWN',
            'uuid': 'tcp-servers',
            'members': [
                {'id-34833': 'DOWN'},
                {'id-34836': 'DOWN'}]}}]
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                 '/listeners/123')
    elif distro == consts.CENTOS:
        rv = self.centos_app.get('/' + api_server.VERSION +
                                 '/listeners/123')
    self.assertEqual(200, rv.status_code)
    self.assertEqual(dict(
        status=consts.ACTIVE,
        type='test',
        uuid='123',
        pools=[dict(
            status=consts.DOWN,
            uuid='tcp-servers',
            members=[
                {u'id-34833': u'DOWN'},
                {u'id-34836': u'DOWN'}])]),
        json.loads(rv.data.decode('utf-8')))
def test_ubuntu_delete_cert(self):
    """Delete a listener certificate on Ubuntu."""
    self._test_delete_cert(consts.UBUNTU)
def test_centos_delete_cert(self):
    """Delete a listener certificate on CentOS."""
    self._test_delete_cert(consts.CENTOS)
@mock.patch('os.path.exists')
@mock.patch('os.remove')
def _test_delete_cert(self, distro, mock_remove, mock_exists):
    """DELETE /listeners/<id>/certificates/<name>: missing file -> 404,
    non-.pem filename -> 400, valid .pem -> 200 and file removed."""
    self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
    app = self.ubuntu_app if distro == consts.UBUNTU else self.centos_app
    pem_url = ('/' + api_server.VERSION +
               '/listeners/123/certificates/test.pem')
    bad_url = ('/' + api_server.VERSION +
               '/listeners/123/certificates/test.bla')

    # Certificate file absent -> 404 with an explanatory body.
    mock_exists.side_effect = [False]
    rv = app.delete(pem_url)
    self.assertEqual(404, rv.status_code)
    self.assertEqual(dict(
        details='No certificate with filename: test.pem',
        message='Certificate Not Found'),
        json.loads(rv.data.decode('utf-8')))
    mock_exists.assert_called_once_with(
        '/var/lib/octavia/certs/123/test.pem')

    # wrong file name (not .pem) -> 400.
    mock_exists.side_effect = [True]
    rv = app.delete(bad_url)
    self.assertEqual(400, rv.status_code)

    # Valid .pem present -> 200 OK and os.remove() called on it.
    mock_exists.side_effect = [True]
    rv = app.delete(pem_url)
    self.assertEqual(200, rv.status_code)
    self.assertEqual(OK, json.loads(rv.data.decode('utf-8')))
    mock_remove.assert_called_once_with(
        '/var/lib/octavia/certs/123/test.pem')
def test_ubuntu_get_certificate_md5(self):
    """Fetch a certificate's md5 checksum on Ubuntu."""
    self._test_get_certificate_md5(consts.UBUNTU)
def test_centos_get_certificate_md5(self):
    """Fetch a certificate's md5 checksum on CentOS."""
    self._test_get_certificate_md5(consts.CENTOS)
@mock.patch('os.path.exists')
def _test_get_certificate_md5(self, distro, mock_exists):
    """GET /listeners/<id>/certificates/<name> returns the file's md5.

    Covers: certificate missing (404), bad filename extension (400),
    and the happy path returning {'md5sum': <hexdigest>}.
    """
    self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
    CONTENT = "TestTest"
    # Certificate file absent -> 404 with details.
    mock_exists.side_effect = [False]
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                 '/listeners/123/certificates/test.pem')
    elif distro == consts.CENTOS:
        rv = self.centos_app.get('/' + api_server.VERSION +
                                 '/listeners/123/certificates/test.pem')
    self.assertEqual(404, rv.status_code)
    self.assertEqual(dict(
        details='No certificate with filename: test.pem',
        message='Certificate Not Found'),
        json.loads(rv.data.decode('utf-8')))
    mock_exists.assert_called_with('/var/lib/octavia/certs/123/test.pem')
    # wrong file name
    # NOTE(review): this case issues a PUT even though the test is for
    # GET md5 -- looks copy-pasted from the upload test; confirm whether
    # a GET of test.bla was intended here.
    mock_exists.side_effect = [True]
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                 '/listeners/123/certificates/test.bla',
                                 data='TestTest')
    elif distro == consts.CENTOS:
        rv = self.centos_app.put('/' + api_server.VERSION +
                                 '/listeners/123/certificates/test.bla',
                                 data='TestTest')
    self.assertEqual(400, rv.status_code)
    # Happy path: file exists, contents are hashed and returned.
    mock_exists.return_value = True
    mock_exists.side_effect = None
    if distro == consts.UBUNTU:
        path = self.ubuntu_test_server._listener._cert_file_path(
            '123', 'test.pem')
    elif distro == consts.CENTOS:
        path = self.centos_test_server._listener._cert_file_path(
            '123', 'test.pem')
    self.useFixture(test_utils.OpenFixture(path, CONTENT))
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                 '/listeners/123/certificates/test.pem')
    elif distro == consts.CENTOS:
        rv = self.centos_app.get('/' + api_server.VERSION +
                                 '/listeners/123/certificates/test.pem')
    self.assertEqual(200, rv.status_code)
    self.assertEqual(dict(md5sum=hashlib.md5(six.b(CONTENT)).hexdigest()),
                     json.loads(rv.data.decode('utf-8')))
def test_ubuntu_upload_certificate_md5(self):
    """Upload a listener certificate on Ubuntu."""
    self._test_upload_certificate_md5(consts.UBUNTU)
def test_centos_upload_certificate_md5(self):
    """Upload a listener certificate on CentOS."""
    self._test_upload_certificate_md5(consts.CENTOS)
@mock.patch('os.path.exists')
@mock.patch('os.makedirs')
def _test_upload_certificate_md5(self, distro, mock_makedir, mock_exists):
    """PUT /listeners/<id>/certificates/<name>.

    Covers: bad filename extension (400), upload when the cert
    directory already exists, and upload when it must be created
    (os.makedirs is then called for the listener's cert directory).
    """
    self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
    # wrong file name
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                 '/listeners/123/certificates/test.bla',
                                 data='TestTest')
    elif distro == consts.CENTOS:
        rv = self.centos_app.put('/' + api_server.VERSION +
                                 '/listeners/123/certificates/test.bla',
                                 data='TestTest')
    self.assertEqual(400, rv.status_code)
    # Directory already exists: upload succeeds, body written verbatim.
    mock_exists.return_value = True
    if distro == consts.UBUNTU:
        path = self.ubuntu_test_server._listener._cert_file_path(
            '123', 'test.pem')
    elif distro == consts.CENTOS:
        path = self.centos_test_server._listener._cert_file_path(
            '123', 'test.pem')
    m = self.useFixture(test_utils.OpenFixture(path)).mock_open
    with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                     '/listeners/123/certificates/'
                                     'test.pem', data='TestTest')
        elif distro == consts.CENTOS:
            rv = self.centos_app.put('/' + api_server.VERSION +
                                     '/listeners/123/certificates/'
                                     'test.pem', data='TestTest')
        self.assertEqual(200, rv.status_code)
        self.assertEqual(OK, json.loads(rv.data.decode('utf-8')))
        handle = m()
        handle.write.assert_called_once_with(six.b('TestTest'))
    # Directory missing: same upload, but makedirs must be invoked.
    mock_exists.return_value = False
    m = self.useFixture(test_utils.OpenFixture(path)).mock_open
    with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                     '/listeners/123/certificates/'
                                     'test.pem', data='TestTest')
        elif distro == consts.CENTOS:
            rv = self.centos_app.put('/' + api_server.VERSION +
                                     '/listeners/123/certificates/'
                                     'test.pem', data='TestTest')
        self.assertEqual(200, rv.status_code)
        self.assertEqual(OK, json.loads(rv.data.decode('utf-8')))
        handle = m()
        handle.write.assert_called_once_with(six.b('TestTest'))
        mock_makedir.assert_called_once_with('/var/lib/octavia/certs/123')
def test_ubuntu_upload_server_certificate(self):
    """Upload the agent server certificate on Ubuntu."""
    self._test_upload_server_certificate(consts.UBUNTU)
def test_centos_upload_server_certificate(self):
    """Upload the agent server certificate on CentOS."""
    self._test_upload_server_certificate(consts.CENTOS)
def _test_upload_server_certificate(self, distro):
    """PUT /certificate writes the uploaded body to the agent's
    server.pem in chunks of certificate_update.BUFFER bytes.

    The buffer is shrunk to 5 so the 8-byte payload exercises the
    chunked-read while loop ('TestT' then 'est').
    """
    # Save and restore the module-level buffer size: the original code
    # left BUFFER = 5 set globally, polluting every test that runs
    # after this one in the same process.
    orig_buffer = certificate_update.BUFFER
    certificate_update.BUFFER = 5  # test the while loop
    self.addCleanup(setattr, certificate_update, 'BUFFER', orig_buffer)
    path = '/etc/octavia/certs/server.pem'
    m = self.useFixture(test_utils.OpenFixture(path)).mock_open
    with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                     '/certificate', data='TestTest')
        elif distro == consts.CENTOS:
            rv = self.centos_app.put('/' + api_server.VERSION +
                                     '/certificate', data='TestTest')
        self.assertEqual(202, rv.status_code)
        self.assertEqual(OK, json.loads(rv.data.decode('utf-8')))
        handle = m()
        # Two chunks prove the loop iterated: 5 bytes, then the rest.
        handle.write.assert_any_call(six.b('TestT'))
        handle.write.assert_any_call(six.b('est'))
def test_ubuntu_plug_network(self):
    """Plug a network on Ubuntu, both with the default netns interface
    file and with an operator-supplied network file configured."""
    self._test_plug_network(consts.UBUNTU)
    self.conf.config(group="amphora_agent",
                     agent_server_network_file="/path/to/interfaces_file")
    self._test_plug_network(consts.UBUNTU)
def test_centos_plug_network(self):
    """Plug a network on CentOS, both with the default netns interface
    file and with an operator-supplied network file configured."""
    self._test_plug_network(consts.CENTOS)
    self.conf.config(group="amphora_agent",
                     agent_server_network_file="/path/to/interfaces_file")
    self._test_plug_network(consts.CENTOS)
@mock.patch('netifaces.interfaces')
@mock.patch('netifaces.ifaddresses')
@mock.patch('pyroute2.IPRoute')
@mock.patch('pyroute2.NetNS')
@mock.patch('subprocess.check_output')
@mock.patch('octavia.amphorae.backends.agent.api_server.'
            'plug.Plug._netns_interface_exists')
def _test_plug_network(self, distro, mock_int_exists, mock_check_output,
                       mock_netns, mock_pyroute2, mock_ifaddress,
                       mock_interfaces):
    """POST /plug/network for one distro.

    Covers: interface already plugged (409), no candidate interface
    (404), DHCP happy path, static IPv4 and IPv6 happy paths (checking
    the generated interface config file contents and the ifup call),
    bogus fixed IP (400), ifup failure (500), and malformed port_info
    bodies (400).
    """
    self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
    port_info = {'mac_address': '123'}
    # The expected ethN index is derived from the number of links the
    # netns already reports; randomized so the test does not depend on
    # a fixed interface number.
    test_int_num = random.randint(0, 9999)

    mock_int_exists.return_value = False
    netns_handle = mock_netns.return_value.__enter__.return_value
    netns_handle.get_links.return_value = [0] * test_int_num
    test_int_num = str(test_int_num)

    # Interface already plugged
    mock_int_exists.return_value = True
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                  "/plug/network",
                                  content_type='application/json',
                                  data=json.dumps(port_info))
    elif distro == consts.CENTOS:
        rv = self.centos_app.post('/' + api_server.VERSION +
                                  "/plug/network",
                                  content_type='application/json',
                                  data=json.dumps(port_info))
    self.assertEqual(409, rv.status_code)
    self.assertEqual(dict(message="Interface already exists"),
                     json.loads(rv.data.decode('utf-8')))
    mock_int_exists.return_value = False

    # No interface at all
    mock_interfaces.side_effect = [[]]
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                  "/plug/network",
                                  content_type='application/json',
                                  data=json.dumps(port_info))
    elif distro == consts.CENTOS:
        rv = self.centos_app.post('/' + api_server.VERSION +
                                  "/plug/network",
                                  content_type='application/json',
                                  data=json.dumps(port_info))
    self.assertEqual(404, rv.status_code)
    self.assertEqual(dict(details="No suitable network interface found"),
                     json.loads(rv.data.decode('utf-8')))

    # No interface down: the only candidate already has an AF_INET
    # address, so it is not considered pluggable.
    mock_interfaces.side_effect = [['blah']]
    mock_ifaddress.side_effect = [[netifaces.AF_INET]]
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                  "/plug/network",
                                  content_type='application/json',
                                  data=json.dumps(port_info))
    elif distro == consts.CENTOS:
        rv = self.centos_app.post('/' + api_server.VERSION +
                                  "/plug/network",
                                  content_type='application/json',
                                  data=json.dumps(port_info))
    self.assertEqual(404, rv.status_code)
    self.assertEqual(dict(details="No suitable network interface found"),
                     json.loads(rv.data.decode('utf-8')))
    mock_ifaddress.assert_called_once_with('blah')

    # One Interface down, Happy Path (DHCP)
    mock_interfaces.side_effect = [['blah']]
    mock_ifaddress.side_effect = [[netifaces.AF_LINK],
                                  {netifaces.AF_LINK: [{'addr': '123'}]}]
    mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
    # The expected target file depends on whether an operator-supplied
    # network file is configured (appended to) or the per-distro netns
    # file is used (truncated/created).
    if self.conf.conf.amphora_agent.agent_server_network_file:
        file_name = self.conf.conf.amphora_agent.agent_server_network_file
        flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND
    elif distro == consts.UBUNTU:
        file_name = ('/etc/netns/{0}/network/interfaces.d/'
                     'eth{1}.cfg'.format(consts.AMPHORA_NAMESPACE,
                                         test_int_num))
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    elif distro == consts.CENTOS:
        file_name = ('/etc/netns/{0}/sysconfig/network-scripts/'
                     'ifcfg-eth{1}'.format(consts.AMPHORA_NAMESPACE,
                                           test_int_num))
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
    with mock.patch('os.open') as mock_open, mock.patch.object(
            os, 'fdopen', m) as mock_fdopen:
        mock_open.return_value = 123
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      "/plug/network",
                                      content_type='application/json',
                                      data=json.dumps(port_info))
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      "/plug/network",
                                      content_type='application/json',
                                      data=json.dumps(port_info))
        self.assertEqual(202, rv.status_code)
        mock_open.assert_any_call(file_name, flags, mode)
        mock_fdopen.assert_any_call(123, 'w')
        # The plugged-interfaces registry is opened read/write as well.
        plug_inf_file = '/var/lib/octavia/plugged_interfaces'
        flags = os.O_RDWR | os.O_CREAT
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        mock_open.assert_any_call(plug_inf_file, flags, mode)
        mock_fdopen.assert_any_call(123, 'r+')
        handle = m()
        if distro == consts.UBUNTU:
            handle.write.assert_any_call(
                '\n# Generated by Octavia agent\n'
                'auto eth{int}\n'
                'iface eth{int} inet dhcp\n'
                'auto eth{int}:0\n'
                'iface eth{int}:0 inet6 auto\n'.format(int=test_int_num))
        elif distro == consts.CENTOS:
            handle.write.assert_any_call(
                '\n# Generated by Octavia agent\n'
                'NM_CONTROLLED="no"\n'
                'DEVICE="eth{int}"\n'
                'ONBOOT="yes"\n'
                'TYPE="Ethernet"\n'
                'USERCTL="yes"\n'
                'IPV6INIT="no"\n'
                'BOOTPROTO="dhcp"\n'
                'PERSISTENT_DHCLIENT="1"\n'.format(int=test_int_num))
    mock_check_output.assert_called_with(
        ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
         'ifup', 'eth' + test_int_num], stderr=-2)

    # fixed IPs happy path (static IPv4, with MTU)
    port_info = {'mac_address': '123', 'mtu': 1450, 'fixed_ips': [
        {'ip_address': '10.0.0.5', 'subnet_cidr': '10.0.0.0/24'}]}
    mock_interfaces.side_effect = [['blah']]
    mock_ifaddress.side_effect = [[netifaces.AF_LINK],
                                  {netifaces.AF_LINK: [{'addr': '123'}]}]
    mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
    if self.conf.conf.amphora_agent.agent_server_network_file:
        file_name = self.conf.conf.amphora_agent.agent_server_network_file
        flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND
    elif distro == consts.UBUNTU:
        file_name = ('/etc/netns/{0}/network/interfaces.d/'
                     'eth{1}.cfg'.format(consts.AMPHORA_NAMESPACE,
                                         test_int_num))
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    elif distro == consts.CENTOS:
        file_name = ('/etc/netns/{0}/sysconfig/network-scripts/'
                     'ifcfg-eth{1}'.format(consts.AMPHORA_NAMESPACE,
                                           test_int_num))
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
    with mock.patch('os.open') as mock_open, mock.patch.object(
            os, 'fdopen', m) as mock_fdopen:
        mock_open.return_value = 123
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      "/plug/network",
                                      content_type='application/json',
                                      data=json.dumps(port_info))
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      "/plug/network",
                                      content_type='application/json',
                                      data=json.dumps(port_info))
        self.assertEqual(202, rv.status_code)
        mock_open.assert_any_call(file_name, flags, mode)
        mock_fdopen.assert_any_call(123, 'w')
        plug_inf_file = '/var/lib/octavia/plugged_interfaces'
        flags = os.O_RDWR | os.O_CREAT
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        mock_open.assert_any_call(plug_inf_file, flags, mode)
        mock_fdopen.assert_any_call(123, 'r+')
        handle = m()
        if distro == consts.UBUNTU:
            handle.write.assert_any_call(
                '\n\n# Generated by Octavia agent\n'
                'auto eth{int}\n'
                'iface eth{int} inet static\n'
                'address 10.0.0.5\nbroadcast 10.0.0.255\n'
                'netmask 255.255.255.0\n'
                'mtu 1450\n'.format(int=test_int_num))
        elif distro == consts.CENTOS:
            handle.write.assert_any_call(
                '\n\n# Generated by Octavia agent\n'
                'NM_CONTROLLED="no"\n'
                'DEVICE="eth{int}"\n'
                'ONBOOT="yes"\n'
                'TYPE="Ethernet"\n'
                'USERCTL="yes"\n'
                'IPV6INIT="no"\n'
                'MTU="1450"\n'
                'BOOTPROTO="static"\n'
                'IPADDR="10.0.0.5"\n'
                'NETMASK="255.255.255.0"\n'.format(int=test_int_num))
    mock_check_output.assert_called_with(
        ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
         'ifup', 'eth' + test_int_num], stderr=-2)

    # fixed IPs happy path IPv6 (address expanded to full form)
    port_info = {'mac_address': '123', 'mtu': 1450, 'fixed_ips': [
        {'ip_address': '2001:db8::2', 'subnet_cidr': '2001:db8::/32'}]}
    mock_interfaces.side_effect = [['blah']]
    mock_ifaddress.side_effect = [[netifaces.AF_LINK],
                                  {netifaces.AF_LINK: [{'addr': '123'}]}]
    mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
    if self.conf.conf.amphora_agent.agent_server_network_file:
        file_name = self.conf.conf.amphora_agent.agent_server_network_file
        flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND
    elif distro == consts.UBUNTU:
        file_name = ('/etc/netns/{0}/network/interfaces.d/'
                     'eth{1}.cfg'.format(consts.AMPHORA_NAMESPACE,
                                         test_int_num))
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    elif distro == consts.CENTOS:
        file_name = ('/etc/netns/{0}/sysconfig/network-scripts/'
                     'ifcfg-eth{1}'.format(consts.AMPHORA_NAMESPACE,
                                           test_int_num))
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
    with mock.patch('os.open') as mock_open, mock.patch.object(
            os, 'fdopen', m) as mock_fdopen:
        mock_open.return_value = 123
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      "/plug/network",
                                      content_type='application/json',
                                      data=json.dumps(port_info))
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      "/plug/network",
                                      content_type='application/json',
                                      data=json.dumps(port_info))
        self.assertEqual(202, rv.status_code)
        mock_open.assert_any_call(file_name, flags, mode)
        mock_fdopen.assert_any_call(123, 'w')
        plug_inf_file = '/var/lib/octavia/plugged_interfaces'
        flags = os.O_RDWR | os.O_CREAT
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        mock_open.assert_any_call(plug_inf_file, flags, mode)
        mock_fdopen.assert_any_call(123, 'r+')
        handle = m()
        if distro == consts.UBUNTU:
            handle.write.assert_any_call(
                '\n\n# Generated by Octavia agent\n'
                'auto eth{int}\n'
                'iface eth{int} inet6 static\n'
                'address 2001:0db8:0000:0000:0000:0000:0000:0002\n'
                'broadcast 2001:0db8:ffff:ffff:ffff:ffff:ffff:ffff\n'
                'netmask 32\nmtu 1450\n'.format(int=test_int_num))
        elif distro == consts.CENTOS:
            handle.write.assert_any_call(
                '\n\n# Generated by Octavia agent\n'
                'NM_CONTROLLED="no"\nDEVICE="eth{int}"\n'
                'ONBOOT="yes"\nTYPE="Ethernet"\nUSERCTL="yes"\n'
                'IPV6INIT="yes"\nIPV6_MTU="1450"\n'
                'IPV6_AUTOCONF="no"\n'
                'IPV6ADDR="2001:0db8:0000:0000:0000:0000:'
                '0000:0002"\n'.format(int=test_int_num))
    mock_check_output.assert_called_with(
        ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
         'ifup', 'eth' + test_int_num], stderr=-2)

    # fixed IPs, bogus IP -> 400
    port_info = {'mac_address': '123', 'fixed_ips': [
        {'ip_address': '10005', 'subnet_cidr': '10.0.0.0/24'}]}
    mock_interfaces.side_effect = [['blah']]
    mock_ifaddress.side_effect = [[netifaces.AF_LINK],
                                  {netifaces.AF_LINK: [{'addr': '123'}]}]
    flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
    file_name = '/etc/netns/{0}/network/interfaces.d/eth{1}.cfg'.format(
        consts.AMPHORA_NAMESPACE, test_int_num)
    m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
    with mock.patch('os.open') as mock_open, mock.patch.object(
            os, 'fdopen', m) as mock_fdopen:
        mock_open.return_value = 123
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      "/plug/network",
                                      content_type='application/json',
                                      data=json.dumps(port_info))
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      "/plug/network",
                                      content_type='application/json',
                                      data=json.dumps(port_info))
        self.assertEqual(400, rv.status_code)

    # same as above but ifup fails -> 500 with the captured stderr
    port_info = {'mac_address': '123', 'fixed_ips': [
        {'ip_address': '10.0.0.5', 'subnet_cidr': '10.0.0.0/24'}]}
    mock_interfaces.side_effect = [['blah']]
    mock_ifaddress.side_effect = [[netifaces.AF_LINK],
                                  {netifaces.AF_LINK: [{'addr': '123'}]}]
    mock_check_output.side_effect = [subprocess.CalledProcessError(
        7, 'test', RANDOM_ERROR), subprocess.CalledProcessError(
        7, 'test', RANDOM_ERROR)]
    m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
    with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      "/plug/network",
                                      content_type='application/json',
                                      data=json.dumps(port_info))
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      "/plug/network",
                                      content_type='application/json',
                                      data=json.dumps(port_info))
        self.assertEqual(500, rv.status_code)
        self.assertEqual(
            {'details': RANDOM_ERROR,
             'message': 'Error plugging network'},
            json.loads(rv.data.decode('utf-8')))

    # Bad port_info tests: non-dict body, then missing mac_address.
    port_info = 'Bad data'
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                  "/plug/network",
                                  content_type='application/json',
                                  data=json.dumps(port_info))
    elif distro == consts.CENTOS:
        rv = self.centos_app.post('/' + api_server.VERSION +
                                  "/plug/network",
                                  content_type='application/json',
                                  data=json.dumps(port_info))
    self.assertEqual(400, rv.status_code)
    port_info = {'fixed_ips': [{'ip_address': '10.0.0.5',
                                'subnet_cidr': '10.0.0.0/24'}]}
    if distro == consts.UBUNTU:
        rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                  "/plug/network",
                                  content_type='application/json',
                                  data=json.dumps(port_info))
    elif distro == consts.CENTOS:
        rv = self.centos_app.post('/' + api_server.VERSION +
                                  "/plug/network",
                                  content_type='application/json',
                                  data=json.dumps(port_info))
    self.assertEqual(400, rv.status_code)
def test_ubuntu_plug_network_host_routes(self):
    """Plug a network with host routes on Ubuntu."""
    self._test_plug_network_host_routes(consts.UBUNTU)
    # NOTE(review): this override is set but the helper is never re-run
    # afterwards (unlike test_ubuntu_plug_network), so the
    # agent_server_network_file variant is not actually exercised here
    # -- and the helper does not consult that option when computing the
    # expected file name. Confirm whether a second call was intended.
    self.conf.config(group="amphora_agent",
                     agent_server_network_file="/path/to/interfaces_file")
def test_centos_plug_network_host_routes(self):
    """Plug a network with host routes on CentOS."""
    self._test_plug_network_host_routes(consts.CENTOS)
@mock.patch('netifaces.interfaces')
@mock.patch('netifaces.ifaddresses')
@mock.patch('pyroute2.IPRoute')
@mock.patch('pyroute2.NetNS')
@mock.patch('subprocess.check_output')
def _test_plug_network_host_routes(self, distro, mock_check_output,
                                   mock_netns, mock_pyroute2,
                                   mock_ifaddress, mock_interfaces):
    """POST /plug/network with fixed IPs carrying host routes.

    Verifies the generated interface config: on Ubuntu the host routes
    become up/down route add/del lines; on CentOS only the static
    address config is asserted.
    """
    self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
    SUBNET_CIDR = '192.0.2.0/24'
    BROADCAST = '192.0.2.255'
    NETMASK = '255.255.255.0'
    IP = '192.0.1.5'
    MAC = '123'
    DEST1 = '198.51.100.0/24'
    DEST2 = '203.0.113.0/24'
    NEXTHOP = '192.0.2.1'
    # The netns already contains the primary interface, so the plug
    # lands on NETNS_PRIMARY_INTERFACE rather than a new ethN.
    netns_handle = mock_netns.return_value.__enter__.return_value
    netns_handle.get_links.return_value = [{
        'attrs': [['IFLA_IFNAME', consts.NETNS_PRIMARY_INTERFACE]]}]
    port_info = {'mac_address': MAC, 'mtu': 1450, 'fixed_ips': [
        {'ip_address': IP, 'subnet_cidr': SUBNET_CIDR,
         'host_routes': [{'destination': DEST1, 'nexthop': NEXTHOP},
                         {'destination': DEST2, 'nexthop': NEXTHOP}]}]}
    mock_interfaces.side_effect = [['blah']]
    mock_ifaddress.side_effect = [[netifaces.AF_LINK],
                                  {netifaces.AF_LINK: [{'addr': '123'}]}]
    flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
    if distro == consts.UBUNTU:
        file_name = '/etc/netns/{0}/network/interfaces.d/{1}.cfg'.format(
            consts.AMPHORA_NAMESPACE, consts.NETNS_PRIMARY_INTERFACE)
    elif distro == consts.CENTOS:
        file_name = ('/etc/netns/{0}/sysconfig/network-scripts/'
                     'ifcfg-{1}'.format(consts.AMPHORA_NAMESPACE,
                                        consts.NETNS_PRIMARY_INTERFACE))
    m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
    with mock.patch('os.open') as mock_open, mock.patch.object(
            os, 'fdopen', m) as mock_fdopen:
        mock_open.return_value = 123
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      "/plug/network",
                                      content_type='application/json',
                                      data=json.dumps(port_info))
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      "/plug/network",
                                      content_type='application/json',
                                      data=json.dumps(port_info))
        self.assertEqual(202, rv.status_code)
        mock_open.assert_any_call(file_name, flags, mode)
        mock_fdopen.assert_any_call(123, 'w')
        plug_inf_file = '/var/lib/octavia/plugged_interfaces'
        flags = os.O_RDWR | os.O_CREAT
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        mock_open.assert_any_call(plug_inf_file, flags, mode)
        mock_fdopen.assert_any_call(123, 'r+')
        handle = m()
        if distro == consts.UBUNTU:
            handle.write.assert_any_call(
                '\n\n# Generated by Octavia agent\n'
                'auto ' + consts.NETNS_PRIMARY_INTERFACE +
                '\niface ' + consts.NETNS_PRIMARY_INTERFACE +
                ' inet static\n' +
                'address ' + IP + '\nbroadcast ' + BROADCAST + '\n' +
                'netmask ' + NETMASK + '\n' + 'mtu 1450\n' +
                'up route add -net ' + DEST1 + ' gw ' + NEXTHOP +
                ' dev ' + consts.NETNS_PRIMARY_INTERFACE + '\n'
                'down route del -net ' + DEST1 + ' gw ' + NEXTHOP +
                ' dev ' + consts.NETNS_PRIMARY_INTERFACE + '\n'
                'up route add -net ' + DEST2 + ' gw ' + NEXTHOP +
                ' dev ' + consts.NETNS_PRIMARY_INTERFACE + '\n'
                'down route del -net ' + DEST2 + ' gw ' + NEXTHOP +
                ' dev ' + consts.NETNS_PRIMARY_INTERFACE + '\n'
            )
        elif distro == consts.CENTOS:
            handle.write.assert_any_call(
                '\n\n# Generated by Octavia agent\n'
                'NM_CONTROLLED="no"\nDEVICE="{int}"\n'
                'ONBOOT="yes"\nTYPE="Ethernet"\n'
                'USERCTL="yes"\nIPV6INIT="no"\nMTU="1450"\n'
                'BOOTPROTO="static"\nIPADDR="{ip}"\n'
                'NETMASK="{mask}"\n'.format(
                    int=consts.NETNS_PRIMARY_INTERFACE,
                    ip=IP,
                    mask=NETMASK))
    mock_check_output.assert_called_with(
        ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
         'ifup', consts.NETNS_PRIMARY_INTERFACE], stderr=-2)
def test_ubuntu_plug_VIP4(self):
    """Plug an IPv4 VIP, with and without an operator-supplied network
    interfaces file configured."""
    self._test_plug_VIP4(consts.UBUNTU)
    self.conf.config(group="amphora_agent",
                     agent_server_network_file="/path/to/interfaces_file")
    self._test_plug_VIP4(consts.UBUNTU)
    # NOTE(review): the CentOS cases run inside this ubuntu-named test;
    # elsewhere in this class they live in a separate test_centos_*
    # method -- confirm whether a dedicated test_centos_plug_VIP4 was
    # intended. (The second conf.config below is a no-op since the
    # option is already set.)
    self._test_plug_VIP4(consts.CENTOS)
    self.conf.config(group="amphora_agent",
                     agent_server_network_file="/path/to/interfaces_file")
    self._test_plug_VIP4(consts.CENTOS)
    @mock.patch('shutil.copy2')
    @mock.patch('pyroute2.NSPopen')
    @mock.patch('octavia.amphorae.backends.agent.api_server.'
                'plug.Plug._netns_interface_exists')
    @mock.patch('netifaces.interfaces')
    @mock.patch('netifaces.ifaddresses')
    @mock.patch('pyroute2.IPRoute')
    @mock.patch('pyroute2.netns.create')
    @mock.patch('pyroute2.NetNS')
    @mock.patch('subprocess.check_output')
    @mock.patch('shutil.copytree')
    @mock.patch('os.makedirs')
    def _test_plug_VIP4(self, distro, mock_makedirs, mock_copytree,
                        mock_check_output, mock_netns, mock_netns_create,
                        mock_pyroute2, mock_ifaddress, mock_interfaces,
                        mock_int_exists, mock_nspopen, mock_copy2):
        """Exercise the agent's IPv4 "plug VIP" REST endpoint for *distro*.

        Covers: malformed VIP address (400), missing subnet info (400),
        interface already plugged (409), no candidate interface (404),
        the happy path with VRRP IP + host routes, the happy path without
        them (DHCP), and failure of the underlying ``ifup`` command (500).
        """
        self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
        subnet_info = {
            'subnet_cidr': '203.0.113.0/24',
            'gateway': '203.0.113.1',
            'mac_address': '123'
        }
        # malformed ip
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error',
                                      data=json.dumps(subnet_info),
                                      content_type='application/json')
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error',
                                      data=json.dumps(subnet_info),
                                      content_type='application/json')
        self.assertEqual(400, rv.status_code)
        # No subnet info
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error')
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error')
        self.assertEqual(400, rv.status_code)
        # Interface already plugged
        mock_int_exists.return_value = True
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      "/plug/vip/203.0.113.2",
                                      content_type='application/json',
                                      data=json.dumps(subnet_info))
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      "/plug/vip/203.0.113.2",
                                      content_type='application/json',
                                      data=json.dumps(subnet_info))
        self.assertEqual(409, rv.status_code)
        self.assertEqual(dict(message="Interface already exists"),
                         json.loads(rv.data.decode('utf-8')))
        mock_int_exists.return_value = False
        # No interface at all
        mock_interfaces.side_effect = [[]]
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      "/plug/vip/203.0.113.2",
                                      content_type='application/json',
                                      data=json.dumps(subnet_info))
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      "/plug/vip/203.0.113.2",
                                      content_type='application/json',
                                      data=json.dumps(subnet_info))
        self.assertEqual(404, rv.status_code)
        self.assertEqual(dict(details="No suitable network interface found"),
                         json.loads(rv.data.decode('utf-8')))
        # Two interfaces down
        mock_interfaces.side_effect = [['blah', 'blah2']]
        mock_ifaddress.side_effect = [['blabla'], ['blabla']]
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      "/plug/vip/203.0.113.2",
                                      content_type='application/json',
                                      data=json.dumps(subnet_info))
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      "/plug/vip/203.0.113.2",
                                      content_type='application/json',
                                      data=json.dumps(subnet_info))
        self.assertEqual(404, rv.status_code)
        self.assertEqual(dict(details="No suitable network interface found"),
                         json.loads(rv.data.decode('utf-8')))
        # Happy Path IPv4, with VRRP_IP and host route
        full_subnet_info = {
            'subnet_cidr': '203.0.113.0/24',
            'gateway': '203.0.113.1',
            'mac_address': '123',
            'vrrp_ip': '203.0.113.4',
            'mtu': 1450,
            'host_routes': [{'destination': '203.0.114.0/24',
                             'nexthop': '203.0.113.5'},
                            {'destination': '203.0.115.0/24',
                             'nexthop': '203.0.113.5'}]
        }
        mock_interfaces.side_effect = [['blah']]
        mock_ifaddress.side_effect = [[netifaces.AF_LINK],
                                      {netifaces.AF_LINK: [{'addr': '123'}]}]
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        # The expected config path/open-flags depend on whether the operator
        # overrode the network file, otherwise on the distro's layout.
        if self.conf.conf.amphora_agent.agent_server_network_file:
            file_name = self.conf.conf.amphora_agent.agent_server_network_file
            flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND
        elif distro == consts.UBUNTU:
            file_name = ('/etc/netns/{netns}/network/interfaces.d/'
                         '{netns_int}.cfg'.format(
                             netns=consts.AMPHORA_NAMESPACE,
                             netns_int=consts.NETNS_PRIMARY_INTERFACE))
            flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        elif distro == consts.CENTOS:
            file_name = ('/etc/netns/{netns}/sysconfig/network-scripts/'
                         'ifcfg-{netns_int}'.format(
                             netns=consts.AMPHORA_NAMESPACE,
                             netns_int=consts.NETNS_PRIMARY_INTERFACE))
            flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
        with mock.patch('os.open') as mock_open, mock.patch.object(
                os, 'fdopen', m) as mock_fdopen:
            mock_open.return_value = 123
            if distro == consts.UBUNTU:
                rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                          "/plug/vip/203.0.113.2",
                                          content_type='application/json',
                                          data=json.dumps(full_subnet_info))
            elif distro == consts.CENTOS:
                rv = self.centos_app.post('/' + api_server.VERSION +
                                          "/plug/vip/203.0.113.2",
                                          content_type='application/json',
                                          data=json.dumps(full_subnet_info))
            self.assertEqual(202, rv.status_code)
            mock_open.assert_any_call(file_name, flags, mode)
            mock_fdopen.assert_any_call(123, 'w')
            # The agent also records the plugged interface in its registry.
            plug_inf_file = '/var/lib/octavia/plugged_interfaces'
            flags = os.O_RDWR | os.O_CREAT
            mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
            mock_open.assert_any_call(plug_inf_file, flags, mode)
            mock_fdopen.assert_any_call(123, 'r+')
            handle = m()
            if distro == consts.UBUNTU:
                handle.write.assert_any_call(
                    '\n# Generated by Octavia agent\n'
                    'auto {netns_int} {netns_int}:0\n'
                    'iface {netns_int} inet static\n'
                    'address 203.0.113.4\n'
                    'broadcast 203.0.113.255\n'
                    'netmask 255.255.255.0\n'
                    'gateway 203.0.113.1\n'
                    'mtu 1450\n'
                    'up route add -net 203.0.114.0/24 gw 203.0.113.5 '
                    'dev {netns_int}\n'
                    'down route del -net 203.0.114.0/24 gw 203.0.113.5 '
                    'dev {netns_int}\n'
                    'up route add -net 203.0.115.0/24 gw 203.0.113.5 '
                    'dev {netns_int}\n'
                    'down route del -net 203.0.115.0/24 gw 203.0.113.5 '
                    'dev {netns_int}\n'
                    '\n'
                    'iface {netns_int}:0 inet static\n'
                    'address 203.0.113.2\n'
                    'broadcast 203.0.113.255\n'
                    'netmask 255.255.255.0'.format(
                        netns_int=consts.NETNS_PRIMARY_INTERFACE))
            elif distro == consts.CENTOS:
                handle.write.assert_any_call(
                    '\n# Generated by Octavia agent\n'
                    'NM_CONTROLLED="no"\nDEVICE="{netns_int}"\n'
                    'ONBOOT="yes"\nTYPE="Ethernet"\nUSERCTL="yes" \n'
                    'BOOTPROTO="static"\nIPADDR="203.0.113.4"\n'
                    'NETMASK="255.255.255.0"\nGATEWAY="203.0.113.1"\n'
                    'MTU="1450" \n'.format(
                        netns_int=consts.NETNS_PRIMARY_INTERFACE))
            mock_check_output.assert_called_with(
                ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
                 'ifup', '{netns_int}:0'.format(
                     netns_int=consts.NETNS_PRIMARY_INTERFACE)], stderr=-2)
            # Verify sysctl was loaded
            mock_nspopen.assert_called_once_with(
                'amphora-haproxy', ['/sbin/sysctl', '--system'],
                stdout=subprocess.PIPE)
        # One Interface down, Happy Path IPv4
        mock_interfaces.side_effect = [['blah']]
        mock_ifaddress.side_effect = [[netifaces.AF_LINK],
                                      {netifaces.AF_LINK: [{'addr': '123'}]}]
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        if self.conf.conf.amphora_agent.agent_server_network_file:
            file_name = self.conf.conf.amphora_agent.agent_server_network_file
            flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND
        elif distro == consts.UBUNTU:
            file_name = ('/etc/netns/{netns}/network/interfaces.d/'
                         '{netns_int}.cfg'.format(
                             netns=consts.AMPHORA_NAMESPACE,
                             netns_int=consts.NETNS_PRIMARY_INTERFACE))
            flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        elif distro == consts.CENTOS:
            file_name = ('/etc/netns/{netns}/sysconfig/network-scripts/'
                         'ifcfg-{netns_int}'.format(
                             netns=consts.AMPHORA_NAMESPACE,
                             netns_int=consts.NETNS_PRIMARY_INTERFACE))
            flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
        with mock.patch('os.open') as mock_open, mock.patch.object(
                os, 'fdopen', m) as mock_fdopen:
            mock_open.return_value = 123
            if distro == consts.UBUNTU:
                rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                          "/plug/vip/203.0.113.2",
                                          content_type='application/json',
                                          data=json.dumps(subnet_info))
            elif distro == consts.CENTOS:
                rv = self.centos_app.post('/' + api_server.VERSION +
                                          "/plug/vip/203.0.113.2",
                                          content_type='application/json',
                                          data=json.dumps(subnet_info))
            self.assertEqual(202, rv.status_code)
            mock_open.assert_any_call(file_name, flags, mode)
            mock_fdopen.assert_any_call(123, 'w')
            plug_inf_file = '/var/lib/octavia/plugged_interfaces'
            flags = os.O_RDWR | os.O_CREAT
            mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
            mock_open.assert_any_call(plug_inf_file, flags, mode)
            mock_fdopen.assert_any_call(123, 'r+')
            handle = m()
            # Without a VRRP IP the primary interface is configured via DHCP.
            if distro == consts.UBUNTU:
                handle.write.assert_any_call(
                    '\n# Generated by Octavia agent\n'
                    'auto {netns_int} {netns_int}:0\n\n'
                    'iface {netns_int} inet dhcp\n\n'
                    'iface {netns_int}:0 inet static\n'
                    'address 203.0.113.2\n'
                    'broadcast 203.0.113.255\n'
                    'netmask 255.255.255.0'.format(
                        netns_int=consts.NETNS_PRIMARY_INTERFACE))
            elif distro == consts.CENTOS:
                handle.write.assert_any_call(
                    '\n# Generated by Octavia agent\n'
                    'NM_CONTROLLED="no"\nDEVICE="{netns_int}"\n'
                    'ONBOOT="yes"\nTYPE="Ethernet"\nUSERCTL="yes" \n'
                    'BOOTPROTO="dhcp"\nPERSISTENT_DHCLIENT="1" \n'.format(
                        netns_int=consts.NETNS_PRIMARY_INTERFACE))
            mock_check_output.assert_called_with(
                ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
                 'ifup', '{netns_int}:0'.format(
                     netns_int=consts.NETNS_PRIMARY_INTERFACE)], stderr=-2)
        # ifup failure: the second/third check_output calls raise, so the
        # endpoint reports an internal error with the command's output.
        mock_interfaces.side_effect = [['blah']]
        mock_ifaddress.side_effect = [[netifaces.AF_LINK],
                                      {netifaces.AF_LINK: [{'addr': '123'}]}]
        mock_check_output.side_effect = [
            'unplug1',
            subprocess.CalledProcessError(
                7, 'test', RANDOM_ERROR), subprocess.CalledProcessError(
                7, 'test', RANDOM_ERROR)]
        m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
        with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
            if distro == consts.UBUNTU:
                rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                          "/plug/vip/203.0.113.2",
                                          content_type='application/json',
                                          data=json.dumps(subnet_info))
            elif distro == consts.CENTOS:
                rv = self.centos_app.post('/' + api_server.VERSION +
                                          "/plug/vip/203.0.113.2",
                                          content_type='application/json',
                                          data=json.dumps(subnet_info))
            self.assertEqual(500, rv.status_code)
            self.assertEqual(
                {'details': RANDOM_ERROR,
                 'message': 'Error plugging VIP'},
                json.loads(rv.data.decode('utf-8')))
    def test_ubuntu_plug_VIP6(self):
        # Delegate to the distro-parameterized IPv6 VIP plug scenario.
        self._test_plug_vip6(consts.UBUNTU)
    def test_centos_plug_VIP6(self):
        # Delegate to the distro-parameterized IPv6 VIP plug scenario.
        self._test_plug_vip6(consts.CENTOS)
    @mock.patch('shutil.copy2')
    @mock.patch('pyroute2.NSPopen')
    @mock.patch('netifaces.interfaces')
    @mock.patch('netifaces.ifaddresses')
    @mock.patch('pyroute2.IPRoute')
    @mock.patch('pyroute2.netns.create')
    @mock.patch('pyroute2.NetNS')
    @mock.patch('subprocess.check_output')
    @mock.patch('shutil.copytree')
    @mock.patch('os.makedirs')
    def _test_plug_vip6(self, distro, mock_makedirs, mock_copytree,
                        mock_check_output, mock_netns, mock_netns_create,
                        mock_pyroute2, mock_ifaddress, mock_interfaces,
                        mock_nspopen, mock_copy2):
        """Exercise the agent's IPv6 "plug VIP" REST endpoint for *distro*.

        Mirrors :meth:`_test_plug_VIP4`: validation errors, no usable
        interface, happy path with VRRP IP + host routes, happy path via
        autoconfiguration, and ``ifup`` failure. Note the CentOS variant
        brings up the base interface, Ubuntu the ``:0`` alias.
        """
        self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
        subnet_info = {
            'subnet_cidr': '2001:db8::/32',
            'gateway': '2001:db8::1',
            'mac_address': '123'
        }
        # malformed ip
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error',
                                      data=json.dumps(subnet_info),
                                      content_type='application/json')
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error',
                                      data=json.dumps(subnet_info),
                                      content_type='application/json')
        self.assertEqual(400, rv.status_code)
        # No subnet info
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error',
                                      data=json.dumps(subnet_info),
                                      content_type='application/json')
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      '/plug/vip/error',
                                      data=json.dumps(subnet_info),
                                      content_type='application/json')
        self.assertEqual(400, rv.status_code)
        # No interface at all
        mock_interfaces.side_effect = [[]]
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      "/plug/vip/2001:db8::2",
                                      content_type='application/json',
                                      data=json.dumps(subnet_info))
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      "/plug/vip/2001:db8::2",
                                      content_type='application/json',
                                      data=json.dumps(subnet_info))
        self.assertEqual(404, rv.status_code)
        self.assertEqual(dict(details="No suitable network interface found"),
                         json.loads(rv.data.decode('utf-8')))
        # Two interfaces down
        mock_interfaces.side_effect = [['blah', 'blah2']]
        mock_ifaddress.side_effect = [['blabla'], ['blabla']]
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                      "/plug/vip/2001:db8::2",
                                      content_type='application/json',
                                      data=json.dumps(subnet_info))
        elif distro == consts.CENTOS:
            rv = self.centos_app.post('/' + api_server.VERSION +
                                      "/plug/vip/2001:db8::2",
                                      content_type='application/json',
                                      data=json.dumps(subnet_info))
        self.assertEqual(404, rv.status_code)
        self.assertEqual(dict(details="No suitable network interface found"),
                         json.loads(rv.data.decode('utf-8')))
        # Happy Path IPv6, with VRRP_IP and host route
        full_subnet_info = {
            'subnet_cidr': '2001:db8::/32',
            'gateway': '2001:db8::1',
            'mac_address': '123',
            'vrrp_ip': '2001:db8::4',
            'mtu': 1450,
            'host_routes': [{'destination': '2001:db9::/32',
                             'nexthop': '2001:db8::5'},
                            {'destination': '2001:db9::/32',
                             'nexthop': '2001:db8::5'}]
        }
        mock_interfaces.side_effect = [['blah']]
        mock_ifaddress.side_effect = [[netifaces.AF_LINK],
                                      {netifaces.AF_LINK: [{'addr': '123'}]}]
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        if distro == consts.UBUNTU:
            file_name = ('/etc/netns/{netns}/network/interfaces.d/'
                         '{netns_int}.cfg'.format(
                             netns=consts.AMPHORA_NAMESPACE,
                             netns_int=consts.NETNS_PRIMARY_INTERFACE))
        elif distro == consts.CENTOS:
            file_name = ('/etc/netns/{netns}/sysconfig/network-scripts/'
                         'ifcfg-{netns_int}'.format(
                             netns=consts.AMPHORA_NAMESPACE,
                             netns_int=consts.NETNS_PRIMARY_INTERFACE))
        m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
        with mock.patch('os.open') as mock_open, mock.patch.object(
                os, 'fdopen', m) as mock_fdopen:
            mock_open.return_value = 123
            if distro == consts.UBUNTU:
                rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                          "/plug/vip/2001:db8::2",
                                          content_type='application/json',
                                          data=json.dumps(full_subnet_info))
            elif distro == consts.CENTOS:
                rv = self.centos_app.post('/' + api_server.VERSION +
                                          "/plug/vip/2001:db8::2",
                                          content_type='application/json',
                                          data=json.dumps(full_subnet_info))
            self.assertEqual(202, rv.status_code)
            mock_open.assert_any_call(file_name, flags, mode)
            mock_fdopen.assert_any_call(123, 'w')
            plug_inf_file = '/var/lib/octavia/plugged_interfaces'
            flags = os.O_RDWR | os.O_CREAT
            mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
            mock_open.assert_any_call(plug_inf_file, flags, mode)
            mock_fdopen.assert_any_call(123, 'r+')
            handle = m()
            if distro == consts.UBUNTU:
                handle.write.assert_any_call(
                    '\n# Generated by Octavia agent\n'
                    'auto {netns_int} {netns_int}:0\n'
                    'iface {netns_int} inet6 static\n'
                    'address 2001:db8::4\n'
                    'broadcast 2001:0db8:ffff:ffff:ffff:ffff:ffff:ffff\n'
                    'netmask 32\n'
                    'gateway 2001:db8::1\n'
                    'mtu 1450\n'
                    'up route add -net 2001:db9::/32 gw 2001:db8::5 '
                    'dev {netns_int}\n'
                    'down route del -net 2001:db9::/32 gw 2001:db8::5 '
                    'dev {netns_int}\n'
                    'up route add -net 2001:db9::/32 gw 2001:db8::5 '
                    'dev {netns_int}\n'
                    'down route del -net 2001:db9::/32 gw 2001:db8::5 '
                    'dev {netns_int}\n'
                    '\n'
                    'iface {netns_int}:0 inet6 static\n'
                    'address 2001:0db8:0000:0000:0000:0000:0000:0002\n'
                    'broadcast 2001:0db8:ffff:ffff:ffff:ffff:ffff:ffff\n'
                    'netmask 32'.format(
                        netns_int=consts.NETNS_PRIMARY_INTERFACE))
            elif distro == consts.CENTOS:
                handle.write.assert_any_call(
                    '\n# Generated by Octavia agent\n'
                    'NM_CONTROLLED="no"\nDEVICE="{netns_int}"\n'
                    'ONBOOT="yes"\nTYPE="Ethernet"\nUSERCTL="yes"\n'
                    'IPV6INIT="yes"\nIPV6_DEFROUTE="yes"\n'
                    'IPV6_AUTOCONF="no"\nIPV6ADDR="2001:db8::4/32"\n'
                    'IPV6_DEFAULTGW="2001:db8::1"\nIPV6_MTU="1450" \n'
                    'IPV6ADDR_SECONDARIES="2001:0db8:0000:0000:0000:0000:'
                    '0000:0002/32"\n'.format(
                        netns_int=consts.NETNS_PRIMARY_INTERFACE))
            # CentOS carries the VIP as a secondary address on the base
            # interface; Ubuntu brings up the ":0" alias interface.
            if distro == consts.UBUNTU:
                mock_check_output.assert_called_with(
                    ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
                     'ifup', '{netns_int}:0'.format(
                         netns_int=consts.NETNS_PRIMARY_INTERFACE)], stderr=-2)
            elif distro == consts.CENTOS:
                mock_check_output.assert_called_with(
                    ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
                     'ifup', '{netns_int}'.format(
                         netns_int=consts.NETNS_PRIMARY_INTERFACE)], stderr=-2)
            # Verify sysctl was loaded
            mock_nspopen.assert_called_once_with(
                'amphora-haproxy', ['/sbin/sysctl', '--system'],
                stdout=subprocess.PIPE)
        # One Interface down, Happy Path IPv6
        mock_interfaces.side_effect = [['blah']]
        mock_ifaddress.side_effect = [[netifaces.AF_LINK],
                                      {netifaces.AF_LINK: [{'addr': '123'}]}]
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
        if distro == consts.UBUNTU:
            file_name = ('/etc/netns/{netns}/network/interfaces.d/'
                         '{netns_int}.cfg'.format(
                             netns=consts.AMPHORA_NAMESPACE,
                             netns_int=consts.NETNS_PRIMARY_INTERFACE))
        elif distro == consts.CENTOS:
            file_name = ('/etc/netns/{netns}/sysconfig/network-scripts/'
                         'ifcfg-{netns_int}'.format(
                             netns=consts.AMPHORA_NAMESPACE,
                             netns_int=consts.NETNS_PRIMARY_INTERFACE))
        m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
        with mock.patch('os.open') as mock_open, mock.patch.object(
                os, 'fdopen', m) as mock_fdopen:
            mock_open.return_value = 123
            if distro == consts.UBUNTU:
                rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                          "/plug/vip/2001:db8::2",
                                          content_type='application/json',
                                          data=json.dumps(subnet_info))
            elif distro == consts.CENTOS:
                rv = self.centos_app.post('/' + api_server.VERSION +
                                          "/plug/vip/2001:db8::2",
                                          content_type='application/json',
                                          data=json.dumps(subnet_info))
            self.assertEqual(202, rv.status_code)
            mock_open.assert_any_call(file_name, flags, mode)
            mock_fdopen.assert_any_call(123, 'w')
            plug_inf_file = '/var/lib/octavia/plugged_interfaces'
            flags = os.O_RDWR | os.O_CREAT
            mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
            mock_open.assert_any_call(plug_inf_file, flags, mode)
            mock_fdopen.assert_any_call(123, 'r+')
            handle = m()
            # Without a VRRP IP the base interface uses IPv6 autoconf.
            if distro == consts.UBUNTU:
                handle.write.assert_any_call(
                    '\n# Generated by Octavia agent\n'
                    'auto {netns_int} {netns_int}:0\n\n'
                    'iface {netns_int} inet6 auto\n\n'
                    'iface {netns_int}:0 inet6 static\n'
                    'address 2001:0db8:0000:0000:0000:0000:0000:0002\n'
                    'broadcast 2001:0db8:ffff:ffff:ffff:ffff:ffff:ffff\n'
                    'netmask 32'.format(
                        netns_int=consts.NETNS_PRIMARY_INTERFACE))
            elif distro == consts.CENTOS:
                handle.write.assert_any_call(
                    '\n# Generated by Octavia agent\n'
                    'NM_CONTROLLED="no"\nDEVICE="{netns_int}"\n'
                    'ONBOOT="yes"\nTYPE="Ethernet"\nUSERCTL="yes" \n'
                    'IPV6INIT="yes"\nIPV6_DEFROUTE="yes"\n'
                    'IPV6_AUTOCONF="yes" \n'
                    'IPV6ADDR_SECONDARIES="2001:0db8:0000:0000:0000:0000:'
                    '0000:0002/32"\n'.format(
                        netns_int=consts.NETNS_PRIMARY_INTERFACE))
            if distro == consts.UBUNTU:
                mock_check_output.assert_called_with(
                    ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
                     'ifup', '{netns_int}:0'.format(
                         netns_int=consts.NETNS_PRIMARY_INTERFACE)], stderr=-2)
            elif distro == consts.CENTOS:
                mock_check_output.assert_called_with(
                    ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
                     'ifup', '{netns_int}'.format(
                         netns_int=consts.NETNS_PRIMARY_INTERFACE)], stderr=-2)
        # ifup failure surfaces as a 500 with the command's output.
        mock_interfaces.side_effect = [['blah']]
        mock_ifaddress.side_effect = [[netifaces.AF_LINK],
                                      {netifaces.AF_LINK: [{'addr': '123'}]}]
        mock_check_output.side_effect = [
            'unplug1',
            subprocess.CalledProcessError(
                7, 'test', RANDOM_ERROR), subprocess.CalledProcessError(
                7, 'test', RANDOM_ERROR)]
        m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
        with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
            if distro == consts.UBUNTU:
                rv = self.ubuntu_app.post('/' + api_server.VERSION +
                                          "/plug/vip/2001:db8::2",
                                          content_type='application/json',
                                          data=json.dumps(subnet_info))
            elif distro == consts.CENTOS:
                rv = self.centos_app.post('/' + api_server.VERSION +
                                          "/plug/vip/2001:db8::2",
                                          content_type='application/json',
                                          data=json.dumps(subnet_info))
            self.assertEqual(500, rv.status_code)
            self.assertEqual(
                {'details': RANDOM_ERROR,
                 'message': 'Error plugging VIP'},
                json.loads(rv.data.decode('utf-8')))
    def test_ubuntu_get_interface(self):
        # Delegate to the distro-parameterized interface-lookup scenario.
        self._test_get_interface(consts.UBUNTU)
    def test_centos_get_interface(self):
        # Delegate to the distro-parameterized interface-lookup scenario.
        self._test_get_interface(consts.CENTOS)
    @mock.patch('pyroute2.NetNS')
    def _test_get_interface(self, distro, mock_netns):
        """Exercise the GET /interface/<ip> endpoint for *distro*.

        Covers: an exact IPv4 match (200), an IPv6 match requiring address
        normalization (``::1`` vs the fully expanded form, 200), an IP with
        no matching interface (404), and a non-IP string (400).
        """
        self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
        # Address/link data comes from the mocked netns handle, so no real
        # namespace is touched.
        netns_handle = mock_netns.return_value.__enter__.return_value
        interface_res = {'interface': 'eth0'}
        # Happy path
        netns_handle.get_addr.return_value = [{
            'index': 3, 'family': socket.AF_INET,
            'attrs': [['IFA_ADDRESS', '203.0.113.2']]}]
        netns_handle.get_links.return_value = [{
            'attrs': [['IFLA_IFNAME', 'eth0']]}]
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                     '/interface/203.0.113.2',
                                     data=json.dumps(interface_res),
                                     content_type='application/json')
        elif distro == consts.CENTOS:
            rv = self.centos_app.get('/' + api_server.VERSION +
                                     '/interface/203.0.113.2',
                                     data=json.dumps(interface_res),
                                     content_type='application/json')
        self.assertEqual(200, rv.status_code)
        # Happy path with IPv6 address normalization
        netns_handle.get_addr.return_value = [{
            'index': 3, 'family': socket.AF_INET6,
            'attrs': [['IFA_ADDRESS',
                       '0000:0000:0000:0000:0000:0000:0000:0001']]}]
        netns_handle.get_links.return_value = [{
            'attrs': [['IFLA_IFNAME', 'eth0']]}]
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                     '/interface/::1',
                                     data=json.dumps(interface_res),
                                     content_type='application/json')
        elif distro == consts.CENTOS:
            rv = self.centos_app.get('/' + api_server.VERSION +
                                     '/interface/::1',
                                     data=json.dumps(interface_res),
                                     content_type='application/json')
        self.assertEqual(200, rv.status_code)
        # Nonexistent interface
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                     '/interface/10.0.0.1',
                                     data=json.dumps(interface_res),
                                     content_type='application/json')
        elif distro == consts.CENTOS:
            rv = self.centos_app.get('/' + api_server.VERSION +
                                     '/interface/10.0.0.1',
                                     data=json.dumps(interface_res),
                                     content_type='application/json')
        self.assertEqual(404, rv.status_code)
        # Invalid IP address
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.get('/' + api_server.VERSION +
                                     '/interface/00:00:00:00:00:00',
                                     data=json.dumps(interface_res),
                                     content_type='application/json')
        elif distro == consts.CENTOS:
            rv = self.centos_app.get('/' + api_server.VERSION +
                                     '/interface/00:00:00:00:00:00',
                                     data=json.dumps(interface_res),
                                     content_type='application/json')
        self.assertEqual(400, rv.status_code)
    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
                'get_os_init_system', return_value=consts.INIT_SYSTEMD)
    def test_ubuntu_upload_keepalived_config_systemd(self, mock_init_system):
        # Ubuntu + systemd variant of the keepalived config upload scenario.
        self._test_upload_keepalived_config(consts.INIT_SYSTEMD,
                                            consts.UBUNTU, mock_init_system)
    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
                'get_os_init_system', return_value=consts.INIT_SYSTEMD)
    def test_centos_upload_keepalived_config_systemd(self, mock_init_system):
        # CentOS + systemd variant of the keepalived config upload scenario.
        self._test_upload_keepalived_config(consts.INIT_SYSTEMD,
                                            consts.CENTOS, mock_init_system)
    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
                'get_os_init_system', return_value=consts.INIT_UPSTART)
    def test_ubuntu_upload_keepalived_config_upstart(self, mock_init_system):
        # Ubuntu + upstart variant of the keepalived config upload scenario.
        self._test_upload_keepalived_config(consts.INIT_UPSTART,
                                            consts.UBUNTU, mock_init_system)
    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
                'get_os_init_system', return_value=consts.INIT_SYSVINIT)
    def test_ubuntu_upload_keepalived_config_sysvinit(self, mock_init_system):
        # Ubuntu + SysV-init variant of the keepalived config upload scenario.
        self._test_upload_keepalived_config(consts.INIT_SYSVINIT,
                                            consts.UBUNTU, mock_init_system)
    @mock.patch('os.path.exists')
    @mock.patch('os.makedirs')
    @mock.patch('os.rename')
    @mock.patch('subprocess.check_output')
    @mock.patch('os.remove')
    def _test_upload_keepalived_config(self, init_system, distro,
                                       mock_init_system, mock_remove,
                                       mock_subprocess, mock_rename,
                                       mock_makedirs, mock_exists):
        """Exercise PUT /vrrp/upload for the given distro and init system.

        First pass: config path already exists, so only the keepalived
        config file is (re)written read/write for the owner. Second pass:
        path does not exist yet, so the executable check script is written
        with execute permissions.
        """
        self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        mock_exists.return_value = True
        cfg_path = util.keepalived_cfg_path()
        m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open
        with mock.patch('os.open') as mock_open, mock.patch.object(
                os, 'fdopen', m) as mock_fdopen:
            mock_open.return_value = 123
            if distro == consts.UBUNTU:
                rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                         '/vrrp/upload', data='test')
            elif distro == consts.CENTOS:
                rv = self.centos_app.put('/' + api_server.VERSION +
                                         '/vrrp/upload', data='test')
            mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
            mock_open.assert_called_with(cfg_path, flags, mode)
            # Config body is written in binary mode.
            mock_fdopen.assert_called_with(123, 'wb')
        self.assertEqual(200, rv.status_code)
        mock_exists.return_value = False
        script_path = util.keepalived_check_script_path()
        m = self.useFixture(test_utils.OpenFixture(script_path)).mock_open
        with mock.patch('os.open') as mock_open, mock.patch.object(
                os, 'fdopen', m) as mock_fdopen:
            mock_open.return_value = 123
            if distro == consts.UBUNTU:
                rv = self.ubuntu_app.put('/' + api_server.VERSION +
                                         '/vrrp/upload', data='test')
            elif distro == consts.CENTOS:
                rv = self.centos_app.put('/' + api_server.VERSION +
                                         '/vrrp/upload', data='test')
            # Health-check script must be executable by group/other as well.
            mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                    stat.S_IROTH | stat.S_IXOTH)
            mock_open.assert_called_with(script_path, flags, mode)
            mock_fdopen.assert_called_with(123, 'w')
        self.assertEqual(200, rv.status_code)
    def test_ubuntu_manage_service_vrrp(self):
        # Delegate to the distro-parameterized VRRP action scenario.
        self._test_manage_service_vrrp(consts.UBUNTU)
    def test_centos_manage_service_vrrp(self):
        # Delegate to the distro-parameterized VRRP action scenario.
        self._test_manage_service_vrrp(consts.CENTOS)
@mock.patch('subprocess.check_output')
def _test_manage_service_vrrp(self, distro, mock_check_output):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION + '/vrrp/start')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION + '/vrrp/start')
self.assertEqual(202, rv.status_code)
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/vrrp/restart')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/vrrp/restart')
self.assertEqual(400, rv.status_code)
mock_check_output.side_effect = subprocess.CalledProcessError(1,
'blah!')
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION + '/vrrp/start')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION + '/vrrp/start')
self.assertEqual(500, rv.status_code)
    def test_ubuntu_details(self):
        # Delegate to the distro-parameterized amphora-details scenario.
        self._test_details(consts.UBUNTU)
    def test_centos_details(self):
        # Delegate to the distro-parameterized amphora-details scenario.
        self._test_details(consts.CENTOS)
    @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.'
                'AmphoraInfo._count_haproxy_processes')
    @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.'
                'AmphoraInfo._get_networks')
    @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.'
                'AmphoraInfo._load')
    @mock.patch('os.statvfs')
    @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.'
                'AmphoraInfo._cpu')
    @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.'
                'AmphoraInfo._get_meminfo')
    @mock.patch('octavia.amphorae.backends.agent.api_server.'
                'util.get_listeners')
    @mock.patch('socket.gethostname')
    @mock.patch('subprocess.check_output')
    def _test_details(self, distro, mock_subbprocess, mock_hostname,
                      mock_get_listeners, mock_get_mem, mock_cpu,
                      mock_statvfs, mock_load, mock_get_nets,
                      mock_count_haproxy):
        """Exercise GET /details for *distro*.

        All system probes (hostname, package query, meminfo, cpu, disk,
        load, network counters, haproxy count) are mocked with randomized
        values, and the JSON response is compared field-by-field against
        the dict the endpoint is expected to assemble from them.
        """
        self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
        listener_id = uuidutils.generate_uuid()
        mock_get_listeners.return_value = [listener_id]
        mock_hostname.side_effect = ['test-host']
        # Fake dpkg/rpm output; only the Version field matters below.
        mock_subbprocess.side_effect = [
            b"""Package: haproxy
            Status: install ok installed
            Priority: optional
            Section: net
            Installed-Size: 803
            Maintainer: Ubuntu Developers
            Architecture: amd64
            Version: 9.9.99-9
            """]
        MemTotal = random.randrange(0, 1000)
        MemFree = random.randrange(0, 1000)
        Buffers = random.randrange(0, 1000)
        Cached = random.randrange(0, 1000)
        SwapCached = random.randrange(0, 1000)
        Shmem = random.randrange(0, 1000)
        Slab = random.randrange(0, 1000)
        # Full /proc/meminfo-shaped dict; only the randomized keys above
        # are echoed back in the response.
        memory_dict = {'CmaFree': 0, 'Mapped': 38244, 'CommitLimit': 508048,
                       'MemFree': MemFree, 'AnonPages': 92384,
                       'DirectMap2M': 997376, 'SwapTotal': 0,
                       'NFS_Unstable': 0, 'SReclaimable': 34168,
                       'Writeback': 0, 'PageTables': 3760, 'Shmem': Shmem,
                       'Hugepagesize': 2048, 'MemAvailable': 738356,
                       'HardwareCorrupted': 0, 'SwapCached': SwapCached,
                       'Dirty': 80, 'Active': 237060, 'VmallocUsed': 0,
                       'Inactive(anon)': 2752, 'Slab': Slab, 'Cached': Cached,
                       'Inactive(file)': 149588, 'SUnreclaim': 17796,
                       'Mlocked': 3656, 'AnonHugePages': 6144, 'SwapFree': 0,
                       'Active(file)': 145512, 'CmaTotal': 0,
                       'Unevictable': 3656, 'KernelStack': 2368,
                       'Inactive': 152340, 'MemTotal': MemTotal, 'Bounce': 0,
                       'Committed_AS': 401884, 'Active(anon)': 91548,
                       'VmallocTotal': 34359738367, 'VmallocChunk': 0,
                       'DirectMap4k': 51072, 'WritebackTmp': 0,
                       'Buffers': Buffers}
        mock_get_mem.return_value = memory_dict
        cpu_total = random.randrange(0, 1000)
        cpu_user = random.randrange(0, 1000)
        cpu_system = random.randrange(0, 1000)
        cpu_softirq = random.randrange(0, 1000)
        cpu_dict = {'idle': '7168848', 'system': cpu_system,
                    'total': cpu_total, 'softirq': cpu_softirq, 'nice': '31',
                    'iowait': '902', 'user': cpu_user, 'irq': '0'}
        mock_cpu.return_value = cpu_dict
        f_blocks = random.randrange(0, 1000)
        f_bfree = random.randrange(0, 1000)
        f_frsize = random.randrange(0, 1000)
        f_bavail = random.randrange(0, 1000)
        stats = mock.MagicMock()
        stats.f_blocks = f_blocks
        stats.f_bfree = f_bfree
        stats.f_frsize = f_frsize
        stats.f_bavail = f_bavail
        # Expected disk numbers derived the same way statvfs users do.
        disk_used = (f_blocks - f_bfree) * f_frsize
        disk_available = f_bavail * f_frsize
        mock_statvfs.return_value = stats
        load_1min = random.randrange(0, 10)
        load_5min = random.randrange(0, 10)
        load_15min = random.randrange(0, 10)
        mock_load.return_value = [load_1min, load_5min, load_15min]
        eth1_rx = random.randrange(0, 1000)
        eth1_tx = random.randrange(0, 1000)
        eth2_rx = random.randrange(0, 1000)
        eth2_tx = random.randrange(0, 1000)
        eth3_rx = random.randrange(0, 1000)
        eth3_tx = random.randrange(0, 1000)
        net_dict = {'eth2': {'network_rx': eth2_rx, 'network_tx': eth2_tx},
                    'eth1': {'network_rx': eth1_rx, 'network_tx': eth1_tx},
                    'eth3': {'network_rx': eth3_rx, 'network_tx': eth3_tx}}
        mock_get_nets.return_value = net_dict
        haproxy_count = random.randrange(0, 100)
        mock_count_haproxy.return_value = haproxy_count
        expected_dict = {'active': True, 'api_version': '0.5',
                         'cpu': {'soft_irq': cpu_softirq, 'system': cpu_system,
                                 'total': cpu_total, 'user': cpu_user},
                         'disk': {'available': disk_available,
                                  'used': disk_used},
                         'haproxy_count': haproxy_count,
                         'haproxy_version': '9.9.99-9',
                         'hostname': 'test-host',
                         'listeners': [listener_id],
                         'load': [load_1min, load_5min, load_15min],
                         'memory': {'buffers': Buffers,
                                    'cached': Cached,
                                    'free': MemFree,
                                    'shared': Shmem,
                                    'slab': Slab,
                                    'swap_used': SwapCached,
                                    'total': MemTotal},
                         'networks': {'eth1': {'network_rx': eth1_rx,
                                               'network_tx': eth1_tx},
                                      'eth2': {'network_rx': eth2_rx,
                                               'network_tx': eth2_tx},
                                      'eth3': {'network_rx': eth3_rx,
                                               'network_tx': eth3_tx}},
                         'packages': {},
                         'topology': consts.TOPOLOGY_SINGLE,
                         'topology_status': consts.TOPOLOGY_STATUS_OK}
        if distro == consts.UBUNTU:
            rv = self.ubuntu_app.get('/' + api_server.VERSION + '/details')
        elif distro == consts.CENTOS:
            rv = self.centos_app.get('/' + api_server.VERSION + '/details')
        self.assertEqual(200, rv.status_code)
        self.assertEqual(expected_dict,
                         json.loads(rv.data.decode('utf-8')))
| 48.366041 | 79 | 0.533369 |
d21a5b5730416488262491a4d221b92fe9eadcec | 3,304 | py | Python | test_honeypot_exporter.py | Intrinsec/honeypot_exporter | 8d9109457879e5aa49214e5cf18fa8365a4dac6d | [
"Apache-2.0"
] | 3 | 2020-05-06T21:14:59.000Z | 2021-08-06T12:23:38.000Z | test_honeypot_exporter.py | Intrinsec/honeypot_exporter | 8d9109457879e5aa49214e5cf18fa8365a4dac6d | [
"Apache-2.0"
] | 1 | 2020-06-02T10:27:01.000Z | 2020-09-18T08:43:18.000Z | test_honeypot_exporter.py | Intrinsec/honeypot_exporter | 8d9109457879e5aa49214e5cf18fa8365a4dac6d | [
"Apache-2.0"
] | 1 | 2020-06-02T12:13:59.000Z | 2020-06-02T12:13:59.000Z | import socket
import requests
from scapy.all import send, IP, UDP
# Scrape endpoint of the exporter under test ("honeypot" resolves via the
# docker-compose network).
HONEYPOT_EXPORTER_URL = "http://honeypot:9733/metrics"
# The ip of the honeypot exporter is fixed in the docker-compose
HONEYPOT_IP = "172.20.0.10"
# Default destination port used by both the TCP and UDP packet helpers below.
HONEYPOT_PORT = 4242
# The ip of the test client is fixed in the docker-compose
TEST_CLIENT_IP = "172.20.0.20"
def send_udp_packet(src: str, dst: str, port: int = HONEYPOT_PORT):
"""Use scapy to send crafted udp packet (with spoofed ip source)."""
payload = "test"
packet = IP(src=src, dst=dst) / UDP(dport=port) / payload
send(packet)
def send_tcp_packet(dst: str, port: int = HONEYPOT_PORT):
"""Uses built-in socket to properly create a tcp connection."""
payload = "test"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((dst, port))
s.send(payload.encode("utf-8"))
s.close()
def exporter_line(
auth: str, dst: str, proto: str, count: str, port: int = 4242
):
"""Format the exporter metric line with wanted arguments."""
return f'honeypot_{auth}_connections_total{{dst="{dst}",port="{port}",proto="{proto}"}} {count}'
def test_honeypot_exporter_listeners():
"""Test the honeypot is listening on tcp and udp.
It will send tcp and udp packet and check if metrics are exported
"""
send_tcp_packet(dst=HONEYPOT_IP)
r = requests.get(HONEYPOT_EXPORTER_URL)
assert exporter_line("authorized", HONEYPOT_IP, "tcp", 1) in r.text
send_udp_packet(src=TEST_CLIENT_IP, dst=HONEYPOT_IP)
r = requests.get(HONEYPOT_EXPORTER_URL)
# As we are sending crafted udp packet, the honeypot exported somehow will
# not display the dst ip on the exported metric, but this string "[::]"
assert exporter_line("authorized", "[::]", "udp", 1) in r.text
send_tcp_packet(dst=HONEYPOT_IP, port=4243)
r = requests.get(HONEYPOT_EXPORTER_URL)
assert exporter_line("authorized", HONEYPOT_IP, "tcp", 1) in r.text
send_udp_packet(src=TEST_CLIENT_IP, dst=HONEYPOT_IP, port=4243)
r = requests.get(HONEYPOT_EXPORTER_URL)
# As we are sending crafted udp packet, the honeypot exported somehow will
# not display the dst ip on the exported metric, but this string "[::]"
assert exporter_line("authorized", "[::]", "udp", 1) in r.text
def test_honeypot_exporter_authorization():
    """Test the authorization logic of the exporter.

    Only UDP packet are sent as only UDP Protocol can be spoofed.
    This test is dependant of the preceding one as the exported metrics counter
    are not reset.
    """
    # Each scenario: two spoofed source IPs, then the metric line expected
    # afterwards (counters accumulate across scenarios and the previous test).
    scenarios = [
        # global authorization
        (("172.21.0.10", "172.21.0.11"),
         exporter_line("authorized", "[::]", "udp", 3)),
        # particular authorization
        (("172.20.0.30", "172.23.0.30"),
         exporter_line("authorized", "[::]", "udp", 5)),
        # unauthorized
        (("172.22.0.10", "172.22.0.20"),
         exporter_line("unauthorized", "[::]", "udp", 2)),
    ]
    for spoofed_sources, expected_line in scenarios:
        for spoofed_src in spoofed_sources:
            send_udp_packet(src=spoofed_src, dst=HONEYPOT_IP)
        metrics = requests.get(HONEYPOT_EXPORTER_URL)
        assert expected_line in metrics.text
| 36.307692 | 100 | 0.701877 |
46c190d46c22d1e48c6292730e782f11967f82b5 | 1,373 | py | Python | tests/test_histogram.py | janpipek/physt | bf6b05952b7d09bbbdae2b077f0989c392eac13e | [
"MIT"
] | 123 | 2016-04-05T10:29:02.000Z | 2021-12-15T21:18:23.000Z | tests/test_histogram.py | janpipek/physt | bf6b05952b7d09bbbdae2b077f0989c392eac13e | [
"MIT"
] | 70 | 2016-03-25T19:10:49.000Z | 2022-03-09T11:47:08.000Z | tests/test_histogram.py | janpipek/physt | bf6b05952b7d09bbbdae2b077f0989c392eac13e | [
"MIT"
] | 16 | 2016-09-07T14:07:20.000Z | 2020-07-23T16:13:59.000Z | import numpy as np
from physt import h1
class TestNumpyBins:
    """Tests of the ``h1`` factory with numpy-style bin specifications."""

    def test_nbin(self):
        """An integer bin count yields that many bins spanning the data."""
        data = np.random.rand(100)
        histogram = h1(data, bins=15)
        assert histogram.bin_count == 15
        assert np.isclose(histogram.bin_right_edges[-1], data.max())
        assert np.isclose(histogram.bin_left_edges[0], data.min())

    def test_edges(self):
        """Explicit edges are kept; outside values go to under/overflow."""
        data = np.arange(0, 1, 0.01)
        histogram = h1(data, np.arange(0.1, 0.8001, 0.1))
        assert np.allclose(histogram.numpy_bins,
                           [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
        assert histogram.underflow == 10
        assert histogram.overflow == 19

    def test_range(self):
        """The ``range`` argument restricts the binned interval."""
        data = np.arange(0, 1.00, 0.01)
        histogram = h1(data, 10, range=(0.5, 1.0))
        assert histogram.bin_count == 10
        assert histogram.bin_left_edges[0] == 0.5
        assert histogram.bin_right_edges[-1] == 1.0
        assert histogram.overflow == 0
        assert histogram.underflow == 50
        assert histogram.total == 50

        # With keep_missed=False the under/overflow counters become NaN.
        histogram = h1(data, bins=10, range=(0.5, 1.0), keep_missed=False)
        assert histogram.total == 50
        assert np.isnan(histogram.underflow)
        assert np.isnan(histogram.overflow)

    def test_metadata(self):
        """Name, title and axis name are stored on the histogram."""
        data = np.arange(0, 1.00, 0.01)
        histogram = h1(data, name="name", title="title", axis_name="axis_name")
        assert histogram.name == "name"
        assert histogram.title == "title"
        assert histogram.axis_names == ("axis_name",)
| 32.690476 | 85 | 0.577567 |
73992b06a5f68388ce2cf2d296533683bb5d9c80 | 10,655 | py | Python | pywikibot/tools/djvu.py | PArangSae/pywikibot | caf1401e71a81d11e681a6d6adfdea907aa33b94 | [
"MIT"
] | null | null | null | pywikibot/tools/djvu.py | PArangSae/pywikibot | caf1401e71a81d11e681a6d6adfdea907aa33b94 | [
"MIT"
] | null | null | null | pywikibot/tools/djvu.py | PArangSae/pywikibot | caf1401e71a81d11e681a6d6adfdea907aa33b94 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Wrapper around djvulibre to access djvu files properties and content."""
#
# (C) Pywikibot team, 2015-2020
#
# Distributed under the terms of the MIT license.
#
import os
import re
import subprocess
from collections import Counter
import pywikibot
from pywikibot.tools import deprecated_args
def _call_cmd(args, lib='djvulibre') -> tuple:
    """
    Tiny wrapper around subprocess.Popen().

    @param args: same as Popen()
    @type args: str or typing.Sequence[string]
    @param lib: library to be logged in logging messages
    @type lib: str
    @return: returns a tuple (res, stdoutdata), where
        res is False if dp.returncode != 0 else True
    """
    if not isinstance(args, str):
        # upcast any param in sequence args to str: callers pass ints
        # (page numbers, dpi) and Popen() requires string arguments;
        # the converted list is reused for the logged command line.
        args = [str(a) for a in args]
        cmd = ' '.join(args)
    else:
        cmd = args
    dp = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdoutdata, stderrdata = dp.communicate()
    if dp.returncode != 0:
        pywikibot.error('{0} error; {1}'.format(lib, cmd))
        pywikibot.error('{0}'.format(stderrdata))
        return (False, stdoutdata)
    pywikibot.log('SUCCESS: {0} (PID: {1})'.format(cmd, dp.pid))
    return (True, stdoutdata)
class DjVuFile:

    """Wrapper around djvulibre to access djvu files properties and content.

    Perform file existence checks.

    Control characters in djvu text-layer are converted for convenience
    (see http://djvu.sourceforge.net/doc/man/djvused.html for control chars
    details).
    """

    @deprecated_args(file_djvu='file')
    def __init__(self, file: str):
        """
        Initializer.

        Opens the file once to verify it exists and is readable; any
        OSError raised by open() propagates to the caller.

        @param file: filename (including path) to djvu file
        """
        # Path exactly as given by the caller (kept for repr/str).
        self._filename = file
        filename = os.path.expanduser(file)
        filename = os.path.abspath(filename)
        # Check file exists and has read permissions.
        with open(filename):
            self.file = filename
        self.dirname = os.path.dirname(filename)

        # pattern for parsing of djvudump output.
        # Extracts the optional component id ({...}) and the page number
        # from 'FORM:DJVU' lines.
        self._pat_form = re.compile(
            r' *?FORM:DJVU *?\[\d+\] *?(?P<id>{[^\}]*?})? *?\[P(?P<n>\d+)\]')
        # Extracts page size (e.g. '2550x3300') and dpi from 'INFO' lines.
        self._pat_info = re.compile(
            r'DjVu.*?(?P<size>\d+x\d+).*?(?P<dpi>\d+) dpi')

    def __repr__(self) -> str:
        """Return a more complete string representation."""
        return str("{0}.{1}('{2}')").format(self.__module__,
                                            self.__class__.__name__,
                                            self._filename)

    def __str__(self) -> str:
        """Return a string representation."""
        return str("{}('{}')").format(self.__class__.__name__, self._filename)

    def check_cache(fn):
        """Decorator to check if cache shall be cleared.

        When the wrapped method is called with force=True, the cached
        attributes listed below are dropped before the call so the data
        is re-read from the file.
        """
        cache = ['_page_count', '_has_text', '_page_info']

        def wrapper(obj, *args, **kwargs):
            force = kwargs.get('force', False)
            if force:
                for el in cache:
                    obj.__dict__.pop(el, None)
            _res = fn(obj, *args, **kwargs)
            return _res
        return wrapper

    def check_page_number(fn):
        """Decorator to check if page number is valid.

        The first positional argument of the wrapped method is expected
        to be the 1-based page number.

        @raises ValueError
        """
        def wrapper(obj, *args, **kwargs):
            n = args[0]
            force = kwargs.get('force', False)
            if not (1 <= n <= obj.number_of_images(force=force)):
                raise ValueError('Page %d not in file %s [%d-%d]'
                                 % (n, obj.file, n, obj.number_of_images()))
            _res = fn(obj, *args, **kwargs)
            return _res
        return wrapper

    @check_cache
    def number_of_images(self, force=False):
        """
        Return the number of images in the djvu file.

        Returns False if the external 'djvused' call fails.

        @param force: if True, refresh the cached data
        @type force: bool
        """
        if not hasattr(self, '_page_count'):
            res, stdoutdata = _call_cmd(['djvused', '-e', 'n', self.file])
            if not res:
                return False
            self._page_count = int(stdoutdata)
        return self._page_count

    @check_page_number
    def page_info(self, n, force=False):
        """
        Return a tuple (id, (size, dpi)) for page n of djvu file.

        @param force: if True, refresh the cached data
        @type force: bool
        """
        if not hasattr(self, '_page_info') or force:
            self._get_page_info(force=force)
        return self._page_info[n]

    @check_cache
    def _get_page_info(self, force=False):
        """
        Return a dict of tuples (id, (size, dpi)) for all pages of djvu file.

        Parses 'djvudump' output line by line; also records whether a
        'TXTz' (compressed text layer) chunk was seen, caching the result
        in self._has_text. Returns False if the external call fails.

        @param force: if True, refresh the cached data
        @type force: bool
        """
        if not hasattr(self, '_page_info'):
            self._page_info = {}

            res, stdoutdata = _call_cmd(['djvudump', self.file])
            if not res:
                return False

            has_text = False
            for line in stdoutdata.decode('utf-8').split('\n'):
                if 'TXTz' in line:
                    has_text = True

                if 'FORM:DJVU' in line:
                    m = self._pat_form.search(line)
                    if m:
                        key, id = int(m.group('n')), m.group('id')
                    else:
                        # If djvu doc has only one page,
                        # FORM:DJVU line in djvudump has no id
                        key, id = 1, ''

                if 'INFO' in line:
                    m = self._pat_info.search(line)
                    if m:
                        size, dpi = m.group('size'), int(m.group('dpi'))
                    else:
                        size, dpi = None, None
                else:
                    # Only lines carrying INFO reach the assignment below;
                    # FORM lines merely update key/id for the next INFO.
                    continue

                self._page_info[key] = (id, (size, dpi))
            self._has_text = has_text
        return self._page_info

    def get_most_common_info(self):
        """Return most common size and dpi for pages in djvu file."""
        cnt = Counter(s_d for _, s_d in self._get_page_info().values())
        (size, dpi), _ = cnt.most_common()[0]
        return size, dpi

    @check_cache
    def has_text(self, force=False):
        """
        Test if the djvu file has a text-layer.

        @param force: if True, refresh the cached data
        @type force: bool
        """
        if not hasattr(self, '_has_text'):
            self._get_page_info(force=force)
        return self._has_text

    def _remove_control_chars(self, data):
        """Remove djvu format control characters.

        ``data`` is the raw bytes output of djvutxt; the cleaned text is
        returned as str.

        See http://djvu.sourceforge.net/doc/man/djvused.html for control chars.
        """
        txt = data.decode('utf-8')
        # vertical tab (\013=\x0b): remove
        txt = txt.replace('\x0b', '')
        # group (\035=\x1d) separator: replace with \n
        txt = txt.replace('\x1d', '\n')
        # unit separator (\037=\x1f): replace with \n
        txt = txt.replace('\x1f', '\n')
        # feed char (\f=\x0c), \n and trailing spaces: strip
        txt = txt.strip('\x0c\n ')
        return txt

    @check_page_number
    @check_cache
    def get_page(self, n, force=False):
        """
        Get page n for djvu file.

        Returns the cleaned text of the page, or False if the external
        'djvutxt' call fails.

        @param force: if True, refresh the cached data
        @type force: bool
        @raises ValueError: if the file has no text layer
        """
        if not self.has_text(force=force):
            raise ValueError('Djvu file %s has no text layer.' % self.file)
        res, stdoutdata = _call_cmd(['djvutxt', '--page=%d' % n, self.file])
        if not res:
            return False
        return self._remove_control_chars(stdoutdata)

    @check_page_number
    def whiten_page(self, n):
        """Replace page 'n' of djvu file with a blank page.

        A white page matching the document's most common size/dpi is
        generated with ImageMagick + c44, then page n is deleted and the
        white page inserted in its place. Returns False on any external
        command failure, True on success.
        """
        # tmp files for creation/insertion of a white page.
        white_ppm = os.path.join(self.dirname, 'white_page.ppm')
        white_djvu = os.path.join(self.dirname, 'white_page.djvu')

        n_tot = self.number_of_images()

        # Check n is in valid range and set ref_page number for final checks.
        ref_page = 2 if n == 1 else n - 1

        size, dpi = self.get_most_common_info()

        # Generate white_page.
        res, data = _call_cmd(['convert', '-size', size, 'xc:white',
                               white_ppm], lib='ImageMagik')
        if not res:
            return False

        # Convert white_page to djvu.
        res, data = _call_cmd(['c44', white_ppm, '-dpi', dpi])
        os.unlink(white_ppm)  # rm white_page.ppm before retuning.
        if not res:
            return False

        # Delete page n.
        # Get ref page info for later checks.
        info_ref_page = self.page_info(ref_page)
        res, data = _call_cmd(['djvm', '-d', self.file, n])
        if not res:
            return False

        # Insert new page
        res, data = _call_cmd(['djvm', '-i', self.file, white_djvu, n])
        os.unlink(white_djvu)  # rm white_page.djvu before returning.
        if not res:
            return False

        # Check if page processing is as expected.
        expected_id = '{%s}' % os.path.basename(white_djvu)
        assert self.number_of_images(force=True) == n_tot
        assert self.page_info(n) == (expected_id, (size, dpi))  # white page id
        assert self.page_info(ref_page) == info_ref_page  # ref page info.

        return True

    @check_page_number
    def delete_page(self, n):
        """Delete page 'n' of djvu file .

        Returns False if the external 'djvm' call fails, True on success.
        """
        n_tot = self.number_of_images()

        # Check n is in valid range and set ref_page number for final checks.
        ref_page = n - 1 if n == n_tot else n + 1
        new_ref_page = n - 1 if n == n_tot else n

        # Delete page n.
        # Get ref page info for later checks.
        info_ref_page = self.page_info(ref_page)
        res, data = _call_cmd(['djvm', '-d', self.file, n])
        if not res:
            return False

        # Check if page processing is as expected.
        # ref page info.
        if n_tot > 2:
            assert self.number_of_images(force=True) == n_tot - 1
            # cache cleared above
            assert self.page_info(new_ref_page) == info_ref_page
        else:
            # If djvu has only one page, FORM:DJVU line in djvudump has no id
            _id, (sz, dpi) = info_ref_page
            assert self.page_info(new_ref_page, force=True) == ('', (sz, dpi))

        return True

    # This is to be used only if this class is subclassed and the decorators
    # needs to be used by the child.
    check_page_number = staticmethod(check_page_number)
    check_cache = staticmethod(check_cache)
| 33.090062 | 79 | 0.558893 |
2b5712cab6a1a424b75361bae27a3bf284c67446 | 24,554 | py | Python | tests/invalid_models_tests/test_ordinary_fields.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/invalid_models_tests/test_ordinary_fields.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/invalid_models_tests/test_ordinary_fields.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | import unittest
from django.core.checks import Error, Warning as DjangoWarning
from django.db import connection, models
from django.test import SimpleTestCase, TestCase, skipIfDBFeature
from django.test.utils import isolate_apps, override_settings
from django.utils.functional import lazy
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
@isolate_apps('invalid_models_tests')
class AutoFieldTests(SimpleTestCase):
    """System-check tests for models.AutoField (fields.E100)."""

    def test_valid_case(self):
        class Model(models.Model):
            id = models.AutoField(primary_key=True)

        field = Model._meta.get_field('id')
        self.assertEqual(field.check(), [])

    def test_primary_key(self):
        # primary_key must be True. Refs #12467.
        class Model(models.Model):
            field = models.AutoField(primary_key=False)

            # Prevent Django from autocreating `id` AutoField, which would
            # result in an error, because a model must have exactly one
            # AutoField.
            another = models.IntegerField(primary_key=True)

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                'AutoFields must set primary_key=True.',
                obj=field,
                id='fields.E100',
            ),
        ])
@isolate_apps('invalid_models_tests')
class CharFieldTests(TestCase):
    """System-check tests for models.CharField.

    Covers max_length validation (fields.E120/E121), choices validation
    (fields.E004/E005), db_index (fields.E006), validators (fields.E008)
    and the MySQL-specific unique max_length limit (mysql.E001).
    """

    def test_valid_field(self):
        class Model(models.Model):
            field = models.CharField(
                max_length=255,
                choices=[
                    ('1', 'item1'),
                    ('2', 'item2'),
                ],
                db_index=True,
            )

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [])

    def test_missing_max_length(self):
        class Model(models.Model):
            field = models.CharField()

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "CharFields must define a 'max_length' attribute.",
                obj=field,
                id='fields.E120',
            ),
        ])

    def test_negative_max_length(self):
        class Model(models.Model):
            field = models.CharField(max_length=-1)

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "'max_length' must be a positive integer.",
                obj=field,
                id='fields.E121',
            ),
        ])

    def test_bad_max_length_value(self):
        class Model(models.Model):
            field = models.CharField(max_length="bad")

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "'max_length' must be a positive integer.",
                obj=field,
                id='fields.E121',
            ),
        ])

    def test_str_max_length_value(self):
        class Model(models.Model):
            field = models.CharField(max_length='20')

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "'max_length' must be a positive integer.",
                obj=field,
                id='fields.E121',
            ),
        ])

    def test_str_max_length_type(self):
        # bool is an int subclass, but is still rejected as a max_length.
        class Model(models.Model):
            field = models.CharField(max_length=True)

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "'max_length' must be a positive integer.",
                obj=field,
                id='fields.E121'
            ),
        ])

    def test_non_iterable_choices(self):
        class Model(models.Model):
            field = models.CharField(max_length=10, choices='bad')

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "'choices' must be an iterable (e.g., a list or tuple).",
                obj=field,
                id='fields.E004',
            ),
        ])

    def test_non_iterable_choices_two_letters(self):
        """Two letters isn't a valid choice pair."""
        class Model(models.Model):
            field = models.CharField(max_length=10, choices=['ab'])

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "'choices' must be an iterable containing (actual value, "
                "human readable name) tuples.",
                obj=field,
                id='fields.E005',
            ),
        ])

    def test_iterable_of_iterable_choices(self):
        # Choice items only need to be iterable pairs, not tuples/lists.
        class ThingItem:
            def __init__(self, value, display):
                self.value = value
                self.display = display

            def __iter__(self):
                return (x for x in [self.value, self.display])

            def __len__(self):
                return 2

        class Things:
            def __iter__(self):
                return (x for x in [ThingItem(1, 2), ThingItem(3, 4)])

        class ThingWithIterableChoices(models.Model):
            thing = models.CharField(max_length=100, blank=True, choices=Things())

        self.assertEqual(ThingWithIterableChoices._meta.get_field('thing').check(), [])

    def test_choices_containing_non_pairs(self):
        class Model(models.Model):
            field = models.CharField(max_length=10, choices=[(1, 2, 3), (1, 2, 3)])

        class Model2(models.Model):
            field = models.IntegerField(choices=[0])

        for model in (Model, Model2):
            with self.subTest(model.__name__):
                field = model._meta.get_field('field')
                self.assertEqual(field.check(), [
                    Error(
                        "'choices' must be an iterable containing (actual "
                        "value, human readable name) tuples.",
                        obj=field,
                        id='fields.E005',
                    ),
                ])

    def test_choices_containing_lazy(self):
        class Model(models.Model):
            field = models.CharField(max_length=10, choices=[['1', _('1')], ['2', _('2')]])

        self.assertEqual(Model._meta.get_field('field').check(), [])

    def test_lazy_choices(self):
        # The whole choices value may itself be a lazy object.
        class Model(models.Model):
            field = models.CharField(max_length=10, choices=lazy(lambda: [[1, '1'], [2, '2']], tuple)())

        self.assertEqual(Model._meta.get_field('field').check(), [])

    def test_choices_named_group(self):
        class Model(models.Model):
            field = models.CharField(
                max_length=10, choices=[
                    ['knights', [['L', 'Lancelot'], ['G', 'Galahad']]],
                    ['wizards', [['T', 'Tim the Enchanter']]],
                    ['R', 'Random character'],
                ],
            )

        self.assertEqual(Model._meta.get_field('field').check(), [])

    def test_choices_named_group_non_pairs(self):
        class Model(models.Model):
            field = models.CharField(
                max_length=10,
                choices=[['knights', [['L', 'Lancelot', 'Du Lac']]]],
            )

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "'choices' must be an iterable containing (actual value, "
                "human readable name) tuples.",
                obj=field,
                id='fields.E005',
            ),
        ])

    def test_choices_named_group_bad_structure(self):
        # Nested named groups (groups within groups) are not allowed.
        class Model(models.Model):
            field = models.CharField(
                max_length=10, choices=[
                    ['knights', [
                        ['Noble', [['G', 'Galahad']]],
                        ['Combative', [['L', 'Lancelot']]],
                    ]],
                ],
            )

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "'choices' must be an iterable containing (actual value, "
                "human readable name) tuples.",
                obj=field,
                id='fields.E005',
            ),
        ])

    def test_choices_named_group_lazy(self):
        class Model(models.Model):
            field = models.CharField(
                max_length=10, choices=[
                    [_('knights'), [['L', _('Lancelot')], ['G', _('Galahad')]]],
                    ['R', _('Random character')],
                ],
            )

        self.assertEqual(Model._meta.get_field('field').check(), [])

    def test_bad_db_index_value(self):
        class Model(models.Model):
            field = models.CharField(max_length=10, db_index='bad')

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "'db_index' must be None, True or False.",
                obj=field,
                id='fields.E006',
            ),
        ])

    def test_bad_validators(self):
        class Model(models.Model):
            field = models.CharField(max_length=10, validators=[True])

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "All 'validators' must be callable.",
                hint=(
                    "validators[0] (True) isn't a function or instance of a "
                    "validator class."
                ),
                obj=field,
                id='fields.E008',
            ),
        ])

    @unittest.skipUnless(connection.vendor == 'mysql',
                         "Test valid only for MySQL")
    def test_too_long_char_field_under_mysql(self):
        from django.db.backends.mysql.validation import DatabaseValidation

        class Model(models.Model):
            field = models.CharField(unique=True, max_length=256)

        field = Model._meta.get_field('field')
        validator = DatabaseValidation(connection=connection)
        self.assertEqual(validator.check_field(field), [
            Error(
                'MySQL does not allow unique CharFields to have a max_length > 255.',
                obj=field,
                id='mysql.E001',
            )
        ])
@isolate_apps('invalid_models_tests')
class DateFieldTests(TestCase):
    """System-check tests for DateField (fields.E160, fields.W161)."""

    maxDiff = None

    def test_auto_now_and_auto_now_add_raise_error(self):
        class Model(models.Model):
            field0 = models.DateTimeField(auto_now=True, auto_now_add=True, default=now)
            field1 = models.DateTimeField(auto_now=True, auto_now_add=False, default=now)
            field2 = models.DateTimeField(auto_now=False, auto_now_add=True, default=now)
            field3 = models.DateTimeField(auto_now=True, auto_now_add=True, default=None)

        expected = []
        checks = []
        for i in range(4):
            field = Model._meta.get_field('field%d' % i)
            expected.append(Error(
                "The options auto_now, auto_now_add, and default "
                "are mutually exclusive. Only one of these options "
                "may be present.",
                obj=field,
                id='fields.E160',
            ))
            checks.extend(field.check())
            self.assertEqual(checks, expected)

    def test_fix_default_value(self):
        class Model(models.Model):
            # now() is evaluated at class definition time: fixed value.
            field_dt = models.DateField(default=now())
            field_d = models.DateField(default=now().date())
            # Passing the callable itself is the correct usage.
            field_now = models.DateField(default=now)

        field_dt = Model._meta.get_field('field_dt')
        field_d = Model._meta.get_field('field_d')
        field_now = Model._meta.get_field('field_now')
        errors = field_dt.check()
        errors.extend(field_d.check())
        errors.extend(field_now.check())  # doesn't raise a warning
        self.assertEqual(errors, [
            DjangoWarning(
                'Fixed default value provided.',
                hint='It seems you set a fixed date / time / datetime '
                     'value as default for this field. This may not be '
                     'what you want. If you want to have the current date '
                     'as default, use `django.utils.timezone.now`',
                obj=field_dt,
                id='fields.W161',
            ),
            DjangoWarning(
                'Fixed default value provided.',
                hint='It seems you set a fixed date / time / datetime '
                     'value as default for this field. This may not be '
                     'what you want. If you want to have the current date '
                     'as default, use `django.utils.timezone.now`',
                obj=field_d,
                id='fields.W161',
            )
        ])

    @override_settings(USE_TZ=True)
    def test_fix_default_value_tz(self):
        # Same checks must hold when timezone support is active.
        self.test_fix_default_value()
@isolate_apps('invalid_models_tests')
class DateTimeFieldTests(TestCase):
    """System-check tests for DateTimeField fixed defaults (fields.W161)."""

    maxDiff = None

    def test_fix_default_value(self):
        class Model(models.Model):
            # now() is evaluated at class definition time: fixed value.
            field_dt = models.DateTimeField(default=now())
            field_d = models.DateTimeField(default=now().date())
            # Passing the callable itself is the correct usage.
            field_now = models.DateTimeField(default=now)

        field_dt = Model._meta.get_field('field_dt')
        field_d = Model._meta.get_field('field_d')
        field_now = Model._meta.get_field('field_now')
        errors = field_dt.check()
        errors.extend(field_d.check())
        errors.extend(field_now.check())  # doesn't raise a warning
        self.assertEqual(errors, [
            DjangoWarning(
                'Fixed default value provided.',
                hint='It seems you set a fixed date / time / datetime '
                     'value as default for this field. This may not be '
                     'what you want. If you want to have the current date '
                     'as default, use `django.utils.timezone.now`',
                obj=field_dt,
                id='fields.W161',
            ),
            DjangoWarning(
                'Fixed default value provided.',
                hint='It seems you set a fixed date / time / datetime '
                     'value as default for this field. This may not be '
                     'what you want. If you want to have the current date '
                     'as default, use `django.utils.timezone.now`',
                obj=field_d,
                id='fields.W161',
            )
        ])

    @override_settings(USE_TZ=True)
    def test_fix_default_value_tz(self):
        # Same checks must hold when timezone support is active.
        self.test_fix_default_value()
@isolate_apps('invalid_models_tests')
class DecimalFieldTests(SimpleTestCase):
    """System-check tests for DecimalField (fields.E130-E134)."""

    def test_required_attributes(self):
        class Model(models.Model):
            field = models.DecimalField()

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "DecimalFields must define a 'decimal_places' attribute.",
                obj=field,
                id='fields.E130',
            ),
            Error(
                "DecimalFields must define a 'max_digits' attribute.",
                obj=field,
                id='fields.E132',
            ),
        ])

    def test_negative_max_digits_and_decimal_places(self):
        class Model(models.Model):
            field = models.DecimalField(max_digits=-1, decimal_places=-1)

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "'decimal_places' must be a non-negative integer.",
                obj=field,
                id='fields.E131',
            ),
            Error(
                "'max_digits' must be a positive integer.",
                obj=field,
                id='fields.E133',
            ),
        ])

    def test_bad_values_of_max_digits_and_decimal_places(self):
        class Model(models.Model):
            field = models.DecimalField(max_digits="bad", decimal_places="bad")

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "'decimal_places' must be a non-negative integer.",
                obj=field,
                id='fields.E131',
            ),
            Error(
                "'max_digits' must be a positive integer.",
                obj=field,
                id='fields.E133',
            ),
        ])

    def test_decimal_places_greater_than_max_digits(self):
        class Model(models.Model):
            field = models.DecimalField(max_digits=9, decimal_places=10)

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "'max_digits' must be greater or equal to 'decimal_places'.",
                obj=field,
                id='fields.E134',
            ),
        ])

    def test_valid_field(self):
        # max_digits == decimal_places is allowed.
        class Model(models.Model):
            field = models.DecimalField(max_digits=10, decimal_places=10)

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [])
@isolate_apps('invalid_models_tests')
class FileFieldTests(SimpleTestCase):
    """System-check tests for FileField (fields.E201, fields.E202)."""

    def test_valid_default_case(self):
        class Model(models.Model):
            field = models.FileField()

        self.assertEqual(Model._meta.get_field('field').check(), [])

    def test_valid_case(self):
        class Model(models.Model):
            field = models.FileField(upload_to='somewhere')

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [])

    def test_primary_key(self):
        class Model(models.Model):
            field = models.FileField(primary_key=False, upload_to='somewhere')

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "'primary_key' is not a valid argument for a FileField.",
                obj=field,
                id='fields.E201',
            )
        ])

    def test_upload_to_starts_with_slash(self):
        class Model(models.Model):
            field = models.FileField(upload_to='/somewhere')

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "FileField's 'upload_to' argument must be a relative path, not "
                "an absolute path.",
                obj=field,
                id='fields.E202',
                hint='Remove the leading slash.',
            )
        ])

    def test_upload_to_callable_not_checked(self):
        # A callable upload_to is not inspected, even if it would produce
        # an absolute path at runtime.
        def callable(instance, filename):
            return '/' + filename

        class Model(models.Model):
            field = models.FileField(upload_to=callable)

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [])
@isolate_apps('invalid_models_tests')
class FilePathFieldTests(SimpleTestCase):
    """System-check tests for FilePathField (fields.E140)."""

    def test_forbidden_files_and_folders(self):
        class Model(models.Model):
            field = models.FilePathField(allow_files=False, allow_folders=False)

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
                obj=field,
                id='fields.E140',
            ),
        ])
@isolate_apps('invalid_models_tests')
class GenericIPAddressFieldTests(SimpleTestCase):
    """System-check tests for GenericIPAddressField (fields.E150)."""

    def test_non_nullable_blank(self):
        class Model(models.Model):
            field = models.GenericIPAddressField(null=False, blank=True)

        field = Model._meta.get_field('field')
        self.assertEqual(field.check(), [
            Error(
                ('GenericIPAddressFields cannot have blank=True if null=False, '
                 'as blank values are stored as nulls.'),
                obj=field,
                id='fields.E150',
            ),
        ])
@isolate_apps('invalid_models_tests')
class ImageFieldTests(SimpleTestCase):
    """System-check tests for ImageField (fields.E210)."""

    def test_pillow_installed(self):
        # Expected check output depends on whether Pillow is importable
        # in the current environment.
        try:
            from PIL import Image  # NOQA
        except ImportError:
            pillow_installed = False
        else:
            pillow_installed = True

        class Model(models.Model):
            field = models.ImageField(upload_to='somewhere')

        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [] if pillow_installed else [
            Error(
                'Cannot use ImageField because Pillow is not installed.',
                hint=('Get Pillow at https://pypi.org/project/Pillow/ '
                      'or run command "pip install Pillow".'),
                obj=field,
                id='fields.E210',
            ),
        ]
        self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class IntegerFieldTests(SimpleTestCase):
    """System-check tests for IntegerField (fields.W122)."""

    def test_max_length_warning(self):
        class Model(models.Model):
            value = models.IntegerField(max_length=2)

        field = Model._meta.get_field('value')
        self.assertEqual(field.check(), [
            DjangoWarning(
                "'max_length' is ignored when used with IntegerField",
                hint="Remove 'max_length' from field",
                obj=field,
                id='fields.W122',
            )
        ])
@isolate_apps('invalid_models_tests')
class TimeFieldTests(TestCase):
    """System-check tests for TimeField fixed defaults (fields.W161)."""

    maxDiff = None

    def test_fix_default_value(self):
        class Model(models.Model):
            # now() is evaluated at class definition time: fixed value.
            field_dt = models.TimeField(default=now())
            field_t = models.TimeField(default=now().time())
            # Passing the callable itself is the correct usage.
            field_now = models.DateField(default=now)

        field_dt = Model._meta.get_field('field_dt')
        field_t = Model._meta.get_field('field_t')
        field_now = Model._meta.get_field('field_now')
        errors = field_dt.check()
        errors.extend(field_t.check())
        errors.extend(field_now.check())  # doesn't raise a warning
        self.assertEqual(errors, [
            DjangoWarning(
                'Fixed default value provided.',
                hint='It seems you set a fixed date / time / datetime '
                     'value as default for this field. This may not be '
                     'what you want. If you want to have the current date '
                     'as default, use `django.utils.timezone.now`',
                obj=field_dt,
                id='fields.W161',
            ),
            DjangoWarning(
                'Fixed default value provided.',
                hint='It seems you set a fixed date / time / datetime '
                     'value as default for this field. This may not be '
                     'what you want. If you want to have the current date '
                     'as default, use `django.utils.timezone.now`',
                obj=field_t,
                id='fields.W161',
            )
        ])

    @override_settings(USE_TZ=True)
    def test_fix_default_value_tz(self):
        # Same checks must hold when timezone support is active.
        self.test_fix_default_value()
@isolate_apps('invalid_models_tests')
class TextFieldTests(TestCase):
    """System-check tests for TextField db_index support (fields.W162)."""

    @skipIfDBFeature('supports_index_on_text_field')
    def test_max_length_warning(self):
        class Model(models.Model):
            value = models.TextField(db_index=True)

        field = Model._meta.get_field('value')
        field_type = field.db_type(connection)
        self.assertEqual(field.check(), [
            DjangoWarning(
                '%s does not support a database index on %s columns.'
                % (connection.display_name, field_type),
                hint=(
                    "An index won't be created. Silence this warning if you "
                    "don't care about it."
                ),
                obj=field,
                id='fields.W162',
            )
        ])
| 35.278736 | 105 | 0.532622 |
c73029ecb15a1a29af9d12db5f50a937baeab1eb | 8,085 | py | Python | preprocess.py | sedrickkeh/Hashtag_new | 993edd200e2e0050e06982df2fec30bffaae5577 | [
"MIT"
] | 23 | 2019-05-22T07:13:05.000Z | 2022-02-23T19:53:30.000Z | preprocess.py | sedrickkeh/Hashtag_new | 993edd200e2e0050e06982df2fec30bffaae5577 | [
"MIT"
] | 4 | 2019-10-26T06:19:25.000Z | 2022-01-11T11:21:17.000Z | preprocess.py | sedrickkeh/Hashtag_new | 993edd200e2e0050e06982df2fec30bffaae5577 | [
"MIT"
] | 6 | 2019-06-18T03:44:22.000Z | 2021-11-12T05:40:36.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import glob
import sys
import torch
import onmt.io
import onmt.opts
from onmt.Utils import get_logger
def check_existing_pt_files(opt):
    """Abort if sharded ``.pt`` output files from a previous run exist.

    Training later globs for ``{train|valid|vocab}*.pt`` files under
    ``opt.save_data``, so stale shards from an earlier run would silently be
    mixed in.  Writes a message to stderr and exits with status 1 when any
    matching file is found; returns None otherwise.
    """
    # We will use glob.glob() to find sharded {train|valid}.[0-9]*.pt
    # when training, so check to avoid tampering with existing pt files
    # or mixing them up.
    for t in ['train', 'valid', 'vocab']:
        pattern = opt.save_data + '.' + t + '*.pt'
        if glob.glob(pattern):
            # Fix: "exisiting" typo in the user-facing message.
            sys.stderr.write("Please backup existing pt file: %s, "
                             "to avoid tampering!\n" % pattern)
            sys.exit(1)
def parse_args():
    """Build the preprocessing option parser and return the parsed options.

    Side effects: seeds torch's RNG with ``opt.seed`` and aborts (via
    ``check_existing_pt_files``) when output .pt files already exist.
    """
    parser = argparse.ArgumentParser(
        description='preprocess.py',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # All option groups are registered by onmt.opts; this file only consumes
    # the resulting namespace attributes.
    onmt.opts.add_md_help_argument(parser)
    onmt.opts.preprocess_opts(parser)

    opt = parser.parse_args()
    torch.manual_seed(opt.seed)

    check_existing_pt_files(opt)

    return opt
def build_save_text_dataset_in_shards(src_corpus, conversation_corpus, tgt_corpus, fields,
                                      corpus_type, opt, logger=None):
    '''
    Divide the big corpus into shards, and build dataset separately.
    This is currently only for data_type=='text'.

    The reason we do this is to avoid taking up too much memory due
    to sucking in a huge corpus file.

    To tackle this, we only read in part of the corpus file of size
    `max_shard_size`(actually it is multiples of 64 bytes that equals
    or is slightly larger than this size), and process it into dataset,
    then write it to disk along the way. By doing this, we only focus on
    part of the corpus at any moment, thus effectively reducing memory use.
    According to test, this method can reduce memory footprint by ~50%.

    Note! As we process along the shards, previous shards might still
    stay in memory, but since we are done with them, and no more
    reference to them, if there is memory tight situation, the OS could
    easily reclaim these memory.

    If `max_shard_size` is 0 or is larger than the corpus size, it is
    effectively preprocessed into one dataset, i.e. no sharding.

    NOTE! `max_shard_size` is measuring the input corpus size, not the
    output pt file size. So a shard pt file consists of examples of size
    2 * `max_shard_size`(source + target).

    Returns the list of shard .pt file paths written.
    '''

    corpus_size = os.path.getsize(src_corpus)
    if corpus_size > 10 * (1024 ** 2) and opt.max_shard_size == 0:
        if logger:
            logger.info("Warning. The corpus %s is larger than 10M bytes, "
                        "you can set '-max_shard_size' to process it by "
                        "small shards to use less memory." % src_corpus)

    if opt.max_shard_size != 0:
        if logger:
            logger.info(' * divide corpus into shards and build dataset '
                        'separately (shard_size = %d bytes).'
                        % opt.max_shard_size)

    ret_list = []
    src_iter = onmt.io.ShardedTextCorpusIterator(
        src_corpus, opt.src_seq_length_trunc,
        "src", opt.max_shard_size)
    # assoc_iter ties the conversation/target iterators to the source
    # iterator so all three advance through shards in lockstep.
    conversation_iter = onmt.io.ShardedTextCorpusIterator(
        conversation_corpus, opt.conversation_seq_length_trunc,
        "conversation", opt.max_shard_size,
        assoc_iter=src_iter)
    tgt_iter = onmt.io.ShardedTextCorpusIterator(
        tgt_corpus, opt.tgt_seq_length_trunc,
        "tgt", opt.max_shard_size,
        assoc_iter=src_iter)

    index = 0
    while not src_iter.hit_end():
        index += 1
        dataset = onmt.io.TextDataset(
            fields, src_iter, conversation_iter, tgt_iter,
            src_iter.num_feats, conversation_iter.num_feats, tgt_iter.num_feats,
            src_seq_length=opt.src_seq_length,
            conversation_seq_length=opt.conversation_seq_length,
            tgt_seq_length=opt.tgt_seq_length,
            dynamic_dict=opt.dynamic_dict)

        # We save fields in vocab.pt separately, so make it empty.
        dataset.fields = []

        # One file per shard: <save_data>.<corpus_type>.<index>.pt
        pt_file = "{:s}.{:s}.{:d}.pt".format(
            opt.save_data, corpus_type, index)
        if logger:
            logger.info(" * saving %s data shard to %s."
                        % (corpus_type, pt_file))
        torch.save(dataset, pt_file)

        ret_list.append(pt_file)

    return ret_list
def build_save_dataset(corpus_type, fields, opt, logger=None):
    """Build the dataset for one split ('train' or 'valid') and save it.

    For text data the work is delegated to the sharded builder; other data
    types produce a single monolithic dataset file.  Returns the list of
    .pt files written.
    """
    assert corpus_type in ['train', 'valid']

    if corpus_type == 'train':
        src_corpus = opt.train_src
        conversation_corpus = opt.train_conv
        tgt_corpus = opt.train_tgt
    else:
        src_corpus = opt.valid_src
        conversation_corpus = opt.valid_conv
        tgt_corpus = opt.valid_tgt

    # Currently we only do preprocess sharding for corpus: data_type=='text'.
    if opt.data_type == 'text':
        # Fix: forward the logger — it was previously dropped here, so the
        # per-shard "saving ... data shard" messages were never emitted.
        return build_save_text_dataset_in_shards(
            src_corpus, conversation_corpus, tgt_corpus, fields,
            corpus_type, opt, logger)

    # For data_type == 'img' or 'audio', currently we don't do
    # preprocess sharding. We only build a monolithic dataset.
    # But since the interfaces are uniform, it would be not hard
    # to do this should users need this feature.
    dataset = onmt.io.build_dataset(
        fields, opt.data_type, src_corpus, tgt_corpus,
        src_dir=opt.src_dir,
        src_seq_length=opt.src_seq_length,
        tgt_seq_length=opt.tgt_seq_length,
        src_seq_length_trunc=opt.src_seq_length_trunc,
        tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
        dynamic_dict=opt.dynamic_dict,
        sample_rate=opt.sample_rate,
        window_size=opt.window_size,
        window_stride=opt.window_stride,
        window=opt.window)

    # We save fields in vocab.pt separately, so make it empty.
    dataset.fields = []

    pt_file = "{:s}.{:s}.pt".format(opt.save_data, corpus_type)
    if logger:
        logger.info(" * saving %s dataset to %s." % (corpus_type, pt_file))
    torch.save(dataset, pt_file)

    return [pt_file]
def build_save_vocab(train_dataset, fields, opt, logger=None):
    """Build the src/conversation/tgt vocabularies and save them to disk.

    The vocabularies are written to ``<save_data>.vocab.pt``; the torchtext
    fields themselves are not picklable, so only their vocab payload is saved.
    """
    fields = onmt.io.build_vocab(train_dataset, fields, opt.data_type,
                                 opt.share_vocab,
                                 opt.src_vocab,
                                 opt.src_vocab_size,
                                 opt.src_words_min_frequency,
                                 opt.conversation_vocab,
                                 opt.conversation_vocab_size,
                                 opt.conversation_words_min_frequency,
                                 opt.tgt_vocab,
                                 opt.tgt_vocab_size,
                                 opt.tgt_words_min_frequency,
                                 logger)

    # Can't save fields, so remove/reconstruct at training time.
    vocab_file = opt.save_data + '.vocab.pt'
    torch.save(onmt.io.save_fields_to_vocab(fields), vocab_file)
def main():
    """Preprocessing driver: count features, build fields, then build and
    save the training data, vocabulary, and validation data."""
    opt = parse_args()

    logger = get_logger(opt.log_file)
    logger.info("Extracting features...")
    src_nfeats = onmt.io.get_num_features(opt.data_type, opt.train_src, 'src')
    conversation_nfeats = onmt.io.get_num_features(opt.data_type, opt.train_conv, 'conversation')
    tgt_nfeats = onmt.io.get_num_features(opt.data_type, opt.train_tgt, 'tgt')
    logger.info(" * number of source features: %d." % src_nfeats)
    logger.info(" * number of conversation features: %d." % conversation_nfeats)
    logger.info(" * number of target features: %d." % tgt_nfeats)

    logger.info("Building `Fields` object...")
    fields = onmt.io.get_fields(opt.data_type, src_nfeats, conversation_nfeats, tgt_nfeats)

    logger.info("Building & saving training data...")
    # The returned shard file list is what the vocabulary is built from.
    train_dataset_files = build_save_dataset('train', fields, opt, logger)

    logger.info("Building & saving vocabulary...")
    build_save_vocab(train_dataset_files, fields, opt, logger)

    logger.info("Building & saving validation data...")
    build_save_dataset('valid', fields, opt, logger)
# Script entry point.
if __name__ == "__main__":
    main()
| 37.604651 | 97 | 0.645145 |
08cc2d7c4e5bbf937aa8408bfcb2af28708a87b1 | 9,507 | py | Python | external/cclib/method/volume.py | faribas/RMG-Py | 6149e29b642bf8da9537e2db98f15121f0e040c7 | [
"MIT"
] | 1 | 2017-12-18T18:43:22.000Z | 2017-12-18T18:43:22.000Z | external/cclib/method/volume.py | speth/RMG-Py | 1d2c2b684580396e984459d9347628a5ceb80e2e | [
"MIT"
] | 72 | 2016-06-06T18:18:49.000Z | 2019-11-17T03:21:10.000Z | external/cclib/method/volume.py | speth/RMG-Py | 1d2c2b684580396e984459d9347628a5ceb80e2e | [
"MIT"
] | 3 | 2017-09-22T15:47:37.000Z | 2021-12-30T23:51:47.000Z | """
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 742 $"
import copy
import numpy
# Optional dependencies: the flags record whether PyQuante (basis-function
# evaluation) and PyVTK (VTK export) are importable, so the corresponding
# features can be disabled gracefully at runtime.
try:
    from PyQuante.CGBF import CGBF
    module_pyq = True
except ImportError:  # narrowed from a bare except: only a missing module
    module_pyq = False

try:
    from pyvtk import *
    from pyvtk.DataSetAttr import *
    module_pyvtk = True
except ImportError:
    module_pyvtk = False
from cclib.bridge import makepyquante
from cclib.parser.utils import convertor
class Volume(object):
    """Represent a volume in space.

    Required parameters:
        origin -- the bottom left hand corner of the volume
        topcorner -- the top right hand corner
        spacing -- the distance between the points in the cube

    Attributes:
        data -- a numpy array of values for each point in the volume
                (set to zero at initialisation)
        numpts -- the numbers of points in the (x,y,z) directions
    """

    def __init__(self, origin, topcorner, spacing):
        self.origin = origin
        self.spacing = spacing
        self.topcorner = topcorner
        self.numpts = []
        for i in range(3):
            self.numpts.append(int((self.topcorner[i] - self.origin[i]) / self.spacing[i] + 1))
        self.data = numpy.zeros(tuple(self.numpts), "d")

    def __str__(self):
        """Return a string representation."""
        return "Volume %s to %s (density: %s)" % (self.origin, self.topcorner,
                                                  self.spacing)

    def write(self, filename, format="Cube"):
        """Write the volume to file in VTK or Cube format.

        Raises ValueError for an unknown format.  (Fix: the original raised a
        bare string, which is not a valid exception object.)
        """
        format = format.upper()
        if format not in ["VTK", "CUBE"]:
            raise ValueError("Format must be either VTK or Cube")
        elif format == "VTK":
            self.writeasvtk(filename)
        else:
            self.writeascube(filename)

    def writeasvtk(self, filename):
        # Requires the optional pyvtk package (see module_pyvtk flag above).
        if not module_pyvtk:
            # Fix: Python 2-only "raise Exception, msg" syntax replaced with
            # the call form, which is valid on both Python 2 and 3.
            raise Exception("You need to have pyvtk installed")
        ranges = (numpy.arange(self.data.shape[2]),
                  numpy.arange(self.data.shape[1]),
                  numpy.arange(self.data.shape[0]))
        v = VtkData(RectilinearGrid(*ranges), "Test",
                    PointData(Scalars(self.data.ravel(), "from cclib", "default")))
        v.tofile(filename)

    def integrate(self):
        """Return the integral of the data over the volume (in bohr units)."""
        boxvol = (self.spacing[0] * self.spacing[1] * self.spacing[2] *
                  convertor(1, "Angstrom", "bohr")**3)
        # ndarray.sum() instead of the builtin sum over ravel(): same result,
        # single vectorized pass.
        return self.data.sum() * boxvol

    def integrate_square(self):
        """Return the integral of the squared data over the volume."""
        boxvol = (self.spacing[0] * self.spacing[1] * self.spacing[2] *
                  convertor(1, "Angstrom", "bohr")**3)
        return (self.data**2).sum() * boxvol

    def writeascube(self, filename):
        # Remember that the units are bohr, not Angstroms
        convert = lambda x: convertor(x, "Angstrom", "bohr")
        ans = []
        ans.append("Cube file generated by cclib")
        ans.append("")
        format = "%4d%12.6f%12.6f%12.6f"
        origin = [convert(x) for x in self.origin]
        ans.append(format % (0, origin[0], origin[1], origin[2]))
        ans.append(format % (self.data.shape[0], convert(self.spacing[0]), 0.0, 0.0))
        ans.append(format % (self.data.shape[1], 0.0, convert(self.spacing[1]), 0.0))
        ans.append(format % (self.data.shape[2], 0.0, 0.0, convert(self.spacing[2])))
        line = []
        for i in range(self.data.shape[0]):
            for j in range(self.data.shape[1]):
                for k in range(self.data.shape[2]):
                    line.append(scinotation(self.data[i][j][k]))
                    if len(line) == 6:
                        ans.append(" ".join(line))
                        line = []
                if line:
                    ans.append(" ".join(line))
                    line = []
        # Fix: use a context manager so the file is closed even on error.
        with open(filename, "w") as outputfile:
            outputfile.write("\n".join(ans))
def scinotation(num):
    """Format *num* in the fixed-width scientific notation used by Cube files.

    >>> scinotation(1./654)
    ' 1.52905E-03'
    >>> scinotation(-1./654)
    '-1.52905E-03'
    """
    mantissa, _, exp_str = ("%10.5E" % num).partition("E")
    exponent = int(exp_str)
    # Magnitudes below 1E-99 don't fit the two-digit exponent field; write
    # them out as zero.
    if exponent < -99:
        return " 0.000E+00"
    sign = "-" if exponent < 0 else "+"
    return ("%sE%s%s" % (mantissa, sign, exp_str[-2:])).rjust(12)
def getbfs(coords, gbasis):
    """Convenience function for both wavefunction and density based on PyQuante Ints.py.

    Builds one PyQuante contracted Gaussian basis function (CGBF) per
    cartesian component of every shell in ``gbasis``, centred on the
    corresponding atom, and returns them as a flat list.
    """
    # Dummy atomic numbers (zeros) — only the positions are needed here.
    mymol = makepyquante(coords, [0 for x in coords])

    # Cartesian angular-momentum powers (x, y, z) for each shell type.
    sym2powerlist = {
        'S' : [(0,0,0)],
        'P' : [(1,0,0),(0,1,0),(0,0,1)],
        'D' : [(2,0,0),(0,2,0),(0,0,2),(1,1,0),(0,1,1),(1,0,1)],
        'F' : [(3,0,0),(2,1,0),(2,0,1),(1,2,0),(1,1,1),(1,0,2),
               (0,3,0),(0,2,1),(0,1,2), (0,0,3)]
        }

    bfs = []
    for i, atom in enumerate(mymol):
        bs = gbasis[i]
        for sym, prims in bs:
            for power in sym2powerlist[sym]:
                bf = CGBF(atom.pos(), power)
                for expnt, coef in prims:
                    bf.add_primitive(expnt, coef)
                bf.normalize()
                bfs.append(bf)

    return bfs
def wavefunction(coords, mocoeffs, gbasis, volume):
    """Calculate the magnitude of the wavefunction at every point in a volume.

    Attributes:
        coords -- the coordinates of the atoms
        mocoeffs -- mocoeffs for one eigenvalue
        gbasis -- gbasis from a parser object
        volume -- a template Volume object (will not be altered)

    Returns a new Volume whose data is sum_b mocoeffs[b] * phi_b(r) on the
    template's grid.
    """
    bfs = getbfs(coords, gbasis)

    # Shallow copy is enough: a fresh data array is assigned below, so the
    # template volume's data is never modified.
    wavefn = copy.copy(volume)
    wavefn.data = numpy.zeros( wavefn.data.shape, "d")

    # Grid coordinates are converted from Angstrom to bohr, since the basis
    # functions are evaluated in atomic units.
    conversion = convertor(1,"bohr","Angstrom")
    x = numpy.arange(wavefn.origin[0], wavefn.topcorner[0]+wavefn.spacing[0], wavefn.spacing[0]) / conversion
    y = numpy.arange(wavefn.origin[1], wavefn.topcorner[1]+wavefn.spacing[1], wavefn.spacing[1]) / conversion
    z = numpy.arange(wavefn.origin[2], wavefn.topcorner[2]+wavefn.spacing[2], wavefn.spacing[2]) / conversion

    for bs in range(len(bfs)):
        data = numpy.zeros( wavefn.data.shape, "d")
        for i,xval in enumerate(x):
            for j,yval in enumerate(y):
                for k,zval in enumerate(z):
                    data[i, j, k] = bfs[bs].amp(xval,yval,zval)
        # Scale this basis function's grid by its MO coefficient and
        # accumulate into the total wavefunction.
        numpy.multiply(data, mocoeffs[bs], data)
        numpy.add(wavefn.data, data, wavefn.data)

    return wavefn
def electrondensity(coords, mocoeffslist, gbasis, volume):
    """Calculate the magnitude of the electron density at every point in a volume.

    Attributes:
        coords -- the coordinates of the atoms
        mocoeffs -- mocoeffs for all of the occupied eigenvalues
        gbasis -- gbasis from a parser object
        volume -- a template Volume object (will not be altered)

    Note: mocoeffs is a list of numpy arrays. The list will be of length 1
          for restricted calculations, and length 2 for unrestricted.
    """
    bfs = getbfs(coords, gbasis)

    # Shallow copy is enough: a fresh data array is assigned below.
    density = copy.copy(volume)
    density.data = numpy.zeros( density.data.shape, "d")

    # Grid coordinates converted from Angstrom to bohr (atomic units).
    conversion = convertor(1,"bohr","Angstrom")
    x = numpy.arange(density.origin[0], density.topcorner[0]+density.spacing[0], density.spacing[0]) / conversion
    y = numpy.arange(density.origin[1], density.topcorner[1]+density.spacing[1], density.spacing[1]) / conversion
    z = numpy.arange(density.origin[2], density.topcorner[2]+density.spacing[2], density.spacing[2]) / conversion

    for mocoeffs in mocoeffslist:
        for mocoeff in mocoeffs:
            # Build this MO's wavefunction on the grid, then add its square
            # to the density.
            wavefn = numpy.zeros( density.data.shape, "d")
            for bs in range(len(bfs)):
                data = numpy.zeros( density.data.shape, "d")
                for i,xval in enumerate(x):
                    for j,yval in enumerate(y):
                        tmp = []
                        for k,zval in enumerate(z):
                            tmp.append(bfs[bs].amp(xval, yval, zval))
                        data[i,j,:] = tmp
                numpy.multiply(data, mocoeff[bs], data)
                numpy.add(wavefn, data, wavefn)
            density.data += wavefn**2

    if len(mocoeffslist) == 1:
        # Restricted calculation: each spatial orbital holds two electrons.
        density.data = density.data*2. # doubly-occupied

    return density
if __name__ == "__main__":
    # Legacy Python 2 self-test: parses two reference Gaussian outputs and
    # checks wavefunction/density normalization.  Note the Python 2 print
    # statements — this block cannot run under Python 3 as written.
    try:
        import psyco
        psyco.full()
    except ImportError:
        pass

    from cclib.parser import ccopen
    import logging

    a = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp_basis.log")
    a.logger.setLevel(logging.ERROR)
    c = a.parse()

    b = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp.out")
    b.logger.setLevel(logging.ERROR)
    d = b.parse()

    vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) )
    wavefn = wavefunction(d.atomcoords[0], d.mocoeffs[0][d.homos[0]],
                          c.gbasis, vol)
    assert abs(wavefn.integrate())<1E-6 # not necessarily true for all wavefns
    assert abs(wavefn.integrate_square() - 1.00)<1E-3 # true for all wavefns
    print wavefn.integrate(), wavefn.integrate_square()

    vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) )
    # The four frontier orbitals HOMO-3 .. HOMO should hold eight electrons.
    frontierorbs = [d.mocoeffs[0][(d.homos[0]-3):(d.homos[0]+1)]]
    density = electrondensity(d.atomcoords[0], frontierorbs, c.gbasis, vol)
    assert abs(density.integrate()-8.00)<1E-2
    print "Combined Density of 4 Frontier orbitals=",density.integrate()
2725e51de3d9507d11ea772d27ed79eddd16dd0d | 246 | py | Python | tomodachi/__version__.py | the-gw/tomodachi | a1e2efc1abe6f4e2de4a580e58184323660b4299 | [
"MIT"
] | null | null | null | tomodachi/__version__.py | the-gw/tomodachi | a1e2efc1abe6f4e2de4a580e58184323660b4299 | [
"MIT"
] | null | null | null | tomodachi/__version__.py | the-gw/tomodachi | a1e2efc1abe6f4e2de4a580e58184323660b4299 | [
"MIT"
__version_info__ = (0, 16, 2)

# Join components with dots; the leading dot produced for an integer first
# component is stripped again by replace(count=1).  A string first component
# (e.g. a channel prefix) is kept as-is with no separator before it.
# isinstance() replaces the non-idiomatic "type(n) is int" checks.
__version__ = ''.join(
    '.{}'.format(n) if isinstance(n, int) else str(n)
    for n in __version_info__
).replace('.', '', 1 if isinstance(__version_info__[0], int) else 0)

if __name__ == "__main__":
    print(__version__)
b52bb03a80c867f3049fc0c8f386856f31496e0c | 24,435 | py | Python | tests/python/mkl/test_mkldnn.py | mchoi8739/incubator-mxnet | cff583250479b31c394f568ffb835b720cb84dc4 | [
"Apache-2.0"
] | null | null | null | tests/python/mkl/test_mkldnn.py | mchoi8739/incubator-mxnet | cff583250479b31c394f568ffb835b720cb84dc4 | [
"Apache-2.0"
] | null | null | null | tests/python/mkl/test_mkldnn.py | mchoi8739/incubator-mxnet | cff583250479b31c394f568ffb835b720cb84dc4 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
MKL-DNN related test cases
"""
import sys
import os
import numpy as np
import mxnet as mx
import unittest
from mxnet.test_utils import rand_ndarray, assert_almost_equal
from mxnet.module import Module
from mxnet import gluon
from mxnet.gluon import nn
from mxnet.test_utils import *
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append(os.path.join(curr_path, '../unittest/'))
from common import with_seed
def test_mkldnn_model():
    """Smoke-test bind/forward/backward on a saved MKL-DNN model symbol."""
    model = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data",
                         "test_mkldnn_test_mkldnn_model_model1.json")
    shape = (32, 3, 300, 300)
    ctx = mx.cpu()

    sym = mx.sym.load(model)
    args = sym.list_arguments()
    shapes = sym.infer_shape(data=shape)

    def get_tensors(args, shapes, ctx):
        # One ones-tensor per argument, matching the inferred shapes.
        return {x: mx.nd.ones(y, ctx) for x, y in zip(args, shapes)}

    inputs = get_tensors(args, shapes[0], ctx)
    grads = get_tensors(args, shapes[0], ctx)

    try:
        exe = sym.bind(ctx, inputs, args_grad=grads)
        for _ in range(2):
            exe.forward(is_train=True)
            for y in exe.outputs:
                y.wait_to_read()
            exe.backward()
            for y in exe.grad_arrays:
                y.wait_to_read()
    except Exception as e:
        # Fix: the bare except swallowed the actual failure cause (and even
        # KeyboardInterrupt); narrow it and surface the error in the message.
        assert 0, "test_mkldnn_model exception in bind and execution: %s" % str(e)
@with_seed(1234)
def test_mkldnn_ndarray_slice():
    """Slicing an MKL-DNN-formatted NDArray must still compute correctly.

    Seeded with 1234 so the Xavier-style default initialization yields the
    hard-coded reference value asserted below; the order of RNG-consuming
    calls must not change.
    """
    ctx = mx.cpu()
    net = gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(gluon.nn.Conv2D(channels=32, kernel_size=3, activation=None))
    net.collect_params().initialize(ctx=ctx)
    x = mx.nd.array(np.ones([32, 3, 224, 224]), ctx)
    y = net(x)

    # trigger computation on ndarray slice
    # (0.056331709 is the seed-1234 reference output — see also
    # test_mkldnn_engine_threading, which uses the same constant.)
    assert_almost_equal(y[0].asnumpy()[0, 0, 0], np.array(0.056331709))
@with_seed(1234)
def test_mkldnn_engine_threading():
    """The MKL-DNN engine must produce correct results when invoked from a
    different thread (here: a Gluon DataLoader worker context)."""
    net = gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(gluon.nn.Conv2D(channels=32, kernel_size=3, activation=None))
    net.collect_params().initialize(ctx=mx.cpu())

    class Dummy(gluon.data.Dataset):
        # Minimal two-sample dataset just to drive the DataLoader worker.
        def __len__(self):
            return 2

        def __getitem__(self, key):
            return key, np.ones((3, 224, 224)), np.ones((10, ))

    loader = gluon.data.DataLoader(Dummy(), batch_size=2, num_workers=1)

    X = (32, 3, 32, 32)
    # trigger mkldnn execution thread
    y = net(mx.nd.array(np.ones(X))).asnumpy()

    # Use Gluon dataloader to trigger a different thread:
    # the line below runs inside the loader-driven execution context.
    for _ in loader:
        y = net(mx.nd.array(np.ones(X))).asnumpy()
        # output should be 0.056331709 (the non-mkldnn reference output
        # for seed 1234)
        assert_almost_equal(y[0, 0, 0, 0], np.array(0.056331709))
        break
@with_seed()
def test_mkldnn_reshape():
    """Reshaping the MKL-DNN-formatted output of a convolution must agree
    with reshaping the plain convolution output."""
    def _check_reshape_after_conv(dst_shape):
        in_shape = (1, 1, 4, 4)
        data = mx.symbol.Variable('data')
        conv = mx.symbol.Convolution(data=data, num_filter=16, kernel=(1, 1), pad=(0, 0), stride=(1, 1))
        reshaped = mx.symbol.reshape(data=conv, shape=dst_shape)

        # Random data/weight/bias values, shared by both executors below.
        arg_values = [np.random.uniform(-1, 1, in_shape),
                      np.random.uniform(-1, 1, (16, 1, 1, 1)),
                      np.random.uniform(-1, 1, (1))]

        def _run(sym):
            exe = sym.simple_bind(mx.cpu(), data=in_shape, grad_req='null')
            for arr, val in zip(exe.arg_arrays, arg_values):
                arr[:] = val
            return exe.forward(is_train=False)[0].asnumpy()

        fused = _run(reshaped)
        reference = _run(conv).reshape(dst_shape)
        assert_almost_equal(fused, reference)

    # Exercise 1-D through 4-D target shapes.
    for dst_shape in [(256), (16, 16), (4, 4, 16), (4, 4, 4, 4)]:
        _check_reshape_after_conv(dst_shape)
@with_seed()
def test_reshape_before_conv():
    """Outputs and input gradients must match between imperative and
    hybridized execution when reshapes feed convolutions (exercises
    MKL-DNN layout handling around reshape)."""
    class Net(gluon.HybridBlock):
        """Two convolutions, each preceded by a reshape."""
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(10, (3, 3))
                self.conv1 = nn.Conv2D(5, (3, 3))

        def hybrid_forward(self, F, x, *args, **kwargs):
            x_reshape = x.reshape((0, 0, 20, 5))
            y = self.conv0(x_reshape)
            y_reshape = y.reshape((0, 0, 9, 6))
            out = self.conv1(y_reshape)
            return out

    x = mx.nd.random.uniform(shape=(2, 4, 10, 10))
    x.attach_grad()
    net = Net()
    net.collect_params().initialize()
    # Imperative pass.
    with mx.autograd.record():
        out1 = net(x)
    out1.backward()
    dx1 = x.grad
    # Hybridized (symbolic) pass with the same parameters and input.
    net.hybridize()
    with mx.autograd.record():
        out2 = net(x)
    out2.backward()
    assert_almost_equal(dx1, x.grad, rtol=1e-5, atol=1e-6)
    assert_almost_equal(out1, out2, rtol=1e-5, atol=1e-6)
@with_seed()
def test_slice_before_conv():
    """Outputs and input gradients must match between imperative and
    hybridized execution when slices feed convolutions."""
    class Net(gluon.HybridBlock):
        """Two convolutions, each preceded by a slice."""
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(4, (3, 3))
                self.conv1 = nn.Conv2D(4, (3, 3))

        def hybrid_forward(self, F, x, *args, **kwargs):
            x_slice = x.slice(begin=(0, 0, 0, 0), end=(2, 4, 10, 10))
            y = self.conv0(x_slice)
            y_slice = y.slice(begin=(1, 0, 2, 2), end=(2, 1, 7, 7))
            out = self.conv1(y_slice)
            return out

    x = mx.nd.random.uniform(shape=(2, 10, 10, 10))
    x.attach_grad()
    net = Net()
    net.collect_params().initialize()
    # Imperative pass.
    with mx.autograd.record():
        out1 = net(x)
    out1.backward()
    dx1 = x.grad
    # Hybridized (symbolic) pass with the same parameters and input.
    net.hybridize()
    with mx.autograd.record():
        out2 = net(x)
    out2.backward()
    assert_almost_equal(dx1, x.grad, rtol=1e-5, atol=1e-6)
    assert_almost_equal(out1, out2, rtol=1e-5, atol=1e-6)
@with_seed()
def test_slice_reshape_before_conv():
    """Outputs and input gradients must match between imperative and
    hybridized execution when a slice and then a reshape feed convolutions."""
    class Net(gluon.HybridBlock):
        """Slice before the first convolution, reshape before the second."""
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(4, (3, 3))
                self.conv1 = nn.Conv2D(4, (3, 3))

        def hybrid_forward(self, F, x, *args, **kwargs):
            x_slice = x.slice(begin=(0, 0, 0, 0), end=(2, 4, 8, 9))
            y = self.conv0(x_slice)
            y_reshape = y.reshape((0, 0, 14, 3))
            out = self.conv1(y_reshape)
            return out

    x = mx.nd.random.uniform(shape=(2, 10, 10, 10))
    x.attach_grad()
    net = Net()
    net.collect_params().initialize()
    # Imperative pass.
    with mx.autograd.record():
        out1 = net(x)
    out1.backward()
    dx1 = x.grad
    # Hybridized (symbolic) pass with the same parameters and input.
    net.hybridize()
    with mx.autograd.record():
        out2 = net(x)
    out2.backward()
    assert_almost_equal(dx1, x.grad, rtol=1e-5, atol=1e-6)
    assert_almost_equal(out1, out2, rtol=1e-5, atol=1e-6)
@with_seed()
def test_flatten_slice_after_conv():
    """Smoke test: conv -> flatten -> slice must execute on MKL-DNN layouts."""
    data = mx.symbol.Variable('data')
    weight = mx.symbol.Variable('weight')
    bias = mx.symbol.Variable('bias')
    conv1 = mx.symbol.Convolution(data=data, weight=weight, bias=bias, name='conv1',
                                  num_filter=64, kernel=(3, 3), stride=(1, 1))
    sliced = mx.symbol.slice(data=mx.symbol.flatten(data=conv1), begin=0, end=1)

    input_shape = (2, 16, 16, 16)
    exe = sliced.simple_bind(Context.default_ctx, data=input_shape)
    # Fill data, weight, and bias with random values, then run one forward.
    exe.arg_arrays[0][:] = np.random.rand(*input_shape).astype(np.float32)
    exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
    exe.arg_arrays[2][:] = np.random.normal(size=exe.arg_arrays[2].shape)

    out = exe.forward(is_train=False)
    out[0].wait_to_read()
    print(out[0])
def test_mkldnn_sum_inplace_with_cpu_layout():
    """add_n must handle an MKL-DNN-formatted conv output summed in place
    with a plain CPU-layout array."""
    x_shape = (32, 3, 224, 224)
    y_shape = (32, 32, 222, 222)

    x = mx.sym.Variable("x")
    y = mx.sym.Variable("y")
    conv_out = mx.symbol.Convolution(data=x, num_filter=32, kernel=(3, 3))
    summed = mx.sym.add_n(conv_out, y)

    exe = summed.simple_bind(ctx=mx.cpu(), x=x_shape, y=y_shape)
    result = exe.forward(is_train=False, x=np.ones(x_shape), y=np.ones(y_shape))[0]
    assert_almost_equal(result[0].asnumpy()[0, 0, 0], 1.0)
@with_seed()
def test_batchnorm():
    """Numeric-gradient check for BatchNorm on dense and row_sparse inputs."""
    def check_batchnorm_training(stype):
        for shape in [(2, 3), (2, 3, 2, 2)]:
            data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            # One-element tuple: per-channel parameter shape (channel axis 1).
            s = shape[1],
            gamma = np.ones(s)
            beta = np.ones(s)
            # Perturb one gamma/beta entry so the affine part is non-trivial.
            gamma[1] = 3
            beta[0] = 3

            rolling_mean = np.random.uniform(size=s)
            rolling_std = np.random.uniform(size=s)

            data = mx.symbol.Variable('data', stype=stype)
            in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
                           mx.nd.array(beta).tostype(stype)]
            # Auxiliary (non-learned) running statistics.
            mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]

            test = mx.symbol.BatchNorm(data, fix_gamma=False)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)

    stypes = ['row_sparse', 'default']
    for stype in stypes:
        check_batchnorm_training(stype)
@with_seed()
def test_softmax():
    """Numeric-gradient check for softmax on dense and row_sparse inputs."""
    def _check_softmax_training(stype):
        for shape in [(2, 3), (2, 3, 2, 2)]:
            values = np.random.normal(-0.1, 0.1, size=shape)
            data = mx.symbol.Variable('data', stype=stype)
            sym = mx.symbol.softmax(data, axis=-1)
            in_location = [mx.nd.array(values).tostype(stype)]
            check_numeric_gradient(sym, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4)

    for stype in ('row_sparse', 'default'):
        _check_softmax_training(stype)
@with_seed()
def test_pooling():
    """Numeric-gradient check for average pooling (1-D/2-D/3-D) on dense
    and row_sparse inputs."""
    def check_pooling_training(stype):
        for shape in [(3, 3, 10), (3, 3, 20, 20), (3, 3, 10, 20, 20)]:
            data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            data = mx.symbol.Variable('data', stype=stype)
            in_location = [mx.nd.array(data_tmp).tostype(stype)]

            # np.array(shape).shape[0] is just len(shape): pick the pooling
            # dimensionality (1-D / 2-D / 3-D) matching the input rank.
            if np.array(shape).shape[0] == 3:
                test = mx.symbol.Pooling(data=data, kernel=(3), stride=(2), pool_type='avg')
            elif np.array(shape).shape[0] == 4:
                test = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pool_type='avg')
            elif np.array(shape).shape[0] == 5:
                test = mx.symbol.Pooling(data=data, kernel=(3, 3, 3), stride=(2, 2, 2), pool_type='avg')
            else:
                return 0
            check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4)

    stypes = ['row_sparse', 'default']
    for stype in stypes:
        check_pooling_training(stype)
@with_seed()
def test_activation():
    """Numeric-gradient check for ReLU activation on dense and row_sparse
    inputs."""
    def check_activation_training(stype):
        for shape in [(2, 3, 3), (2, 3, 2, 2)]:
            eps = 1e-5
            data_tmp = np.random.normal(-0.1, 1, size=shape)
            # Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
            # Here we replace small problematic inputs with 1.0. Repro issue with seed 851486559.
            data_tmp[abs(data_tmp) < eps] = 1.0
            data = mx.symbol.Variable('data', stype=stype)

            in_location = [mx.nd.array(data_tmp).tostype(stype)]

            test = mx.symbol.Activation(data, act_type="relu")
            check_numeric_gradient(test, in_location, numeric_eps=eps, rtol=0.16, atol=1e-4)

    stypes = ['row_sparse', 'default']
    for stype in stypes:
        check_activation_training(stype)
@with_seed()
def test_convolution():
    """Numeric-gradient check for Convolution (1-D/2-D/3-D) on dense and
    row_sparse inputs."""
    def check_convolution_training(stype):
        for shape in [(3, 3, 10), (3, 3, 10, 10), (3, 3, 10, 10, 10)]:
            data_tmp = np.random.normal(-0.1, 1, size=shape)
            data = mx.symbol.Variable('data', stype=stype)

            # np.array(shape).shape[0] is len(shape): pick the convolution
            # dimensionality and a matching (num_filter, C, k...) weight shape.
            if np.array(shape).shape[0] == 3:
                test = mx.symbol.Convolution(data=data, kernel=(3,), stride=(2), num_filter=4)
                weight_tmp = np.random.normal(-0.1, 0.1, size=(4, 3, 3))
            elif np.array(shape).shape[0] == 4:
                test = mx.symbol.Convolution(data=data, kernel=(3, 3), stride=(2, 2), num_filter=4)
                weight_tmp = np.random.normal(-0.1, 0.1, size=(4, 3, 3, 3))
            elif np.array(shape).shape[0] == 5:
                test = mx.symbol.Convolution(data=data, kernel=(3, 3, 3), stride=(2, 2, 2), num_filter=4)
                weight_tmp = np.random.normal(-0.1, 0.1, size=(4, 3, 3, 3, 3))
            else:
                return 0
            bias_tmp = np.random.normal(0.1, 0.1, size=(4,))
            in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(weight_tmp).tostype(stype),
                           mx.nd.array(bias_tmp).tostype(stype)]
            check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4)

    stypes = ['row_sparse', 'default']
    for stype in stypes:
        check_convolution_training(stype)
@with_seed()
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/12579")
def test_Deconvolution():
    """Numeric-gradient check for Deconvolution (1-D/2-D) on dense and
    row_sparse inputs.  Currently skipped as flaky (see linked issue)."""
    def check_Deconvolution_training(stype):
        for shape in [(3, 3, 10), (3, 3, 10, 10)]:
            data_tmp = np.random.randint(256, size=shape)
            data = mx.symbol.Variable('data', stype=stype)

            # np.array(shape).shape[0] is len(shape): pick 1-D vs 2-D deconv.
            # Note the deconvolution weight layout is (C_in, num_filter, k...).
            if np.array(shape).shape[0] == 3:
                test = mx.symbol.Deconvolution(data=data, kernel=(3,), stride=(2), num_filter=4)
                weight_tmp = np.random.normal(-0.1, 0.1, size=(3, 4, 3))
            elif np.array(shape).shape[0] == 4:
                test = mx.symbol.Deconvolution(data=data, kernel=(3, 3), stride=(2, 2), num_filter=4)
                weight_tmp = np.random.normal(-0.1, 0.1, size=(3, 4, 3, 3))
            else:
                return 0
            bias_tmp = np.random.normal(0.1, 0.1, size=(4,))
            in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(weight_tmp).tostype(stype),
                           mx.nd.array(bias_tmp).tostype(stype)]
            check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4)

    stypes = ['row_sparse', 'default']
    for stype in stypes:
        check_Deconvolution_training(stype)
@with_seed()
def test_LRN():
    """Numeric-gradient check for LRN on dense and row_sparse inputs."""
    def _check_lrn_training(stype):
        for shape in [(3, 4, 5, 5)]:
            values = np.random.normal(-0.1, 0.1, size=shape)
            data = mx.symbol.Variable('data', stype=stype)
            sym = mx.symbol.LRN(data, nsize=3)
            check_numeric_gradient(sym, [mx.nd.array(values).tostype(stype)],
                                   numeric_eps=1e-2, rtol=0.16, atol=1e-4)

    for stype in ('row_sparse', 'default'):
        _check_lrn_training(stype)
@with_seed()
def test_fullyconnected():
    """Numeric-gradient check for FullyConnected at several input densities
    on dense and row_sparse storage."""
    def check_fullyconnected_training(stype):
        data_shape = rand_shape_nd(2)
        weight_shape = rand_shape_nd(2)
        # Weight inner dimension must match the data's feature dimension.
        weight_shape = (weight_shape[0], data_shape[1])
        # density=0.0 exercises the all-zero (fully sparse) corner case.
        for density in [1.0, 0.5, 0.0]:
            x = rand_ndarray(shape=data_shape, stype=stype, density=density)
            w = rand_ndarray(shape=weight_shape, stype=stype, density=density)
            x_sym = mx.sym.Variable("data")
            w_sym = mx.sym.Variable("weight")
            sym = mx.sym.FullyConnected(data=x_sym, weight=w_sym, num_hidden=weight_shape[0], no_bias=True)
            in_location = [x, w]
            check_numeric_gradient(sym, in_location, numeric_eps=1e-3, rtol=1e-3, atol=5e-3)
    stypes = ['row_sparse', 'default']
    for stype in stypes:
        check_fullyconnected_training(stype)
def test_softmax_with_large_inputs():
    """Softmax over a singleton axis must return exactly 1.0 without
    overflowing or underflowing for extreme input magnitudes."""
    def _check(input_data, expected):
        sym = mx.sym.Variable('data').softmax(axis=1)
        executor = sym.bind(mx.cpu(), args={'data': input_data})
        executor.forward()[0].wait_to_read()
        actual = executor.outputs[0][0][0][0].asnumpy()
        assert_almost_equal(actual, expected, rtol=1e-5, atol=1e-5)

    ones = np.array([1.0, 1.0])
    # Same check order as before: +/-1e30, then +/-3.4e38 (near float32 max).
    for magnitude in (1e30, 3.4e38):
        _check(mx.nd.array([[[[-magnitude, -magnitude]]]]), ones)
        _check(mx.nd.array([[[[magnitude, magnitude]]]]), ones)
@with_seed()
def test_non_mkldnn_fcomputeex():
    # test special case where MKLDNN formatted NDArray feeds into non-mkldnn fcomputeex operator
    # conv is example where MKLDNN NDArray is created from regular NDArrays
    # CustomOps is example of non-mkldnn fcomputeex operator

    @mx.operator.register("custom")
    class CustomProp(mx.operator.CustomOpProp):
        def __init__(self):
            # Bug fix: this method was misspelled "__int__", so it was never
            # invoked and need_top_grad was never configured as intended.
            super(CustomProp, self).__init__(need_top_grad=False)

        def list_arguments(self):
            return ['data']

        def list_outputs(self):
            return ['output']

        def infer_shape(self, in_shape):
            # Identity operator: output shape mirrors the input shape.
            data_shape = in_shape[0]
            output_shape = in_shape[0]
            return [data_shape], [output_shape], []

        def infer_type(self, in_type):
            dtype = in_type[0]
            return [dtype], [dtype], []

        def create_operator(self, ctx, shapes, dtypes):
            return Custom()

    class Custom(mx.operator.CustomOp):
        def forward(self, is_train, req, in_data, out_data, aux):
            print(in_data[0])
            self.assign(out_data[0], req[0], in_data[0])

        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            self.assign(in_grad[0], req[0], out_grad)

    data = mx.symbol.Variable('data')
    conv = mx.sym.Convolution(data=data, kernel=(5, 5), pad=(1, 1), stride=(1, 1), num_filter=8, name="conv", no_bias=True)
    custom = mx.symbol.Custom(name='custom', data=conv, op_type='custom')
    exec1 = custom.bind(mx.cpu(), args={'data': mx.nd.ones([10, 3, 96, 96]), 'conv_weight': mx.nd.ones([8, 3, 5, 5])})
    exec1.forward()[0].wait_to_read()
@with_seed()
def test_conv_transpose():
    """Transposing a convolution output must agree with numpy's transpose.

    The conv output may be held in an MKLDNN-specific layout; transposing
    it has to yield the same values as transposing the plain numpy array.
    """
    axes = [(0,2,1,3), (0,2,3,1), (1,2,3,0), (3,2,1,0)]
    a = np.random.rand(10, 16, 50, 50)
    b = np.random.rand(32, 16, 3, 3)
    x = mx.nd.array(a)
    w = mx.nd.array(b)
    y = mx.nd.Convolution(data=x, weight=w, kernel=(3, 3), num_group=1, num_filter=32, no_bias=True)
    for axis in axes:
        t = mx.nd.transpose(y, axis)
        t.wait_to_read()
        s = y.asnumpy()
        n = np.transpose(s, axis)
        # BUG FIX: the np.allclose result was previously discarded, so the
        # comparison could never fail; assert it.
        assert np.allclose(t.asnumpy(), n)
# This test case is contributed by @awsbillz in https://github.com/apache/incubator-mxnet/issues/14766
@with_seed()
def test_reshape_transpose_6d():
    """Regression test: hybridized reshape->6D transpose->reshape chain
    (pixel-shuffle style upsampling) must execute without error."""
    class Reshape2D(gluon.HybridBlock):
        def __init__(self, factor):
            super(Reshape2D, self).__init__()
            # upsample by the same integer factor in both spatial dims
            self._factors = (int(factor),) * 2
        def hybrid_forward(self, F, x):
            f1, f2 = self._factors
            # In the reshape specs below: 0 copies a dim, -1 infers it,
            # -4 splits a dim in two, -3 merges two dims (MXNet reshape codes).
            # (N, f1*f2*C, H, W)
            x = F.reshape(x, (0, -4, -1, f1 * f2, 0, 0))  # (N, C, f1*f2, H, W)
            x = F.reshape(x, (0, 0, -4, f1, f2, 0, 0))  # (N, C, f1, f2, H, W)
            x = F.transpose(x, (0, 1, 4, 2, 5, 3))  # (N, C, H, f1, W, f2)
            x = F.reshape(x, (0, 0, -3, -3))  # (N, C, H*f1, W*f2)
            return x
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv1 = nn.Conv2D(8, kernel_size=5)
                self.reshape2D = Reshape2D(2)
        def hybrid_forward(self, F, x):
            x = self.conv1(x)
            x = self.reshape2D(x)
            return x
    net = Net()
    net.initialize(mx.init.Xavier(), ctx=mx.cpu())
    net.hybridize()
    data = mx.nd.random_normal(shape=(1, 3, 600, 600))
    output = net(data)
    # materialize the result to surface any deferred execution errors
    a = output.asnumpy()
@with_seed()
def test_weight_async_reorder():
    """Forward the same batch twice through convolutions whose weights are
    computed symbols (w + w), exercising asynchronous weight reordering."""
    in_sym = mx.sym.Variable("data")
    weight_a = mx.sym.Variable("1_weight")
    weight_b = mx.sym.Variable("2_weight")
    first_conv = mx.sym.Convolution(data=in_sym, weight=weight_a + weight_a,
                                    num_filter=32, no_bias=True, kernel=(3, 3))
    second_conv = mx.sym.Convolution(data=first_conv, weight=weight_b + weight_b,
                                     num_filter=32, no_bias=True, kernel=(1, 1))
    ctx = mx.current_context()
    mod = Module(symbol=second_conv, label_names=None, context=ctx)
    mod.bind(for_training=False, data_shapes=[('data', (10, 16, 50, 50))])
    mod.init_params(initializer=mx.init.Xavier(magnitude=2.))
    batch = mx.io.DataBatch(
        [mx.random.uniform(-1.0, 1.0, shape=(10, 16, 50, 50), ctx=ctx)], [])
    # two forward passes: the second reuses (possibly reordered) weights
    for _ in range(2):
        mod.forward(batch, is_train=False)
        for out in mod.get_outputs():
            out.wait_to_read()
@with_seed()
def test_concat():
    """Inference and numeric-gradient checks for concat."""
    sym_a = mx.sym.Variable("a")
    sym_b = mx.sym.Variable("b")
    # one random 4-D shape shared by both inputs (same random-call order
    # as before so seeded runs are reproducible)
    shape = tuple(rand_shape_nd(4))
    for axis in range(0, 4):
        concat_sym = mx.sym.concat(sym_a, sym_b, dim=axis)
        lhs = np.random.uniform(-1, 1, shape)
        rhs = np.random.uniform(-1, 1, shape)
        exe = concat_sym.simple_bind(ctx=mx.cpu(), a=shape, b=shape)
        outputs = exe.forward(is_train=False, a=lhs, b=rhs)
        got = outputs[0].asnumpy()
        expected = np.concatenate((lhs, rhs), axis=axis)
        assert_almost_equal(got, expected)
    def check_concat_training(stype):
        """Gradient check for concat on dim=1 with the given storage type."""
        data_shape = rand_shape_nd(4)
        for density in (1.0, 0.5, 0.0):
            sym = mx.sym.concat(mx.sym.Variable('a'), mx.sym.Variable('b'), dim=1)
            location = [rand_ndarray(shape=data_shape, stype=stype, density=density)
                        for _ in range(2)]
            check_numeric_gradient(sym, location, numeric_eps=1e-3, rtol=1e-3, atol=5e-3)
    for stype in ('row_sparse', 'default'):
        check_concat_training(stype)
@with_seed()
def test_elemwise_add():
    """Inference and numeric-gradient checks for elemwise_add."""
    lhs_sym = mx.sym.Variable("a")
    rhs_sym = mx.sym.Variable("b")
    shape = tuple(rand_shape_nd(4))
    add_sym = mx.sym.elemwise_add(lhs_sym, rhs_sym)
    lhs = np.random.uniform(-1, 1, shape)
    rhs = np.random.uniform(-1, 1, shape)
    exe = add_sym.simple_bind(ctx=mx.cpu(), a=shape, b=shape)
    outputs = exe.forward(is_train=False, a=lhs, b=rhs)
    got = outputs[0].asnumpy()
    assert_almost_equal(got, np.add(lhs, rhs), rtol=1e-6, atol=1e-6)
    def check_elemwise_add_training(stype):
        """Gradient check for elemwise_add with the given storage type."""
        data_shape = rand_shape_nd(4)
        for density in (1.0, 0.5, 0.0):
            sym = mx.sym.elemwise_add(mx.sym.Variable('a'), mx.sym.Variable('b'))
            location = [rand_ndarray(shape=data_shape, stype=stype, density=density)
                        for _ in range(2)]
            check_numeric_gradient(sym, location, numeric_eps=1e-3, rtol=1e-3, atol=5e-3)
    for stype in ('row_sparse', 'default'):
        check_elemwise_add_training(stype)
if __name__ == '__main__':
    # Legacy MXNet test entry point: run this module's tests via nose.
    # NOTE(review): nose is unmaintained; newer MXNet uses pytest.
    import nose
    nose.runmodule()
| 37.708333 | 128 | 0.600614 |
a5d38f13190eff72e6f7b2f51158ea34ab1ab6a2 | 156 | py | Python | django_api_client/client/__init__.py | rhenter/django-api-client | c113a94b292ffbc14e2da589dd9fde61f068dd7f | [
"MIT"
] | 15 | 2020-07-25T12:07:14.000Z | 2021-04-27T14:32:05.000Z | django_api_client/client/__init__.py | rhenter/django-api-client | c113a94b292ffbc14e2da589dd9fde61f068dd7f | [
"MIT"
] | null | null | null | django_api_client/client/__init__.py | rhenter/django-api-client | c113a94b292ffbc14e2da589dd9fde61f068dd7f | [
"MIT"
] | 2 | 2021-04-27T14:32:11.000Z | 2021-06-11T19:03:18.000Z | from .base import BaseAPI, BaseEndpoint # noqa
from .client import api_client_factory # noqa
__all__ = ['BaseAPI', 'BaseEndpoint', 'api_client_factory']
| 26 | 59 | 0.762821 |
a2e51304f19bb125193d5f9d2cf33df7f68a0fb6 | 35 | py | Python | zfit_physics/pdf.py | ryuwd/zfit-physics | 595eb68add00ab367aed5885c48eb78d15dbda0e | [
"BSD-3-Clause"
] | null | null | null | zfit_physics/pdf.py | ryuwd/zfit-physics | 595eb68add00ab367aed5885c48eb78d15dbda0e | [
"BSD-3-Clause"
] | null | null | null | zfit_physics/pdf.py | ryuwd/zfit-physics | 595eb68add00ab367aed5885c48eb78d15dbda0e | [
"BSD-3-Clause"
] | null | null | null | from .models.pdf_argus import Argus | 35 | 35 | 0.857143 |
b7b00fd10d483c5fdb2a0e06e6746d57f1a75209 | 2,132 | py | Python | function.py | Tech-with-anmol/auto_file | 0668e3ef594942055daf18b3df205ad228ee1d96 | [
"MIT"
] | 1 | 2021-09-06T08:04:22.000Z | 2021-09-06T08:04:22.000Z | function.py | Tech-with-anmol/auto_file | 0668e3ef594942055daf18b3df205ad228ee1d96 | [
"MIT"
] | null | null | null | function.py | Tech-with-anmol/auto_file | 0668e3ef594942055daf18b3df205ad228ee1d96 | [
"MIT"
] | null | null | null | import os
# NOTE(review): module-level "documentation" helper; the ``self`` parameter
# is unused and the function only carries its docstring.
def __docs__(self):
    """Library for creating all files required to make a Python library, and for creating normal files."""
    pass
def mklib(dict_name):
    """Create the file skeleton for a new Python library.

    Creates the package directory ``dict_name``, writes ReadMe.md,
    git.gitignore, setup.py and LICENSE templates into the current working
    directory, then chdirs into ``dict_name`` (a lasting side effect) and
    creates empty ``__init__.py`` and ``functions.py`` files.

    Raises:
        FileExistsError: if any of the files already exist (every file is
            opened in exclusive-creation mode ``"x"``).
    """
    os.mkdir(dict_name)
    # Mode "x" both creates the file exclusively (re-running fails loudly
    # instead of silently overwriting user edits) and, via ``with``, closes
    # the handle -- the original opened each file twice and never closed it.
    with open("ReadMe.md", "x") as f:
        f.write("## add readme here")
    with open("git.gitignore", "x") as f:
        f.write("# all folder you not want to upload will be here")
    with open("setup.py", "x") as f:
        # BUG FIX: the original used a double-quoted outer string, so the
        # inner ``""`` terminated it early (via implicit string
        # concatenation) and the generated setup.py contained ``name=,`` /
        # ``version=,`` -- invalid Python.  Single-quote the template.
        f.write('import setuptools \n\nsetuptools.setup( \n name="", \n version="",\n )')
    with open("LICENSE", "x") as f:
        f.write("MIT License \n\nCopyright (c) 2021 YOUR_NAME\n\nPermission is hereby granted, free of charge, to any person \nobtaining a copy of this software and associated documentation files (the Software), \nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, \nand to permit persons to whom the Software is furnished to do so, subject to the following conditions: \nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.")
    os.chdir(dict_name)
    try:
        # create the empty package files, closing handles immediately
        open("__init__.py", "x").close()
        open("functions.py", "x").close()
    except Exception as e:
        # preserved behavior: report the error, then signal FileExistsError
        print(e)
        raise FileExistsError
43d07a527ec51ca872812c7f4cf5fa2926986603 | 3,476 | py | Python | keras_retinanet/utils/coco_eval.py | kukuruza/keras-retinanet | 17aae2cf8906053feaba253fc599a5de3fffc1a1 | [
"Apache-2.0"
] | null | null | null | keras_retinanet/utils/coco_eval.py | kukuruza/keras-retinanet | 17aae2cf8906053feaba253fc599a5de3fffc1a1 | [
"Apache-2.0"
] | null | null | null | keras_retinanet/utils/coco_eval.py | kukuruza/keras-retinanet | 17aae2cf8906053feaba253fc599a5de3fffc1a1 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pycocotools.cocoeval import COCOeval
import os
import keras
import numpy as np
import json
import tempfile
import shutil
import progressbar
assert(callable(progressbar.progressbar)), "Using wrong progressbar module, install 'progressbar2' instead."
def evaluate_coco(generator, model, threshold=0.05):
    """ Use the pycocotools to evaluate a COCO model on a dataset.
    Args
        generator : The generator for generating the evaluation data.
        model     : The model to evaluate.
        threshold : The score threshold to use.
    Returns
        The pycocotools COCOeval stats array, or None when no detection
        exceeded the threshold.
    """
    # start collecting results
    results = []
    image_ids = []
    for index in progressbar.progressbar(range(generator.size()), prefix='COCO evaluation: '):
        image = generator.load_image(index)
        image = generator.preprocess_image(image)
        image, scale = generator.resize_image(image)
        if keras.backend.image_data_format() == 'channels_first':
            image = image.transpose((2, 0, 1))
        # run network
        boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))
        # correct boxes for image scale
        boxes /= scale
        # change to (x, y, w, h) (MS COCO standard)
        # NOTE: width/height are derived in place from (x1, y1, x2, y2)
        boxes[:, :, 2] -= boxes[:, :, 0]
        boxes[:, :, 3] -= boxes[:, :, 1]
        # compute predicted labels and scores
        for box, score, label in zip(boxes[0], scores[0], labels[0]):
            # scores are sorted, so we can break
            if score < threshold:
                break
            # append detection for each positively labeled class
            image_result = {
                'image_id'    : generator.image_ids[index],
                'category_id' : generator.label_to_coco_label(label),
                'score'       : float(score),
                'bbox'        : box.tolist(),
            }
            # append detection to results
            results.append(image_result)
        # append image to list of processed images
        image_ids.append(generator.image_ids[index])
    if not len(results):
        return
    # write output to a temp dir; loadRes below reads the bbox file back
    dirpath = tempfile.mkdtemp()
    bbox_path = os.path.join(dirpath, '{}_bbox_results.json'.format(generator.set_name))
    image_ids_path = os.path.join(dirpath, '{}_processed_image_ids.json'.format(generator.set_name))
    json.dump(results, open(bbox_path, 'w'), indent=4)
    json.dump(image_ids, open(image_ids_path, 'w'), indent=4)
    print ('Dumping bboxes to "%s" and image_ids to "%s".' % (bbox_path, image_ids_path))
    # load results in COCO evaluation tool
    coco_true = generator.coco
    coco_pred = coco_true.loadRes(bbox_path)
    # run COCO evaluation
    coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    shutil.rmtree(dirpath)
    return coco_eval.stats
| 33.747573 | 108 | 0.660242 |
1741a32d9b22a995ae51d80dae1327383d8c1373 | 11,478 | py | Python | tests/test_login_and_assignments.py | DataManagementLab/pretix-cas | f80eac4f698257f131404d2dbb11102dfce4d147 | [
"Apache-2.0"
] | null | null | null | tests/test_login_and_assignments.py | DataManagementLab/pretix-cas | f80eac4f698257f131404d2dbb11102dfce4d147 | [
"Apache-2.0"
] | 4 | 2021-04-05T10:48:58.000Z | 2022-02-15T13:11:45.000Z | tests/test_login_and_assignments.py | DataManagementLab/pretix-cas | f80eac4f698257f131404d2dbb11102dfce4d147 | [
"Apache-2.0"
] | null | null | null | import pytest
from django.test import override_settings
from pretix_cas import views, auth_backend
from pretix_cas.models import CasAttributeTeamAssignmentRule
from rest_framework.reverse import reverse
from pretix.base.models import User, Team, Organizer
# Canned (ticket, attributes, pgtiou) triple mimicking a verified CAS
# response; the 'ou' and 'groupMembership' attributes drive the
# team-assignment rules exercised by the tests below.
fake_cas_data = ('ab12abcd',
                 {'mail': 'john.doe@tu-darmstadt.de', 'eduPersonAffiliation': ['student', 'member', 'employee'],
                  'ou': ['T20', 'FB20'], 'groupMembership': ['cn=T20', 'ou=central-it', 'o=tu-darmstadt'],
                  'givenName': 'John', 'surname': 'Doe'
                  }, None)
def login_mock(cas_data, client):
    """Simulate a CAS login: stub ticket verification to return ``cas_data``
    and hit the CAS response view with the test client."""
    # Override ticket verification so it simply returns 'cas_data'
    views.__verify_cas = lambda request: cas_data
    client.get(reverse('plugins:pretix_cas:cas.response'))
def get_user(cas_data):
    """Return the pretix user whose e-mail matches the CAS attributes."""
    attributes = cas_data[1]
    return User.objects.get(email=attributes.get('mail'))
def is_part_of_team(user, team):
    """Return True when ``user`` belongs to ``team``."""
    membership = user.teams.filter(id=team.id)
    return membership.exists()
@pytest.fixture
def env():
    """Create an organizer with three teams of differing permissions.

    Returns the tuple (central_it, admins, employees).
    """
    organizer = Organizer.objects.create(name="FB 20", slug="FB20")
    central_it = Team.objects.create(name="Central IT", organizer=organizer, can_view_orders=True)
    admins = Team.objects.create(name="Admins", organizer=organizer, can_change_event_settings=True)
    employees = Team.objects.create(name="Employees", organizer=organizer, can_view_vouchers=True)
    return central_it, admins, employees
@pytest.mark.django_db
@override_settings(PRETIX_AUTH_BACKENDS=['pretix_cas.auth_backend.CasAuthBackend'])
def test_successful_user_creation(env, client):
    """A verified CAS login creates a user with the CAS e-mail and name."""
    login_mock(fake_cas_data, client)
    created_user = get_user(fake_cas_data)
    assert created_user.email == fake_cas_data[1]['mail']
    assert created_user.get_full_name() == f"{fake_cas_data[1]['surname']}, {fake_cas_data[1]['givenName']}"
    assert created_user.auth_backend == auth_backend.CasAuthBackend.identifier
@pytest.mark.django_db
@override_settings(PRETIX_AUTH_BACKENDS=['pretix_cas.auth_backend.CasAuthBackend'])
def test_failed_user_creation(env, client):
    """A failed CAS verification (all-None response) must not create a user."""
    login_mock((None, None, None), client)
    assert User.objects.count() == 0
@pytest.mark.django_db
@override_settings(PRETIX_AUTH_BACKENDS=['pretix_cas.auth_backend.CasAuthBackend'])
def test_login_with_weird_cas_attribute_list_response(env, client):
    """Logins must tolerate missing/empty/None/scalar 'ou' and
    'groupMembership' attributes and still apply matching rules."""
    team1 = env[0]
    CasAttributeTeamAssignmentRule.objects.create(attribute='FB20', team=team1)
    team2 = env[2]
    CasAttributeTeamAssignmentRule.objects.create(attribute='o=tu-darmstadt', team=team2)
    # attributes entirely absent
    login_mock(('ab12abcd',
                {'mail': 'abc@def.gh', 'givenName': 'John', 'surname': 'Doe'},
                None), client)
    # attributes present but empty lists
    login_mock(('ab12abcd',
                {'mail': 'abc@def.gh', 'givenName': 'John', 'surname': 'Doe', 'ou':[], 'groupMembership':[]},
                None), client)
    # attributes present but None
    login_mock(('ab12abcd',
                {'mail': 'abc@def.gh', 'givenName': 'John', 'surname': 'Doe', 'ou': None, 'groupMembership': None},
                None), client)
    # scalar string instead of a list -- must still match the 'FB20' rule
    login_mock(('ab12abcd',
                {'mail': 'abc@def.gh', 'givenName': 'John', 'surname': 'Doe', 'ou': 'FB20', 'groupMembership': None},
                None), client)
    assert is_part_of_team(get_user(('',{'mail': 'abc@def.gh'}, None)), team1)
    assert not is_part_of_team(get_user(('',{'mail': 'abc@def.gh'}, None)), team2)
    # scalar groupMembership string -- must match the 'o=tu-darmstadt' rule
    login_mock(('ab12abcd',
                {'mail': 'abc@def.gh', 'givenName': 'John', 'surname': 'Doe', 'ou': None, 'groupMembership': 'o=tu-darmstadt'},
                None), client)
    assert is_part_of_team(get_user(('',{'mail': 'abc@def.gh'}, None)), team1)
    assert is_part_of_team(get_user(('',{'mail': 'abc@def.gh'}, None)), team2)
@pytest.mark.django_db
@override_settings(PRETIX_AUTH_BACKENDS=['pretix_cas.auth_backend.CasAuthBackend'])
def test_auto_assign_ou_rules(env, client):
    """An 'ou' attribute matching a rule assigns exactly that team on login."""
    expected_team = env[0]
    CasAttributeTeamAssignmentRule.objects.create(attribute="T20", team=expected_team)
    login_mock(fake_cas_data, client)
    user = get_user(fake_cas_data)
    assert is_part_of_team(user, expected_team)
    assert user.teams.count() == 1
@pytest.mark.django_db
@override_settings(PRETIX_AUTH_BACKENDS=['pretix_cas.auth_backend.CasAuthBackend'])
def test_auto_assign_group_membership_rules(env, client):
    """Only rules whose attribute appears in 'groupMembership' assign teams."""
    central_it_team = env[0]
    admin_team = env[1]
    employee_team = env[2]
    CasAttributeTeamAssignmentRule.objects.create(attribute="ou=central-it", team=central_it_team)
    CasAttributeTeamAssignmentRule.objects.create(attribute="ou=admin", team=admin_team)
    CasAttributeTeamAssignmentRule.objects.create(attribute="cn=T20", team=employee_team)
    login_mock(fake_cas_data, client)
    user = get_user(fake_cas_data)
    assert is_part_of_team(user, central_it_team)
    assert is_part_of_team(user, employee_team)
    # 'ou=admin' is not in fake_cas_data's groupMembership
    assert not is_part_of_team(user, admin_team)
    assert user.teams.count() == 2
@pytest.mark.django_db
@override_settings(PRETIX_AUTH_BACKENDS=['pretix_cas.auth_backend.CasAuthBackend'])
def test_auto_assign_both(env, client):
    """'ou' and 'groupMembership' rules are evaluated together on login."""
    central_it_team = env[0]
    admin_team = env[1]
    employee_team = env[2]
    CasAttributeTeamAssignmentRule.objects.create(attribute="ou=central-it", team=central_it_team) # Match
    CasAttributeTeamAssignmentRule.objects.create(attribute="ou=admin", team=admin_team) # No match
    CasAttributeTeamAssignmentRule.objects.create(attribute="T20", team=employee_team) # Match
    CasAttributeTeamAssignmentRule.objects.create(attribute="FB00", team=admin_team) # No match
    login_mock(fake_cas_data, client)
    user = get_user(fake_cas_data)
    assert is_part_of_team(user, central_it_team)
    assert not is_part_of_team(user, admin_team)
    assert is_part_of_team(user, employee_team)
    assert user.teams.count() == 2
@pytest.mark.django_db
@override_settings(PRETIX_AUTH_BACKENDS=['pretix_cas.auth_backend.CasAuthBackend'])
def test_auto_assign_group_membership_rules_second_login(env, client):
    """Rules added after the first login are applied on the next login."""
    central_it_team = env[0]
    employee_team = env[2]
    CasAttributeTeamAssignmentRule.objects.create(attribute="ou=central-it", team=central_it_team)
    login_mock(fake_cas_data, client)
    user = get_user(fake_cas_data)
    assert user.teams.count() == 1
    assert is_part_of_team(user, central_it_team)
    # add a second rule, then log in again -- both teams must now apply
    CasAttributeTeamAssignmentRule.objects.create(attribute="o=tu-darmstadt", team=employee_team)
    login_mock(fake_cas_data, client)
    user = get_user(fake_cas_data)
    assert is_part_of_team(user, central_it_team)
    assert is_part_of_team(user, employee_team)
    assert user.teams.count() == 2
@pytest.mark.django_db
@override_settings(PRETIX_AUTH_BACKENDS=['pretix_cas.auth_backend.CasAuthBackend'])
def test_add_rules_in_settings(env, client):
    """Organizer admins can view and add assignment rules; edit/delete of a
    nonexistent rule id returns 404."""
    organizer = Organizer.objects.first()
    central_it = env[0]
    User.objects.create_superuser('admin@localhost', 'admin')
    admin = User.objects.get(email='admin@localhost')
    admin_team = Team.objects.create(organizer=organizer, can_change_organizer_settings=True)
    admin_team.members.add(admin)
    client.login(email='admin@localhost', password='admin')
    response = client.get(f'/control/organizer/{organizer.slug}/teams/assignment_rules')
    assert response.status_code == 200
    response = client.get(f'/control/organizer/{organizer.slug}/teams/assignment_rules/add')
    assert response.status_code == 200
    response = client.post(f'/control/organizer/{organizer.slug}/teams/assignment_rules/add', {
        'team': central_it.id,
        'attribute': 'ou=central-it'
    })
    # successful creation redirects
    assert response.status_code == 302
    assert CasAttributeTeamAssignmentRule.objects.count() == 1
    assert CasAttributeTeamAssignmentRule.objects.first().attribute == 'ou=central-it'
    assert CasAttributeTeamAssignmentRule.objects.first().team == central_it
    # rule id 42 does not exist
    response = client.get(f'/control/organizer/{organizer.slug}/teams/assignment_rules/{42}/edit')
    assert response.status_code == 404
    response = client.post(f'/control/organizer/{organizer.slug}/teams/assignment_rules/{42}/delete')
    assert response.status_code == 404
    client.logout()
@pytest.mark.django_db
@override_settings(PRETIX_AUTH_BACKENDS=['pretix_cas.auth_backend.CasAuthBackend'])
def test_add_ou_rule_in_settings_single_rule(env, client):
    """A single 'FB20' ou-rule added via the settings view assigns the team
    on the next CAS login.

    BUG FIX: this module defined two tests named
    ``test_add_ou_rule_in_settings``, so this one was shadowed and never
    collected by pytest -- renamed to make it run again.  It also compared
    the unbound ``count`` method to an int (``user.teams.count == 0``,
    always False) and fetched the user before it existed.
    """
    organizer = Organizer.objects.first()
    central_it = env[0]
    User.objects.create_superuser('admin@localhost', 'admin')
    admin = User.objects.get(email='admin@localhost')
    admin_team = Team.objects.create(organizer=organizer, can_change_organizer_settings=True)
    admin_team.members.add(admin)
    client.login(email='admin@localhost', password='admin')
    client.post(f'/control/organizer/{organizer.slug}/teams/assignment_rules/add', {
        'team': central_it.id,
        'attribute': 'FB20'
    })
    assert CasAttributeTeamAssignmentRule.objects.count() == 1
    assert CasAttributeTeamAssignmentRule.objects.first().attribute == 'FB20'
    assert CasAttributeTeamAssignmentRule.objects.first().team == central_it
    # the CAS user must not exist before the first login
    assert not User.objects.filter(email=fake_cas_data[1]['mail']).exists()
    login_mock(fake_cas_data, client)
    user = get_user(fake_cas_data)
    assert user.teams.count() == 1
    assert is_part_of_team(user, central_it)
@pytest.mark.django_db
@override_settings(PRETIX_AUTH_BACKENDS=['pretix_cas.auth_backend.CasAuthBackend'])
def test_add_ou_rule_in_settings(env, client):
    """Rules added via the settings view are applied on login; a rule whose
    attribute ('tu-darmstadt') is not an exact CAS attribute must not match."""
    organizer = Organizer.objects.first()
    central_it = env[0]
    some_team = env[1]
    employee_team = env[2]
    User.objects.create_superuser('admin@localhost', 'admin')
    admin = User.objects.get(email='admin@localhost')
    admin_team = Team.objects.create(organizer=organizer, can_change_organizer_settings=True)
    admin_team.members.add(admin)
    client.login(email='admin@localhost', password='admin')
    client.post(f'/control/organizer/{organizer.slug}/teams/assignment_rules/add', {
        'team': employee_team.id,
        'attribute': 'o=tu-darmstadt'
    })
    client.post(f'/control/organizer/{organizer.slug}/teams/assignment_rules/add', {
        'team': central_it.id,
        'attribute': 'FB20'
    })
    client.post(f'/control/organizer/{organizer.slug}/teams/assignment_rules/add', {
        'team': some_team.id,
        'attribute': 'tu-darmstadt'  # This does not match
    })
    login_mock(fake_cas_data, client)
    user = get_user(fake_cas_data)
    assert user.teams.count() == 2
    assert is_part_of_team(user, central_it)
    assert is_part_of_team(user, employee_team)
@pytest.mark.django_db
def test_add_rules_in_settings_insufficient_permissions(env, client):
    """Users without organizer-settings permission get 404 on every
    assignment-rule view."""
    organizer = Organizer.objects.first()
    User.objects.create_user('test@example.org', 'password')
    client.login(email='test@example.org', password='password')
    response = client.get(f'/control/organizer/{organizer.slug}/teams/assignment_rules')
    assert response.status_code == 404
    response = client.post(f'/control/organizer/{organizer.slug}/teams/assignment_rules/add')
    assert response.status_code == 404
    response = client.post(f'/control/organizer/{organizer.slug}/teams/assignment_rules/1/edit')
    assert response.status_code == 404
    response = client.get(f'/control/organizer/{organizer.slug}/teams/assignment_rules/1/edit')
    assert response.status_code == 404
    response = client.post(f'/control/organizer/{organizer.slug}/teams/assignment_rules/1/delete')
    assert response.status_code == 404
    client.logout()
| 39.174061 | 127 | 0.725649 |
e24fb8d2375728bec17ae96041a99045f272d625 | 6,106 | py | Python | zoom-poof/poof_control.py | mbustosorg/zoom-poof | 18b85e035d19515e36257ba570ad1c2a8752dceb | [
"MIT"
] | null | null | null | zoom-poof/poof_control.py | mbustosorg/zoom-poof | 18b85e035d19515e36257ba570ad1c2a8752dceb | [
"MIT"
] | null | null | null | zoom-poof/poof_control.py | mbustosorg/zoom-poof | 18b85e035d19515e36257ba570ad1c2a8752dceb | [
"MIT"
] | null | null | null | """
Copyright (C) 2020 Mauricio Bustos (m@bustos.org)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
import os
import requests
import time
from pythonosc import osc_message_builder
from pythonosc import udp_client
from pythonosc.osc_server import AsyncIOOSCUDPServer
from pythonosc.dispatcher import Dispatcher
import asyncio
from gpiozero import LED, Button
# Logging setup and shared hardware/module state.
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('POOF')
logger.setLevel(logging.INFO)
# Optional endpoint for posting poof notifications (currently unused;
# the request code in handle_poof is commented out).
ZOOM_URL = os.getenv('ZOOM_URL')
logger.info(f'ZOOM_URL={ZOOM_URL}')
# FIFO of pending poof commands consumed by main_loop
queue = []
# Four active-low relay outputs and their corresponding remote buttons
relays = [LED(17, active_high=False), LED(27, active_high=False), LED(23, active_high=False), LED(24, active_high=False)]
remotes = [Button(16), Button(20), Button(21), Button(26)]
# OSC client for LED color broadcasts; assigned in the __main__ block
led_play = None
def broadcast_color(red: int, green: int, blue: int):
    """Broadcast an RGB color over OSC to the LED client (``/color``)."""
    builder = osc_message_builder.OscMessageBuilder(address='/color')
    for channel in (red, green, blue):
        builder.add_arg(channel)
    led_play.send(builder.build())
def handle_poof(unused_addr, name, count, length, style, timing):
    """OSC handler for '/poof': enqueue the command for main_loop.

    NOTE(review): the sequence counter is read and incremented but the
    write-back and the Zoom notification are commented out, so ``seq``
    currently has no effect -- confirm whether this is intentional.
    """
    try:
        logger.info(f'Received Command - ({name}, {count}, {length}, {style}, {timing})')
        queue.append((name, count, length, style, timing))
        with open('seq.txt', 'r') as file:
            seq = int(file.read())
        seq += 1
        #response = requests.post(ZOOM_URL + str(seq), data=f'{name} poofed {count} time for {length}s each', headers={'content-type': 'text/plain'})
        #with open('seq.txt', 'w') as file:
        #    file.write(str(seq))
        #logger.info(f'Sequence: {seq}')
        #logger.info(response)
    except ValueError as e:
        # e.g. seq.txt does not contain an integer
        logger.error(e)
async def run_command(command):
    """ Run 'command' """
    # command is the tuple queued by handle_poof:
    # (name, count, length, style, timing) -- note the local names below:
    # 'timing' is command[2] (the per-step delay) and 'timing_style' is
    # command[4] (e.g. 'Accelerating').
    logger.info(f'Run Command {command}')
    timing = float(command[2])
    count = int(command[1])
    style = command[3]
    timing_style = command[4]
    cylon_index = 0
    cylon_direction = 1
    for i in range(count):
        # ON phase: fire relays according to the requested pattern and
        # broadcast a matching LED color
        if 'Full' in style:
            [x.on() for x in relays]
            broadcast_color(0, 50, 0)
        elif 'Alternating' in style:
            relays[0].on()
            relays[1].on()
            relays[2].off()
            relays[3].off()
            broadcast_color(0, 50, 0)
        elif 'Cylon' in style:
            relays[cylon_index].on()
            # one distinct color per cylon position
            if cylon_index == 0:
                broadcast_color(0, 50, 0)
            elif cylon_index == 1:
                broadcast_color(0, 0, 50)
            elif cylon_index == 2:
                broadcast_color(50, 0, 50)
            elif cylon_index == 3:
                broadcast_color(50, 50, 50)
        await asyncio.sleep(timing)
        # OFF phase (Full/Alternating also wait a second interval)
        if 'Full' in style:
            [x.off() for x in relays]
            broadcast_color(0, 0, 50)
            await asyncio.sleep(timing)
        elif 'Alternating' in style:
            relays[0].off()
            relays[1].off()
            relays[2].on()
            relays[3].on()
            broadcast_color(0, 0, 50)
            await asyncio.sleep(timing)
        elif 'Cylon' in style:
            [x.off() for x in relays]
        if 'Accelerating' in timing_style:
            # shrink the delay linearly toward a 0.05s floor
            timing = max(0.05, timing - float(i + 1) / float(count) * timing)
        if 'Cylon' in style:
            # bounce the index between 0 and 3
            cylon_index = cylon_index + cylon_direction
            if cylon_index == 4:
                cylon_direction = -1
                cylon_index = 2
            elif cylon_index == -1:
                cylon_direction = 1
                cylon_index = 1
    # safety: everything off, LEDs red when done
    [x.off() for x in relays]
    broadcast_color(50, 0, 0)
    logger.info(f'Complete')
async def main_loop():
    """ Main execution loop """
    # Forever: drain the command queue, then (every 0.1s) mirror the state
    # of the physical remote buttons onto the relays.
    [x.off() for x in relays]
    while True:
        if len(queue) > 0:
            logger.info(f'{len(queue)} commands in the queue')
            # run one queued poof command to completion before polling again
            await asyncio.create_task(run_command(queue.pop(0)))
        await asyncio.sleep(0.1)
        for i in range(0, 4):
            if not remotes[i].is_pressed and relays[i].is_lit:
                logger.info(f'Remote off {i}')
                broadcast_color(50, 0, 0)
                relays[i].off()
            elif remotes[i].is_pressed and not relays[i].is_lit:
                logger.info(f'Remote on {i}')
                broadcast_color(30, 30, 30)
                relays[i].on()
async def init_main(args, dispatcher):
    """ Initialization routine """
    # Start the OSC UDP server, then hand control to the main loop.
    loop = asyncio.get_event_loop()
    server = AsyncIOOSCUDPServer((args.ip, args.port), dispatcher, loop)
    transport, protocol = await server.create_serve_endpoint()
    await main_loop()
    # NOTE(review): main_loop runs forever, so this close() is only reached
    # if it raises/returns unexpectedly.
    transport.close()
if __name__ == "__main__":
    # with open('seq.txt', 'w') as file:
    #    file.write('0')
    # Start from a safe state: all relays off.
    [x.off() for x in relays]
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", default="192.168.0.100", help="The ip to listen on")
    parser.add_argument("--port", type=int, default=9999, help="The port to listen on")
    parser.add_argument("--color_ip", default="192.168.0.100", help="IP for color server")
    parser.add_argument("--color_port", type=int, default=9997, help="Port for color server")
    args = parser.parse_args()
    # OSC client used by broadcast_color for LED updates
    led_play = udp_client.UDPClient(args.color_ip, args.color_port)
    dispatcher = Dispatcher()
    dispatcher.map("/poof", handle_poof)
    logger.info(f'Serving on {args.ip}:{args.port}')
    asyncio.run(init_main(args, dispatcher))
| 33.549451 | 149 | 0.608418 |
734227f85ae256cfe8a62a6af67d8b5911f75d31 | 6,140 | py | Python | autotest/test_gwf_utl05_budparse.py | verkaik/modflow6-parallel | fac2892612d044634b9527fc7a417530ed43ca8f | [
"CC0-1.0"
] | 2 | 2021-09-07T07:19:12.000Z | 2022-03-31T10:19:41.000Z | autotest/test_gwf_utl05_budparse.py | verkaik/modflow6-parallel | fac2892612d044634b9527fc7a417530ed43ca8f | [
"CC0-1.0"
] | null | null | null | autotest/test_gwf_utl05_budparse.py | verkaik/modflow6-parallel | fac2892612d044634b9527fc7a417530ed43ca8f | [
"CC0-1.0"
] | 1 | 2020-03-04T17:10:34.000Z | 2020-03-04T17:10:34.000Z | """
Test of budget table parsing
"""
import os
import numpy as np
try:
import pymake
except:
msg = 'Error. Pymake package is not available.\n'
msg += 'Try installing using the following command:\n'
msg += ' pip install https://github.com/modflowpy/pymake/zipball/master'
raise Exception(msg)
try:
import flopy
except:
msg = 'Error. FloPy package is not available.\n'
msg += 'Try installing using the following command:\n'
msg += ' pip install flopy'
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
# Test-case names and per-case model options (one entry per case).
ex = ['gwf_utl05']
laytyp = [1]
ss = [1.e-10]
sy = [0.1]
# Each case runs in its own workspace under temp/.
exdirs = []
for s in ex:
    exdirs.append(os.path.join('temp', s))
ddir = 'data'
# Single-cell model grid.
nlay, nrow, ncol = 1, 1, 1
def build_models():
    """Write a MODFLOW 6 simulation per test case into its workspace.

    Single-cell transient model whose WEL package uses +/-1e-200 rates so
    the printed budget table contains 3-digit exponents (the parsing case
    under test).
    """
    nper = 2
    perlen = [2., 2.]
    nstp = [14, 14]
    tsmult = [1., 1.]
    delr = 10.
    delc = 10.
    top = 10.
    botm = [0.]
    strt = top
    hk = 1.0
    # solver settings
    nouter, ninner = 100, 300
    hclose, rclose, relax = 1e-6, 1e-6, 0.97
    tdis_rc = []
    for idx in range(nper):
        tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
    for idx, dir in enumerate(exdirs):
        name = ex[idx]
        # build MODFLOW 6 files
        ws = dir
        sim = flopy.mf6.MFSimulation(sim_name=name, version='mf6',
                                     exe_name='mf6',
                                     sim_ws=ws)
        # create tdis package
        tdis = flopy.mf6.ModflowTdis(sim, time_units='DAYS',
                                     nper=nper, perioddata=tdis_rc)
        # create gwf model
        gwfname = 'gwf_' + name
        newtonoptions = ['NEWTON', 'UNDER_RELAXATION']
        gwf = flopy.mf6.ModflowGwf(sim, modelname=gwfname,
                                   newtonoptions=newtonoptions,)
        # create iterative model solution and register the gwf model with it
        imsgwf = flopy.mf6.ModflowIms(sim, print_option='SUMMARY',
                                      outer_dvclose=hclose,
                                      outer_maximum=nouter,
                                      under_relaxation='DBD',
                                      under_relaxation_theta=0.7,
                                      inner_maximum=ninner,
                                      inner_dvclose=hclose, rcloserecord=rclose,
                                      linear_acceleration='BICGSTAB',
                                      scaling_method='NONE',
                                      reordering_method='NONE',
                                      relaxation_factor=relax,
                                      filename='{}.ims'.format(gwfname))
        sim.register_ims_package(imsgwf, [gwf.name])
        dis = flopy.mf6.ModflowGwfdis(gwf, nlay=nlay, nrow=nrow, ncol=ncol,
                                      delr=delr, delc=delc,
                                      top=top, botm=botm,
                                      idomain=np.ones((nlay, nrow, ncol),
                                                      dtype=int))
        # initial conditions
        ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
        # node property flow
        npf = flopy.mf6.ModflowGwfnpf(gwf, save_flows=False,
                                      icelltype=laytyp[idx],
                                      k=hk, k33=hk)
        # storage
        sto = flopy.mf6.ModflowGwfsto(gwf, save_flows=False,
                                      iconvert=laytyp[idx],
                                      ss=ss[idx], sy=sy[idx],
                                      steady_state={0: False},
                                      transient={0: True})
        # wel files
        # include very small well rates to ensure that budget table correctly
        # prints small numbers with 3-digit exponents
        welspdict = {0: [[(0, 0, 0), -1.e-200, 0.]],
                     1: [[(0, 0, 0), 1.e-200, 0.]]}
        wel = flopy.mf6.ModflowGwfwel(gwf, print_input=True, print_flows=True,
                                      stress_period_data=welspdict,
                                      save_flows=False,
                                      auxiliary='CONCENTRATION', pname='WEL-1')
        # output control
        oc = flopy.mf6.ModflowGwfoc(gwf,
                                    budget_filerecord='{}.cbc'.format(gwfname),
                                    head_filerecord='{}.hds'.format(gwfname),
                                    headprintrecord=[
                                        ('COLUMNS', 10, 'WIDTH', 15,
                                         'DIGITS', 6, 'GENERAL')],
                                    saverecord=[('HEAD', 'ALL')],
                                    printrecord=[('HEAD', 'ALL'),
                                                 ('BUDGET', 'ALL')])
        # write MODFLOW 6 files
        sim.write_simulation()
    return
def eval_flow(sim):
    """Parse the GWF listing-file budget and verify the tiny well rates
    net out to zero in every stress period."""
    print('evaluating flow...')
    gwfname = 'gwf_' + ex[sim.idxsim]
    # This will fail if budget numbers cannot be read
    lst_path = os.path.join(sim.simpath, '{}.lst'.format(gwfname))
    budget = flopy.utils.Mf6ListBudget(lst_path)
    print(budget.get_record_names())
    incremental = budget.get_incremental()
    print(incremental)
    # Both well terms should be numerically zero (rates are +/- 1e-200).
    for term in ('WEL_IN', 'WEL_OUT'):
        assert np.allclose(incremental[term], 0.)
    return
# - No need to change any code below
def test_mf6model():
    """Nose-style generator test: build all models once, then yield one
    MODFLOW 6 run per example directory."""
    # initialize testing framework
    test = testing_framework()
    # build the models
    build_models()
    # run the test models; loop variable renamed so it no longer shadows
    # the builtin ``dir``
    for idx, exdir in enumerate(exdirs):
        yield test.run_mf6, Simulation(exdir, exfunc=eval_flow, idxsim=idx)
    return
def main():
    """Standalone entry point: build and run every test model serially."""
    # initialize testing framework
    test = testing_framework()
    # build the models
    build_models()
    # run the test models; loop variable renamed so it no longer shadows
    # the builtin ``dir``
    for idx, exdir in enumerate(exdirs):
        sim = Simulation(exdir, exfunc=eval_flow, idxsim=idx)
        test.run_mf6(sim)
    return
if __name__ == "__main__":
    # announce standalone (non-nose) execution of this test module
    print('standalone run of {}'.format(os.path.basename(__file__)))
    # run main routine
    main()
| 31.326531 | 80 | 0.49886 |
020f90feadb2539584de35406cb484ce5a91d681 | 8,154 | py | Python | tf_agents/bandits/networks/global_and_arm_feature_network_test.py | Francis777/agents | 24e878a697be418307cfbff69724d86be767719d | [
"Apache-2.0"
] | 3,175 | 2017-09-08T18:28:32.000Z | 2022-03-31T01:32:22.000Z | tf_agents/bandits/networks/global_and_arm_feature_network_test.py | Francis777/agents | 24e878a697be418307cfbff69724d86be767719d | [
"Apache-2.0"
] | 703 | 2017-09-18T05:51:57.000Z | 2022-03-31T17:37:50.000Z | tf_agents/bandits/networks/global_and_arm_feature_network_test.py | Francis777/agents | 24e878a697be418307cfbff69724d86be767719d | [
"Apache-2.0"
] | 844 | 2017-09-08T23:28:57.000Z | 2022-03-30T09:29:32.000Z | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.bandits.networks.global_and_arm_feature_network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.bandits.networks import global_and_arm_feature_network as gafn
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.specs import tensor_spec
from tf_agents.utils import test_utils
# Shared test parameterization: each named case fixes the batch size, the
# per-arm feature dimension, and the number of actions (arms) used when
# sampling inputs in the test methods below.
parameters = parameterized.named_parameters(
    {
        'testcase_name': 'batch2feat4act3',
        'batch_size': 2,
        'feature_dim': 4,
        'num_actions': 3
    }, {
        'testcase_name': 'batch1feat7act9',
        'batch_size': 1,
        'feature_dim': 7,
        'num_actions': 9
    })
class GlobalAndArmFeatureNetworkTest(parameterized.TestCase,
                                     test_utils.TestCase):
  """Output-shape tests for the global-and-arm-feature bandit networks.

  The five common-tower variants only differ in their layer tuples, so they
  share the `_check_common_tower_network` helper below instead of repeating
  the same build/run/assert sequence.
  """

  def _check_common_tower_network(self, batch_size, feature_dim, num_actions,
                                  global_layers, arm_layers, common_layers):
    """Builds a common-tower network with the given layer tuples, runs a
    sampled observation batch through it, and asserts that the output has
    shape (batch_size, num_actions)."""
    obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
        7, feature_dim, num_actions)
    net = gafn.create_feed_forward_common_tower_network(
        obs_spec,
        global_layers=global_layers,
        arm_layers=arm_layers,
        common_layers=common_layers)
    input_nest = tensor_spec.sample_spec_nest(
        obs_spec, outer_dims=(batch_size,))
    output, _ = net(input_nest)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    output = self.evaluate(output)
    self.assertAllEqual(output.shape, (batch_size, num_actions))

  @parameters
  def testCreateFeedForwardCommonTowerNetwork(self, batch_size, feature_dim,
                                              num_actions):
    self._check_common_tower_network(
        batch_size, feature_dim, num_actions,
        global_layers=(4, 3, 2),
        arm_layers=(6, 5, 4),
        common_layers=(7, 6, 5))

  @parameters
  def testCreateFeedForwardCommonTowerNetworkWithEmptyLayers(
      self, batch_size, feature_dim, num_actions):
    self._check_common_tower_network(
        batch_size, feature_dim, num_actions,
        global_layers=(),
        arm_layers=(),
        common_layers=())

  @parameters
  def testCreateFeedForwardCommonTowerNetworkWithEmptyGlobalLayers(
      self, batch_size, feature_dim, num_actions):
    self._check_common_tower_network(
        batch_size, feature_dim, num_actions,
        global_layers=(),
        arm_layers=(6, 5, 4),
        common_layers=(7, 6, 5))

  @parameters
  def testCreateFeedForwardCommonTowerNetworkWithEmptyArmLayers(
      self, batch_size, feature_dim, num_actions):
    self._check_common_tower_network(
        batch_size, feature_dim, num_actions,
        global_layers=(4, 3, 2),
        arm_layers=(),
        common_layers=(7, 6, 5))

  @parameters
  def testCreateFeedForwardCommonTowerNetworkWithEmptyCommonLayers(
      self, batch_size, feature_dim, num_actions):
    self._check_common_tower_network(
        batch_size, feature_dim, num_actions,
        global_layers=(4, 3, 2),
        arm_layers=(6, 5, 4),
        common_layers=())

  @parameters
  def testCreateFeedForwardDotProductNetwork(self, batch_size, feature_dim,
                                             num_actions):
    """The dot-product network should also emit one score per action."""
    obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
        7, feature_dim, num_actions)
    net = gafn.create_feed_forward_dot_product_network(obs_spec, (4, 3, 4),
                                                       (6, 5, 4))
    input_nest = tensor_spec.sample_spec_nest(
        obs_spec, outer_dims=(batch_size,))
    output, _ = net(input_nest)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    output = self.evaluate(output)
    self.assertAllEqual(output.shape, (batch_size, num_actions))

  def testCreateFeedForwardCommonTowerNetworkWithFeatureColumns(
      self, batch_size=2, feature_dim=4, num_actions=3):
    """End-to-end check with string features preprocessed by feature
    columns on both the global and per-arm towers."""
    obs_spec = {
        'global': {
            'dense':
                tensor_spec.TensorSpec(shape=(feature_dim,), dtype=tf.float32),
            'composer':
                tensor_spec.TensorSpec((), tf.string)
        },
        'per_arm': {
            'name': tensor_spec.TensorSpec((num_actions,), tf.string),
            'fruit': tensor_spec.TensorSpec((num_actions,), tf.string)
        }
    }
    columns_dense = tf.feature_column.numeric_column(
        'dense', shape=(feature_dim,))
    columns_composer = tf.feature_column.indicator_column(
        tf.feature_column.categorical_column_with_vocabulary_list(
            'composer', ['wolfgang', 'amadeus', 'mozart']))
    columns_name = tf.feature_column.indicator_column(
        tf.feature_column.categorical_column_with_vocabulary_list(
            'name', ['bob', 'george', 'wanda']))
    columns_fruit = tf.feature_column.indicator_column(
        tf.feature_column.categorical_column_with_vocabulary_list(
            'fruit', ['banana', 'kiwi', 'pear']))
    net = gafn.create_feed_forward_common_tower_network(
        observation_spec=obs_spec,
        global_layers=(4, 3, 2),
        arm_layers=(6, 5, 4),
        common_layers=(7, 6, 5),
        global_preprocessing_combiner=tf.compat.v2.keras.layers.DenseFeatures(
            [columns_dense, columns_composer]),
        arm_preprocessing_combiner=tf.compat.v2.keras.layers.DenseFeatures(
            [columns_name, columns_fruit]))
    input_nest = {
        'global': {
            'dense':
                tf.constant(
                    np.random.rand(batch_size, feature_dim).astype(np.float32)),
            'composer':
                tf.constant(['wolfgang', 'mozart'])
        },
        'per_arm': {
            'name':
                tf.constant([[['george'], ['george'], ['george']],
                             [['bob'], ['bob'], ['bob']]]),
            'fruit':
                tf.constant([[['banana'], ['banana'], ['banana']],
                             [['kiwi'], ['kiwi'], ['kiwi']]])
        }
    }
    output, _ = net(input_nest)
    # tables_initializer is needed for the vocabulary lookup tables.
    self.evaluate([
        tf.compat.v1.global_variables_initializer(),
        tf.compat.v1.tables_initializer()
    ])
    output = self.evaluate(output)
    self.assertAllEqual(output.shape, (batch_size, num_actions))
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner, which discovers the cases above.
  tf.test.main()
| 38.828571 | 80 | 0.670468 |
da897522841f592d4dcb952bfcba8b152b7cbeb0 | 8,424 | py | Python | keystone/common/cache/_memcache_pool.py | BMDan/keystone | 39de8b0a0a34c1645b607449fc1247d5cc11d89d | [
"Apache-2.0"
] | null | null | null | keystone/common/cache/_memcache_pool.py | BMDan/keystone | 39de8b0a0a34c1645b607449fc1247d5cc11d89d | [
"Apache-2.0"
] | null | null | null | keystone/common/cache/_memcache_pool.py | BMDan/keystone | 39de8b0a0a34c1645b607449fc1247d5cc11d89d | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Mirantis Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Thread-safe connection pool for python-memcached."""
# NOTE(yorik-sar): this file is copied between keystone and keystonemiddleware
# and should be kept in sync until we can use external library for this.
import collections
import contextlib
import itertools
import logging
import threading
import time
import memcache
from six.moves import queue
from keystone import exception
from keystone.i18n import _
from keystone.openstack.common import log
LOG = log.getLogger(__name__)
# This 'class' is taken from http://stackoverflow.com/a/22520633/238308
# Don't inherit client from threading.local so that we can reuse clients in
# different threads
_MemcacheClient = type('_MemcacheClient', (object,),
                       dict(memcache.Client.__dict__))
# Items stored in the pool's internal deque: the connection plus the absolute
# time.time() after which the idle connection should be reaped.
_PoolItem = collections.namedtuple('_PoolItem', ['ttl', 'connection'])
class ConnectionPool(queue.Queue):
    """Base connection pool class
    This class implements the basic connection pool logic as an abstract base
    class.  Connections are handed out LIFO (most recently used first) and
    idle connections older than ``unused_timeout`` are reaped on release.
    """
    def __init__(self, maxsize, unused_timeout, conn_get_timeout=None):
        """Initialize the connection pool.
        :param maxsize: maximum number of client connections for the pool
        :type maxsize: int
        :param unused_timeout: idle time to live for unused clients (in
                               seconds). If a client connection object has been
                               in the pool and idle for longer than the
                               unused_timeout, it will be reaped. This is to
                               ensure resources are released as utilization
                               goes down.
        :type unused_timeout: int
        :param conn_get_timeout: maximum time in seconds to wait for a
                                 connection. If set to `None` timeout is
                                 indefinite.
        :type conn_get_timeout: int
        """
        queue.Queue.__init__(self, maxsize)
        self._unused_timeout = unused_timeout
        self._connection_get_timeout = conn_get_timeout
        # Number of connections currently checked out; consumed by _qsize().
        self._acquired = 0
    def _create_connection(self):
        """Returns a connection instance.
        This is called when the pool needs another instance created.
        :returns: a new connection instance
        """
        raise NotImplementedError
    def _destroy_connection(self, conn):
        """Destroy and cleanup a connection instance.
        This is called when the pool wishes to get rid of an existing
        connection. This is the opportunity for a subclass to free up
        resources and cleaup after itself.
        :param conn: the connection object to destroy
        """
        raise NotImplementedError
    def _debug_logger(self, msg, *args, **kwargs):
        # Prefix debug output with the pool id and current thread id so
        # interleaved messages from concurrent users can be told apart.
        if LOG.isEnabledFor(logging.DEBUG):
            thread_id = threading.current_thread().ident
            args = (id(self), thread_id) + args
            prefix = 'Memcached pool %s, thread %s: '
            LOG.debug(prefix + msg, *args, **kwargs)
    @contextlib.contextmanager
    def acquire(self):
        # Context manager: check a connection out of the pool and guarantee
        # it is returned (or destroyed if the pool is full) on exit.
        self._debug_logger('Acquiring connection')
        try:
            conn = self.get(timeout=self._connection_get_timeout)
        except queue.Empty:
            raise exception.UnexpectedError(
                _('Unable to get a connection from pool id %(id)s after '
                  '%(seconds)s seconds.') %
                {'id': id(self), 'seconds': self._connection_get_timeout})
        self._debug_logger('Acquired connection %s', id(conn))
        try:
            yield conn
        finally:
            self._debug_logger('Releasing connection %s', id(conn))
            self._drop_expired_connections()
            try:
                super(ConnectionPool, self).put(conn, block=False)
            except queue.Full:
                # Pool already at capacity: destroy the surplus connection
                # instead of blocking.
                self._debug_logger('Reaping exceeding connection %s', id(conn))
                self._destroy_connection(conn)
    def _qsize(self):
        # Report free slots rather than queued items so Queue.get() blocks
        # only when maxsize connections are simultaneously checked out.
        if self.maxsize:
            return self.maxsize - self._acquired
        else:
            # A value indicating there is always a free connection
            # if maxsize is None or 0
            return 1
    # NOTE(dstanek): stdlib and eventlet Queue implementations
    # have different names for the qsize method. This ensures
    # that we override both of them.
    if not hasattr(queue.Queue, '_qsize'):
        qsize = _qsize
    def _get(self):
        # Called by Queue.get() with the queue mutex held: reuse the most
        # recently returned connection or lazily create a new one.
        if self.queue:
            conn = self.queue.pop().connection
        else:
            conn = self._create_connection()
        self._acquired += 1
        return conn
    def _drop_expired_connections(self):
        """Drop all expired connections from the left (oldest) end of the
        queue; connections are appended on the right by ``_put``."""
        now = time.time()
        while self.queue and self.queue[0].ttl < now:
            conn = self.queue.popleft().connection
            self._debug_logger('Reaping connection %s', id(conn))
            self._destroy_connection(conn)
    def _put(self, conn):
        # Called by Queue.put() with the queue mutex held: stamp the
        # connection with its idle-expiry time and return it to the pool.
        self.queue.append(_PoolItem(
            ttl=time.time() + self._unused_timeout,
            connection=conn,
        ))
        self._acquired -= 1
class MemcacheClientPool(ConnectionPool):
    """Connection pool of python-memcached clients.

    Dead-host state discovered by any pooled client is propagated to the
    other clients through ``_hosts_deaduntil`` so the whole pool agrees on
    which memcached servers to skip.
    """
    def __init__(self, urls, arguments, **kwargs):
        """:param urls: memcached server URLs shared by every pooled client
        :param arguments: keyword arguments forwarded to the memcache client
        """
        ConnectionPool.__init__(self, **kwargs)
        self.urls = urls
        self._arguments = arguments
        # NOTE(morganfainberg): The host objects expect an int for the
        # deaduntil value. Initialize this at 0 for each host with 0 indicating
        # the host is not dead.
        self._hosts_deaduntil = [0] * len(urls)
    def _create_connection(self):
        # Each pooled connection is a full client configured for all hosts.
        return _MemcacheClient(self.urls, **self._arguments)
    def _destroy_connection(self, conn):
        conn.disconnect_all()
    def _get(self):
        conn = ConnectionPool._get(self)
        try:
            # Propagate host state known to us to this client's list
            now = time.time()
            for deaduntil, host in zip(self._hosts_deaduntil, conn.servers):
                if deaduntil > now and host.deaduntil <= now:
                    host.mark_dead('propagating death mark from the pool')
                    host.deaduntil = deaduntil
        except Exception:
            # We need to be sure that connection doesn't leak from the pool.
            # This code runs before we enter context manager's try-finally
            # block, so we need to explicitly release it here
            ConnectionPool._put(self, conn)
            raise
        return conn
    def _put(self, conn):
        try:
            # If this client found that one of the hosts is dead, mark it as
            # such in our internal list
            now = time.time()
            # enumerate() replaces the original zip(itertools.count(), ...)
            # idiom -- it yields the same (index, host) pairs.
            for i, host in enumerate(conn.servers):
                deaduntil = self._hosts_deaduntil[i]
                # Do nothing if we already know this host is dead
                if deaduntil <= now:
                    if host.deaduntil > now:
                        self._hosts_deaduntil[i] = host.deaduntil
                        self._debug_logger(
                            'Marked host %s dead until %s',
                            self.urls[i], host.deaduntil)
                    else:
                        self._hosts_deaduntil[i] = 0
            # If all hosts are dead we should forget that they're dead. This
            # way we won't get completely shut off until dead_retry seconds
            # pass, but will be checking servers as frequent as we can (over
            # way smaller socket_timeout)
            if all(deaduntil > now for deaduntil in self._hosts_deaduntil):
                self._debug_logger('All hosts are dead. Marking them as live.')
                self._hosts_deaduntil[:] = [0] * len(self._hosts_deaduntil)
        finally:
            ConnectionPool._put(self, conn)
a0e88b50dd3601f4ffabdf4a7befb08e73f62c99 | 46,323 | py | Python | python/test/annotators.py | mehta-sandip/spark-nlp | 13a27cd3d885ec49d2e692a6a1270883c9f97003 | [
"Apache-2.0"
] | 1 | 2020-08-20T00:07:20.000Z | 2020-08-20T00:07:20.000Z | python/test/annotators.py | mehta-sandip/spark-nlp | 13a27cd3d885ec49d2e692a6a1270883c9f97003 | [
"Apache-2.0"
] | null | null | null | python/test/annotators.py | mehta-sandip/spark-nlp | 13a27cd3d885ec49d2e692a6a1270883c9f97003 | [
"Apache-2.0"
] | null | null | null | import re
import unittest
import os
from sparknlp.annotator import *
from sparknlp.base import *
from test.util import SparkContextForTest
from test.util import SparkSessionForTest
from pyspark.ml.feature import SQLTransformer
from pyspark.ml.clustering import KMeans
class BasicAnnotatorsTestSpec(unittest.TestCase):
    """Runs the basic annotator chain (tokenize -> stem -> normalize ->
    reassemble) stage by stage and displays the finished output."""

    def setUp(self):
        # This implicitly sets up py4j for us
        self.data = SparkContextForTest.data

    def runTest(self):
        assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        tokenizer = Tokenizer() \
            .setInputCols(["document"]) \
            .setOutputCol("token") \
            .setExceptions(["New York"]) \
            .addInfixPattern("(%\\d+)")
        stemmer = Stemmer() \
            .setInputCols(["token"]) \
            .setOutputCol("stem")
        normalizer = Normalizer() \
            .setInputCols(["stem"]) \
            .setOutputCol("normalize")
        token_assembler = TokenAssembler() \
            .setInputCols(["document", "normalize"]) \
            .setOutputCol("assembled")
        finisher = Finisher() \
            .setInputCols(["assembled"]) \
            .setOutputCols(["reassembled_view"]) \
            .setCleanAnnotations(True)
        # Apply each stage manually instead of assembling a Pipeline.
        df = assembler.transform(self.data)
        df = tokenizer.fit(df).transform(df)
        df = stemmer.transform(df)
        df = normalizer.fit(df).transform(df)
        df = token_assembler.transform(df)
        finisher.transform(df).show()
class RegexMatcherTestSpec(unittest.TestCase):
    """Matches every rule from the external rules file against the document."""

    def setUp(self):
        # This implicitly sets up py4j for us
        self.data = SparkContextForTest.data

    def runTest(self):
        rules_path = "file:///" + os.getcwd() + "/../src/test/resources/regex-matcher/rules.txt"
        assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        matcher = RegexMatcher() \
            .setInputCols(['document']) \
            .setStrategy("MATCH_ALL") \
            .setExternalRules(path=rules_path, delimiter=",") \
            .setOutputCol("regex")
        docs = assembler.transform(self.data)
        matcher.fit(docs).transform(docs).show()
class LemmatizerTestSpec(unittest.TestCase):
    """Fits a dictionary-based lemmatizer and shows its output."""

    def setUp(self):
        self.data = SparkContextForTest.data

    def runTest(self):
        dict_path = "file:///" + os.getcwd() + "/../src/test/resources/lemma-corpus-small/lemmas_small.txt"
        assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        tokenizer = Tokenizer() \
            .setInputCols(["document"]) \
            .setOutputCol("token")
        lemmatizer = Lemmatizer() \
            .setInputCols(["token"]) \
            .setOutputCol("lemma") \
            .setDictionary(path=dict_path, key_delimiter="->", value_delimiter="\t")
        docs = assembler.transform(self.data)
        tokens = tokenizer.fit(docs).transform(docs)
        lemmatizer.fit(tokens).transform(tokens).show()
class TokenizerTestSpec(unittest.TestCase):
    """Checks tokenizer infix splitting together with min/max token length."""

    def setUp(self):
        self.session = SparkContextForTest.spark

    def runTest(self):
        data = self.session.createDataFrame([("this is some/text I wrote",)], ["text"])
        assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        tokenizer = Tokenizer() \
            .setInputCols(["document"]) \
            .setOutputCol("token") \
            .addInfixPattern("(\\p{L}+)(\\/)(\\p{L}+\\b)") \
            .setMinLength(3) \
            .setMaxLength(6)
        finisher = Finisher() \
            .setInputCols(["token"]) \
            .setOutputCols(["token_out"]) \
            .setOutputAsArray(True)
        docs = assembler.transform(data)
        tokens = tokenizer.fit(docs).transform(docs)
        token_out = finisher.transform(tokens).first()['token_out']
        print(token_out)
        # After splitting "some/text" and applying the length filter,
        # exactly four tokens should remain.
        self.assertEqual(len(token_out), 4)
class ChunkTokenizerTestSpec(unittest.TestCase):
    """Tokenizes the chunks produced by a TextMatcher."""

    def setUp(self):
        self.session = SparkContextForTest.spark

    def runTest(self):
        stages = [
            DocumentAssembler()
                .setInputCol("text")
                .setOutputCol("document"),
            Tokenizer()
                .setInputCols(["document"])
                .setOutputCol("token"),
            TextMatcher()
                .setInputCols(['document', 'token'])
                .setOutputCol("entity")
                .setEntities(path="file:///" + os.getcwd() + "/../src/test/resources/entity-extractor/test-chunks.txt"),
            ChunkTokenizer()
                .setInputCols(['entity'])
                .setOutputCol('chunk_token')
        ]
        data = self.session.createDataFrame([
            ["Hello world, my name is Michael, I am an artist and I work at Benezar"],
            ["Robert, an engineer from Farendell, graduated last year. The other one, Lucas, graduated last week."]
        ]).toDF("text")
        Pipeline(stages=stages).fit(data).transform(data).show()
class NormalizerTestSpec(unittest.TestCase):
    """Applies token normalization with lowercasing disabled."""

    def setUp(self):
        self.data = SparkContextForTest.data

    def runTest(self):
        assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        tokenizer = Tokenizer() \
            .setInputCols(["document"]) \
            .setOutputCol("token")
        normalizer = Normalizer() \
            .setInputCols(["token"]) \
            .setOutputCol("normalized_token") \
            .setLowercase(False)
        docs = assembler.transform(self.data)
        tokens = tokenizer.fit(docs).transform(docs)
        normalizer.transform(tokens).show()
class DateMatcherTestSpec(unittest.TestCase):
    """Extracts dates from text, formatted as yyyyMM."""

    def setUp(self):
        self.data = SparkContextForTest.data

    def runTest(self):
        assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        matcher = DateMatcher() \
            .setInputCols(['document']) \
            .setOutputCol("date") \
            .setFormat("yyyyMM")
        matcher.transform(assembler.transform(self.data)).show()
class TextMatcherTestSpec(unittest.TestCase):
    """Extracts the entities listed in an external phrase file."""

    def setUp(self):
        self.data = SparkContextForTest.data

    def runTest(self):
        assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        tokenizer = Tokenizer() \
            .setInputCols(["document"]) \
            .setOutputCol("token")
        extractor = TextMatcher() \
            .setInputCols(['document', 'token']) \
            .setOutputCol("entity") \
            .setEntities(path="file:///" + os.getcwd() + "/../src/test/resources/entity-extractor/test-phrases.txt")
        docs = assembler.transform(self.data)
        tokens = tokenizer.fit(docs).transform(docs)
        extractor.fit(tokens).transform(tokens).show()
class PerceptronApproachTestSpec(unittest.TestCase):
    """Trains an averaged-perceptron POS tagger on a small corpus and tags
    the default test data with it."""

    def setUp(self):
        from sparknlp.training import POS
        self.data = SparkContextForTest.data
        # Training frame derived from the pipe-delimited ANC POS corpus.
        self.train = POS().readDataset(SparkContextForTest.spark,
                                       os.getcwd() + "/../src/test/resources/anc-pos-corpus-small/test-training.txt",
                                       delimiter="|", outputPosCol="tags", outputDocumentCol="document",
                                       outputTextCol="text")

    def runTest(self):
        assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        sentence_detector = SentenceDetector() \
            .setInputCols(["document"]) \
            .setOutputCol("sentence")
        tokenizer = Tokenizer() \
            .setInputCols(["sentence"]) \
            .setOutputCol("token")
        tagger = PerceptronApproach() \
            .setInputCols(["token", "sentence"]) \
            .setOutputCol("pos") \
            .setIterations(1) \
            .fit(self.train)
        df = assembler.transform(self.data)
        df = sentence_detector.transform(df)
        df = tokenizer.fit(df).transform(df)
        tagger.transform(df).show()
class ChunkerTestSpec(unittest.TestCase):
    """Builds POS tags and extracts noun-phrase chunks via regex parsers."""

    def setUp(self):
        from sparknlp.training import POS
        self.data = SparkContextForTest.data
        self.train_pos = POS().readDataset(SparkContextForTest.spark,
                                           os.getcwd() + "/../src/test/resources/anc-pos-corpus-small/test-training.txt",
                                           delimiter="|", outputPosCol="tags", outputDocumentCol="document",
                                           outputTextCol="text")

    def runTest(self):
        assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        sentence_detector = SentenceDetector() \
            .setInputCols(["document"]) \
            .setOutputCol("sentence")
        tokenizer = Tokenizer() \
            .setInputCols(["sentence"]) \
            .setOutputCol("token")
        tagger = PerceptronApproach() \
            .setInputCols(["token", "sentence"]) \
            .setOutputCol("pos") \
            .setIterations(3) \
            .fit(self.train_pos)
        chunker = Chunker() \
            .setInputCols(["sentence", "pos"]) \
            .setOutputCol("chunk") \
            .setRegexParsers(["<NNP>+", "<DT|PP\\$>?<JJ>*<NN>"])
        df = assembler.transform(self.data)
        df = sentence_detector.transform(df)
        df = tokenizer.fit(df).transform(df)
        df = tagger.transform(df)
        chunker.transform(df).show()
class PragmaticSBDTestSpec(unittest.TestCase):
    """Sentence detection with a custom bound plus split/length constraints."""

    def setUp(self):
        self.data = SparkContextForTest.data

    def runTest(self):
        assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        detector = SentenceDetector() \
            .setInputCols(["document"]) \
            .setOutputCol("sentence") \
            .setCustomBounds(["%%"]) \
            .setSplitLength(235) \
            .setMinLength(4) \
            .setMaxLength(50)
        detector.transform(assembler.transform(self.data)).show()
class DeepSentenceDetectorTestSpec(unittest.TestCase):
    """Trains a small NerDL model on an unpunctuated CoNLL corpus and uses
    its entities to drive DeepSentenceDetector (with pragmatic fallback)."""
    def setUp(self):
        from sparknlp.training import CoNLL
        self.data = SparkContextForTest.data
        # 100-dimensional test embeddings, reused for training and tagging.
        self.embeddings = os.getcwd() + "/../src/test/resources/ner-corpus/embeddings.100d.test.txt"
        external_dataset = os.getcwd() + "/../src/test/resources/ner-corpus/sentence-detector/unpunctuated_dataset.txt"
        self.training_set = CoNLL().readDataset(SparkContextForTest.spark, external_dataset)
    def runTest(self):
        glove = WordEmbeddings() \
            .setInputCols(["document", "token"]) \
            .setOutputCol("glove") \
            .setStoragePath(self.embeddings, "TEXT") \
            .setStorageRef('embeddings_100') \
            .setDimension(100)
        # Hyperparameters are tuned for this tiny corpus; the fixed random
        # seed keeps the training result deterministic.
        ner_tagger = NerDLApproach() \
            .setInputCols(["document", "token", "glove"]) \
            .setLabelColumn("label") \
            .setOutputCol("ner") \
            .setMaxEpochs(100) \
            .setPo(0.01) \
            .setLr(0.1) \
            .setBatchSize(9) \
            .setRandomSeed(0)
        ner_converter = NerConverter() \
            .setInputCols(["document", "token", "ner"]) \
            .setOutputCol("ner_con")
        deep_sentence_detector = DeepSentenceDetector() \
            .setInputCols(["document", "token", "ner_con"]) \
            .setOutputCol("sentence") \
            .setIncludePragmaticSegmenter(True) \
            .setEndPunctuation([".", "?"])
        # Train on the CoNLL set, then run the full chain over the same set.
        embedded_training_set = glove.fit(self.training_set).transform(self.training_set)
        ner_tagged = ner_tagger.fit(embedded_training_set).transform(embedded_training_set)
        ner_converted = ner_converter.transform(ner_tagged)
        deep_sentence_detected = deep_sentence_detector.transform(ner_converted)
        deep_sentence_detected.show()
class PragmaticScorerTestSpec(unittest.TestCase):
    """Scores sentiment over lemmas with a dictionary-based detector."""

    def setUp(self):
        self.data = SparkContextForTest.data

    def runTest(self):
        lemmas_path = "file:///" + os.getcwd() + "/../src/test/resources/lemma-corpus-small/lemmas_small.txt"
        sentiment_path = "file:///" + os.getcwd() + "/../src/test/resources/sentiment-corpus/default-sentiment-dict.txt"
        assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        sentence_detector = SentenceDetector() \
            .setInputCols(["document"]) \
            .setOutputCol("sentence")
        tokenizer = Tokenizer() \
            .setInputCols(["sentence"]) \
            .setOutputCol("token")
        lemmatizer = Lemmatizer() \
            .setInputCols(["token"]) \
            .setOutputCol("lemma") \
            .setDictionary(path=lemmas_path, key_delimiter="->", value_delimiter="\t")
        sentiment_detector = SentimentDetector() \
            .setInputCols(["lemma", "sentence"]) \
            .setOutputCol("sentiment") \
            .setDictionary(sentiment_path, delimiter=",")
        df = assembler.transform(self.data)
        df = sentence_detector.transform(df)
        df = tokenizer.fit(df).transform(df)
        df = lemmatizer.fit(df).transform(df)
        sentiment_detector.fit(df).transform(df).show()
class DeepSentenceDetectorPipelinePersistenceTestSpec(unittest.TestCase):
    """A pipeline holding a DeepSentenceDetector must survive a save/load
    round trip."""

    @staticmethod
    def runTest():
        pipeline = Pipeline(stages=[DeepSentenceDetector()])
        pipe_path = "file:///" + os.getcwd() + "/tmp_pipeline"
        pipeline.write().overwrite().save(pipe_path)
        loaded_pipeline = Pipeline.read().load(pipe_path)
        # The original `if loaded_pipeline: assert True` could never fail;
        # assert the load result directly instead.
        assert loaded_pipeline is not None
class PipelineTestSpec(unittest.TestCase):
    """End-to-end pipeline test: fit, save/load the pipeline and the fitted
    model, and exercise LightPipeline annotate/fullAnnotate variants."""
    def setUp(self):
        self.data = SparkContextForTest.data
    def runTest(self):
        document_assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        tokenizer = Tokenizer() \
            .setInputCols(["document"]) \
            .setOutputCol("token")
        lemmatizer = Lemmatizer() \
            .setInputCols(["token"]) \
            .setOutputCol("lemma") \
            .setDictionary("file:///" + os.getcwd() + "/../src/test/resources/lemma-corpus-small/simple.txt",
                           key_delimiter="->", value_delimiter="\t")
        finisher = Finisher() \
            .setInputCols(["token", "lemma"]) \
            .setOutputCols(["token_views", "lemma_views"]) \
            .setOutputAsArray(False) \
            .setAnnotationSplitSymbol('@') \
            .setValueSplitSymbol('#')
        pipeline = Pipeline(stages=[document_assembler, tokenizer, lemmatizer, finisher])
        model = pipeline.fit(self.data)
        # Third annotation (index 2) of the first row, split on the '@'
        # annotation separator configured on the finisher above.
        token_before_save = model.transform(self.data).select("token_views").take(1)[0].token_views.split("@")[2]
        lemma_before_save = model.transform(self.data).select("lemma_views").take(1)[0].lemma_views.split("@")[2]
        pipe_path = "file:///" + os.getcwd() + "/tmp_pipeline"
        pipeline.write().overwrite().save(pipe_path)
        loaded_pipeline = Pipeline.read().load(pipe_path)
        # NOTE(review): the "after save" values are computed from the same
        # in-memory `model`, not from `loaded_pipeline` -- confirm intent.
        token_after_save = model.transform(self.data).select("token_views").take(1)[0].token_views.split("@")[2]
        lemma_after_save = model.transform(self.data).select("lemma_views").take(1)[0].lemma_views.split("@")[2]
        assert token_before_save == "sad"
        assert lemma_before_save == "unsad"
        assert token_after_save == token_before_save
        assert lemma_after_save == lemma_before_save
        # Re-fit the loaded (unfitted) pipeline and round-trip the fitted
        # PipelineModel through disk as well.
        pipeline_model = loaded_pipeline.fit(self.data)
        pipeline_model.transform(self.data).show()
        pipeline_model.write().overwrite().save(pipe_path)
        loaded_model = PipelineModel.read().load(pipe_path)
        loaded_model.transform(self.data).show()
        # LightPipeline over plain python strings extracted from the frame.
        locdata = list(map(lambda d: d[0], self.data.select("text").collect()))
        spless = LightPipeline(loaded_model).annotate(locdata)
        fullSpless = LightPipeline(loaded_model).fullAnnotate(locdata)
        for row in spless[:2]:
            for _, annotations in row.items():
                for annotation in annotations[:2]:
                    print(annotation)
        for row in fullSpless[:5]:
            for _, annotations in row.items():
                for annotation in annotations[:2]:
                    print(annotation.result)
        single = LightPipeline(loaded_model).annotate("Joe was running under the rain.")
        print(single)
        assert single["lemma"][2] == "run"
class SpellCheckerTestSpec(unittest.TestCase):
    """Trains a Norvig-Sweeting spell checker and applies it to test data."""

    def setUp(self):
        self.prediction_data = SparkContextForTest.data
        text_file = "file:///" + os.getcwd() + "/../src/test/resources/spell/sherlockholmes.txt"
        self.train_data = SparkContextForTest.spark.read.text(text_file) \
            .withColumnRenamed("value", "text")

    def runTest(self):
        stages = [
            DocumentAssembler()
                .setInputCol("text")
                .setOutputCol("document"),
            Tokenizer()
                .setInputCols(["document"])
                .setOutputCol("token"),
            NorvigSweetingApproach()
                .setInputCols(["token"])
                .setOutputCol("spell")
                .setDictionary("file:///" + os.getcwd() + "/../src/test/resources/spell/words.txt")
        ]
        model = Pipeline(stages=stages).fit(self.train_data)
        model.transform(self.prediction_data).show()
class SymmetricDeleteTestSpec(unittest.TestCase):
    """Trains a SymmetricDelete spell checker and applies it to test data."""

    def setUp(self):
        self.prediction_data = SparkContextForTest.data
        text_file = "file:///" + os.getcwd() + "/../src/test/resources/spell/sherlockholmes.txt"
        self.train_data = SparkContextForTest.spark.read.text(text_file) \
            .withColumnRenamed("value", "text")

    def runTest(self):
        stages = [
            DocumentAssembler()
                .setInputCol("text")
                .setOutputCol("document"),
            Tokenizer()
                .setInputCols(["document"])
                .setOutputCol("token"),
            SymmetricDeleteApproach()
                .setInputCols(["token"])
                .setOutputCol("symmspell")
        ]
        model = Pipeline(stages=stages).fit(self.train_data)
        model.transform(self.prediction_data).show()
class ContextSpellCheckerTestSpec(unittest.TestCase):
    """Applies the pretrained context-aware spell checker to the shared data."""

    def setUp(self):
        corpus_path = "file:///" + os.getcwd() + "/../src/test/resources/spell/sherlockholmes.txt"
        self.prediction_data = SparkContextForTest.data
        self.train_data = SparkContextForTest.spark.read.text(corpus_path) \
            .withColumnRenamed("value", "text")

    def runTest(self):
        checker = ContextSpellCheckerModel.pretrained('spellcheck_dl', 'en') \
            .setInputCols("token") \
            .setOutputCol("checked")
        stages = [
            DocumentAssembler().setInputCol("text").setOutputCol("document"),
            Tokenizer().setInputCols(["document"]).setOutputCol("token"),
            checker,
        ]
        model = Pipeline(stages=stages).fit(self.train_data)
        model.transform(self.prediction_data).show()
class ParamsGettersTestSpec(unittest.TestCase):
    """Checks that every Param declared by a set of annotators has a matching
    camel-cased get<Param>() accessor, then spot-checks explicit and default
    getters."""

    @staticmethod
    def runTest():
        annotators = [DocumentAssembler, PerceptronApproach, Lemmatizer, TokenAssembler, NorvigSweetingApproach]
        for annotator in annotators:
            a = annotator()
            for param in a.params:
                param_name = param.name
                # e.g. "input_col" -> "InputCol" so the accessor is "getInputCol".
                camelized_param = re.sub(r"(?:^|_)(.)", lambda m: m.group(1).upper(), param_name)
                assert hasattr(a, param_name)
                # FIX: the original asserted `value is None or value is not None`,
                # which is vacuously true. The meaningful check is that the
                # generated getter exists and can be invoked without raising.
                getattr(a, "get" + camelized_param)()
        # Try a getter
        sentence_detector = SentenceDetector() \
            .setInputCols(["document"]) \
            .setOutputCol("sentence") \
            .setCustomBounds(["%%"])
        assert sentence_detector.getOutputCol() == "sentence"
        assert sentence_detector.getCustomBounds() == ["%%"]
        # Try a default getter
        document_assembler = DocumentAssembler()
        assert document_assembler.getOutputCol() == "document"
class DependencyParserTreeBankTestSpec(unittest.TestCase):
    """Trains an unlabeled dependency parser from a dependency treebank and
    parses a single example sentence."""

    def setUp(self):
        from sparknlp.training import POS
        self.data = SparkContextForTest.spark \
            .createDataFrame([["I saw a girl with a telescope"]]).toDF("text")
        self.corpus = os.getcwd() + "/../src/test/resources/anc-pos-corpus-small/"
        self.dependency_treebank = os.getcwd() + "/../src/test/resources/parser/unlabeled/dependency_treebank"
        pos_corpus = os.getcwd() + "/../src/test/resources/anc-pos-corpus-small/test-training.txt"
        self.train_pos = POS().readDataset(SparkContextForTest.spark, pos_corpus,
                                           delimiter="|", outputPosCol="tags",
                                           outputDocumentCol="document", outputTextCol="text")

    def runTest(self):
        assembled = DocumentAssembler() \
            .setInputCol("text").setOutputCol("document") \
            .transform(self.data)
        sentenced = SentenceDetector() \
            .setInputCols(["document"]).setOutputCol("sentence") \
            .transform(assembled)
        tokenizer = Tokenizer().setInputCols(["sentence"]).setOutputCol("token")
        tokenized = tokenizer.fit(sentenced).transform(sentenced)
        pos_tagged = PerceptronApproach() \
            .setInputCols(["token", "sentence"]).setOutputCol("pos").setIterations(1) \
            .fit(self.train_pos) \
            .transform(tokenized)
        parser = DependencyParserApproach() \
            .setInputCols(["sentence", "pos", "token"]) \
            .setOutputCol("dependency") \
            .setDependencyTreeBank(self.dependency_treebank) \
            .setNumberOfIterations(10)
        parser.fit(pos_tagged).transform(pos_tagged).show()
class DependencyParserConllUTestSpec(unittest.TestCase):
    """Trains an unlabeled dependency parser from CoNLL-U training data and
    parses a single example sentence."""

    def setUp(self):
        from sparknlp.training import POS
        self.data = SparkContextForTest.spark \
            .createDataFrame([["I saw a girl with a telescope"]]).toDF("text")
        self.corpus = os.getcwd() + "/../src/test/resources/anc-pos-corpus-small/"
        self.conllu = os.getcwd() + "/../src/test/resources/parser/unlabeled/conll-u/train_small.conllu.txt"
        pos_corpus = os.getcwd() + "/../src/test/resources/anc-pos-corpus-small/test-training.txt"
        self.train_pos = POS().readDataset(SparkContextForTest.spark, pos_corpus,
                                           delimiter="|", outputPosCol="tags",
                                           outputDocumentCol="document", outputTextCol="text")

    def runTest(self):
        assembled = DocumentAssembler() \
            .setInputCol("text").setOutputCol("document") \
            .transform(self.data)
        sentenced = SentenceDetector() \
            .setInputCols(["document"]).setOutputCol("sentence") \
            .transform(assembled)
        tokenizer = Tokenizer().setInputCols(["sentence"]).setOutputCol("token")
        tokenized = tokenizer.fit(sentenced).transform(sentenced)
        pos_tagged = PerceptronApproach() \
            .setInputCols(["token", "sentence"]).setOutputCol("pos").setIterations(1) \
            .fit(self.train_pos) \
            .transform(tokenized)
        parser = DependencyParserApproach() \
            .setInputCols(["sentence", "pos", "token"]) \
            .setOutputCol("dependency") \
            .setConllU(self.conllu) \
            .setNumberOfIterations(10)
        parser.fit(pos_tagged).transform(pos_tagged).show()
class TypedDependencyParserConllUTestSpec(unittest.TestCase):
    """Trains an unlabeled dependency parser and a typed (labeled) dependency
    parser from CoNLL-U resources and runs both on one example sentence."""

    def setUp(self):
        self.data = SparkContextForTest.spark \
            .createDataFrame([["I saw a girl with a telescope"]]).toDF("text")
        self.corpus = os.getcwd() + "/../src/test/resources/anc-pos-corpus-small/"
        # FIX: the original assigned the unlabeled CoNLL-U path here and then
        # immediately overwrote it; only the labeled training file was ever used,
        # so the dead first assignment has been removed.
        self.conllu = os.getcwd() + "/../src/test/resources/parser/labeled/train_small.conllu.txt"
        from sparknlp.training import POS
        self.train_pos = POS().readDataset(SparkContextForTest.spark,
                                           os.getcwd() + "/../src/test/resources/anc-pos-corpus-small/test-training.txt",
                                           delimiter="|", outputPosCol="tags", outputDocumentCol="document",
                                           outputTextCol="text")

    def runTest(self):
        document_assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        sentence_detector = SentenceDetector() \
            .setInputCols(["document"]) \
            .setOutputCol("sentence")
        tokenizer = Tokenizer() \
            .setInputCols(["sentence"]) \
            .setOutputCol("token")
        # POS model trained in setUp's small corpus; one iteration keeps the test fast.
        pos_tagger = PerceptronApproach() \
            .setInputCols(["token", "sentence"]) \
            .setOutputCol("pos") \
            .setIterations(1) \
            .fit(self.train_pos)
        dependency_parser = DependencyParserApproach() \
            .setInputCols(["sentence", "pos", "token"]) \
            .setOutputCol("dependency") \
            .setConllU(self.conllu) \
            .setNumberOfIterations(10)
        typed_dependency_parser = TypedDependencyParserApproach() \
            .setInputCols(["token", "pos", "dependency"]) \
            .setOutputCol("labdep") \
            .setConllU(self.conllu) \
            .setNumberOfIterations(10)
        assembled = document_assembler.transform(self.data)
        sentenced = sentence_detector.transform(assembled)
        tokenized = tokenizer.fit(sentenced).transform(sentenced)
        pos_tagged = pos_tagger.transform(tokenized)
        dependency_parsed = dependency_parser.fit(pos_tagged).transform(pos_tagged)
        typed_dependency_parsed = typed_dependency_parser.fit(dependency_parsed).transform(dependency_parsed)
        typed_dependency_parsed.show()
class TypedDependencyParserConll2009TestSpec(unittest.TestCase):
    """Trains an unlabeled parser from a dependency treebank and a typed parser
    from CoNLL-2009 data, then runs both on one example sentence."""

    def setUp(self):
        from sparknlp.training import POS
        self.data = SparkContextForTest.spark \
            .createDataFrame([["I saw a girl with a telescope"]]).toDF("text")
        self.corpus = os.getcwd() + "/../src/test/resources/anc-pos-corpus-small/"
        self.tree_bank = os.getcwd() + "/../src/test/resources/parser/unlabeled/dependency_treebank"
        self.conll2009 = os.getcwd() + "/../src/test/resources/parser/labeled/example.train.conll2009"
        pos_corpus = os.getcwd() + "/../src/test/resources/anc-pos-corpus-small/test-training.txt"
        self.train_pos = POS().readDataset(SparkContextForTest.spark, pos_corpus,
                                           delimiter="|", outputPosCol="tags",
                                           outputDocumentCol="document", outputTextCol="text")

    def runTest(self):
        assembled = DocumentAssembler() \
            .setInputCol("text").setOutputCol("document") \
            .transform(self.data)
        sentenced = SentenceDetector() \
            .setInputCols(["document"]).setOutputCol("sentence") \
            .transform(assembled)
        tokenizer = Tokenizer().setInputCols(["sentence"]).setOutputCol("token")
        tokenized = tokenizer.fit(sentenced).transform(sentenced)
        pos_tagged = PerceptronApproach() \
            .setInputCols(["token", "sentence"]).setOutputCol("pos").setIterations(1) \
            .fit(self.train_pos) \
            .transform(tokenized)
        dependency_parser = DependencyParserApproach() \
            .setInputCols(["sentence", "pos", "token"]) \
            .setOutputCol("dependency") \
            .setDependencyTreeBank(self.tree_bank) \
            .setNumberOfIterations(10)
        dependency_parsed = dependency_parser.fit(pos_tagged).transform(pos_tagged)
        typed_parser = TypedDependencyParserApproach() \
            .setInputCols(["token", "pos", "dependency"]) \
            .setOutputCol("labdep") \
            .setConll2009(self.conll2009) \
            .setNumberOfIterations(10)
        typed_parser.fit(dependency_parsed).transform(dependency_parsed).show()
class ChunkDocSerializingTestSpec(unittest.TestCase):
    """Builds a pipeline that converts matched chunks to documents and back,
    then verifies the fitted PipelineModel survives a save/load round trip."""
    def setUp(self):
        self.data = SparkContextForTest.spark \
            .createDataFrame([["I saw a girl with a telescope"]]).toDF("text")
    def runTest(self):
        document_assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        tokenizer = Tokenizer() \
            .setInputCols(["document"]) \
            .setOutputCol("token")
        # NOTE(review): TextMatcher is created without setInputCols(); it
        # presumably relies on its default input columns -- confirm this is
        # intentional rather than an omission.
        entity_extractor = TextMatcher() \
            .setOutputCol("entity") \
            .setEntities(path="file:///" + os.getcwd() + "/../src/test/resources/entity-extractor/test-chunks.txt")
        chunk2doc = Chunk2Doc() \
            .setInputCols(['entity']) \
            .setOutputCol('entity_doc')
        doc2chunk = Doc2Chunk() \
            .setInputCols(['entity_doc']) \
            .setOutputCol('entity_rechunk')
        pipeline = Pipeline(stages=[
            document_assembler,
            tokenizer,
            entity_extractor,
            chunk2doc,
            doc2chunk
        ])
        model = pipeline.fit(self.data)
        # Round-trip the fitted pipeline through disk; load must not raise.
        pipe_path = "file:///" + os.getcwd() + "/tmp_chunkdoc"
        model.write().overwrite().save(pipe_path)
        PipelineModel.load(pipe_path)
class SentenceEmbeddingsTestSpec(unittest.TestCase):
    """Averages pretrained word embeddings into per-sentence embeddings."""

    def setUp(self):
        csv_path = "file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv"
        self.data = SparkContextForTest.spark.read.option("header", "true").csv(path=csv_path)

    def runTest(self):
        stages = [
            DocumentAssembler().setInputCol("text").setOutputCol("document"),
            SentenceDetector().setInputCols(["document"]).setOutputCol("sentence"),
            Tokenizer().setInputCols(["sentence"]).setOutputCol("token"),
            WordEmbeddingsModel.pretrained()
                .setInputCols(["sentence", "token"])
                .setOutputCol("embeddings"),
            SentenceEmbeddings()
                .setInputCols(["sentence", "embeddings"])
                .setOutputCol("sentence_embeddings")
                .setPoolingStrategy("AVERAGE"),
        ]
        model = Pipeline(stages=stages).fit(self.data)
        model.transform(self.data).show()
class StopWordsCleanerTestSpec(unittest.TestCase):
    """Removes a custom stop-word list ("this", "is") from tokenized sentences."""

    def setUp(self):
        rows = [
            ["This is my first sentence. This is my second."],
            ["This is my third sentence. This is my forth."]]
        self.data = SparkContextForTest.spark.createDataFrame(rows).toDF("text").cache()

    def runTest(self):
        stages = [
            DocumentAssembler().setInputCol("text").setOutputCol("document"),
            SentenceDetector().setInputCols(["document"]).setOutputCol("sentence"),
            Tokenizer().setInputCols(["sentence"]).setOutputCol("token"),
            StopWordsCleaner()
                .setInputCols(["token"])
                .setOutputCol("cleanTokens")
                .setCaseSensitive(False)
                .setStopWords(["this", "is"]),
        ]
        model = Pipeline(stages=stages).fit(self.data)
        model.transform(self.data).select("cleanTokens.result").show()
class NGramGeneratorTestSpec(unittest.TestCase):
    """Generates bigrams (plain and cumulative) from tokenized sentences and
    asserts the exact n-gram output for two fixed input rows."""
    def setUp(self):
        self.data = SparkContextForTest.spark.createDataFrame([
            ["This is my first sentence. This is my second."],
            ["This is my third sentence. This is my forth."]]) \
            .toDF("text").cache()
    def runTest(self):
        document_assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        sentence_detector = SentenceDetector() \
            .setInputCols(["document"]) \
            .setOutputCol("sentence")
        tokenizer = Tokenizer() \
            .setInputCols(["sentence"]) \
            .setOutputCol("token")
        # Plain bigrams only.
        ngrams = NGramGenerator() \
            .setInputCols(["token"]) \
            .setOutputCol("ngrams") \
            .setN(2)
        # Cumulative mode additionally emits all lower-order n-grams (unigrams here).
        ngrams_cum = NGramGenerator() \
            .setInputCols(["token"]) \
            .setOutputCol("ngrams_cum") \
            .setN(2) \
            .setEnableCumulative(True)
        pipeline = Pipeline(stages=[
            document_assembler,
            sentence_detector,
            tokenizer,
            ngrams,
            ngrams_cum,
        ])
        model = pipeline.fit(self.data)
        transformed_data = model.transform(self.data)
        transformed_data.select("ngrams.result", "ngrams_cum.result").show(2, False)
        # Expected values: n-grams never cross sentence boundaries; the
        # sentence-final period counts as a token.
        assert transformed_data.select("ngrams.result").rdd.flatMap(lambda x: x).collect() == \
            [['This is', 'is my', 'my first', 'first sentence', 'sentence .', 'This is', 'is my', 'my second', 'second .'], ['This is', 'is my', 'my third', 'third sentence', 'sentence .', 'This is', 'is my', 'my forth', 'forth .']]
        assert transformed_data.select("ngrams_cum.result").rdd.flatMap(lambda x: x).collect() == \
            [['This', 'is', 'my', 'first', 'sentence', '.', 'This is', 'is my', 'my first', 'first sentence', 'sentence .', 'This', 'is', 'my', 'second', '.', 'This is', 'is my', 'my second', 'second .'], ['This', 'is', 'my', 'third', 'sentence', '.', 'This is', 'is my', 'my third', 'third sentence', 'sentence .', 'This', 'is', 'my', 'forth', '.', 'This is', 'is my', 'my forth', 'forth .']]
class ChunkEmbeddingsTestSpec(unittest.TestCase):
    """Averages word embeddings over regex-matched POS chunks."""

    def setUp(self):
        csv_path = "file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv"
        self.data = SparkContextForTest.spark.read.option("header", "true").csv(path=csv_path)

    def runTest(self):
        stages = [
            DocumentAssembler().setInputCol("text").setOutputCol("document"),
            SentenceDetector().setInputCols(["document"]).setOutputCol("sentence"),
            Tokenizer().setInputCols(["sentence"]).setOutputCol("token"),
            PerceptronModel.pretrained()
                .setInputCols(["token", "sentence"])
                .setOutputCol("pos"),
            # Chunk noun phrases: optional determiner, any adjectives, one or more nouns.
            Chunker()
                .setInputCols(["sentence", "pos"])
                .setOutputCol("chunk")
                .setRegexParsers(["<DT>?<JJ>*<NN>+"]),
            WordEmbeddingsModel.pretrained()
                .setInputCols(["sentence", "token"])
                .setOutputCol("embeddings"),
            ChunkEmbeddings()
                .setInputCols(["chunk", "embeddings"])
                .setOutputCol("chunk_embeddings")
                .setPoolingStrategy("AVERAGE"),
        ]
        model = Pipeline(stages=stages).fit(self.data)
        model.transform(self.data).show()
class EmbeddingsFinisherTestSpec(unittest.TestCase):
    """Finishes sentence embeddings as Spark ML vectors and clusters them with KMeans."""

    def setUp(self):
        csv_path = "file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv"
        self.data = SparkContextForTest.spark.read.option("header", "true").csv(path=csv_path)

    def runTest(self):
        stages = [
            DocumentAssembler().setInputCol("text").setOutputCol("document"),
            SentenceDetector().setInputCols(["document"]).setOutputCol("sentence"),
            Tokenizer().setInputCols(["sentence"]).setOutputCol("token"),
            WordEmbeddingsModel.pretrained()
                .setInputCols(["sentence", "token"])
                .setOutputCol("embeddings"),
            SentenceEmbeddings()
                .setInputCols(["sentence", "embeddings"])
                .setOutputCol("sentence_embeddings")
                .setPoolingStrategy("AVERAGE"),
            EmbeddingsFinisher()
                .setInputCols("sentence_embeddings")
                .setOutputCols("sentence_embeddings_vectors")
                .setOutputAsVector(True),
            # Explode the array of finished vectors into one row per vector.
            SQLTransformer(statement="SELECT EXPLODE(sentence_embeddings_vectors) AS features, * FROM __THIS__"),
            KMeans().setK(2).setSeed(1).setFeaturesCol("features"),
        ]
        model = Pipeline(stages=stages).fit(self.data)
        model.transform(self.data).show()
class UniversalSentenceEncoderTestSpec(unittest.TestCase):
    """Encodes detected sentences with the pretrained Universal Sentence Encoder."""

    def setUp(self):
        csv_path = "file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv"
        self.data = SparkSessionForTest.spark.read.option("header", "true").csv(path=csv_path)

    def runTest(self):
        stages = [
            DocumentAssembler().setInputCol("text").setOutputCol("document"),
            SentenceDetector().setInputCols(["document"]).setOutputCol("sentence"),
            UniversalSentenceEncoder.pretrained()
                .setInputCols("sentence")
                .setOutputCol("sentence_embeddings"),
        ]
        model = Pipeline(stages=stages).fit(self.data)
        model.transform(self.data).show()
class ElmoEmbeddingsTestSpec(unittest.TestCase):
    """Runs pretrained ELMo embeddings (word_emb layer) over the CSV data."""

    def setUp(self):
        csv_path = "file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv"
        self.data = SparkContextForTest.spark.read.option("header", "true").csv(path=csv_path)

    def runTest(self):
        stages = [
            DocumentAssembler().setInputCol("text").setOutputCol("document"),
            SentenceDetector().setInputCols(["document"]).setOutputCol("sentence"),
            Tokenizer().setInputCols(["sentence"]).setOutputCol("token"),
            ElmoEmbeddings.pretrained()
                .setInputCols(["sentence", "token"])
                .setOutputCol("embeddings")
                .setPoolingLayer("word_emb"),
        ]
        model = Pipeline(stages=stages).fit(self.data)
        model.transform(self.data).show()
class ClassifierDLTestSpec(unittest.TestCase):
    """Trains ClassifierDL on USE sentence embeddings, then saves the trained
    stage and reloads it as a ClassifierDLModel."""

    def setUp(self):
        self.data = SparkSessionForTest.spark.read.option("header", "true") \
            .csv(path="file:///" + os.getcwd() + "/../src/test/resources/classifier/sentiment.csv")

    def runTest(self):
        document_assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        sentence_embeddings = UniversalSentenceEncoder.pretrained() \
            .setInputCols("document") \
            .setOutputCol("sentence_embeddings")
        classifier = ClassifierDLApproach() \
            .setInputCols("sentence_embeddings") \
            .setOutputCol("category") \
            .setLabelColumn("label")
        pipeline = Pipeline(stages=[
            document_assembler,
            sentence_embeddings,
            classifier
        ])
        model = pipeline.fit(self.data)
        # Round-trip the trained classifier stage through disk to verify serialization.
        model.stages[-1].write().overwrite().save('./tmp_classifierDL_model')
        # FIX: renamed the misspelled local "classsifierdlModel".
        classifier_dl_model = ClassifierDLModel.load("./tmp_classifierDL_model") \
            .setInputCols(["sentence_embeddings"]) \
            .setOutputCol("class")
class AlbertEmbeddingsTestSpec(unittest.TestCase):
    """Runs pretrained ALBERT embeddings over the sentence-embeddings CSV."""

    def setUp(self):
        csv_path = "file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv"
        self.data = SparkContextForTest.spark.read.option("header", "true").csv(path=csv_path)

    def runTest(self):
        stages = [
            DocumentAssembler().setInputCol("text").setOutputCol("document"),
            SentenceDetector().setInputCols(["document"]).setOutputCol("sentence"),
            Tokenizer().setInputCols(["sentence"]).setOutputCol("token"),
            AlbertEmbeddings.pretrained()
                .setInputCols(["sentence", "token"])
                .setOutputCol("embeddings"),
        ]
        model = Pipeline(stages=stages).fit(self.data)
        model.transform(self.data).show()
class SentimentDLTestSpec(unittest.TestCase):
    """Trains SentimentDL on USE sentence embeddings and round-trips the model."""

    def setUp(self):
        csv_path = "file:///" + os.getcwd() + "/../src/test/resources/classifier/sentiment.csv"
        self.data = SparkSessionForTest.spark.read.option("header", "true").csv(path=csv_path)

    def runTest(self):
        stages = [
            DocumentAssembler().setInputCol("text").setOutputCol("document"),
            UniversalSentenceEncoder.pretrained()
                .setInputCols("document")
                .setOutputCol("sentence_embeddings"),
            SentimentDLApproach()
                .setInputCols("sentence_embeddings")
                .setOutputCol("category")
                .setLabelColumn("label"),
        ]
        model = Pipeline(stages=stages).fit(self.data)
        # Save the trained approach stage and load it back as a model.
        model.stages[-1].write().overwrite().save('./tmp_sentimentDL_model')
        sentimentdl_model = SentimentDLModel.load("./tmp_sentimentDL_model") \
            .setInputCols(["sentence_embeddings"]) \
            .setOutputCol("class")
class XlnetEmbeddingsTestSpec(unittest.TestCase):
    """Runs pretrained XLNet embeddings over the sentence-embeddings CSV."""

    def setUp(self):
        self.data = SparkContextForTest.spark.read.option("header", "true") \
            .csv(path="file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv")

    def runTest(self):
        # BUG FIX: the original pipeline referenced `document_assembler` without
        # ever constructing it, which raised NameError when the test ran.
        document_assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        sentence_detector = SentenceDetector() \
            .setInputCols(["document"]) \
            .setOutputCol("sentence")
        tokenizer = Tokenizer() \
            .setInputCols(["sentence"]) \
            .setOutputCol("token")
        xlnet = XlnetEmbeddings.pretrained() \
            .setInputCols(["sentence", "token"]) \
            .setOutputCol("embeddings")
        pipeline = Pipeline(stages=[
            document_assembler,
            sentence_detector,
            tokenizer,
            xlnet
        ])
        model = pipeline.fit(self.data)
        model.transform(self.data).show()
| 39.157227 | 396 | 0.595061 |
3a4090cec252d88cf721a4a89a79da3e104fa893 | 22,721 | py | Python | src/tiden/sshpool.py | gridgain/tiden | 53a465a3b8b58fe8f26c36c8988b9ced0dbe83ec | [
"Apache-2.0"
] | 14 | 2020-06-05T09:30:42.000Z | 2022-01-19T00:26:48.000Z | src/tiden/sshpool.py | gridgain/tiden | 53a465a3b8b58fe8f26c36c8988b9ced0dbe83ec | [
"Apache-2.0"
] | 6 | 2020-06-09T14:05:21.000Z | 2021-03-18T13:55:15.000Z | src/tiden/sshpool.py | gridgain/tiden | 53a465a3b8b58fe8f26c36c8988b9ced0dbe83ec | [
"Apache-2.0"
] | 1 | 2020-06-09T13:53:15.000Z | 2020-06-09T13:53:15.000Z | #!/usr/bin/env python3
#
# Copyright 2017-2020 GridGain Systems.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
from hashlib import md5
from multiprocessing.dummy import Pool as ThreadPool
from os import path
from os.path import basename
from re import search, split, sub
from time import sleep
from typing import Dict, List
from paramiko import AutoAddPolicy, SSHClient, SSHException, SFTPClient
from paramiko.buffered_pipe import PipeTimeout
from .abstractsshpool import AbstractSshPool
from .tidenexception import RemoteOperationTimeout, TidenException
from .util import log_print, log_put, log_add, get_logger
# When True, upload/download argument values are traced via log_print for debugging.
debug_ssh_pool = False
class SshPool(AbstractSshPool):
default_timeout = 400
no_java_commands = [
'echo', 'cat', 'grep', 'kill', 'ps', 'ls', 'ln', 'mkdir', 'rm', 'md5sum', 'unzip', 'touch', 'chmod'
]
def __init__(self, ssh_config, **kwargs):
super(SshPool, self).__init__(ssh_config, **kwargs)
self.retries = kwargs.get('retries')
self.username = self.config['username']
self.private_key_path = self.config.get('private_key_path')
self.use_ssh_agent = self.config.get('use_ssh_agent')
self.threads_num = self.config['threads_num']
self.home = str(self.config['home'])
if self.retries is None:
self.retries = 3
self.clients = {}
self.docker_hosts = set()
self.trace_info()
def trace_info(self):
"""
called at startup to trace pool configuration to logs
:return:
"""
log_print('SSH Pool threads: %s' % self.config['threads_num'])
    def available_space(self):
        """
        calculate available disk space per host

        Runs `df -l` on every host and aggregates the "Available" column of
        the mount point that contains `self.home` (the root mount '/' is
        ignored). Emits a warning entry when a host has less than `threshold`
        GB free.
        :return: tuple of (total available space in GB as a string,
                 either '<min> GB' or a set of warning/problem messages)
        """
        total_size = 0
        min_size = None
        threshold = 10
        problem_hosts = set()
        # df reports sizes in 1K blocks; 1048576 blocks == 1 GB.
        to_gb = lambda x: int(int(x) / 1048576)
        results = self.exec(['df -l'])
        for host in results.keys():
            if host not in results or len(results[host]) == 0:
                problem_hosts.add(f"Can't get available space at host {host}")
                continue
            lines = results[host][0]
            for line in lines.rstrip().splitlines():
                # Expected df columns: filesystem, size, used, available, use%, mount.
                storage_items = split('\s+', line)
                if len(storage_items) == 6:
                    match = search('^[0-9]+$', storage_items[3])
                    if (match and
                            self.home.startswith(storage_items[5]) and
                            storage_items[5] != '/'):
                        total_size += int(storage_items[3])
                        if min_size is None:
                            min_size = int(storage_items[3])
                        min_size = min(int(storage_items[3]), min_size)
                        if to_gb(min_size) < threshold:
                            problem_hosts.add('WARNING! host {} has free space less than {}GB ({}GB actual)'
                                              .format(host, threshold, to_gb(min_size)))
        if min_size is None:
            min_size = total_size
        if problem_hosts:
            return str(to_gb(total_size)), problem_hosts
        else:
            return str(to_gb(total_size)), '{} GB'.format(to_gb(min_size))
    def connect(self):
        """
        Establish an SSH connection to every host in the pool, retrying up to
        `self.retries` times per host. Unrecoverable errors (bad host name,
        timeout, SSH failure after all retries) terminate the process via
        exit(1). Hosts detected as Docker containers are recorded in
        `self.docker_hosts` so that `sudo` can be stripped from their
        commands later (see exec_on_host).
        """
        for host in self.hosts:
            attempt = 0
            log_put("Checking connection to %s ... " % host, 2)
            connected = False
            ssh = None
            # Either a private key file or the SSH agent must be usable.
            if self.private_key_path:
                if not path.exists(self.private_key_path):
                    raise TidenException("Private key %s not found" % self.private_key_path)
            elif not self.use_ssh_agent:
                raise TidenException("Either private_key_path or use_ssh_agent must be configured in the environment")
            while attempt < self.retries and not connected:
                try:
                    attempt += 1
                    ssh = SSHClient()
                    ssh.load_system_host_keys()
                    ssh.set_missing_host_key_policy(AutoAddPolicy())
                    if self.use_ssh_agent:
                        ssh.connect(
                            host,
                            username=self.username,
                            allow_agent=True,
                        )
                    else:
                        ssh.connect(
                            host,
                            username=self.username,
                            key_filename=self.private_key_path,
                        )
                    # Sanity check: run 'uptime' and look for its usual output.
                    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('uptime')
                    for line in ssh_stdout:
                        if 'load average' in str(line):
                            log_print('ok', 3)
                            self.clients[host] = ssh
                            attempt = self.retries + 1
                            connected = True
                            break
                    # Check whether host is a Docker container
                    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('cat /proc/1/cgroup')
                    if any(map(lambda l: 'docker' in l, ssh_stdout)):
                        log_print(f'{host} is a Docker container')
                        self.docker_hosts.add(host)
                except socket.gaierror as e:
                    # Host name could not be resolved -- configuration error, abort.
                    log_print('', 2)
                    log_print("ERROR: host '%s' is incorrect \n" % host, color='red')
                    log_print("%s\n" % str(e))
                    exit(1)
                except TimeoutError as e:
                    log_add('T ', 3)
                    if attempt == self.retries:
                        log_print('', 2)
                        log_print("ERROR: connection timeout to host %s\n" % host, color='red')
                        log_print("%s\n" % str(e))
                        exit(1)
                except (SSHException, socket.error) as e:
                    log_add('E ', 3)
                    if attempt == self.retries:
                        log_print('', 2)
                        log_print("ERROR: SSH error for host=%s, username=%s, key=%s" %
                                  (host, str(self.username), str(self.private_key_path)), 2, color='red')
                        log_print(str(e), 2)
                        exit(1)
def download(self, remote_paths, local_path, prepend_host=True):
if debug_ssh_pool:
log_print('download: \nremote_paths:' + repr(remote_paths) + '\nlocal_paths: ' + repr(local_path))
files_for_hosts = []
if type(remote_paths) != type({}):
remote_paths = {host: remote_paths for host in self.hosts}
for host in self.hosts:
if host not in remote_paths.keys():
continue
host_remote_paths = remote_paths[host]
if type(host_remote_paths) != type([]):
host_remote_paths = [host_remote_paths]
if path.isdir(local_path):
_rem_arr = []
_loc_arr = []
for remote_path in host_remote_paths:
file = local_path
if prepend_host is True:
file = "%s/%s%s" % (file, host, path.basename(remote_path))
else:
file = "%s/%s" % (file, path.basename(remote_path))
_rem_arr.append(remote_path)
_loc_arr.append(file)
files_for_hosts.append(
[host, _rem_arr.copy(), _loc_arr.copy()]
)
else:
if len(host_remote_paths) > 1:
raise TidenException("When downloading many files, local_path must be a directory! \n"
"remote_paths: " + repr(host_remote_paths) + '\n' +
'local_path: ' + str(local_path))
files_for_hosts.append(
[host, host_remote_paths, [local_path]]
)
pool = ThreadPool(self.threads_num)
raw_results = pool.starmap(self.download_from_host, files_for_hosts)
results = []
for raw_result in raw_results:
results.extend(raw_result)
pool.close()
pool.join()
return results
def download_from_host(self, host, remote_paths, local_paths):
if debug_ssh_pool:
log_print('download_from_host: \nhost: ' + repr(host) +
'\nremote_paths:' + repr(remote_paths) + '\nlocal_paths: ' + repr(local_paths))
result = []
try:
if type(remote_paths) != type([]):
remote_paths = [remote_paths]
local_paths = [local_paths]
ssh_client: SSHClient = self.clients.get(host)
sftp: SFTPClient = ssh_client.open_sftp()
for i, remote_path in enumerate(remote_paths):
sftp.get(remote_path, local_paths[i])
result.append(local_paths[i])
except SSHException as e:
log_print('WARN: can\'t download file(s) from host ' + str(repr(remote_paths)) + ', ' + str(e), color='red')
return result
def exec(self, commands, **kwargs):
"""
:param commands: the list of commands to execute for hosts or dict of list of commands indexed by host
:return: the list of lines
"""
from functools import partial
commands_for_hosts = []
output = []
if isinstance(commands, list):
for host in self.hosts:
commands_for_hosts.append(
[host, commands]
)
elif isinstance(commands, dict):
for host in commands.keys():
commands_for_hosts.append(
[host, commands[host]]
)
else:
for host in self.hosts:
commands_for_hosts.append(
[host, [commands]]
)
pool = ThreadPool(self.threads_num)
raw_results = pool.starmap(partial(self.exec_on_host, **kwargs), commands_for_hosts)
results = {}
for raw_result in raw_results:
for host in raw_result.keys():
results[host] = raw_result[host]
pool.close()
pool.join()
return results
    def exec_on_host(self, host, commands, **kwargs):
        """
        Execute the list of commands on the particular host
        :param host: host or ip address
        :param commands: the command or the list of commands
        :return: dictionary:
            <host>: [ <string containing the output of executed commands>, ... ]
        """
        output = []
        client = self.clients[host]
        env_vars = ''
        # Per-command timeout; falls back to the configured default.
        timeout = kwargs.get('timeout', int(self.config['default_timeout']))
        if self.config.get('env_vars'):
            # Build an "export VAR=...;" prefix from the configured variables.
            for env_var_name in self.config['env_vars'].keys():
                val = self.config['env_vars'][env_var_name]
                env_vars += f"export {env_var_name}=\"{val}\";"
        for command in commands:
            try:
                # Merge stderr into stdout so callers receive a single stream.
                if '2>&1' not in command:
                    command += ' 2>&1'
                # Simple shell utilities (no_java_commands) skip the env exports.
                if env_vars != '' and command.split()[0] not in self.no_java_commands:
                    command = f"{env_vars}{command}"
                # Remove sudo with options if the host is a Docker container
                if host in self.docker_hosts:
                    command = sub(r'sudo(\s+[-]{1,2}\S*)*', '', command)
                # TODO we should handle stderr
                get_logger('ssh_pool').debug(f'{host} >> {command}')
                stdin, stdout, stderr = client.exec_command(command, timeout=timeout)
                command_output = ''
                for line in stdout:
                    if line.strip() != '':
                        command_output += line
                for line in stderr:
                    if line.strip() != '':
                        command_output += line
                output.append(command_output)
                formatted_output = ''.join(output).encode('utf-8')
                get_logger('ssh_pool').debug(f'{host} << {formatted_output}')
            except SSHException as e:
                # A dropped session is retried once after reconnecting the pool;
                # kwargs['repeat'] guards against infinite recursion.
                if str(e) == 'SSH session not active' and not kwargs.get('repeat'):
                    # reconnect
                    for i in range(10):
                        try:
                            log_print('ssh reconnect')
                            self.connect()
                        except SSHException:
                            sleep(10)
                            continue
                        break
                    kwargs['repeat'] = True
                    return self.exec_on_host(host, commands, **kwargs)
                print(str(e))
            except (PipeTimeout, socket.timeout) as e:
                raise RemoteOperationTimeout(f'Timeout {timeout} reached while executing command:\n'
                                             f'Host: {host}\n'
                                             f'{command}')
        return {host: output}
@staticmethod
def _reserved_java_processes():
"""
These java processes are hidden from jps and killall('java').
The first item of list must always be 'jps' itself.
:return:
"""
return [
'sun.tools.jps.Jps',
'jenkins.war',
'com.intellij.idea.Main',
'org.jetbrains.idea.maven.server.RemoteMavenServer',
'org.jetbrains.jps.cmdline.Launcher'
]
    def get_process_and_owners(self, hosts=None, skip_reserved_java_processes=True):
        """
        Returns parsed and filtered for output of `ps -ef | grep java` command executed on hosts.
        :param hosts: (optional) array of hosts to run command at
        :param skip_reserved_java_processes: (optional, default True)
        :return: list of dictionaries:
            'host': host
            'owner': java process owner
            'pid': java process pid
        """
        jps_command = ['ps -ef | grep java | grep -v grep']
        if hosts is not None:
            # Restrict execution to the requested hosts only.
            jps_command = {host: jps_command for host in hosts}
        raw_results = self.exec(jps_command)
        results = []
        for host in raw_results.keys():
            for line in raw_results[host][0].splitlines():
                if skip_reserved_java_processes:
                    # Hide IDE/CI helper JVMs (see _reserved_java_processes).
                    is_reserved_process = False
                    for proc_name in SshPool._reserved_java_processes():
                        if proc_name in line:
                            is_reserved_process = True
                            break
                    if is_reserved_process:
                        continue
                else:
                    # skip only 'jps'
                    if SshPool._reserved_java_processes()[0] in line:
                        continue
                # ps -ef: first column is the owner, second the pid.
                m = search('^([0-9\w]+)\s+([0-9]+)', line)
                if m:
                    results.append({'host': host, 'owner': m.group(1), 'pid': m.group(2)})
        return results
def jps(self, jps_args=None, hosts=None, skip_reserved_java_processes=True):
    """
    Returns parsed and filtered output of the `jps` command executed on hosts.
    :param jps_args: (optional) list of 'jps' arguments, defaults to '-l' for
        the full main java class name
    :param hosts: (optional) list of hosts to run the command at (default: all pool hosts)
    :param skip_reserved_java_processes: (optional, default True) hide known
        developer/debugger java processes (see _reserved_java_processes)
    :return: list of dictionaries:
        'host': host
        'pid': java process pid
        'name': java process name
    """
    jps_command = ['jps']
    if jps_args is not None:
        jps_command.extend(jps_args)
    else:
        jps_command.append('-l')
    jps_command = [" ".join(jps_command)]
    if hosts is not None:
        jps_command = {host: jps_command for host in hosts}
    raw_results = self.exec(jps_command)
    results = []
    # hoisted out of the loop; called via self so subclasses can override
    reserved = self._reserved_java_processes()
    for host in raw_results.keys():
        for line in raw_results[host][0].splitlines():
            if skip_reserved_java_processes:
                # drop every line mentioning a reserved process
                if any(proc_name in line for proc_name in reserved):
                    continue
            elif reserved[0] in line:
                # always hide 'jps' itself (first list entry)
                continue
            m = search(r'^([0-9]+) (.+)$', line)
            if m:
                results.append({'host': host, 'pid': m.group(1), 'name': m.group(2)})
    return results
def dirsize(self, dir_path, *args):
    """Return the total size in bytes of dir_path, summed over hosts.

    Runs `du -sb` on every host of the pool, or only on the hosts given
    as a single optional positional list argument.
    """
    target_hosts = args[0] if len(args) == 1 else self.hosts
    command = {host: ['du -sb %s' % dir_path] for host in target_hosts}
    output = self.exec(command)
    total = 0
    for host, lines in output.items():
        for entry in lines:
            match = search(r'^([0-9]+)\t', entry)
            if match:
                total += int(match.group(1))
    return total
def upload(self, files, remote_path, **kwargs):
    """Upload local files to remote_path on every host of the pool."""
    internal = kwargs.get('internal_download')
    self.upload_for_hosts(self.hosts, files, remote_path, internal_download=internal)
def upload_for_hosts(self, hosts, files, remote_path, internal_download=False):
    """Upload local files to remote_path on each of the given hosts.

    If internal_download is true, upload once (SFTP) to the first host and
    fan out to the remaining hosts host-to-host via rsync (transfer_file);
    otherwise upload to every host in parallel with a thread pool.

    :param hosts: list of target hosts
    :param files: list of local file paths
    :param remote_path: destination directory on the hosts
    :param internal_download: (default False) fan out host-to-host instead
        of uploading from this machine to every host
    """
    if internal_download:
        first_found_host = hosts[0]
        other_hosts = hosts[1:]
        self.upload_on_host(first_found_host, files, remote_path)
        self.transfer_file(
            {first_found_host: [f'{remote_path}/{basename(file)}' for file in files]},
            {host: remote_path for host in other_hosts}
        )
    else:
        # fix: the work list was previously built unconditionally (dead in
        # the internal_download branch); build it only where it is used
        files_for_hosts = [[host, files, remote_path] for host in hosts]
        pool = ThreadPool(self.threads_num)
        pool.starmap(self.upload_on_host, files_for_hosts)
        pool.close()
        pool.join()
def transfer_file(self, source: Dict[str, List[str]], target: Dict[str, str]):
    """Copy files directly between hosts with rsync.

    :param source: mapping of source host -> list of absolute file paths
    :param target: mapping of target host -> destination directory
    :return: list of human-readable failure messages (empty on full success).
        Previously this list was computed and silently discarded; it is now
        returned so callers can detect failed transfers (callers ignoring the
        return value are unaffected).
    """
    command = {}
    for source_host, source_files in source.items():
        for target_host, target_path in target.items():
            if not target_path.endswith('/'):
                target_path += '/'
            for source_file in source_files:
                command[target_host] = command.get(target_host, []) + [
                    f'rsync {source_host}:{source_file} {target_path}'
                ]
    failed_to_download = []
    download_res = self.exec(command)
    for host, res in download_res.items():
        # NOTE(review): the first output entry per host is skipped —
        # presumably a header/echo line from exec(); confirm.
        for output in res[1:]:
            # a successful rsync run reports 'total size' in its output
            if 'total size' not in output:
                failed_to_download.append(
                    f'Failed to download '
                    f'{",".join(source.keys())}->'
                    f'{",".join(target.keys())}')
    return failed_to_download
def not_uploaded(self, files, remote_path):
    """Return the subset of local files whose MD5 does not match the
    remote copy under remote_path on every host of the pool.

    :param files: list of local file paths
    :param remote_path: remote directory where the files should reside
    :return: list of local paths that are missing or stale on at least one host
    """
    outdated = []
    for file in files:
        file_name = path.basename(file)
        # fix: the file handle was previously leaked (open(...).read())
        with open(file, 'rb') as local_file:
            local_md5 = md5(local_file.read()).hexdigest()
        remote_file = f"{remote_path}/{file_name}"
        results = self.exec([f'md5sum {remote_file}'])
        matched_count = 0
        for host in results.keys():
            if len(results[host]) > 0:
                # md5sum prints '<digest>  <path>'; the trailing space avoids
                # matching a digest that is merely a prefix of another
                if f'{local_md5} ' in results[host][0]:
                    matched_count += 1
        if matched_count < len(results.keys()):
            outdated.append(file)
    return outdated
def upload_on_host(self, host, files, remote_dir):
    """Upload the given local files into remote_dir on a single host via SFTP.

    SSH errors are printed (best effort) rather than raised, matching the
    pool's other operations.
    """
    try:
        sftp = self.clients.get(host).open_sftp()
        try:
            for local_file in files:
                remote_path = f'{remote_dir}/{path.basename(local_file)}'
                get_logger('ssh_pool').debug(f'sftp_put on host {host}: {local_file} -> {remote_path}')
                sftp.put(local_file, remote_path)
        finally:
            # fix: release the SFTP channel (it was previously leaked)
            sftp.close()
    except SSHException as e:
        print(str(e))
def killall(self, name, sig=-9, skip_reserved_java_processes=True, hosts=None):
    """
    Kill all java processes that might interfere grid at all hosts of connected pool.
    :param name: name of processes to kill
    :param sig: signal to send, default -9 (SIG_KILL)
    :param skip_reserved_java_processes: (default True) skip known developer/debugger java processes
    :param hosts: hosts where to kill java processes (default None means all hosts)
    :return: the per-host output of exec()
    """
    if name != 'java' or not skip_reserved_java_processes:
        # Simple case: kill every process matching `name` outright.
        # `nohup ... &` detaches the kill so the SSH channel is not held open;
        # output is discarded.
        kill_command = [
            'nohup '
            ' sudo '
            ' -n '
            ' killall '
            ' %s '
            ' %s '
            '& > /dev/null 2>&1' % (
                sig,
                name
            )]
    else:
        # Protected case: list all processes, filter out the reserved ones,
        # then kill the surviving PIDs one by one.
        # `grep [j]ava` keeps the grep process itself out of its own results;
        # the reserved names are joined into a single extended-regex
        # alternation with their dots escaped.
        kill_command = [
            'ps -AF '
            '| grep [j]ava 2>/dev/null '
            '| grep -vE "(%s)" 2>/dev/null '
            '| awk "{print \$2}" '
            '| xargs -i{} '
            ' nohup '
            ' sudo '
            ' -n '
            ' /bin/kill '
            ' %s {} '
            ' & >/dev/null 2>&1'
            % (
                "|".join([s.replace('.', '\.') for s in SshPool._reserved_java_processes()]),
                sig,
            )
        ]
    if hosts is not None:
        # restrict the kill to an explicit subset of hosts
        kill_command = {host: kill_command for host in hosts}
    res = self.exec(kill_command)
    # print_blue(res)
    return res
| 42.232342 | 120 | 0.511729 |
1e0621e63f06dfd68447a955e838ec1503195324 | 2,283 | py | Python | src/.history/Socket_Control/Hiwin_RT605_Socket_TCPcmd_20190711103103.py | SamKaiYang/2019_Hiwin_Shaking | d599f8c87dc4da89eae266990d12eb3a8b0f3e16 | [
"MIT"
] | null | null | null | src/.history/Socket_Control/Hiwin_RT605_Socket_TCPcmd_20190711103103.py | SamKaiYang/2019_Hiwin_Shaking | d599f8c87dc4da89eae266990d12eb3a8b0f3e16 | [
"MIT"
] | null | null | null | src/.history/Socket_Control/Hiwin_RT605_Socket_TCPcmd_20190711103103.py | SamKaiYang/2019_Hiwin_Shaking | d599f8c87dc4da89eae266990d12eb3a8b0f3e16 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# license removed for brevity
import Hiwin_RT605_Socket_Taskcmd as Taskcmd
class switch(object):
    """Iterator-based stand-in for a C-style switch statement.

    Usage::

        for case in switch(value):
            if case('a'): ...; break
            if case('b', 'c'): ...; break
            if case(): ...  # default

    Omitting ``break`` after a matched case falls through to the next one.
    """
    def __init__(self, value):
        self.value = value  # the value being switched on
        self.fall = False   # True once a case has matched (enables fall-through)

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match
        # fix (PEP 479): `raise StopIteration` inside a generator becomes
        # RuntimeError on Python 3.7+; a plain return ends iteration cleanly.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            # a previous case matched (fall-through), or this is the default case
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
def SetPtoP(GripCmd, RA, CtrlMode, x, y, z, pitch, roll, yaw, setvel):
    """Build the '#'-prefixed, '*'-terminated point-to-point motion command.

    Integer fields (action code, GripCmd, RA, CtrlMode) are followed by
    float fields (x, y, z, pitch, roll, yaw, setvel), space-separated.
    """
    int_fields = [Taskcmd.Action_Type.PtoP, GripCmd, RA, CtrlMode]
    float_fields = [x, y, z, pitch, roll, yaw, setvel]
    parts = ['#']
    parts += [str(int(v)) for v in int_fields]
    parts += [str(float(v)) for v in float_fields]
    parts.append('*')
    return ' '.join(parts)
def SetLine(GripCmd, RA, CtrlMode, x, y, z, pitch, roll, yaw, setvel):
    """Build the '#'-prefixed, '*'-terminated linear motion command.

    Same field layout as SetPtoP but with the Line action code.
    """
    int_fields = [Taskcmd.Action_Type.Line, GripCmd, RA, CtrlMode]
    float_fields = [x, y, z, pitch, roll, yaw, setvel]
    parts = ['#']
    parts += [str(int(v)) for v in int_fields]
    parts += [str(float(v)) for v in float_fields]
    parts.append('*')
    return ' '.join(parts)
def SetVel(GripCmd, Vel):
    """Build the set-velocity command string."""
    parts = ['#', str(int(Taskcmd.Action_Type.SetVel)), str(int(GripCmd)), str(int(Vel)), '*']
    return ' '.join(parts)
def SetDelay(GripCmd, Delay):
    """Build the delay command string."""
    parts = ['#', str(int(Taskcmd.Action_Type.Delay)), str(int(GripCmd)), str(int(Delay)), '*']
    return ' '.join(parts)
def Set_SpeedMode(GripCmd, Mode):
    """Build the speed-mode command string (Action_Type.Mode)."""
    parts = ['#', str(int(Taskcmd.Action_Type.Mode)), str(int(GripCmd)), str(int(Mode)), '*']
    return ' '.join(parts)
def Is_busy(Cmd):
    """Pass the busy-state feedback command through unchanged."""
    return Cmd
def feedback_check():
    """Build the controller status-check command string."""
    action = str(int(Taskcmd.Action_Type.Check))
    # NOTE(review): the original concatenation produced two spaces before the
    # trailing '*' (adjacent literals '" " " *"', likely a missing '+').
    # Preserved verbatim in case the controller's parser depends on it; confirm.
    return '# ' + action + ' 0 0  *'
def Set_Suction(GripCmd, Suction):
    """Build the suction on/off command string.

    NOTE(review): identical in structure to Set_SpeedMode — it also sends
    Taskcmd.Action_Type.Mode rather than a dedicated suction action code;
    confirm this is intentional before changing it.
    """
    parts = ['#', str(int(Taskcmd.Action_Type.Mode)), str(int(GripCmd)), str(int(Suction)), '*']
    return ' '.join(parts)
b3a4ae6a458a4e628fbbfc140bc7097f85f5c108 | 8,222 | py | Python | python/istio_api/mcp/v1alpha1/metadata_pb2.py | istio-testing/api | 0d1db548cf57ed2184cd0c35888e8b207564903b | [
"Apache-2.0"
] | null | null | null | python/istio_api/mcp/v1alpha1/metadata_pb2.py | istio-testing/api | 0d1db548cf57ed2184cd0c35888e8b207564903b | [
"Apache-2.0"
] | null | null | null | python/istio_api/mcp/v1alpha1/metadata_pb2.py | istio-testing/api | 0d1db548cf57ed2184cd0c35888e8b207564903b | [
"Apache-2.0"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mcp/v1alpha1/metadata.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mcp/v1alpha1/metadata.proto',
package='istio.mcp.v1alpha1',
syntax='proto3',
serialized_options=_b('Z\031istio.io/api/mcp/v1alpha1\250\342\036\001'),
serialized_pb=_b('\n\x1bmcp/v1alpha1/metadata.proto\x12\x12istio.mcp.v1alpha1\x1a\x14gogoproto/gogo.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xbb\x02\n\x08Metadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12/\n\x0b\x63reate_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x38\n\x06labels\x18\x04 \x03(\x0b\x32(.istio.mcp.v1alpha1.Metadata.LabelsEntry\x12\x42\n\x0b\x61nnotations\x18\x05 \x03(\x0b\x32-.istio.mcp.v1alpha1.Metadata.AnnotationsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x32\n\x10\x41nnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x1fZ\x19istio.io/api/mcp/v1alpha1\xa8\xe2\x1e\x01\x62\x06proto3')
,
dependencies=[gogoproto_dot_gogo__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_METADATA_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='istio.mcp.v1alpha1.Metadata.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.mcp.v1alpha1.Metadata.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.mcp.v1alpha1.Metadata.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=355,
serialized_end=400,
)
_METADATA_ANNOTATIONSENTRY = _descriptor.Descriptor(
name='AnnotationsEntry',
full_name='istio.mcp.v1alpha1.Metadata.AnnotationsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.mcp.v1alpha1.Metadata.AnnotationsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.mcp.v1alpha1.Metadata.AnnotationsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=402,
serialized_end=452,
)
_METADATA = _descriptor.Descriptor(
name='Metadata',
full_name='istio.mcp.v1alpha1.Metadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='istio.mcp.v1alpha1.Metadata.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='create_time', full_name='istio.mcp.v1alpha1.Metadata.create_time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='istio.mcp.v1alpha1.Metadata.version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='istio.mcp.v1alpha1.Metadata.labels', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annotations', full_name='istio.mcp.v1alpha1.Metadata.annotations', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_METADATA_LABELSENTRY, _METADATA_ANNOTATIONSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=137,
serialized_end=452,
)
_METADATA_LABELSENTRY.containing_type = _METADATA
_METADATA_ANNOTATIONSENTRY.containing_type = _METADATA
_METADATA.fields_by_name['create_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_METADATA.fields_by_name['labels'].message_type = _METADATA_LABELSENTRY
_METADATA.fields_by_name['annotations'].message_type = _METADATA_ANNOTATIONSENTRY
DESCRIPTOR.message_types_by_name['Metadata'] = _METADATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Metadata = _reflection.GeneratedProtocolMessageType('Metadata', (_message.Message,), dict(
LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _METADATA_LABELSENTRY,
__module__ = 'mcp.v1alpha1.metadata_pb2'
# @@protoc_insertion_point(class_scope:istio.mcp.v1alpha1.Metadata.LabelsEntry)
))
,
AnnotationsEntry = _reflection.GeneratedProtocolMessageType('AnnotationsEntry', (_message.Message,), dict(
DESCRIPTOR = _METADATA_ANNOTATIONSENTRY,
__module__ = 'mcp.v1alpha1.metadata_pb2'
# @@protoc_insertion_point(class_scope:istio.mcp.v1alpha1.Metadata.AnnotationsEntry)
))
,
DESCRIPTOR = _METADATA,
__module__ = 'mcp.v1alpha1.metadata_pb2'
# @@protoc_insertion_point(class_scope:istio.mcp.v1alpha1.Metadata)
))
_sym_db.RegisterMessage(Metadata)
_sym_db.RegisterMessage(Metadata.LabelsEntry)
_sym_db.RegisterMessage(Metadata.AnnotationsEntry)
DESCRIPTOR._options = None
_METADATA_LABELSENTRY._options = None
_METADATA_ANNOTATIONSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 41.11 | 816 | 0.757358 |
2dcc618f01f3c9c7e54df2869cf18af5a9913b36 | 7,653 | py | Python | sentenai/stream/events.py | sentenai/py-sentenai | fec672ae1ac195523067d8f882cfe3419ab4c042 | [
"BSD-3-Clause"
] | 1 | 2018-01-09T18:49:06.000Z | 2018-01-09T18:49:06.000Z | sentenai/stream/events.py | sentenai/py-sentenai | fec672ae1ac195523067d8f882cfe3419ab4c042 | [
"BSD-3-Clause"
] | 168 | 2017-03-15T20:24:52.000Z | 2022-03-15T14:41:26.000Z | sentenai/stream/events.py | sentenai/py-sentenai | fec672ae1ac195523067d8f882cfe3419ab4c042 | [
"BSD-3-Clause"
] | 4 | 2017-07-22T04:03:08.000Z | 2017-12-22T00:21:21.000Z | import math
from copy import copy
from datetime import datetime, timedelta
import numpy as np
from sentenai.api import API, dt64, td64, iso8601
import collections
def flatten(d, parent_key='', sep='_'):
    """Flatten a nested mapping into a single-level dict.

    Nested keys are joined with `sep`, e.g. {'a': {'b': 1}} -> {'a_b': 1}.

    :param d: mapping to flatten (values may themselves be mutable mappings)
    :param parent_key: key prefix (used by the recursive calls)
    :param sep: separator inserted between joined key components
    :return: a new flat dict
    """
    # fix: collections.MutableMapping was deprecated in Python 3.3 and
    # removed in 3.10; the ABC lives in collections.abc.
    from collections.abc import MutableMapping
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
class Events(API):
    """API wrapper for a stream's events endpoint: list, get, insert,
    update and delete events."""

    def __init__(self, parent):
        API.__init__(self, parent._credentials, *parent._prefix, "events", params=parent._params)
        self._parent = parent

    def __repr__(self):
        return repr(self._parent) + ".events"

    def __iter__(self):
        """Yield every event, fetching pages of 100 from the API."""
        i = 0
        n = 100
        while True:
            page = self[i::n]
            if len(page) == 0:
                # fix (PEP 479): `raise StopIteration` inside a generator
                # becomes RuntimeError on Python 3.7+; return ends iteration.
                return
            for evt in page:
                # fix: previously yielded the whole page list (`e`) instead
                # of each individual event
                yield evt
            i += n

    def __delitem__(self, i):
        """Delete the event with id `i`; raises KeyError if it does not exist."""
        res = self._delete(i)
        if res.status_code == 404:
            raise KeyError("Event does not exist.")
        elif res.status_code != 204:
            raise Exception(res.status_code)

    def __len__(self):
        """Number of events, as reported in the service's 'events' header."""
        res = self._head(params=self._params)
        return int(res.headers['events'])

    def __setitem__(self, key, event):
        """Insert `event` under the id `key`."""
        event.id = key
        self.insert(event)

    def __getitem__(self, i):
        """Fetch one event by id (str) or a list of events by slice.

        For a slice: start/stop bound the query (as integers when the parent
        stream has no t0, otherwise as ISO-8601 timestamps); the step's
        absolute value is the result limit and a negative step sorts
        descending.
        """
        if isinstance(i, str):
            # get by id
            res = self._get(i)
            if res.status_code == 200:
                ej = res.json()
                # the timestamp header may or may not be an integer
                try:
                    ts = int(res.headers['timestamp'])
                except:
                    ts = res.headers['timestamp']
                return Event(id=i, ts=ts, duration=res.headers.get('duration'), data=ej)
            elif res.status_code == 404:
                raise KeyError("Events does not exist")
            else:
                raise Exception(res.status_code)
        params = copy(self._params)
        if isinstance(i, int):
            raise TypeError("Integer not valid")
        elif isinstance(i, slice):
            # time slice
            t0 = self._parent.t0
            params['sort'] = 'asc'
            if i.start is not None:
                params['start'] = int(i.start) if t0 is None else iso8601(i.start)
            if i.stop is not None:
                params['end'] = int(i.stop) if t0 is None else iso8601(i.stop)
            if i.step is not None:
                params['limit'] = abs(i.step)
                if i.step < 0:
                    params['sort'] = 'desc'
            # a reversed range (start > stop) is normalized to a descending query
            if i.start is not None and i.stop is not None:
                if i.start > i.stop:
                    params['start'], params['end'] = params['end'], params['start']
                    params['sort'] = 'desc'
            resp = self._get(params=params)
            if resp.status_code == 200:
                evts = []
                for ej in resp.json():
                    try:
                        ts = int(ej['ts'])
                    except:
                        ts = ej['ts']
                    evts.append(
                        Event(
                            id=ej['id'],
                            ts=ts,
                            duration=ej.get("duration"),
                            data=ej['event'] or None
                        )
                    )
                return evts
            else:
                raise Exception(resp.status_code)
        else:
            raise ValueError("input must be either string or slice")

    def update(self, evt):
        """Overwrite an existing event (by id) with evt's data/timestamps."""
        hdrs = {}
        if evt.id is None:
            raise ValueError("Event id required for updates.")
        if evt.ts is not None:
            hdrs["timestamp"] = iso8601(dt64(evt.ts))
        if evt.duration is not None:
            # duration header is sent in seconds (timedelta64 is in nanoseconds)
            hdrs["duration"] = str(td64(evt.duration).astype(float) / 1000000000.)
        self._put(evt.id, json=evt.data, headers=hdrs)

    def insert(self, evt):
        """Insert an event; PUT when it has an id, POST otherwise.

        :return: the inserted Event (with the service-assigned id for POSTs)
        """
        hdrs = {'content-type': 'application/json'}
        if evt.ts is not None and evt.duration is None:
            hdrs["timestamp"] = iso8601(evt.ts)
        elif evt.duration is not None:
            # events with a duration are sent as a start/end pair
            hdrs['start'] = iso8601(evt.ts)
            hdrs["end"] = iso8601(evt.ts + evt.duration)
        if evt.id is not None:
            r = self._put(evt.id, json=evt.data, headers=hdrs)
            if r.status_code in [200, 201]:
                return evt
            else:
                raise Exception(r.status_code)
        else:
            r = self._post(json=evt.data, headers=hdrs)
            if r.status_code in [200, 201]:
                return Event(id=r.headers['Location'], data=evt.data, ts=evt.ts, duration=evt.duration)
            else:
                raise Exception(r.status_code)

    def remove(self, evt):
        """Delete the given event (by its id)."""
        del self[evt.id]
class Event(object):
    """A single stream event: an id, a timestamp, an optional duration and an
    arbitrary JSON-like data payload."""
    def __init__(self, id=None, ts=None, duration=None, data=None):
        # id is stringified; ts/duration are normalized via the dt64/td64 helpers
        self.id = str(id) if id is not None else None
        self.ts = dt64(ts) if ts is not None else None
        self.duration = td64(duration) if duration is not None else None
        self.data = data
    def as_record(self):
        # Flatten id/ts/duration/payload into a single dict with '/'-joined keys.
        return flatten({'id': self.id, 'ts': self.ts, 'duration': self.duration, 'event': self.data}, '', '/')
    def __getitem__(self, pth):
        # Index the payload by a single key (str) or by a path of nested keys.
        if isinstance(pth, str):
            return self.data[pth]
        else:
            d = self.data
            for s in pth:
                d = d[s]
            return d
    def __repr__(self):
        # Only the attributes that are set appear in the repr.
        x = ["{}={}".format(k, repr(getattr(self, k)))
             for k in ("id", "ts", "duration", "data")
             if getattr(self, k) is not None]
        return "Event({})".format(", ".join(x))
    def __len__(self):
        # NOTE(review): returns the duration object, but Python requires
        # __len__ to return a non-negative int, so len(event) raises
        # TypeError at runtime; this probably wants to be a property. Confirm.
        return self.duration
    @property
    def start(self):
        # Start time; implicitly None unless both ts and duration are truthy.
        if self.ts and self.duration:
            return self.ts
    @property
    def end(self):
        # End time (ts + duration); implicitly None unless both are truthy.
        if self.ts and self.duration:
            return self.ts + self.duration
    def __lt__(self, other):
        if not isinstance(other, Event):
            raise TypeError("Can only compare events.")
        # NOTE(review): only __lt__ substitutes datetime.max for a missing
        # other.ts; the remaining comparisons assume both timestamps are set.
        return self.ts < (other.ts or datetime.max) or self.ts == other.ts and self.duration < other.duration
    def __le__(self, other):
        if not isinstance(other, Event):
            raise TypeError("Can only compare events.")
        return self.ts < other.ts or self.ts == other.ts and self.duration <= other.duration
    def __eq__(self, other):
        # NOTE(review): raising TypeError (instead of returning
        # NotImplemented) breaks `event == None` checks, and defining __eq__
        # without __hash__ makes Event unhashable. Confirm before relying on it.
        if not isinstance(other, Event):
            raise TypeError("Can only compare events.")
        return self.ts == other.ts and self.duration == other.duration
    def __gt__(self, other):
        if not isinstance(other, Event):
            raise TypeError("Can only compare events.")
        return self.ts > other.ts or self.ts == other.ts and self.duration > other.duration
    def __ge__(self, other):
        if not isinstance(other, Event):
            raise TypeError("Can only compare events.")
        return self.ts > other.ts or self.ts == other.ts and self.duration >= other.duration
    def __ne__(self, other):
        if not isinstance(other, Event):
            raise TypeError("Can only compare events.")
        return self.ts != other.ts or self.duration != other.duration
| 33.565789 | 110 | 0.516921 |
427a7e10f7429a3de0bc85a779d3e69bb978b20e | 104 | py | Python | backend/Django/logproplogsite/logproplogapp/admin.py | emg/proplog | 6728bf2c143f204bb4dfa403c97df36d12c91891 | [
"MIT"
] | null | null | null | backend/Django/logproplogsite/logproplogapp/admin.py | emg/proplog | 6728bf2c143f204bb4dfa403c97df36d12c91891 | [
"MIT"
] | null | null | null | backend/Django/logproplogsite/logproplogapp/admin.py | emg/proplog | 6728bf2c143f204bb4dfa403c97df36d12c91891 | [
"MIT"
] | null | null | null | from django.contrib import admin
from logproplogapp import models
# Expose the Answer model in the Django admin interface.
admin.site.register(models.Answer)
| 14.857143 | 34 | 0.826923 |
4228f74af0a26c33592b359d93f78052b2618241 | 435 | py | Python | old_and_simple_and_deprecated/M-UZ_and_M-PROJ.py | mcjczapiewski/work | 2540afa6b18bf6ff92a7c07b16695035785c0dd8 | [
"MIT"
] | null | null | null | old_and_simple_and_deprecated/M-UZ_and_M-PROJ.py | mcjczapiewski/work | 2540afa6b18bf6ff92a7c07b16695035785c0dd8 | [
"MIT"
] | null | null | null | old_and_simple_and_deprecated/M-UZ_and_M-PROJ.py | mcjczapiewski/work | 2540afa6b18bf6ff92a7c07b16695035785c0dd8 | [
"MIT"
] | null | null | null | import os
# Walk the survey tree and print every file in directories that contain a
# 'merge' entry together with an M-UZ file and an M-PROJ/M-WPROJ file.
for subdir, dirs, files in os.walk(r"D:\_MACIEK_\python_proby\seba"):
    dirs.sort()  # visit subdirectories in deterministic (sorted) order
    # fix: os.listdir(subdir) was previously called up to four times per
    # directory; list it once and reuse the result
    entries = os.listdir(subdir)
    if "merge" in entries:
        has_uz = any("M-UZ" in name for name in entries)
        has_proj = any("M-PROJ" in name or "M-WPROJ" in name for name in entries)
        if has_uz and has_proj:
            for file in files:
                print(os.path.join(subdir, file))
| 36.25 | 70 | 0.590805 |
105c2215acc8c58ec84e6b31e512ab3a118efc48 | 9,293 | py | Python | demos/OptimalPlanning.py | jmainpri/ompl | 6f7445180aa787806055ded249c96e1266ee9a99 | [
"BSD-3-Clause"
] | 2 | 2018-08-10T18:11:35.000Z | 2021-04-06T00:33:13.000Z | demos/OptimalPlanning.py | jmainpri/ompl | 6f7445180aa787806055ded249c96e1266ee9a99 | [
"BSD-3-Clause"
] | null | null | null | demos/OptimalPlanning.py | jmainpri/ompl | 6f7445180aa787806055ded249c96e1266ee9a99 | [
"BSD-3-Clause"
] | 1 | 2018-08-10T18:11:36.000Z | 2018-08-10T18:11:36.000Z | #!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: Luis G. Torres, Mark Moll
try:
from ompl import util as ou
from ompl import base as ob
from ompl import geometric as og
except:
# if the ompl module is not in the PYTHONPATH assume it is installed in a
# subdirectory of the parent directory called "py-bindings."
from os.path import abspath, dirname, join
import sys
sys.path.insert(0, join(dirname(dirname(abspath(__file__))),'py-bindings'))
from ompl import util as ou
from ompl import base as ob
from ompl import geometric as og
from math import sqrt
from sys import argv
## @cond IGNORE
# Our "collision checker". For this demo, our robot's state space
# lies in [0,1]x[0,1], with a circular obstacle of radius 0.25
# centered at (0.5,0.5). Any states lying in this circular region are
# considered "in collision".
class ValidityChecker(ob.StateValidityChecker):
    """Collision checker for a point robot in [0,1]x[0,1] with a circular
    obstacle of radius 0.25 centered at (0.5, 0.5)."""

    def __init__(self, si):
        super(ValidityChecker, self).__init__(si)

    def isValid(self, state):
        """A state is valid iff it lies strictly outside the obstacle."""
        return self.clearance(state) > 0.0

    def clearance(self, state):
        """Signed distance from the state's (x, y) position to the obstacle
        boundary (negative inside the circle)."""
        dx = state[0] - 0.5
        dy = state[1] - 0.5
        return sqrt(dx * dx + dy * dy) - 0.25
## Returns a structure representing the optimization objective to use
# for optimal motion planning. This method returns an objective
# which attempts to minimize the length in configuration space of
# computed paths.
def getPathLengthObjective(si):
    """Objective that minimizes path length in configuration space."""
    objective = ob.PathLengthOptimizationObjective(si)
    return objective
## Returns an optimization objective which attempts to minimize path
# length that is satisfied when a path of length shorter than 1.51
# is found.
def getThresholdPathLengthObj(si):
    """Path-length objective that is satisfied as soon as a path shorter
    than 1.51 is found."""
    objective = ob.PathLengthOptimizationObjective(si)
    objective.setCostThreshold(ob.Cost(1.51))
    return objective
## Defines an optimization objective which attempts to steer the
# robot away from obstacles. To formulate this objective as a
# minimization of path cost, we can define the cost of a path as a
# summation of the costs of each of the states along the path, where
# each state cost is a function of that state's clearance from
# obstacles.
#
# The class StateCostIntegralObjective represents objectives as
# summations of state costs, just like we require. All we need to do
# then is inherit from that base class and define our specific state
# cost function by overriding the stateCost() method.
#
class ClearanceObjective(ob.StateCostIntegralObjective):
    """State-cost-integral objective that rewards clearance from obstacles.

    Path cost is the integral of per-state costs; each state's cost is the
    reciprocal of its obstacle clearance, so greater clearance means lower
    cost — maximizing clearance becomes a cost minimization.
    """

    def __init__(self, si):
        super(ClearanceObjective, self).__init__(si, True)
        self.si_ = si

    def stateCost(self, s):
        """Cost of a single state: 1 / clearance(s)."""
        state_clearance = self.si_.getStateValidityChecker().clearance(s)
        return ob.Cost(1 / state_clearance)
## Return an optimization objective which attempts to steer the robot
# away from obstacles.
def getClearanceObjective(si):
    """Objective that steers the robot away from obstacles."""
    objective = ClearanceObjective(si)
    return objective
## Create an optimization objective which attempts to optimize both
# path length and clearance. We do this by defining our individual
# objectives, then adding them to a MultiOptimizationObjective
# object. This results in an optimization objective where path cost
# is equivalent to adding up each of the individual objectives' path
# costs.
#
# When adding objectives, we can also optionally specify each
# objective's weighting factor to signify how important it is in
# optimal planning. If no weight is specified, the weight defaults to
# 1.0.
def getBalancedObjective1(si):
    """Objective combining path length (weight 5.0) and clearance (weight 1.0).

    A MultiOptimizationObjective's path cost is the sum of its children's
    weighted path costs.
    """
    length_objective = ob.PathLengthOptimizationObjective(si)
    clearance_objective = ClearanceObjective(si)
    combined = ob.MultiOptimizationObjective(si)
    combined.addObjective(length_objective, 5.0)
    combined.addObjective(clearance_objective, 1.0)
    return combined
## Create an optimization objective equivalent to the one returned by
# getBalancedObjective1(), but use an alternate syntax.
# THIS DOESN'T WORK YET. THE OPERATORS SOMEHOW AREN'T EXPORTED BY Py++.
# def getBalancedObjective2(si):
# lengthObj = ob.PathLengthOptimizationObjective(si)
# clearObj = ClearanceObjective(si)
#
# return 5.0*lengthObj + clearObj
## Create an optimization objective for minimizing path length, and
# specify a cost-to-go heuristic suitable for this optimal planning
# problem.
def getPathLengthObjWithCostToGo(si):
    """Path-length objective with a cost-to-go heuristic suitable for this
    optimal planning problem."""
    objective = ob.PathLengthOptimizationObjective(si)
    objective.setCostToGoHeuristic(ob.CostToGoHeuristic(ob.goalRegionCostToGo))
    return objective
def plan(fname = None):
    """Plan an optimal path from (0,0) to (1,1) in the unit square; if
    *fname* is given, write the solution path matrix to that file."""
    # Construct the robot state space in which we're planning. We're
    # planning in [0,1]x[0,1], a subset of R^2.
    space = ob.RealVectorStateSpace(2)
    # Set the bounds of space to be in [0,1].
    space.setBounds(0.0, 1.0)
    # Construct a space information instance for this state space
    si = ob.SpaceInformation(space)
    # Set the object used to check which states in the space are valid
    validityChecker = ValidityChecker(si)
    si.setStateValidityChecker(validityChecker)
    si.setup()
    # Set our robot's starting state to be the bottom-left corner of
    # the environment, or (0,0).
    start = ob.State(space)
    start[0] = 0.0
    start[1] = 0.0
    # Set our robot's goal state to be the top-right corner of the
    # environment, or (1,1).
    goal = ob.State(space)
    goal[0] = 1.0
    goal[1] = 1.0
    # Create a problem instance
    pdef = ob.ProblemDefinition(si)
    # Set the start and goal states
    pdef.setStartAndGoalStates(start, goal)
    # Since we want to find an optimal plan, we need to define what
    # is optimal with an OptimizationObjective structure. Un-comment
    # exactly one of the following 6 lines to see some examples of
    # optimization objectives.
    pdef.setOptimizationObjective(getPathLengthObjective(si))
    # pdef.setOptimizationObjective(getThresholdPathLengthObj(si))
    # pdef.setOptimizationObjective(getClearanceObjective(si))
    # pdef.setOptimizationObjective(getBalancedObjective1(si))
    # pdef.setOptimizationObjective(getBalancedObjective2(si))
    # pdef.setOptimizationObjective(getPathLengthObjWithCostToGo(si))
    # Construct our optimal planner using the RRTstar algorithm.
    optimizingPlanner = og.RRTstar(si)
    # Set the problem instance for our planner to solve
    optimizingPlanner.setProblemDefinition(pdef)
    optimizingPlanner.setup()
    # attempt to solve the planning problem within ten seconds of
    # planning time (solve() takes the time budget in seconds)
    solved = optimizingPlanner.solve(10.0)
    if solved:
        # Output the length of the path found
        print("Found solution of path length %g" % pdef.getSolutionPath().length())
        # If a filename was specified, output the path as a matrix to
        # that file for visualization
        if fname:
            with open(fname,'w') as outFile:
                outFile.write(pdef.getSolutionPath().printAsMatrix())
    else:
        print("No solution found.")
if __name__ == "__main__":
    # The first CLI argument, if present, is the output path for the
    # solution-path matrix.
    fname = argv[1] if len(argv) >= 2 else None
    plan(fname)
## @endcond
| 38.882845 | 83 | 0.717637 |
f9f8bf014882ae3e8a8d7f630ebed6c1d0978880 | 3,550 | py | Python | admin/desk/views.py | hmoco/osf.io | a02869f9b5c198bafae7cea0c216674bbcba62f7 | [
"Apache-2.0"
] | 1 | 2015-10-02T18:35:53.000Z | 2015-10-02T18:35:53.000Z | admin/desk/views.py | hmoco/osf.io | a02869f9b5c198bafae7cea0c216674bbcba62f7 | [
"Apache-2.0"
] | 4 | 2016-05-13T14:24:16.000Z | 2017-03-30T15:28:31.000Z | admin/desk/views.py | hmoco/osf.io | a02869f9b5c198bafae7cea0c216674bbcba62f7 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from django.views.generic import ListView, DetailView
from django.contrib.auth.mixins import PermissionRequiredMixin
from osf.models.user import OSFUser
from admin.desk.utils import DeskClient, DeskError, DeskCustomerNotFound
class DeskCaseList(PermissionRequiredMixin, ListView):
    """Paginated listing of Desk support cases belonging to one OSF user."""
    template_name = 'desk/cases.html'
    ordering = 'updated_at'
    context_object_name = 'cases'
    paginate_by = 100
    paginate_orphans = 5
    permission_required = 'osf.view_desk'
    raise_exception = True

    def dispatch(self, request, *args, **kwargs):
        """Render a dedicated error page whenever the Desk API call fails."""
        try:
            return super(DeskCaseList, self).dispatch(request, *args, **kwargs)
        except DeskError as e:
            error_context = {
                'error': e.message,
                'status': e.status_code,
                'content': e.content,
            }
            return render(request, 'desk/desk_error.html',
                          context=error_context,
                          status=e.status_code)

    def get_queryset(self):
        """Fetch the user's Desk cases, matched on their first email address."""
        customer = OSFUser.load(self.kwargs.get('user_id', None))
        desk = DeskClient(self.request.user)
        return desk.cases({'email': customer.emails[0]})

    def get_context_data(self, **kwargs):
        """Add the user id and deep links into the Desk agent UI."""
        kwargs.setdefault('user_id', self.kwargs.get('user_id'))
        kwargs.setdefault('desk_case', 'https://{}.desk.com/web/agent/case/'.format(DeskClient.SITE_NAME))
        kwargs.setdefault('desk_customer', 'https://{}.desk.com/web/agent/customer/'.format(DeskClient.SITE_NAME))
        return super(DeskCaseList, self).get_context_data(**kwargs)
class DeskCustomer(PermissionRequiredMixin, DetailView):
    """Detail page for the Desk customer record backing an OSF user."""
    template_name = 'desk/customer.html'
    context_object_name = 'customer'
    permission_required = 'osf.view_desk'
    raise_exception = True

    def dispatch(self, request, *args, **kwargs):
        """Map Desk lookup failures onto friendly error pages."""
        try:
            return super(DeskCustomer, self).dispatch(request, *args, **kwargs)
        except DeskCustomerNotFound as e:
            # Specific "no such customer" page; more specific, so checked first.
            not_found_context = {
                'message': e.message,
                'desk_inbox': 'https://{}.desk.com/web/agent/filters/inbox'.format(DeskClient.SITE_NAME)
            }
            return render(request, 'desk/user_not_found.html',
                          context=not_found_context,
                          status=404)
        except DeskError as e:
            error_context = {
                'error': e.message,
                'status': e.status_code,
                'content': e.content,
            }
            return render(request, 'desk/desk_error.html',
                          context=error_context,
                          status=e.status_code)

    def get_object(self, queryset=None):
        """Look up the Desk customer via the OSF user's first email address."""
        customer = OSFUser.load(self.kwargs.get('user_id', None))
        desk = DeskClient(self.request.user)
        return desk.find_customer({'email': customer.emails[0]})

    def get_context_data(self, **kwargs):
        """Add the user id and a deep link into the Desk agent UI."""
        kwargs.setdefault('user_id', self.kwargs.get('user_id'))
        kwargs.setdefault('desk_link', 'https://{}.desk.com/web/agent/customer/'.format(DeskClient.SITE_NAME))
        return super(DeskCustomer, self).get_context_data(**kwargs)
| 39.444444 | 118 | 0.574366 |
cde8c080a02c6726af83f2095f552e89c4762d27 | 2,629 | py | Python | assignments/10_exception_handling/10-4_monthly_sales/commands.py | MrDDaye/cna_cp1855 | 5b83f5877d373e9499e379ba93b04c394c13db5f | [
"MIT"
] | null | null | null | assignments/10_exception_handling/10-4_monthly_sales/commands.py | MrDDaye/cna_cp1855 | 5b83f5877d373e9499e379ba93b04c394c13db5f | [
"MIT"
] | null | null | null | assignments/10_exception_handling/10-4_monthly_sales/commands.py | MrDDaye/cna_cp1855 | 5b83f5877d373e9499e379ba93b04c394c13db5f | [
"MIT"
] | null | null | null | """Monthly sales program command functions."""
from sys import exit
from csv import reader, writer
def read_sales_file(file: str) -> list[list[str]]:
    """Read the monthly sales CSV file and return its rows.

    Args:
        file: Path to the monthly sales CSV (from configuration).

    Returns:
        The parsed rows, each a ``[month, amount]`` list of strings.
        (Bug fix: the annotation previously claimed ``-> None`` even though
        the rows are returned and used by every caller.)

    Exits the program with a message if the file does not exist.
    """
    try:
        with open(file, encoding='utf-8') as monthly_sales_file:
            # Materialize before the ``with`` block closes the file.
            return list(reader(monthly_sales_file))
    except FileNotFoundError:
        print('Monthly Sales file not found!')
        print('Exiting program. Bye!')
        exit()
def view_monthly_sales(sales: list[list[str]]) -> None:
    """Write one line per month to stdout: the month name and its amount."""
    for row in sales:
        print(f'{row[0]} - {row[-1]}')
def view_yearly_summary(sales: list[list[str]]) -> None:
    """Print the yearly sales total followed by the rounded monthly average."""
    yearly_total = calculate_yearly_total(sales)
    monthly_average = calculate_monthly_average(yearly_total, sales)
    print(f'Yearly total: {yearly_total}')
    print(f'Monthly average: {round(monthly_average, 2)}')
def calculate_yearly_total(sales: list[list[str]]) -> int:
    """Sum the sales amounts across every month in the data."""
    return sum(_convert_monthly_sales_to_int(row) for row in sales)
def _convert_monthly_sales_to_int(row: list[str]) -> int:
"""Convert sales value of row to int or 0 if invalid format."""
try:
return int(row[-1])
except ValueError:
print(f'Using sales amount of 0 for {row[0]}.')
return 0
def calculate_monthly_average(total: int, sales: list[list[str]]) -> float:
    """Return the average monthly sales amount.

    Args:
        total: The pre-computed yearly total.
        sales: The month rows the total was computed from.

    Returns:
        ``total / len(sales)``, or ``0.0`` when there are no rows —
    guarding the empty case avoids a ZeroDivisionError when the sales
    file contains no data.
    """
    if not sales:
        return 0.0
    return total / len(sales)
def edit_month(sales: list[list[str]], file: str) -> None:
    """Prompt for a three-letter month abbreviation and update its sales.

    Re-prompts (via recursion) until a valid month is entered; on success the
    row is updated in place and the whole data set is persisted to ``file``.
    """
    try:
        month_abbreviation: str = input('Three-letter Month: ')
        for row in sales:
            if row[0] == month_abbreviation.capitalize():
                new_amount: str = input('Sales Amount: ')
                row[-1] = new_amount
                _save_to_csv(file, sales)
                print(f'Sales amount for {row[0]} was modified.')
                return
        raise NameError('Not a valid month abbreviation')
    except NameError as exception:
        print(exception)
        # Bug fix: the retry call previously omitted ``file``
        # (``edit_month(sales)``), raising TypeError instead of re-prompting.
        return edit_month(sales, file)
def _save_to_csv(file: str, sales: list[list[str]]) -> None:
"""Save sales data to target file from configuration."""
with open(file, 'w', newline='', encoding='utf-8') as monthly_sales_file:
csv = writer(monthly_sales_file)
csv.writerows(sales) | 38.661765 | 78 | 0.627235 |
2137a8f131ba9131c811a82159f90ea8f807edc5 | 1,826 | py | Python | multiview/coherence.py | zhongerqiandan/OpenDialog | f478b2a912c8c742da5ced510ac40da59217ddb3 | [
"MIT"
] | 98 | 2020-07-16T06:27:29.000Z | 2022-03-12T15:21:51.000Z | multiview/coherence.py | zhongerqiandan/OpenDialog | f478b2a912c8c742da5ced510ac40da59217ddb3 | [
"MIT"
] | 2 | 2020-07-22T12:00:17.000Z | 2021-02-24T01:19:14.000Z | multiview/coherence.py | gmftbyGMFTBY/OpenDialog | 8eb56b7a21cea1172131db7a56d2656364144771 | [
"MIT"
] | 19 | 2020-07-16T08:36:09.000Z | 2021-09-14T05:36:54.000Z | from .header import *
'''
COHERENCE(BERT-RUBER) 指标本质上就是一个检索模型
TODO:
1. 直接加载BERT检索模型作为BERT-RUBER
2. 使用NLI迁移后训练的BERT检索模型
3. 人工相似度评价,之后如果结果理想,可以直接使用BERT-RUBER做后期迭代的依据
'''
class COHERENCE(RetrievalBaseAgent):
    '''
    Coherence scorer (BERT-RUBER style): a BERT retrieval model used purely
    for inference/scoring — it is never trained here.
    '''
    def __init__(self):
        super(COHERENCE, self).__init__(searcher=False)
        self.vocab = BertTokenizer(vocab_file='data/vocab/vocab_small')
        self.model = BERTRetrieval()
        self.pad = 0    # padding token id used when batching variable-length inputs
        if torch.cuda.is_available():
            self.model.cuda()
    def reload_model(self, state_dict):
        # Load pretrained retrieval weights to score with.
        self.model.load_state_dict(state_dict)
        print(f'[!] reload the coherence model parameters')
    @torch.no_grad()
    def scores(self, msgs, resps):
        '''
        msgs: {context}[SEP]{response}, a batch of the pair of context and response
        '''
        msgs = [f'{m} [SEP] {r}' for m, r in zip(msgs, resps)]
        # Keep only the last 300 tokens of each encoded pair.
        ids = [torch.LongTensor(self.vocab.encode(i)[-300:]) for i in msgs]
        ids = pad_sequence(ids, batch_first=True, padding_value=self.pad)
        if torch.cuda.is_available():
            ids = ids.cuda()    # [batch, seq]
        output = self.model(ids)
        output = F.softmax(output, dim=-1)[:, 1]    # [batch] gather the positive scores
        output = output.cpu().tolist()
        return output    # [batch]
    @torch.no_grad()
    def scores_(self, cid, rid):
        # Bug fix: removed a leftover ``ipdb.set_trace()`` debugger breakpoint
        # that froze any non-interactive run reaching this method.
        cid = torch.cat((cid, rid), dim=1)    # [batch, seq]
        output = self.model(cid)
        output = F.softmax(output, dim=-1)[:, 1]
        output = output.cpu().tolist()
        return output
# Smoke test: instantiate the scorer and load a checkpoint.
if __name__ == "__main__":
    model = COHERENCE()
    # run this in the root path: python -m multiview.coherence
    # NOTE(review): ``load_model`` is presumably inherited from
    # RetrievalBaseAgent (only ``reload_model`` is defined above) — confirm.
    model.load_model('ckpt/zh50w/bertretrieval/best.pt')
| 32.035088 | 88 | 0.610624 |
d8cb571021b775ef52011d4f7a01b45a2e69a2a9 | 3,461 | py | Python | sidhulabs/elastic/documents.py | sidhulabs/sidhulabs-py | f3b6cc6dbd12dce3179bf8b129cfc002bc366d83 | [
"Apache-2.0"
] | null | null | null | sidhulabs/elastic/documents.py | sidhulabs/sidhulabs-py | f3b6cc6dbd12dce3179bf8b129cfc002bc366d83 | [
"Apache-2.0"
] | null | null | null | sidhulabs/elastic/documents.py | sidhulabs/sidhulabs-py | f3b6cc6dbd12dce3179bf8b129cfc002bc366d83 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from collections import deque
from typing import Any, Dict, Iterable, List
from elasticsearch import Elasticsearch
from elasticsearch.helpers import parallel_bulk, scan
def get_all(es_client: Elasticsearch, index: str, **kwargs) -> Iterable[Dict[str, Any]]:
    """
    Iterate over every document stored in an index.

    Parameters
    ----------
    es_client : Elasticsearch
        Elasticsearch client.
    index : str
        Elasticsearch index
    **kwargs:
        Keyword arguments forwarded to `scan` from elasticsearch.helpers.

    Returns
    -------
    Iterable[Dict[str, Any]]
        The scanned documents, yielded lazily.

    Examples
    --------
    >>> from sidhulabs.elastic.documents import get_all
    >>> es_client = get_elastic_client("https://elastic.sidhulabs.ca:443")
    >>> get_all(es_client, "test-index")
    """
    return scan(es_client, index=index, **kwargs)
def insert(es_client: Elasticsearch, index: str, docs: Dict[str, Any] | List[Dict[str, Any]], **kwargs) -> Any:
    """
    Insert one document or a batch of documents into an index.

    A single ``dict`` is indexed directly via the client; a ``list`` is
    streamed through ``parallel_bulk``. ``kwargs`` are forwarded to
    whichever call is used.

    Parameters
    ----------
    es_client : Elasticsearch
        Elasticsearch client.
    index : str
        Index name.
    docs : Dict[str, Any] or List[Dict[str, Any]]
        Document(s) to insert.
    **kwargs
        Keyword arguments.

    Examples
    --------
    >>> from sidhulabs.elastic.documents import insert
    >>> es_client = get_elastic_client("https://elastic.sidhulabs.ca:443")
    >>> insert(es_client, "test-index", {"id": 1, "name": "John Doe"})
    >>> insert(es_client, "test-index", [{"id": 1, "name": "John Doe"}, {"id": 2, "name": "Jane Doe"}])
    """
    if isinstance(docs, dict):
        return es_client.index(index=index, body=docs, **kwargs)
    if isinstance(docs, list):
        # deque(maxlen=0) drains the parallel_bulk generator without storing results.
        return deque(parallel_bulk(client=es_client, actions=docs, index=index, **kwargs), maxlen=0)
    raise ValueError("`docs` must be a list or a dict.")
def delete(es_client: Elasticsearch, index: str, doc_ids: List[str | int] | str | int, **kwargs) -> Any:
    """
    Delete one or many documents from an index by id.

    A single ``str``/``int`` id is deleted via the client directly; a
    ``list`` of ids is streamed through ``parallel_bulk`` as delete
    actions. ``kwargs`` are forwarded to whichever call is used.

    Parameters
    ----------
    es_client : Elasticsearch
        Elasticsearch client.
    index : str
        Index name.
    doc_ids : List[str | int] or str or int
        Document id(s) to delete.
    **kwargs
        Keyword arguments.

    Examples
    --------
    >>> from sidhulabs.elastic.documents import delete
    >>> es_client = get_elastic_client("https://elastic.sidhulabs.ca:443")
    >>> delete(es_client=es_client, index="test-index", doc_ids=1)
    >>> delete(es_client=es_client, index="test-index", doc_ids=[1, 2])
    """
    if isinstance(doc_ids, (str, int)):
        return es_client.delete(index=index, id=doc_ids, **kwargs)
    if isinstance(doc_ids, list):
        actions = [{"_op_type": "delete", "_id": doc_id} for doc_id in doc_ids]
        # deque(maxlen=0) drains the parallel_bulk generator without storing results.
        return deque(parallel_bulk(client=es_client, actions=actions, index=index, **kwargs), maxlen=0)
    raise ValueError("`doc_ids` must be a list, string, or integer.")
| 31.18018 | 111 | 0.6423 |
8fe6a7e3806b3106b3876702bcf495323f32ce40 | 2,566 | py | Python | docker_registry/lib/xtarfile.py | mfojtik/openshift-docker-registry | dea0fbe1ab3b279d98bc3c5fcfdafa14976287a3 | [
"Apache-2.0"
] | 2 | 2015-01-20T09:47:15.000Z | 2015-11-05T23:51:51.000Z | docker_registry/lib/xtarfile.py | mfojtik/openshift-docker-registry | dea0fbe1ab3b279d98bc3c5fcfdafa14976287a3 | [
"Apache-2.0"
] | null | null | null | docker_registry/lib/xtarfile.py | mfojtik/openshift-docker-registry | dea0fbe1ab3b279d98bc3c5fcfdafa14976287a3 | [
"Apache-2.0"
] | null | null | null | '''
This is __proc_pax from ./Lib/tarfile.py from v2.7.6
catching raw (non-utf8) bytes to support some xattr headers in tar archives
This is for the use-case of reading the tar archive, not for the use case of
interacting with inodes on the filesystem that have xattr's.
-- vbatts
'''
import re
import tarfile
def _proc_pax(self, filetar):
    """Process an extended or global header as described in
    POSIX.1-2001.

    Patched copy of CPython 2.7.6's ``TarInfo._proc_pax``: keyword/value
    decode failures are tolerated (the raw bytes are kept) so archives
    carrying non-UTF-8 pax/xattr headers can still be read.
    """
    # Read the header information.
    buf = filetar.fileobj.read(self._block(self.size))
    # A pax header stores supplemental information for either
    # the following file (extended) or all following files
    # (global).
    if self.type == tarfile.XGLTYPE:
        pax_headers = filetar.pax_headers
    else:
        pax_headers = filetar.pax_headers.copy()
    # Parse pax header information. A record looks like that:
    # "%d %s=%s\n" % (length, keyword, value). length is the size
    # of the complete record including the length field itself and
    # the newline. keyword and value are both UTF-8 encoded strings.
    regex = re.compile(r"(\d+) ([^=]+)=", re.U)
    pos = 0
    while True:
        match = regex.match(buf, pos)
        if not match:
            break
        length, keyword = match.groups()
        length = int(length)
        value = buf[match.end(2) + 1:match.start(1) + length - 1]
        # Divergence from stock CPython: both decodes below swallow errors
        # instead of propagating, to preserve raw xattr header bytes.
        try:
            keyword = keyword.decode("utf8")
        except Exception:
            # just leave the raw bytes
            pass
        try:
            value = value.decode("utf8")
        except Exception:
            # just leave the raw bytes
            pass
        pax_headers[keyword] = value
        pos += length
    # Fetch the next header.
    # NOTE: ``next`` deliberately shadows the builtin here; kept as-is to
    # stay close to the upstream tarfile source.
    try:
        next = self.fromtarfile(filetar)
    except tarfile.HeaderError:
        raise tarfile.SubsequentHeaderError("missing or bad subsequent header")
    if self.type in (tarfile.XHDTYPE, tarfile.SOLARIS_XHDTYPE):
        # Patch the TarInfo object with the extended header info.
        next._apply_pax_info(pax_headers, filetar.encoding, filetar.errors)
        next.offset = self.offset
        if "size" in pax_headers:
            # If the extended header replaces the size field,
            # we need to recalculate the offset where the next
            # header starts.
            offset = next.offset_data
            if next.isreg() or next.type not in tarfile.SUPPORTED_TYPES:
                offset += next._block(next.size)
            filetar.offset = offset
    return next
# Monkey-patch tarfile globally: importing this module makes every TarInfo
# use the tolerant pax-header parser defined above.
tarfile.TarInfo._proc_pax = _proc_pax
| 31.292683 | 79 | 0.630553 |
0ac383bd385f6d293af5aa11dd72965ba9a21a64 | 9,087 | py | Python | var/spack/repos/builtin/packages/slepc/package.py | NeuralEnsemble/spack | bed3a694985466544aa16a19af0f0a13221b51a9 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/slepc/package.py | NeuralEnsemble/spack | bed3a694985466544aa16a19af0f0a13221b51a9 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6 | 2022-01-08T08:41:11.000Z | 2022-03-14T19:28:07.000Z | var/spack/repos/builtin/packages/slepc/package.py | foeroyingur/spack | 5300cbbb2e569190015c72d0970d25425ea38647 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
from spack import *
class Slepc(Package, CudaPackage, ROCmPackage):
    """Scalable Library for Eigenvalue Problem Computations."""
    homepage = "https://slepc.upv.es"
    url = "https://slepc.upv.es/download/distrib/slepc-3.16.1.tar.gz"
    git = "https://gitlab.com/slepc/slepc.git"
    maintainers = ['joseeroman', 'balay']
    tags = ['e4s']
    # The stand-alone smoke test below compiles a C example, so a compiler
    # must be available at test time.
    test_requires_compiler = True
    # Known releases, newest first; ``deprecated=True`` entries are kept so
    # existing environments can still reproduce old builds.
    version('main', branch='main')
    version('3.16.1', sha256='b1a8ad8db1ad88c60616e661ab48fc235d5a8b6965023cb6d691b9a2cfa94efb')
    version('3.16.0', sha256='be7292b85430e52210eb389c4f434b67164e96d19498585e82d117e850d477f4')
    version('3.15.2', sha256='15fd317c4dd07bb41a994ad4c27271a6675af5f2abe40b82a64a27eaae2e632a')
    version('3.15.1', sha256='9c7c3a45f0d9df51decf357abe090ef05114c38a69b7836386a19a96fb203aea')
    version('3.15.0', sha256='e53783ae13acadce274ea65c67186b5ab12332cf17125a694e21d598aa6b5f00')
    version('3.14.2', sha256='3e54578dda1f4c54d35ac27d02f70a43f6837906cb7604dbcec0e033cfb264c8')
    version('3.14.1', sha256='cc78a15e34d26b3e6dde003d4a30064e595225f6185c1975bbd460cb5edd99c7')
    version('3.14.0', sha256='37f8bb270169d1d3f5d43756ac8929d56204e596bd7a78a7daff707513472e46')
    version('3.13.4', sha256='ddc9d58e1a4413218f4e67ea3b255b330bd389d67f394403a27caedf45afa496')
    version('3.13.3', sha256='23d179c22b4b2f22d29fa0ac0a62f5355a964d3bc245a667e9332347c5aa8f81')
    version('3.13.2', sha256='04cb8306cb5d4d990509710d7f8ae949bdc2c7eb850930b8d0b0b5ca99f6c70d')
    version('3.13.1', sha256='f4a5ede4ebdee5e15153ce31c1421209c7b794bd94be1430018615fb0838b879')
    version('3.13.0', sha256='f1f3c2d13a1a6914e7bf4746d38761e107ea866f50927b639e4ad5918dd1e53b')
    version('3.12.2', sha256='a586ce572a928ed87f04961850992a9b8e741677397cbaa3fb028323eddf4598')
    version('3.12.1', sha256='a1cc2e93a81c9f6b86abd81022c9d64b0dc2161e77fb54b987f963bc292e286d')
    version('3.12.0', sha256='872831d961cf76389fafb7553231ae1a6676555850c98ea0e893c06f596b2e9e')
    version('3.11.2', sha256='cd6a73ac0c9f689c12f2987000a7a28fa7df53fdc069fb59a2bb148699e741dd')
    version('3.11.1', sha256='4816070d4ecfeea6212c6944cee22dc7b4763df1eaf6ab7847cc5ac5132608fb')
    version('3.11.0', sha256='bf29043c311fe2c549a25e2b0835095723a3eebc1dff288a233b32913b5762a2')
    version('3.10.2', sha256='0594972293f6586458a54b7c1e1121b311a9c9449060355d52bb3bf09ad6812b', deprecated=True)
    version('3.10.1', sha256='f64787c8c2ab3d2f6db3c67d2bfe6ee84f741ce3dfde1d2f8221e131820a12a1', deprecated=True)
    version('3.10.0', sha256='069d7a579995e0be1567c5bc869251e29c00044369a786933ca3040149d0412a', deprecated=True)
    version('3.9.2', sha256='247585b3f8c10bf50b9464cb8ef7b5f22bead6f96524384897a37ec4146eb03e', deprecated=True)
    version('3.9.1', sha256='e174ea7c127d9161eef976b0288f0c56d443a58d6ab2dc8af1e8bd66f156ce17', deprecated=True)
    version('3.9.0', sha256='1f3930db56b4065aaf214ea758ddff1a70bf19d45544cbdfd19d2787db4bfe0b', deprecated=True)
    version('3.8.2', sha256='1e7d20d20eb26da307d36017461fe4a55f40e947e232739179dbe6412e22ed13', deprecated=True)
    version('3.8.0', sha256='c58ccc4e852d1da01112466c48efa41f0839649f3a265925788237d76cd3d963', deprecated=True)
    version('3.7.4', sha256='2fb782844e3bc265a8d181c3c3e2632a4ca073111c874c654f1365d33ca2eb8a', deprecated=True)
    version('3.7.3', sha256='3ef9bcc645a10c1779d56b3500472ceb66df692e389d635087d30e7c46424df9', deprecated=True)
    version('3.7.1', sha256='670216f263e3074b21e0623c01bc0f562fdc0bffcd7bd42dd5d8edbe73a532c2', deprecated=True)
    version('3.6.3', sha256='384939d009546db37bc05ed81260c8b5ba451093bf891391d32eb7109ccff876', deprecated=True)
    version('3.6.2', sha256='2ab4311bed26ccf7771818665991b2ea3a9b15f97e29fd13911ab1293e8e65df', deprecated=True)
    variant('arpack', default=True, description='Enables Arpack wrappers')
    variant('blopex', default=False, description='Enables BLOPEX wrappers')
    # NOTE: make sure PETSc and SLEPc use the same python.
    depends_on('python@2.6:2.8', type='build', when='@:3.10')
    depends_on('python@2.6:2.8,3.4:', type='build', when='@3.11:')
    # Cannot mix release and development versions of SLEPc and PETSc:
    # each SLEPc minor series is pinned to the matching PETSc series.
    depends_on('petsc@main', when='@main')
    depends_on('petsc@3.16.0:3.16', when='@3.16.0:3.16')
    depends_on('petsc@3.15.0:3.15', when='@3.15.0:3.15')
    depends_on('petsc@3.14.0:3.14', when='@3.14.0:3.14')
    depends_on('petsc@3.13.0:3.13', when='@3.13.0:3.13')
    depends_on('petsc@3.12.0:3.12', when='@3.12.0:3.12')
    depends_on('petsc@3.11.0:3.11', when='@3.11.0:3.11')
    depends_on('petsc@3.10.0:3.10', when='@3.10.0:3.10')
    depends_on('petsc@3.9.0:3.9', when='@3.9.0:3.9')
    depends_on('petsc@3.8.0:3.8', when='@3.8.0:3.8')
    depends_on('petsc@3.7:3.7.7', when='@3.7.1:3.7.4')
    depends_on('petsc@3.6.3:3.6.4', when='@3.6.2:3.6.3')
    depends_on('petsc+cuda', when='+cuda')
    depends_on('petsc+rocm', when='+rocm')
    depends_on('arpack-ng~mpi', when='+arpack^petsc~mpi~int64')
    depends_on('arpack-ng+mpi', when='+arpack^petsc+mpi~int64')
    patch('install_name_371.patch', when='@3.7.1')
    # Arpack can not be used with 64bit integers.
    conflicts('+arpack', when='@:3.12 ^petsc+int64')
    conflicts('+blopex', when='^petsc+int64')
    # BLOPEX sources are staged alongside SLEPc (see install(): it cannot be
    # built separately and linked) — tarball for <=3.12, git for >=3.13.
    resource(name='blopex',
             url='https://slepc.upv.es/download/external/blopex-1.1.2.tar.gz',
             sha256='0081ee4c4242e635a8113b32f655910ada057c59043f29af4b613508a762f3ac',
             destination=join_path('installed-arch-' + sys.platform + '-c-opt',
                                   'externalpackages'),
             when='@:3.12+blopex')
    resource(name='blopex',
             git='https://github.com/lobpcg/blopex',
             commit='6eba31f0e071f134a6e4be8eccfb8d9d7bdd5ac7',
             destination=join_path('installed-arch-' + sys.platform + '-c-opt',
                                   'externalpackages'),
             when='@3.13.0:+blopex')
    def install(self, spec, prefix):
        """Configure, build, (optionally) test, and install SLEPc."""
        # set SLEPC_DIR for installation
        # Note that one should set the current (temporary) directory instead
        # its symlink in spack/stage/ !
        os.environ['SLEPC_DIR'] = os.getcwd()
        if self.spec.satisfies('%cce'):
            # Cray compiler: force the gold linker via SLEPc's config script.
            filter_file('          flags = l',
                        '          flags = l\n          flags += ["-fuse-ld=gold"]',
                        'config/package.py')
        options = []
        if '+arpack' in spec:
            options.extend([
                '--with-arpack-dir=%s' % spec['arpack-ng'].prefix,
            ])
            # The configure flag name changed after the 3.12 series.
            if spec.satisfies('@:3.12'):
                arpackopt = '--with-arpack-flags'
            else:
                arpackopt = '--with-arpack-lib'
            if 'arpack-ng~mpi' in spec:
                options.extend([
                    arpackopt + '=-larpack'
                ])
            else:
                options.extend([
                    arpackopt + '=-lparpack,-larpack'
                ])
        # It isn't possible to install BLOPEX separately and link to it;
        # BLOPEX has to be downloaded with SLEPc at configure time
        if '+blopex' in spec:
            options.append('--download-blopex')
        python('configure', '--prefix=%s' % prefix, *options)
        make('MAKE_NP=%s' % make_jobs, parallel=False)
        if self.run_tests:
            make('test', parallel=False)
        make('install', parallel=False)
    def setup_run_environment(self, env):
        """Point the run environment at this SLEPc and its PETSc."""
        # set SLEPC_DIR & PETSC_DIR in the module file
        env.set('SLEPC_DIR', self.prefix)
        env.set('PETSC_DIR', self.spec['petsc'].prefix)
    def setup_dependent_build_environment(self, env, dependent_spec):
        # Set up SLEPC_DIR for dependent packages built with SLEPc
        env.set('SLEPC_DIR', self.prefix)
    def run_hello_test(self):
        """Run stand alone test: hello"""
        test_dir = self.test_suite.current_test_data_dir
        if not os.path.exists(test_dir):
            print('Skipping slepc test')
            return
        exe = 'hello'
        cc_exe = os.environ['CC']
        # First compile the example against the installed SLEPc/PETSc/MPI...
        self.run_test(exe=cc_exe,
                      options=['-I{0}'.format(self.prefix.include),
                               '-L', self.prefix.lib, '-l', 'slepc',
                               '-L', self.spec['petsc'].prefix.lib, '-l', 'petsc',
                               '-L', self.spec['mpi'].prefix.lib, '-l', 'mpi',
                               '-o', exe, join_path(test_dir, 'hello.c')],
                      purpose='test: compile {0} example'.format(exe),
                      work_dir=test_dir)
        # ...then run it and check its output.
        self.run_test(exe=exe,
                      options=[],
                      expected=['Hello world'],
                      purpose='test: run {0} example'.format(exe),
                      work_dir=test_dir)
    def test(self):
        """Entry point for the stand-alone smoke test."""
        self.run_hello_test()
| 49.655738 | 113 | 0.659514 |
2a2af6acec5efe3260d2341fc03ce4ce20d8b2e1 | 32,050 | py | Python | Lib/yp_test/test_iter.py | Syeberman/nohtyP | 59d7214a5a5474a03c54f45d79ad4fd037989a79 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/yp_test/test_iter.py | Syeberman/nohtyP | 59d7214a5a5474a03c54f45d79ad4fd037989a79 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/yp_test/test_iter.py | Syeberman/nohtyP | 59d7214a5a5474a03c54f45d79ad4fd037989a79 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | # Test iterators.
from yp import *
from yp_test import yp_unittest
from yp_test.support import run_unittest, TESTFN, unlink, cpython_only
import pickle
import collections.abc
# Extra assurance that we're not accidentally testing Python's data types
# (each shadowing stub fails loudly if a builtin constructor slips through)
def iter( *args, **kwargs ): raise NotImplementedError( "convert script to yp_iter here" )
def bytes( *args, **kwargs ): raise NotImplementedError( "convert script to yp_bytes here" )
def bytearray( *args, **kwargs ): raise NotImplementedError( "convert script to yp_bytearray here" )
def str( *args, **kwargs ): raise NotImplementedError( "convert script to yp_str here" )
def tuple( *args, **kwargs ): raise NotImplementedError( "convert script to yp_tuple here" )
def list( *args, **kwargs ): raise NotImplementedError( "convert script to yp_list here" )
def frozenset( *args, **kwargs ): raise NotImplementedError( "convert script to yp_frozenset here" )
def set( *args, **kwargs ): raise NotImplementedError( "convert script to yp_set here" )
def dict( *args, **kwargs ): raise NotImplementedError( "convert script to yp_dict here" )
# TODO same for yp_range, yp_min, yp_max, etc
# TODO yp_iter(x) throws TypeError if x not a ypObject
# Test result of triple loop (too big to inline)
# Equivalent to yp_list(itertools.product(range(3), repeat=3)).
TRIPLETS = yp_list(
    [(0, 0, 0), (0, 0, 1), (0, 0, 2),
     (0, 1, 0), (0, 1, 1), (0, 1, 2),
     (0, 2, 0), (0, 2, 1), (0, 2, 2),
     (1, 0, 0), (1, 0, 1), (1, 0, 2),
     (1, 1, 0), (1, 1, 1), (1, 1, 2),
     (1, 2, 0), (1, 2, 1), (1, 2, 2),
     (2, 0, 0), (2, 0, 1), (2, 0, 2),
     (2, 1, 0), (2, 1, 1), (2, 1, 2),
     (2, 2, 0), (2, 2, 1), (2, 2, 2)])
# Helper classes
class BasicIterClass:
    """Minimal iterator-protocol object yielding yp_int values 0 .. n-1."""
    def __init__(self, n):
        self.n = yp_int(n)
        self.i = yp_int(0)
    def __iter__(self):
        return self
    def __next__(self):
        value = self.i
        if value >= self.n:
            raise StopIteration
        self.i = value + 1
        return value
class IteratingSequenceClass:
    """Iterable (not an iterator): each __iter__ call yields a fresh BasicIterClass."""
    def __init__(self, n):
        self.n = yp_int(n)
    def __iter__(self):
        return BasicIterClass(self.n)
class SequenceClass:
    """Old-style sequence: iterable purely via __getitem__ raising IndexError."""
    def __init__(self, n):
        self.n = yp_int(n)
    def __getitem__(self, i):
        # Guard clause: out-of-range indices end the iteration.
        if not 0 <= i < self.n:
            raise IndexError
        return yp_int(i)
# Main test suite
class TestCase(yp_unittest.TestCase):
# Helper to check that an iterator returns a given sequence
def check_iterator(self, it, seq, pickle=True):
if pickle:
self.check_pickle(it, seq)
res = yp_list()
while 1:
try:
val = next(it)
except StopIteration:
break
res.append(val)
self.assertEqual(res, seq)
# Helper to check that a for loop generates a given sequence
def check_for_loop(self, expr, seq, pickle=True):
if pickle:
self.check_pickle(yp_iter(expr), seq)
res = yp_list()
for val in expr:
res.append(val)
self.assertEqual(res, seq)
# Helper to check picklability
@yp_unittest.skip_pickling
def check_pickle(self, itorg, seq):
d = pickle.dumps(itorg)
it = pickle.loads(d)
# Cannot assert type equality because dict iterators unpickle as list
# iterators.
# self.assertEqual(type(itorg), type(it))
self.assertTrue(isinstance(it, collections.abc.Iterator))
self.assertEqual(yp_list(it), seq)
it = pickle.loads(d)
try:
next(it)
except StopIteration:
return
d = pickle.dumps(it)
it = pickle.loads(d)
self.assertEqual(yp_list(it), seq[1:])
# Test basic use of iter() function
def test_iter_basic(self):
self.check_iterator(yp_iter(range(10)), yp_list(range(10)))
# Test that iter(iter(x)) is the same as iter(x)
def test_iter_idempotency(self):
seq = yp_list(range(10))
it = yp_iter(seq)
it2 = yp_iter(it)
self.assertIs(it, it2)
# Test that for loops over iterators work
def test_iter_for_loop(self):
self.check_for_loop(yp_iter(range(10)), yp_list(range(10)))
# Test several independent iterators over the same list
def test_iter_independence(self):
seq = range(3)
res = yp_list()
for i in yp_iter(seq):
for j in yp_iter(seq):
for k in yp_iter(seq):
res.append(yp_tuple((i, j, k)))
self.assertEqual(res, TRIPLETS)
# Test triple list comprehension using iterators
def test_nested_comprehensions_iter(self):
seq = range(3)
res = yp_list([yp_tuple((i, j, k))
for i in yp_iter(seq) for j in yp_iter(seq) for k in yp_iter(seq)])
self.assertEqual(res, TRIPLETS)
# Test triple list comprehension without iterators
def test_nested_comprehensions_for(self):
seq = range(3)
res = yp_list([yp_tuple((i, j, k)) for i in seq for j in seq for k in seq])
self.assertEqual(res, TRIPLETS)
# Test a class with __iter__ in a for loop
def test_iter_class_for(self):
self.check_for_loop(IteratingSequenceClass(10), yp_list(range(10)))
# Test a class with __iter__ with explicit iter()
def test_iter_class_iter(self):
self.check_iterator(yp_iter(IteratingSequenceClass(10)), yp_list(range(10)))
# Test for loop on a sequence class without __iter__
def test_seq_class_for(self):
self.check_for_loop(SequenceClass(10), yp_list(range(10)))
# Test iter() on a sequence class without __iter__
def test_seq_class_iter(self):
self.check_iterator(yp_iter(SequenceClass(10)), yp_list(range(10)))
# Test a new_style class with __iter__ but no next() method
def test_new_style_iter_class(self):
class IterClass(object):
def __iter__(self):
return self
self.assertRaises(TypeError, yp_iter, IterClass())
# Test two-argument iter() with callable instance
def test_iter_callable(self):
class C:
def __init__(self):
self.i = 0
def __call__(self):
i = self.i
self.i = i + 1
if i > 100:
raise IndexError # Emergency stop
return i
self.check_iterator(yp_iter(C(), 10), yp_list(range(10)), pickle=False)
# Test two-argument iter() with function
def test_iter_function(self):
def spam(state=[0]):
i = state[0]
state[0] = i+1
return i
self.check_iterator(yp_iter(spam, 10), yp_list(range(10)), pickle=False)
# Test two-argument iter() with function that raises StopIteration
def test_iter_function_stop(self):
def spam(state=[0]):
i = state[0]
if i == 10:
raise StopIteration
state[0] = i+1
return i
self.check_iterator(yp_iter(spam, 20), yp_list(range(10)), pickle=False)
# Test exception propagation through function iterator
def test_exception_function(self):
def spam(state=[0]):
i = state[0]
state[0] = i+1
if i == 10:
raise RuntimeError
return i
res = yp_list()
try:
for x in yp_iter(spam, 20):
res.append(x)
except RuntimeError:
self.assertEqual(res, yp_list(range(10)))
else:
self.fail("should have raised RuntimeError")
# Test exception propagation through sequence iterator
def test_exception_sequence(self):
class MySequenceClass(SequenceClass):
def __getitem__(self, i):
if i == 10:
raise RuntimeError
return SequenceClass.__getitem__(self, i)
res = yp_list()
try:
for x in MySequenceClass(20):
res.append(x)
except RuntimeError:
self.assertEqual(res, yp_list(range(10)))
else:
self.fail("should have raised RuntimeError")
# Test for StopIteration from __getitem__
def test_stop_sequence(self):
class MySequenceClass(SequenceClass):
def __getitem__(self, i):
if i == 10:
raise StopIteration
return SequenceClass.__getitem__(self, i)
self.check_for_loop(MySequenceClass(20), yp_list(range(10)), pickle=False)
# Test a big range
def test_iter_big_range(self):
self.check_for_loop(yp_iter(range(10000)), yp_list(range(10000)))
# Test an empty list
def test_iter_empty(self):
self.check_for_loop(yp_iter(yp_list()), [])
# Test a tuple
def test_iter_tuple(self):
self.check_for_loop(yp_iter(yp_tuple((0,1,2,3,4,5,6,7,8,9))), yp_list(range(10)))
# Test a range
def test_iter_range(self):
self.check_for_loop(yp_iter(range(10)), yp_list(range(10)))
# Test a string
def test_iter_string(self):
self.check_for_loop(yp_iter(yp_str("abcde")), ["a", "b", "c", "d", "e"])
# Test a directory
def test_iter_dict(self):
dict = yp_dict()
for i in range(10):
dict[i] = None
self.check_for_loop(dict, yp_list(dict.keys()))
# Test a file
@yp_unittest.skip_files
def test_iter_file(self):
f = open(TESTFN, "w")
try:
for i in range(5):
f.write("%d\n" % i)
finally:
f.close()
f = open(TESTFN, "r")
try:
self.check_for_loop(f, ["0\n", "1\n", "2\n", "3\n", "4\n"], pickle=False)
self.check_for_loop(f, [], pickle=False)
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
# Test list()'s use of iterators.
def test_builtin_list(self):
self.assertEqual(yp_list(SequenceClass(5)), yp_list(range(5)))
self.assertEqual(yp_list(SequenceClass(0)), [])
self.assertEqual(yp_list(yp_tuple()), [])
d = yp_dict({"one": 1, "two": 2, "three": 3})
self.assertEqual(yp_list(d), yp_list(d.keys()))
self.assertRaises(TypeError, yp_list, yp_list)
self.assertRaises(TypeError, yp_list, 42)
f = open(TESTFN, "w")
try:
for i in range(5):
f.write("%d\n" % i)
finally:
f.close()
f = open(TESTFN, "r")
try:
self.assertEqual(yp_list(f), ["0\n", "1\n", "2\n", "3\n", "4\n"])
f.seek(0, 0)
self.assertEqual(yp_list(f),
["0\n", "1\n", "2\n", "3\n", "4\n"])
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
# Test tuples()'s use of iterators.
def test_builtin_tuple(self):
self.assertEqual(yp_tuple(SequenceClass(5)), (0, 1, 2, 3, 4))
self.assertEqual(yp_tuple(SequenceClass(0)), ())
self.assertEqual(yp_tuple(yp_list()), ())
self.assertEqual(yp_tuple(), ())
self.assertEqual(yp_tuple(yp_str("abc")), ("a", "b", "c"))
d = yp_dict({"one": 1, "two": 2, "three": 3})
self.assertEqual(yp_tuple(d), yp_tuple(d.keys()))
self.assertRaises(TypeError, yp_tuple, yp_list)
self.assertRaises(TypeError, yp_tuple, 42)
f = open(TESTFN, "w")
try:
for i in range(5):
f.write("%d\n" % i)
finally:
f.close()
f = open(TESTFN, "r")
try:
self.assertEqual(yp_tuple(f), ("0\n", "1\n", "2\n", "3\n", "4\n"))
f.seek(0, 0)
self.assertEqual(yp_tuple(f),
("0\n", "1\n", "2\n", "3\n", "4\n"))
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
    # Test filter()'s use of iterators.
    @yp_unittest.skip_filter
    def test_builtin_filter(self):
        """filter() must honor each element's truth value while iterating."""
        self.assertEqual(yp_list(filter(None, SequenceClass(5))),
                         yp_list(range(1, 5)))
        self.assertEqual(yp_list(filter(None, SequenceClass(0))), [])
        self.assertEqual(yp_list(filter(None, ())), [])
        self.assertEqual(yp_list(filter(None, "abc")), ["a", "b", "c"])
        d = yp_dict({"one": 1, "two": 2, "three": 3})
        self.assertEqual(yp_list(filter(None, d)), yp_list(d.keys()))
        self.assertRaises(TypeError, filter, None, yp_list)
        self.assertRaises(TypeError, filter, None, 42)
        #class Boolean:
        #    def __init__(self, truth):
        #        self.truth = truth
        #    def __bool__(self):
        #        return self.truth
        #bTrue = Boolean(True)
        #bFalse = Boolean(False)
        # Stand-ins for the user-defined Boolean above: a non-empty dict is
        # truthy, an empty one is falsy.
        bTrue = yp_dict({1:1})
        bFalse = yp_dict()
        class Seq:
            def __init__(self, *args):
                self.vals = args
            def __iter__(self):
                class SeqIter:
                    def __init__(self, vals):
                        self.vals = vals
                        self.i = 0
                    def __iter__(self):
                        return self
                    def __next__(self):
                        i = self.i
                        self.i = i + 1
                        if i < len(self.vals):
                            return self.vals[i]
                        else:
                            raise StopIteration
                return SeqIter(self.vals)
        seq = Seq(*([bTrue, bFalse] * 25))
        self.assertEqual(yp_list(filter(lambda x: not x, seq)), [bFalse]*25)
        self.assertEqual(yp_list(filter(lambda x: not x, yp_iter(seq))), [bFalse]*25)
    # Test max() and min()'s use of iterators.
    @yp_unittest.skip_min
    def test_builtin_max_min(self):
        """max()/min() must consume arbitrary iterators, including files."""
        self.assertEqual(max(SequenceClass(5)), 4)
        self.assertEqual(min(SequenceClass(5)), 0)
        self.assertEqual(max(8, -1), 8)
        self.assertEqual(min(8, -1), -1)
        d = yp_dict({"one": 1, "two": 2, "three": 3})
        # Over a dict, max/min compare the (string) keys.
        self.assertEqual(max(d), "two")
        self.assertEqual(min(d), "one")
        self.assertEqual(max(d.values()), 3)
        self.assertEqual(min(yp_iter(d.values())), 1)
        f = open(TESTFN, "w")
        try:
            f.write("medium line\n")
            f.write("xtra large line\n")
            f.write("itty-bitty line\n")
        finally:
            f.close()
        f = open(TESTFN, "r")
        try:
            self.assertEqual(min(f), "itty-bitty line\n")
            f.seek(0, 0)
            self.assertEqual(max(f), "xtra large line\n")
        finally:
            f.close()
        try:
            unlink(TESTFN)
        except OSError:
            pass
# Test map()'s use of iterators.
@yp_unittest.skip_map
def test_builtin_map(self):
self.assertEqual(yp_list(map(lambda x: x+1, SequenceClass(5))),
yp_list(range(1, 6)))
d = yp_dict({"one": 1, "two": 2, "three": 3})
self.assertEqual(yp_list(map(lambda k, d=d: (k, d[k]), d)),
yp_list(d.items()))
dkeys = yp_list(d.keys())
expected = [(i < len(d) and dkeys[i] or None,
i,
i < len(d) and dkeys[i] or None)
for i in range(3)]
f = open(TESTFN, "w")
try:
for i in range(10):
f.write("xy" * i + "\n") # line i has len 2*i+1
finally:
f.close()
f = open(TESTFN, "r")
try:
self.assertEqual(yp_list(map(len, f)), yp_list(range(1, 21, 2)))
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
    # Test zip()'s use of iterators.
    @yp_unittest.skip_zip
    def test_builtin_zip(self):
        """zip() must work with sequences, infinite iterators, files, and
        objects whose __len__ disagrees with their actual length."""
        self.assertEqual(yp_list(zip()), [])
        self.assertEqual(yp_list(zip(*yp_list())), [])
        self.assertEqual(yp_list(zip(*yp_list([(1, 2), 'ab']))), [(1, 'a'), (2, 'b')])
        self.assertRaises(TypeError, zip, None)
        self.assertRaises(TypeError, zip, range(10), 42)
        self.assertRaises(TypeError, zip, range(10), zip)
        self.assertEqual(yp_list(zip(IteratingSequenceClass(3))),
                         [(0,), (1,), (2,)])
        self.assertEqual(yp_list(zip(SequenceClass(3))),
                         [(0,), (1,), (2,)])
        d = yp_dict({"one": 1, "two": 2, "three": 3})
        self.assertEqual(yp_list(d.items()), yp_list(zip(d, d.values())))
        # Generate all ints starting at constructor arg.
        class IntsFrom:
            def __init__(self, start):
                self.i = start
            def __iter__(self):
                return self
            def __next__(self):
                i = self.i
                self.i = i+1
                return i
        f = open(TESTFN, "w")
        try:
            f.write("a\n" "bbb\n" "cc\n")
        finally:
            f.close()
        f = open(TESTFN, "r")
        try:
            # zip stops at the shortest input (the 3-line file), even though
            # the IntsFrom iterators are infinite.
            self.assertEqual(yp_list(zip(IntsFrom(0), f, IntsFrom(-100))),
                             [(0, "a\n", -100),
                              (1, "bbb\n", -99),
                              (2, "cc\n", -98)])
        finally:
            f.close()
        try:
            unlink(TESTFN)
        except OSError:
            pass
        self.assertEqual(yp_list(zip(range(5))), [(i,) for i in range(5)])
        # Classes that lie about their lengths.
        class NoGuessLen5:
            def __getitem__(self, i):
                if i >= 5:
                    raise IndexError
                return i
        class Guess3Len5(NoGuessLen5):
            def __len__(self):
                return 3
        class Guess30Len5(NoGuessLen5):
            def __len__(self):
                return 30
        def lzip(*args):
            return yp_list(zip(*args))
        self.assertEqual(yp_len(Guess3Len5()), 3)
        self.assertEqual(yp_len(Guess30Len5()), 30)
        # zip must trust iteration (IndexError at i == 5), not __len__.
        self.assertEqual(lzip(NoGuessLen5()), lzip(range(5)))
        self.assertEqual(lzip(Guess3Len5()), lzip(range(5)))
        self.assertEqual(lzip(Guess30Len5()), lzip(range(5)))
        expected = [(i, i) for i in range(5)]
        for x in NoGuessLen5(), Guess3Len5(), Guess30Len5():
            for y in NoGuessLen5(), Guess3Len5(), Guess30Len5():
                self.assertEqual(lzip(x, y), expected)
    @yp_unittest.skip_user_defined_types
    def test_unicode_join_endcase(self):
        """str.join over a one-shot iterator that injects a str mid-stream."""
        # This class inserts a Unicode object into its argument's natural
        # iteration, in the 3rd position.
        class OhPhooey:
            def __init__(self, seq):
                self.it = yp_iter(seq)
                self.i = 0
            def __iter__(self):
                return self
            def __next__(self):
                i = self.i
                self.i = i+1
                if i == 2:
                    return yp_str("fooled you!")
                return next(self.it)
        f = open(TESTFN, "w")
        try:
            f.write("a\n" + "b\n" + "c\n")
        finally:
            f.close()
        f = open(TESTFN, "r")
        # Nasty: string.join(s) can't know whether unicode.join() is needed
        # until it's seen all of s's elements. But in this case, f's
        # iterator cannot be restarted. So what we're testing here is
        # whether string.join() can manage to remember everything it's seen
        # and pass that on to unicode.join().
        try:
            got = yp_str(" - ").join(OhPhooey(f))
            self.assertEqual(got, "a\n - b\n - fooled you! - c\n")
        finally:
            f.close()
        try:
            unlink(TESTFN)
        except OSError:
            pass
    # Test iterators with 'x in y' and 'x not in y'.
    def test_in_and_not_in(self):
        """Membership tests must iterate sequences, dict views and files."""
        for sc5 in IteratingSequenceClass(5), SequenceClass(5):
            for i in range(5):
                self.assertIn(yp_int(i), sc5)
            for i in (yp_str("abc"), yp_int(-1), yp_int(5), yp_float(42.42), yp_tuple((3, 4)),
                      yp_list(), yp_dict({1: 1})):
                self.assertNotIn(i, sc5)
        self.assertRaises(TypeError, lambda: 3 in 12)
        self.assertRaises(TypeError, lambda: 3 not in map)
        d = yp_dict({"one": 1, "two": 2, "three": 3, 1.1: 2.1})
        for k in d:
            self.assertIn(k, d)
            self.assertNotIn(k, d.values())
        for v in d.values():
            self.assertIn(v, d.values())
            self.assertNotIn(v, d)
        for k, v in d.items():
            self.assertIn(yp_tuple((k, v)), d.items())
            self.assertNotIn(yp_tuple((v, k)), d.items())
        f = open(TESTFN, "w")
        try:
            f.write("a\n" "b\n" "c\n")
        finally:
            f.close()
        f = open(TESTFN, "r")
        try:
            for chunk in yp_str("abc"):
                # Membership in a file compares whole lines, hence the
                # rewind before each probe.
                f.seek(0, 0)
                self.assertNotIn(chunk, f)
                f.seek(0, 0)
                self.assertIn((chunk + "\n"), f)
        finally:
            f.close()
        try:
            unlink(TESTFN)
        except OSError:
            pass
    # Test iterators with operator.countOf (PySequence_Count).
    def test_countOf(self):
        """operator.countOf must iterate lists, tuples, strings and dicts."""
        from operator import countOf
        with self.nohtyPCheck(enabled=False):
            self.assertEqual(countOf(yp_list([1,2,2,3,2,5]), 2), 3)
            self.assertEqual(countOf(yp_tuple((1,2,2,3,2,5)), 2), 3)
            self.assertEqual(countOf(yp_str("122325"), "2"), 3)
            self.assertEqual(countOf(yp_str("122325"), "6"), 0)
            self.assertRaises(TypeError, countOf, yp_int(42), 1)
            self.assertRaises(TypeError, countOf, yp_func_chr, yp_func_chr)
        d = yp_dict({"one": 3, "two": 3, "three": 3, 1.1: 2.2})
        for k in d:
            # Dict iteration yields keys, each of which appears exactly once.
            self.assertEqual(countOf(d, k), 1)
        self.assertEqual(countOf(d.values(), 3), 3)
        self.assertEqual(countOf(d.values(), 2.2), 1)
        self.assertEqual(countOf(d.values(), 1.1), 0)
@yp_unittest.skip_files
def test_countOf_file(self):
f = open(TESTFN, "w")
try:
f.write("a\n" "b\n" "c\n" "b\n")
finally:
f.close()
f = open(TESTFN, "r")
try:
for letter, count in yp_tuple((("a", 1), ("b", 2), ("c", 1), ("d", 0))):
f.seek(0, 0)
self.assertEqual(countOf(f, letter + "\n"), count)
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
    # Test iterators with operator.indexOf (PySequence_Index).
    def test_indexOf(self):
        """operator.indexOf must find the first match or raise ValueError."""
        from operator import indexOf
        with self.nohtyPCheck(enabled=False):
            self.assertEqual(indexOf(yp_list([1,2,2,3,2,5]), 1), 0)
            self.assertEqual(indexOf(yp_tuple((1,2,2,3,2,5)), 2), 1)
            self.assertEqual(indexOf(yp_tuple((1,2,2,3,2,5)), 3), 3)
            self.assertEqual(indexOf(yp_tuple((1,2,2,3,2,5)), 5), 5)
            self.assertRaises(ValueError, indexOf, yp_tuple((1,2,2,3,2,5)), 0)
            self.assertRaises(ValueError, indexOf, yp_tuple((1,2,2,3,2,5)), 6)
            self.assertEqual(indexOf(yp_str("122325"), "2"), 1)
            self.assertEqual(indexOf(yp_str("122325"), "5"), 5)
            self.assertRaises(ValueError, indexOf, yp_str("122325"), "6")
            self.assertRaises(TypeError, indexOf, yp_int(42), 1)
            self.assertRaises(TypeError, indexOf, yp_func_chr, yp_func_chr)
@yp_unittest.skip_files
def test_indexOf_file(self):
f = open(TESTFN, "w")
try:
f.write("a\n" "b\n" "c\n" "d\n" "e\n")
finally:
f.close()
f = open(TESTFN, "r")
try:
fiter = yp_iter(f)
self.assertEqual(indexOf(fiter, "b\n"), 1)
self.assertEqual(indexOf(fiter, "d\n"), 1)
self.assertEqual(indexOf(fiter, "e\n"), 0)
self.assertRaises(ValueError, indexOf, fiter, "a\n")
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
@yp_unittest.skip_user_defined_types
def test_indexOf_class(self):
iclass = IteratingSequenceClass(3)
for i in range(3):
self.assertEqual(indexOf(iclass, i), i)
self.assertRaises(ValueError, indexOf, iclass, -1)
    # Test iterators with file.writelines().
    @yp_unittest.skip_files
    def test_writelines(self):
        """file.writelines must accept any iterable of strings, reject others."""
        f = open(TESTFN, "w")
        try:
            self.assertRaises(TypeError, f.writelines, None)
            self.assertRaises(TypeError, f.writelines, 42)
            f.writelines(yp_list(["1\n", "2\n"]))
            f.writelines(yp_tuple(("3\n", "4\n")))
            # Iterating a dict yields its keys, which here are line strings.
            f.writelines(yp_dict({'5\n': None}))
            f.writelines(yp_dict())
            # Try a big chunk too.
            class Iterator:
                def __init__(self, start, finish):
                    self.start = start
                    self.finish = finish
                    self.i = self.start
                def __next__(self):
                    if self.i >= self.finish:
                        raise StopIteration
                    result = yp_str(self.i) + '\n'
                    self.i += 1
                    return result
                def __iter__(self):
                    return self
            class Whatever:
                def __init__(self, start, finish):
                    self.start = start
                    self.finish = finish
                def __iter__(self):
                    return Iterator(self.start, self.finish)
            f.writelines(Whatever(6, 6+2000))
            f.close()
            f = open(TESTFN)
            expected = [yp_str(i) + "\n" for i in range(1, 2006)]
            self.assertEqual(yp_list(f), expected)
        finally:
            f.close()
        try:
            unlink(TESTFN)
        except OSError:
            pass
    # Test iterators on RHS of unpacking assignments.
    @yp_unittest.skip_unpack
    def test_unpack_iter(self):
        """Unpacking must consume iterators and raise on arity mismatches."""
        a, b = 1, 2
        self.assertEqual((a, b), (1, 2))
        a, b, c = IteratingSequenceClass(3)
        self.assertEqual((a, b, c), (0, 1, 2))
        try: # too many values
            a, b = IteratingSequenceClass(3)
        except ValueError:
            pass
        else:
            self.fail("should have raised ValueError")
        try: # not enough values
            a, b, c = IteratingSequenceClass(2)
        except ValueError:
            pass
        else:
            self.fail("should have raised ValueError")
        try: # not iterable
            a, b, c = yp_len
        except TypeError:
            pass
        else:
            self.fail("should have raised TypeError")
        a, b, c = {1: 42, 2: 42, 3: 42}.values()
        self.assertEqual((a, b, c), (42, 42, 42))
        f = open(TESTFN, "w")
        lines = ("a\n", "bb\n", "ccc\n")
        try:
            for line in lines:
                f.write(line)
        finally:
            f.close()
        f = open(TESTFN, "r")
        try:
            # Unpacking a file object consumes its line iterator.
            a, b, c = f
            self.assertEqual((a, b, c), lines)
        finally:
            f.close()
        try:
            unlink(TESTFN)
        except OSError:
            pass
        (a, b), (c,) = IteratingSequenceClass(2), {42: 24}
        self.assertEqual((a, b, c), (0, 1, 42))
    @yp_unittest.skip_not_applicable
    @cpython_only
    def test_ref_counting_behavior(self):
        """A failed unpack must not leak references to the source's items."""
        class C(object):
            # Class-level live-instance counter maintained by __new__/__del__.
            count = 0
            def __new__(cls):
                cls.count += 1
                return object.__new__(cls)
            def __del__(self):
                cls = self.__class__
                assert cls.count > 0
                cls.count -= 1
        x = C()
        self.assertEqual(C.count, 1)
        del x
        self.assertEqual(C.count, 0)
        l = [C(), C(), C()]
        self.assertEqual(C.count, 3)
        try:
            # Deliberately wrong arity: the ValueError path must still
            # release the already-fetched items.
            a, b = yp_iter(l)
        except ValueError:
            pass
        del l
        self.assertEqual(C.count, 0)
# Make sure StopIteration is a "sink state".
# This tests various things that weren't sink states in Python 2.2.1,
# plus various things that always were fine.
def test_sinkstate_list(self):
# This used to fail
a = yp_list(range(5))
b = yp_iter(a)
self.assertEqual(yp_list(b), yp_list(range(5)))
a.extend(range(5, 10))
self.assertEqual(yp_list(b), [])
def test_sinkstate_tuple(self):
a = yp_tuple((0, 1, 2, 3, 4))
b = yp_iter(a)
self.assertEqual(yp_list(b), yp_list(range(5)))
self.assertEqual(yp_list(b), [])
def test_sinkstate_string(self):
a = yp_str("abcde")
b = yp_iter(a)
self.assertEqual(yp_list(b), ['a', 'b', 'c', 'd', 'e'])
self.assertEqual(yp_list(b), [])
def test_sinkstate_sequence(self):
# This used to fail
a = SequenceClass(5)
b = yp_iter(a)
self.assertEqual(yp_list(b), yp_list(range(5)))
a.n = 10
self.assertEqual(yp_list(b), [])
def test_sinkstate_callable(self):
# This used to fail
def spam(state=[0]):
i = state[0]
state[0] = i+1
if i == 10:
raise AssertionError("shouldn't have gotten this far")
return i
b = yp_iter(spam, 5)
self.assertEqual(yp_list(b), yp_list(range(5)))
self.assertEqual(yp_list(b), [])
    def test_sinkstate_dict(self):
        """A drained dict iterator stays drained."""
        # XXX For a more thorough test, see towards the end of:
        # http://mail.python.org/pipermail/python-dev/2002-July/026512.html
        a = yp_dict({1:1, 2:2, 0:0, 4:4, 3:3})
        for b in yp_iter(a), a.keys(), a.items(), a.values():
            # NOTE(review): b is immediately rebound below, so only
            # yp_iter(a) is ever exercised, never the view objects. The same
            # quirk exists in CPython's Lib/test/test_iter.py — views are
            # re-iterable, so without the rebinding the second assertion
            # would fail for them.
            b = yp_iter(a)
            self.assertEqual(yp_len(yp_list(b)), 5)
            self.assertEqual(yp_list(b), [])
def test_sinkstate_yield(self):
def gen():
for i in range(5):
yield i
b = gen()
self.assertEqual(yp_list(b), yp_list(range(5)))
self.assertEqual(yp_list(b), [])
def test_sinkstate_range(self):
a = range(5)
b = yp_iter(a)
self.assertEqual(yp_list(b), yp_list(range(5)))
self.assertEqual(yp_list(b), [])
def test_sinkstate_enumerate(self):
a = range(5)
e = enumerate(a)
b = yp_iter(e)
self.assertEqual(yp_list(b), yp_list(zip(range(5), range(5))))
self.assertEqual(yp_list(b), [])
def test_3720(self):
# Avoid a crash, when an iterator deletes its next() method.
class BadIterator(object):
def __iter__(self):
return self
def __next__(self):
del BadIterator.__next__
return 1
try:
for i in BadIterator() :
pass
except TypeError:
pass
def test_extending_list_with_iterator_does_not_segfault(self):
# The code to extend a list with an iterator has a fair
# amount of nontrivial logic in terms of guessing how
# much memory to allocate in advance, "stealing" refs,
# and then shrinking at the end. This is a basic smoke
# test for that scenario.
def gen():
for i in range(500):
yield i
lst = yp_list([0]) * 500
for i in range(240):
lst.pop(0)
lst.extend(gen())
self.assertEqual(yp_len(lst), 760)
def test_main():
    # Entry point used by regrtest-style runners.
    run_unittest(TestCase)
# Allow running this test module directly.
if __name__ == "__main__":
    test_main()
| 33.246888 | 100 | 0.524524 |
812c306a62dcb79d83ae7d59ec1c15b6336e7815 | 7,969 | py | Python | scripts/collect.py | andreafioraldi/FuzzSplore | a8023b1f59f438ff3eb98da37b93d90b2a218159 | [
"Apache-2.0"
] | 32 | 2020-09-14T09:56:36.000Z | 2022-01-18T20:26:04.000Z | scripts/collect.py | andreafioraldi/FuzzSplore | a8023b1f59f438ff3eb98da37b93d90b2a218159 | [
"Apache-2.0"
] | null | null | null | scripts/collect.py | andreafioraldi/FuzzSplore | a8023b1f59f438ff3eb98da37b93d90b2a218159 | [
"Apache-2.0"
] | 7 | 2020-09-15T06:59:56.000Z | 2021-07-23T23:41:16.000Z | #!/usr/bin/python3
import subprocess
import argparse
import json
import sys
import os
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
# --- Command-line interface and global configuration -------------------------
DESCR=''''''
# Directory containing this script; used to locate the afl-showmap binary.
script_dir = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))
opt = argparse.ArgumentParser(description=DESCR, formatter_class=argparse.RawTextHelpFormatter)
opt.add_argument("--conf", help="Configuration json", action='store', required=True)
opt.add_argument("--output", help="Output prefix", action='store', required=True)
args = opt.parse_args()
# afl-showmap is expected in a sibling AFLplusplus checkout.
AFL_SHOWMAP = os.path.join(script_dir, "../AFLplusplus/afl-showmap")
# Scratch file afl-showmap writes each coverage bitmap to.
OUT_FILE = "virgin_map.bin"
'''
[
{
"name": "ngram3",
"corpus": "libpng/out/queue",
"cmd": ["libpng/harness-ngram3", "@@"]
}
]
'''
# Load the fuzzer configuration (see the sample JSON above). Using "with"
# instead of json.load(open(...)) closes the file handle deterministically.
with open(args.conf) as conf_file:
    conf = json.load(conf_file)
print(conf)
# id:000213,src:000003,time:9769,op:havoc,rep:16,+cov
def parse_filename(name):
    """Parse an AFL-style queue entry filename into its (id, src, time) parts.

    AFL names queue entries like
    ``id:000213,src:000003+000005,time:9769,op:havoc,rep:16,+cov``.

    :param name: path or bare filename of a queue entry.
    :returns: tuple ``(id, src, time)`` where ``id`` is the entry's integer
        id, ``src`` is a list of integer parent ids (None when the entry has
        no ``src:`` field, e.g. initial seeds) and ``time`` is the discovery
        time in milliseconds (None when absent).
    """
    # Work on the basename only; os.path.basename is more portable than
    # splitting on "/" by hand.
    name = os.path.basename(name)
    src = None
    time = None
    # "id:" is always the first field: the id is everything up to the first comma.
    id = int(name[3: name.find(",")])
    i = name.find("src:")
    if i >= 0:
        # Spliced entries carry several parent ids joined with "+".
        src = name[i+4: name.find(",", i)]
        src = list(map(int, src.split("+")))
    i = name.find("time:")
    if i >= 0:
        time = int(name[i+5: name.find(",", i)])
    return id, src, time
def iterate_files(path):
    """Yield the full paths of the files directly under *path*.

    Only the top-level directory is scanned (no recursion) and directory
    entries are skipped. Order is whatever os.walk reports, so callers that
    need determinism must sort the result themselves.
    """
    # Take just the first (top-level) triple from os.walk instead of
    # breaking out of the walk loop after one iteration.
    top, _dirs, filenames = next(os.walk(path), (path, [], []))
    for filename in filenames:
        yield os.path.join(top, filename)
def run_showmap(f, cmd):
    """Run afl-showmap on testcase *f*, writing its coverage bitmap to OUT_FILE.

    *cmd* is the target command line; every "@@" placeholder is replaced by
    the testcase path (same convention AFL uses). The previous OUT_FILE is
    removed first so a failed run cannot leave a stale bitmap behind for
    merge_showmap() to pick up.

    :raises subprocess.CalledProcessError: if afl-showmap exits non-zero.
    """
    cmd = cmd[:]  # don't mutate the caller's list
    # os.remove instead of os.system("rm ..."): no shell involved, and no
    # error output when the file does not exist yet.
    try:
        os.remove(OUT_FILE)
    except FileNotFoundError:
        pass
    for i in range(len(cmd)):
        if cmd[i] == "@@":
            cmd[i] = f
    showmap_args = [AFL_SHOWMAP, "-b", "-o", OUT_FILE, "--"] + cmd
    subprocess.check_call(showmap_args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def merge_showmap(virgin_bits):
    """Fold the bitmap in OUT_FILE into *virgin_bits* (mutated in place).

    :param virgin_bits: per-position hit counts accumulated so far.
    :returns: tuple ``(bitmap, new_bits, interesting)`` where ``bitmap`` is
        the raw bytes read from OUT_FILE, ``new_bits`` counts positions hit
        for the first time, and ``interesting`` is True when any position's
        count exceeded the value recorded so far.
    """
    with open(OUT_FILE, "rb") as bitmap_file:
        bitmap = bitmap_file.read()
    new_bits = 0
    interesting = False
    for idx, count in enumerate(bitmap):
        if count == 0:
            continue
        if virgin_bits[idx] == 0:
            new_bits += 1
        if count > virgin_bits[idx]:
            interesting = True
            virgin_bits[idx] = count
    return bitmap, new_bits, interesting
testcases = {}
graph = {}
timeline = {}
coverage_over_time = {}
#vects = open(args.output + '_vectors.csv', 'w')
#vects.write('NAME,ID,X,Y\n')
all_bitmaps = []
fuzzers_bitmaps = []
idx_to_id = {}
idx_to_time = {}
for fuzzer in conf:
queue_dir = fuzzer["corpus"]
cmd = fuzzer["cmd"]
name = fuzzer['name']
bitmaps = []
#virgin_bits = [0] * (2 ** 16)
cov_virgin_bits = [0] * (2 ** 16)
i = 0
idx_to_id[name] = {}
idx_to_time[name] = {}
for f in sorted(iterate_files(queue_dir)):
#print(name, f)
id, src, time = parse_filename(f)
sec = time // 1000
if sec > 100: continue
idx_to_id[name][i] = id
idx_to_time[name][i] = sec
i += 1
#run_showmap(f, cmd)
#bitmap, new_bits, interesting = merge_showmap(virgin_bits)
#if interesting:
graph[name] = graph.get(name, {})
graph[name][id] = graph[name].get(id, [])
timeline[name] = timeline.get(name, {})
timeline[name][sec] = timeline[name].get(sec, [])
timeline[name][sec] += [id]
if src is not None:
for srcid in src:
graph[name] = graph.get(name, {})
graph[name][srcid] = graph[name].get(srcid, [])
graph[name][srcid] += [id]
graph[name][srcid] = list(set(graph[name][srcid]))
break
#cov_new_bits = new_bits
#if conf[0]['name'] != name:
print(conf[0]['name'], f)
run_showmap(f, conf[0]['cmd'])
bitmap, cov_new_bits, _ = merge_showmap(cov_virgin_bits)
if cov_new_bits:
coverage_over_time[sec] = coverage_over_time.get(sec, {})
coverage_over_time[sec][name] = coverage_over_time[sec].get(name, 0)
coverage_over_time[sec][name] += cov_new_bits
bitmaps.append(list(bitmap))
testcases[name] = testcases.get(name, {})
testcases[name][id] = testcases[name].get(id, {})
testcases[name][id]['time'] = sec
#testcases[name][id]['interesting'] = interesting
#testcases[name][id]['new_bits'] = new_bits
testcases[name][id]['cross'] = testcases[name][id].get('cross', [])
for fuzzer2 in conf:
if name == fuzzer2['name']: continue
#queue_dir = fuzzer2["corpus"]
virgin_bits = [0] * (2 ** 16)
for f in sorted(iterate_files(queue_dir)):
id, src, time = parse_filename(f)
sec = time // 1000
if sec > 100: continue
print('cross', fuzzer2['name'], f)
run_showmap(f, fuzzer2['cmd'])
_, new_bits, interesting = merge_showmap(virgin_bits)
if interesting:
testcases[name][id]['cross'] = testcases[name][id].get('cross', [])
testcases[name][id]['cross'].append(fuzzer2['name'])
plen = len(all_bitmaps)
all_bitmaps += bitmaps
fuzzers_bitmaps.append((name, plen, len(all_bitmaps)))
#X = np.array(bitmaps)
#pca = PCA(n_components=2)
#pca.fit(X)
#print("TSNE...")
#X_embedded = tsne.fit_transform(X)
#np.savetxt(args.output + '_' + name + '_vectors.csv', X_embedded, delimiter=",", header='X,Y', comments='')
#for i in range(len(bitmaps)):
# vects.write('%s,%d,%f,%f\n' % (name, idx_to_id[name][i], X_embedded[i][0], X_embedded[i][1]))
#print("Saving to %s_vectors.csv..." % args.output)
#vects.close()
X = np.array(all_bitmaps)
X_len = len(all_bitmaps)
del all_bitmaps
print("TSNE...")
X_embedded = TSNE(n_components=2).fit_transform(X)
print("Saving to %s_vectors.csv..." % args.output)
vects = open(args.output + '_vectors.csv', 'w')
vects.write('NAME,ID,TIME,X,Y\n')
for i in range(X_len):
for name, start, end in fuzzers_bitmaps:
if i in range(start, end):
vects.write('%s,%d,%d,%f,%f\n' % (name, idx_to_id[name][i - start], idx_to_time[name][i - start], X_embedded[i][0], X_embedded[i][1]))
break
vects.close()
print("Saving to %s_coverage.csv..." % args.output)
prev_cov = {}
covf = open(args.output + '_coverage.csv', 'w')
covf.write('NAME,TIME,VAL\n')
for sec in sorted(coverage_over_time.keys()):
for name in coverage_over_time[sec]:
prev_cov[name] = prev_cov.get(name, 0)
prev_cov[name] += coverage_over_time[sec][name]
covf.write('%s,%d,%d\n' % (name, sec, prev_cov[name]))
covf.close()
print("Saving to %s_inputs.csv..." % args.output)
inpf = open(args.output + '_inputs.csv', 'w')
inpf.write('NAME,TIME,VAL\n')
'''for sec in sorted(inputs_for_seconds.keys()):
for name in inputs_for_seconds[sec]:
inpf.write('%s,%d,%d\n' % (name, sec, inputs_for_seconds[sec][name]))'''
for name in timeline:
for sec in timeline[name]:
inpf.write('%s,%d,%s\n' % (name, sec, len(timeline[name][sec])))
inpf.close()
print("Saving to %s_timeline.csv..." % args.output)
timef = open(args.output + '_timeline.csv', 'w')
timef.write('NAME,TIME,IDS\n')
for name in timeline:
for sec in timeline[name]:
timef.write('%s,%d,%s\n' % (name, sec, ':'.join(map(str, timeline[name][sec]))))
timef.close()
def visit(fuzz, id):
    """Recursively build the d3-style tree node for testcase *id* of fuzzer *fuzz*.

    Children come from the module-level ``graph`` mapping and per-testcase
    metadata (e.g. time/cross) is merged in from ``testcases``.
    """
    node = {"name": id}
    node["children"] = [visit(fuzz, child_id) for child_id in graph[fuzz][id]]
    node["fuzzer"] = fuzz
    # Merge metadata last, matching the original **-unpacking order.
    node.update(testcases[fuzz][id])
    return node
# Build one mutation-lineage tree per fuzzer, rooted at testcase id 0.
d3graph = {}
for name in graph:
    d3graph[name] = visit(name, 0)
print("Saving to %s_graphs..." % args.output)
with open(args.output + '_graphs.json', 'w') as f:
    json.dump(d3graph, f)
'''
print("Saving to %s..." % args.output)
with open(args.output + '_data.json', 'w') as f:
    json.dump({
        'testcases': testcases,
        'inputs_for_seconds': inputs_for_seconds,
        'coverage_over_time': coverage_over_time,
        'timeline': timeline,
        'graph': graph,
    }, f)
'''
print("Done.")
e2f9697692bd710f035e2b06a55f6415541f09bf | 1,130 | py | Python | easing.py | longhaoteng/captcha-crack | 34a5ddead6551ba8f24ee941872310def74a487a | [
"Apache-2.0"
] | null | null | null | easing.py | longhaoteng/captcha-crack | 34a5ddead6551ba8f24ee941872310def74a487a | [
"Apache-2.0"
] | null | null | null | easing.py | longhaoteng/captcha-crack | 34a5ddead6551ba8f24ee941872310def74a487a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'mr.long'
import math
class Easing(object):
    """Ease-out timing functions.

    Each function maps a normalized progress value ``x`` in [0, 1] to an
    eased progress value (fast at the start, decelerating toward the end).

    Bug fix: the functions are now decorated with @staticmethod. Previously
    they were plain functions in the class body, so calling them through an
    instance (``Easing().ease_out_sine(0.5)``) passed the instance as ``x``
    and broke; class-level calls (``Easing.ease_out_sine(0.5)``) behave
    exactly as before.
    """
    PI = math.pi
    # Overshoot/period constants shared by the back/elastic-style curves.
    c1 = 1.70158
    c2 = c1 * 1.525
    c3 = c1 + 1
    c4 = (2 * PI) / 3
    c5 = (2 * PI) / 4.5

    @staticmethod
    def ease_out_sine(x):
        """Sinusoidal ease-out."""
        return math.sin(x * Easing.PI / 2)

    @staticmethod
    def ease_out_cubic(x):
        """Cubic ease-out."""
        return 1 - pow(1 - x, 3)

    @staticmethod
    def ease_out_quint(x):
        """Quintic ease-out."""
        return 1 - pow(1 - x, 5)

    @staticmethod
    def ease_out_circ(x):
        """Circular ease-out."""
        return math.sqrt(1 - pow(x - 1, 2))

    @staticmethod
    def ease_out_quad(x):
        """Quadratic ease-out."""
        return 1 - (1 - x) * (1 - x)

    @staticmethod
    def ease_out_quart(x):
        """Quartic ease-out."""
        return 1 - pow(1 - x, 4)

    @staticmethod
    def ease_out_expo(x):
        """Exponential ease-out; exactly 1 at x == 1."""
        if x == 1:
            return 1
        else:
            return 1 - pow(2, -10 * x)

    @staticmethod
    def ease_out_back(x):
        """Back ease-out: slightly overshoots the target, then settles."""
        return 1 + Easing.c3 * pow(x - 1, 3) + Easing.c1 * pow(x - 1, 2)

    @staticmethod
    def ease_out_bounce(x):
        """Bounce ease-out: a ball-drop style series of dampened bounces."""
        n1 = 7.5625
        d1 = 2.75
        if x < 1 / d1:
            return n1 * x * x
        elif x < 2 / d1:
            x -= 1.5 / d1
            return n1 * x * x + 0.75
        elif x < 2.5 / d1:
            x -= 2.25 / d1
            return n1 * x * x + 0.9375
        else:
            x -= 2.625 / d1
            return n1 * x * x + 0.984375
cce880c22fc7016d511abff1ed9b4c74e3665116 | 38,557 | py | Python | cinder/tests/unit/objects/test_base.py | 2020human/cinder | 04528318848620e4ce2639ea2dd5323783dc7a1f | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/objects/test_base.py | 2020human/cinder | 04528318848620e4ce2639ea2dd5323783dc7a1f | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/objects/test_base.py | 2020human/cinder | 04528318848620e4ce2639ea2dd5323783dc7a1f | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from iso8601 import iso8601
import mock
from oslo_versionedobjects import fields
from sqlalchemy import sql
from cinder import context
from cinder import db
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder import objects
from cinder.objects import fields as c_fields
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_objects
from cinder.tests.unit import objects as test_objects
class TestCinderObjectVersionHistory(test_objects.BaseObjectsTestCase):
    """Tests for CinderObjectVersionsHistory add/get semantics."""
    def test_add(self):
        """Added versions layer on top of the default 'liberty' mapping."""
        history = test_objects.obj_base.CinderObjectVersionsHistory()
        v10 = {'Backup': '2.0'}
        v11 = {'Backup': '2.1'}
        history.add('1.0', v10)
        history.add('1.1', v11)
        # We have 3 elements because we have the liberty version by default
        self.assertEqual(2 + 1, len(history))
        expected_v10 = history['liberty'].copy()
        expected_v10.update(v10)
        expected_v11 = history['liberty'].copy()
        expected_v11.update(v11)
        self.assertEqual('1.1', history.get_current())
        self.assertEqual(expected_v11, history.get_current_versions())
        self.assertEqual(expected_v10, history['1.0'])
    def test_add_existing(self):
        """Re-adding an existing version must raise ProgrammingError."""
        history = test_objects.obj_base.CinderObjectVersionsHistory()
        history.add('1.0', {'Backup': '1.0'})
        self.assertRaises(exception.ProgrammingError,
                          history.add, '1.0', {'Backup': '1.0'})
class TestCinderObject(test_objects.BaseObjectsTestCase):
    """Tests methods from CinderObject."""
    def setUp(self):
        super(TestCinderObject, self).setUp()
        self.obj = fake_objects.ChildObject(
            scheduled_at=None,
            uuid=uuid.uuid4(),
            text='text')
        self.obj.obj_reset_changes()
    def test_cinder_obj_get_changes_no_changes(self):
        """A freshly-reset object reports an empty changes dict."""
        self.assertDictEqual({}, self.obj.cinder_obj_get_changes())
    def test_cinder_obj_get_changes_other_changes(self):
        """Modified non-datetime fields appear verbatim in the changes dict."""
        self.obj.text = 'text2'
        self.assertDictEqual({'text': 'text2'},
                             self.obj.cinder_obj_get_changes())
    def test_cinder_obj_get_changes_datetime_no_tz(self):
        """Naive datetimes are passed through unchanged."""
        now = datetime.datetime.utcnow()
        self.obj.scheduled_at = now
        self.assertDictEqual({'scheduled_at': now},
                             self.obj.cinder_obj_get_changes())
    def test_cinder_obj_get_changes_datetime_tz_utc(self):
        """UTC-aware datetimes are returned naive (tzinfo stripped)."""
        now_tz = iso8601.parse_date('2015-06-26T22:00:01Z')
        now = now_tz.replace(tzinfo=None)
        self.obj.scheduled_at = now_tz
        self.assertDictEqual({'scheduled_at': now},
                             self.obj.cinder_obj_get_changes())
    def test_cinder_obj_get_changes_datetime_tz_non_utc_positive(self):
        """A +01 offset is normalized to naive UTC (one hour earlier)."""
        now_tz = iso8601.parse_date('2015-06-26T22:00:01+01')
        now = now_tz.replace(tzinfo=None) - datetime.timedelta(hours=1)
        self.obj.scheduled_at = now_tz
        self.assertDictEqual({'scheduled_at': now},
                             self.obj.cinder_obj_get_changes())
    def test_cinder_obj_get_changes_datetime_tz_non_utc_negative(self):
        """A -05 offset is normalized to naive UTC (five hours later)."""
        now_tz = iso8601.parse_date('2015-06-26T10:00:01-05')
        now = now_tz.replace(tzinfo=None) + datetime.timedelta(hours=5)
        self.obj.scheduled_at = now_tz
        self.assertDictEqual({'scheduled_at': now},
                             self.obj.cinder_obj_get_changes())
    @mock.patch('cinder.objects.base.CinderPersistentObject.get_by_id')
    def test_refresh(self, get_by_id):
        """refresh() reloads every field from get_by_id's result."""
        @objects.base.CinderObjectRegistry.register_if(False)
        class MyTestObject(objects.base.CinderObject,
                           objects.base.CinderObjectDictCompat,
                           objects.base.CinderComparableObject,
                           objects.base.CinderPersistentObject):
            fields = {'id': fields.UUIDField(),
                      'name': fields.StringField()}
        test_obj = MyTestObject(id=fake.OBJECT_ID, name='foo')
        refresh_obj = MyTestObject(id=fake.OBJECT_ID, name='bar')
        get_by_id.return_value = refresh_obj
        test_obj.refresh()
        self._compare(self, refresh_obj, test_obj)
    @mock.patch('cinder.objects.base.CinderPersistentObject.get_by_id')
    def test_refresh_readonly(self, get_by_id_mock):
        """refresh() must also update read-only fields."""
        @objects.base.CinderObjectRegistry.register_if(False)
        class MyTestObject(objects.base.CinderObject,
                           objects.base.CinderObjectDictCompat,
                           objects.base.CinderComparableObject,
                           objects.base.CinderPersistentObject):
            fields = {'id': fields.UUIDField(),
                      'name': fields.StringField(read_only=True)}
        test_obj = MyTestObject(id=fake.OBJECT_ID, name='foo')
        refresh_obj = MyTestObject(id=fake.OBJECT_ID, name='bar')
        get_by_id_mock.return_value = refresh_obj
        test_obj.refresh()
        self._compare(self, refresh_obj, test_obj)
    def test_refresh_no_id_field(self):
        """refresh() on an object without an 'id' field is unsupported."""
        @objects.base.CinderObjectRegistry.register_if(False)
        class MyTestObjectNoId(objects.base.CinderObject,
                               objects.base.CinderObjectDictCompat,
                               objects.base.CinderComparableObject,
                               objects.base.CinderPersistentObject):
            fields = {'uuid': fields.UUIDField()}
        test_obj = MyTestObjectNoId(uuid=fake.OBJECT_ID, name='foo')
        self.assertRaises(NotImplementedError, test_obj.refresh)
    @mock.patch('cinder.objects.base.objects', mock.Mock())
    def test_cls_init(self):
        """Test that class init method gets called on registration."""
        @objects.base.CinderObjectRegistry.register
        class MyTestObject(objects.base.CinderObject,
                           objects.base.CinderPersistentObject):
            cinder_ovo_cls_init = mock.Mock()
        MyTestObject.cinder_ovo_cls_init.assert_called_once_with()
class TestCinderComparableObject(test_objects.BaseObjectsTestCase):
    """Tests equality semantics provided by CinderComparableObject."""
    def test_comparable_objects(self):
        """Equal fields compare equal; other types and None never do."""
        @objects.base.CinderObjectRegistry.register
        class MyComparableObj(objects.base.CinderObject,
                              objects.base.CinderObjectDictCompat,
                              objects.base.CinderComparableObject):
            fields = {'foo': fields.Field(fields.Integer())}
        class NonVersionedObject(object):
            pass
        obj1 = MyComparableObj(foo=1)
        obj2 = MyComparableObj(foo=1)
        obj3 = MyComparableObj(foo=2)
        obj4 = NonVersionedObject()
        self.assertTrue(obj1 == obj2)
        self.assertFalse(obj1 == obj3)
        # Comparison with a non-versioned object must be False, not an error.
        self.assertFalse(obj1 == obj4)
        self.assertNotEqual(obj1, None)
class TestCinderObjectConditionalUpdate(test.TestCase):
    def setUp(self):
        super(TestCinderObjectConditionalUpdate, self).setUp()
        self.context = context.get_admin_context()
    def _create_volume(self):
        """Create and persist a 1 GB 'available' volume for the tests."""
        vol = {
            'display_description': 'Test Desc',
            'size': 1,
            'status': 'available',
            'availability_zone': 'az',
            'host': 'dummy',
            'attach_status': c_fields.VolumeAttachStatus.DETACHED,
        }
        volume = objects.Volume(context=self.context, **vol)
        volume.create()
        return volume
    def _create_snapshot(self, volume):
        """Create and persist a snapshot of *volume*."""
        snapshot = objects.Snapshot(context=self.context, volume_id=volume.id)
        snapshot.create()
        return snapshot
    def _check_volume(self, volume, status, size, reload=False, dirty_keys=(),
                      **kwargs):
        """Assert *volume* has the given status/size/dirty keys.

        With reload=True the volume is re-read from the DB first, so the
        check covers the persisted state instead of the in-memory object.
        Extra kwargs are compared against same-named volume attributes.
        """
        if reload:
            volume = objects.Volume.get_by_id(self.context, volume.id)
        self.assertEqual(status, volume.status)
        self.assertEqual(size, volume.size)
        dirty = volume.cinder_obj_get_changes()
        self.assertEqual(list(dirty_keys), list(dirty.keys()))
        for key, value in kwargs.items():
            self.assertEqual(value, getattr(volume, key))
def test_conditional_update_non_iterable_expected(self):
volume = self._create_volume()
# We also check that we can check for None values
self.assertTrue(volume.conditional_update(
{'status': 'deleting', 'size': 2},
{'status': 'available', 'migration_status': None}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 2)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 2, True)
def test_conditional_update_non_iterable_expected_model_field(self):
volume = self._create_volume()
# We also check that we can check for None values
self.assertTrue(volume.conditional_update(
{'status': 'deleting', 'size': 2,
'previous_status': volume.model.status},
{'status': 'available', 'migration_status': None}))
# Check that the object in memory has been updated
self._check_volume(volume, 'deleting', 2, previous_status='available')
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 2, True,
previous_status='available')
def test_conditional_update_non_iterable_expected_save_all(self):
volume = self._create_volume()
volume.size += 1
# We also check that we can check for not None values
self.assertTrue(volume.conditional_update(
{'status': 'deleting'},
{'status': 'available', 'availability_zone': volume.Not(None)},
save_all=True))
# Check that the object in memory has been updated and that the size
# is not a dirty key
self._check_volume(volume, 'deleting', 2)
# Check that the volume in the DB also has been updated
self._check_volume(volume, 'deleting', 2, True)
    def test_conditional_update_non_iterable_expected_dont_save_all(self):
        """save_all=False leaves unrelated dirty fields unpersisted."""
        volume = self._create_volume()
        volume.size += 1
        self.assertTrue(volume.conditional_update(
            {'status': 'deleting'},
            {'status': 'available'}, save_all=False))
        # Check that the object in memory has been updated with the new status
        # but that size has not been saved and is a dirty key
        self._check_volume(volume, 'deleting', 2, False, ['size'])
        # Check that the volume in the DB also has been updated but not the
        # size
        self._check_volume(volume, 'deleting', 1, True)
    def test_conditional_update_fail_non_iterable_expected_save_all(self):
        """A failed conditional update with save_all=True changes nothing."""
        volume = self._create_volume()
        volume.size += 1
        self.assertFalse(volume.conditional_update(
            {'status': 'available'},
            {'status': 'deleting'}, save_all=True))
        # Check that the object in memory has not been updated and that the
        # size is still a dirty key
        self._check_volume(volume, 'available', 2, False, ['size'])
        # Check that the volume in the DB hasn't been updated
        self._check_volume(volume, 'available', 1, True)
    def test_default_conditional_update_non_iterable_expected(self):
        """Without explicit expectations the default condition matches."""
        volume = self._create_volume()
        self.assertTrue(volume.conditional_update({'status': 'deleting'}))
        # Check that the object in memory has been updated
        self._check_volume(volume, 'deleting', 1)
        # Check that the volume in the DB also has been updated
        self._check_volume(volume, 'deleting', 1, True)
    def test_default_conditional_fail_update_non_iterable_expected(self):
        """Default condition fails when the DB row changed under us."""
        volume_in_db = self._create_volume()
        volume = objects.Volume.get_by_id(self.context, volume_in_db.id)
        volume_in_db.size += 1
        volume_in_db.save()
        # This will fail because size in DB is different
        self.assertFalse(volume.conditional_update({'status': 'deleting'}))
        # Check that the object in memory has not been updated
        self._check_volume(volume, 'available', 1)
        # Check that the volume in the DB hasn't changed the status but has
        # the size we changed before the conditional update
        self._check_volume(volume_in_db, 'available', 2, True)
    def test_default_conditional_update_non_iterable_expected_with_dirty(self):
        """Dirty fields are excluded from the default condition, but the
        update-timestamp check still detects concurrent modification."""
        volume_in_db = self._create_volume()
        volume = objects.Volume.get_by_id(self.context, volume_in_db.id)
        volume_in_db.size += 1
        volume_in_db.save()
        volume.size = 33
        # This will fail because even though we have excluded the size from
        # the default condition when we dirtied it in the volume object, we
        # still have the last update timestamp that will be included in the
        # condition
        self.assertFalse(volume.conditional_update({'status': 'deleting'}))
        # Check that the object in memory has not been updated
        self._check_volume(volume, 'available', 33, False, ['size'])
        # Check that the volume in the DB hasn't changed the status but has
        # the size we changed before the conditional update
        self._check_volume(volume_in_db, 'available', 2, True)
    def test_conditional_update_negated_non_iterable_expected(self):
        """db.Not() negates scalar expectations in the condition."""
        volume = self._create_volume()
        self.assertTrue(volume.conditional_update(
            {'status': 'deleting', 'size': 2},
            {'status': db.Not('in-use'), 'size': db.Not(2)}))
        # Check that the object in memory has been updated
        self._check_volume(volume, 'deleting', 2)
        # Check that the volume in the DB also has been updated
        self._check_volume(volume, 'deleting', 2, True)
    def test_conditional_update_non_iterable_expected_filter(self):
        """Extra SQL filters(volume has no snapshots) restrict the update."""
        # Volume we want to change
        volume = self._create_volume()
        # Another volume that has no snapshots
        volume2 = self._create_volume()
        # A volume with snapshots
        volume3 = self._create_volume()
        self._create_snapshot(volume3)
        # Update only if it has no snapshots
        filters = (~sql.exists().where(
            models.Snapshot.volume_id == models.Volume.id),)
        self.assertTrue(volume.conditional_update(
            {'status': 'deleting', 'size': 2},
            {'status': 'available'},
            filters))
        # Check that the object in memory has been updated
        self._check_volume(volume, 'deleting', 2)
        # Check that the volume in the DB also has been updated
        self._check_volume(volume, 'deleting', 2, True)
        # Check that the other volumes in the DB haven't changed
        self._check_volume(volume2, 'available', 1, True)
        self._check_volume(volume3, 'available', 1, True)
    def test_conditional_update_iterable_expected(self):
        """Iterable expectations behave like SQL IN clauses."""
        volume = self._create_volume()
        self.assertTrue(volume.conditional_update(
            {'status': 'deleting', 'size': 20},
            {'status': ('error', 'available'), 'size': range(10)}))
        # Check that the object in memory has been updated
        self._check_volume(volume, 'deleting', 20)
        # Check that the volume in the DB also has been updated
        self._check_volume(volume, 'deleting', 20, True)
    def test_conditional_update_negated_iterable_expected(self):
        """db.Not() on an iterable behaves like SQL NOT IN."""
        volume = self._create_volume()
        self.assertTrue(volume.conditional_update(
            {'status': 'deleting', 'size': 20},
            {'status': db.Not(('creating', 'in-use')), 'size': range(10)}))
        # Check that the object in memory has been updated
        self._check_volume(volume, 'deleting', 20)
        # Check that the volume in the DB also has been updated
        self._check_volume(volume, 'deleting', 20, True)
    def test_conditional_update_fail_non_iterable_expected(self):
        """Update is a no-op when a scalar expectation doesn't match."""
        volume = self._create_volume()
        self.assertFalse(volume.conditional_update(
            {'status': 'deleting'},
            {'status': 'available', 'size': 2}))
        # Check that the object in memory hasn't changed
        self._check_volume(volume, 'available', 1)
        # Check that the volume in the DB hasn't changed either
        self._check_volume(volume, 'available', 1, True)
    def test_conditional_update_fail_negated_non_iterable_expected(self):
        """Update is a no-op when a negated scalar expectation fails."""
        volume = self._create_volume()
        result = volume.conditional_update({'status': 'deleting'},
                                           {'status': db.Not('in-use'),
                                            'size': 2})
        self.assertFalse(result)
        # Check that the object in memory hasn't changed
        self._check_volume(volume, 'available', 1)
        # Check that the volume in the DB hasn't changed either
        self._check_volume(volume, 'available', 1, True)
    def test_conditional_update_fail_iterable_expected(self):
        """Update is a no-op when no iterable expectation matches."""
        volume = self._create_volume()
        self.assertFalse(volume.conditional_update(
            {'status': 'available'},
            {'status': ('error', 'creating'), 'size': range(2, 10)}))
        # Check that the object in memory hasn't changed
        self._check_volume(volume, 'available', 1)
        # Check that the volume in the DB hasn't changed either
        self._check_volume(volume, 'available', 1, True)
    def test_conditional_update_fail_negated_iterable_expected(self):
        """Update is a no-op when the negated iterable expectation fails."""
        volume = self._create_volume()
        self.assertFalse(volume.conditional_update(
            {'status': 'error'},
            {'status': db.Not(('available', 'in-use')), 'size': range(2, 10)}))
        # Check that the object in memory hasn't changed
        self._check_volume(volume, 'available', 1)
        # Check that the volume in the DB hasn't changed either
        self._check_volume(volume, 'available', 1, True)
    def test_conditional_update_fail_non_iterable_expected_filter(self):
        """An extra SQL filter that doesn't match prevents the update."""
        # Volume we want to change
        volume = self._create_volume()
        self._create_snapshot(volume)
        # A volume that has no snapshots
        volume2 = self._create_volume()
        # Another volume with snapshots
        volume3 = self._create_volume()
        self._create_snapshot(volume3)
        # Update only if it has no snapshots
        filters = (~sql.exists().where(
            models.Snapshot.volume_id == models.Volume.id),)
        self.assertFalse(volume.conditional_update(
            {'status': 'deleting', 'size': 2},
            {'status': 'available'},
            filters))
        # Check that the object in memory hasn't been updated
        self._check_volume(volume, 'available', 1)
        # Check that no volume in the DB also has been updated
        self._check_volume(volume, 'available', 1, True)
        self._check_volume(volume2, 'available', 1, True)
        self._check_volume(volume3, 'available', 1, True)
    def test_conditional_update_non_iterable_case_value(self):
        """A Case value takes the matching branch(volume has snapshots)."""
        # Volume we want to change and has snapshots
        volume = self._create_volume()
        self._create_snapshot(volume)
        # Filter that checks if a volume has snapshots
        has_snapshot_filter = sql.exists().where(
            models.Snapshot.volume_id == models.Volume.id)
        # We want the updated value to depend on whether it has snapshots or
        # not
        case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
                                  else_='no-snapshot')
        self.assertTrue(volume.conditional_update({'status': case_values},
                                                  {'status': 'available'}))
        # Check that the object in memory has been updated
        self._check_volume(volume, 'has-snapshot', 1)
        # Check that the volume in the DB also has been updated
        self._check_volume(volume, 'has-snapshot', 1, True)
    def test_conditional_update_non_iterable_case_value_else(self):
        """A Case value falls back to else_ when no branch matches."""
        # Volume we want to change
        volume = self._create_volume()
        # Filter that checks if a volume has snapshots
        has_snapshot_filter = sql.exists().where(
            models.Snapshot.volume_id == models.Volume.id)
        # We want the updated value to depend on whether it has snapshots or
        # not
        case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
                                  else_='no-snapshot')
        self.assertTrue(volume.conditional_update({'status': case_values},
                                                  {'status': 'available'}))
        # Check that the object in memory has been updated
        self._check_volume(volume, 'no-snapshot', 1)
        # Check that the volume in the DB also has been updated
        self._check_volume(volume, 'no-snapshot', 1, True)
    def test_conditional_update_non_iterable_case_value_fail(self):
        """A Case value is irrelevant when the condition itself fails."""
        # Volume we want to change doesn't have snapshots
        volume = self._create_volume()
        # Filter that checks if a volume has snapshots
        has_snapshot_filter = sql.exists().where(
            models.Snapshot.volume_id == models.Volume.id)
        # We want the updated value to depend on whether it has snapshots or
        # not
        case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
                                  else_='no-snapshot')
        # We won't update because volume status is available
        self.assertFalse(volume.conditional_update({'status': case_values},
                                                   {'status': 'deleting'}))
        # Check that the object in memory has not been updated
        self._check_volume(volume, 'available', 1)
        # Check that the volume in the DB also hasn't been updated either
        self._check_volume(volume, 'available', 1, True)
    def test_conditional_update_iterable_with_none_expected(self):
        """Iterable expectations may include None as an accepted value."""
        volume = self._create_volume()
        # We also check that we can check for None values in an iterable
        self.assertTrue(volume.conditional_update(
            {'status': 'deleting'},
            {'status': (None, 'available'),
             'migration_status': (None, 'finished')}))
        # Check that the object in memory has been updated
        self._check_volume(volume, 'deleting', 1)
        # Check that the volume in the DB also has been updated
        self._check_volume(volume, 'deleting', 1, True)
    def test_conditional_update_iterable_with_not_none_expected(self):
        """Negated iterable expectations may include None as well."""
        volume = self._create_volume()
        # We also check that we can check for None values in a negated iterable
        self.assertTrue(volume.conditional_update(
            {'status': 'deleting'},
            {'status': volume.Not((None, 'in-use'))}))
        # Check that the object in memory has been updated
        self._check_volume(volume, 'deleting', 1)
        # Check that the volume in the DB also has been updated
        self._check_volume(volume, 'deleting', 1, True)
    def test_conditional_update_iterable_with_not_includes_null(self):
        """Not() treats NULL as "not equal" by default(Python semantics)."""
        volume = self._create_volume()
        # We also check that negation includes None values by default like we
        # do in Python and not like MySQL does
        self.assertTrue(volume.conditional_update(
            {'status': 'deleting'},
            {'status': 'available',
             'migration_status': volume.Not(('migrating', 'error'))}))
        # Check that the object in memory has been updated
        self._check_volume(volume, 'deleting', 1)
        # Check that the volume in the DB also has been updated
        self._check_volume(volume, 'deleting', 1, True)
    def test_conditional_update_iterable_with_not_includes_null_fails(self):
        """auto_none=False makes Not() exclude NULL, so the update fails."""
        volume = self._create_volume()
        # We also check that negation excludes None values if we ask it to
        self.assertFalse(volume.conditional_update(
            {'status': 'deleting'},
            {'status': 'available',
             'migration_status': volume.Not(('migrating', 'error'),
                                            auto_none=False)}))
        # Check that the object in memory has not been updated
        self._check_volume(volume, 'available', 1, False)
        # Check that the volume in the DB hasn't been updated
        self._check_volume(volume, 'available', 1, True)
    def test_conditional_update_use_operation_in_value(self):
        """Requested values may be SQL expressions over model fields."""
        volume = self._create_volume()
        expected_size = volume.size + 1
        # We also check that using fields in requested changes will work as
        # expected
        self.assertTrue(volume.conditional_update(
            {'status': 'deleting',
             'size': volume.model.size + 1},
            {'status': 'available'}))
        # Check that the object in memory has been updated
        self._check_volume(volume, 'deleting', expected_size, False)
        # Check that the volume in the DB has also been updated
        self._check_volume(volume, 'deleting', expected_size, True)
    def test_conditional_update_auto_order(self):
        """Values that reference model fields are automatically ordered
        before plain values when passed to SQLAlchemy's update()."""
        volume = self._create_volume()
        has_snapshot_filter = sql.exists().where(
            models.Snapshot.volume_id == models.Volume.id)
        case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
                                  else_='no-snapshot')
        values = {'status': 'deleting',
                  'previous_status': volume.model.status,
                  'migration_status': case_values}
        with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query:
            update = model_query.return_value.filter.return_value.update
            update.return_value = 0
            self.assertFalse(volume.conditional_update(
                values, {'status': 'available'}))
        # We check that we are passing values to update to SQLAlchemy in the
        # right order
        self.assertEqual(1, update.call_count)
        self.assertListEqual(
            [('previous_status', volume.model.status),
             ('migration_status', mock.ANY),
             ('status', 'deleting')],
            list(update.call_args[0][0]))
        self.assertDictEqual(
            {'synchronize_session': False,
             'update_args': {'preserve_parameter_order': True}},
            update.call_args[1])
    def test_conditional_update_force_order(self):
        """An explicit ``order`` argument overrides the automatic ordering
        of values passed to SQLAlchemy's update()."""
        volume = self._create_volume()
        has_snapshot_filter = sql.exists().where(
            models.Snapshot.volume_id == models.Volume.id)
        case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
                                  else_='no-snapshot')
        values = {'status': 'deleting',
                  'previous_status': volume.model.status,
                  'migration_status': case_values}
        order = ['status']
        with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query:
            update = model_query.return_value.filter.return_value.update
            update.return_value = 0
            self.assertFalse(volume.conditional_update(
                values, {'status': 'available'}, order=order))
        # We check that we are passing values to update to SQLAlchemy in the
        # right order
        self.assertEqual(1, update.call_count)
        self.assertListEqual(
            [('status', 'deleting'),
             ('previous_status', volume.model.status),
             ('migration_status', mock.ANY)],
            list(update.call_args[0][0]))
        self.assertDictEqual(
            {'synchronize_session': False,
             'update_args': {'preserve_parameter_order': True}},
            update.call_args[1])
    def test_conditional_update_no_order(self):
        """Plain values with no field references are passed as a dict, since
        no ordering is required."""
        volume = self._create_volume()
        values = {'status': 'deleting',
                  'previous_status': 'available',
                  'migration_status': None}
        with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query:
            update = model_query.return_value.filter.return_value.update
            update.return_value = 0
            self.assertFalse(volume.conditional_update(
                values, {'status': 'available'}))
        # Check that arguments passed to SQLAlchemy's update are correct (order
        # is not relevant).
        self.assertEqual(1, update.call_count)
        arg = update.call_args[0][0]
        self.assertIsInstance(arg, dict)
        self.assertEqual(set(values.keys()), set(arg.keys()))
    def test_conditional_update_multitable_fail(self):
        """Mixing fields from different models in values is rejected."""
        volume = self._create_volume()
        self.assertRaises(exception.ProgrammingError,
                          volume.conditional_update,
                          {'status': 'deleting',
                           objects.Snapshot.model.status: 'available'},
                          {'status': 'available'})
    def test_conditional_update_multitable_fail_fields_different_models(self):
        """Field keys from two foreign models are also rejected."""
        volume = self._create_volume()
        self.assertRaises(exception.ProgrammingError,
                          volume.conditional_update,
                          {objects.Backup.model.status: 'available',
                           objects.Snapshot.model.status: 'available'})
    def test_conditional_update_not_multitable(self):
        """Field keys from the object's own model are not treated as a
        multitable update(reflect_changes=False skips object refresh)."""
        volume = self._create_volume()
        with mock.patch('cinder.db.sqlalchemy.api._create_facade_lazily') as m:
            res = volume.conditional_update(
                {objects.Volume.model.status: 'deleting',
                 objects.Volume.model.size: 12}, reflect_changes=False)
            self.assertTrue(res)
            self.assertTrue(m.called)
class TestCinderDictObject(test_objects.BaseObjectsTestCase):
    """Tests for the dict-compatibility mixin(CinderObjectDictCompat)."""
    @objects.base.CinderObjectRegistry.register_if(False)
    class TestDictObject(objects.base.CinderObjectDictCompat,
                         objects.base.CinderObject):
        # 'foo' is a plain property, exposed through obj_extra_fields so it
        # is reachable via the dict-style interface as well
        obj_extra_fields = ['foo']
        fields = {
            'abc': fields.StringField(nullable=True),
            'def': fields.IntegerField(nullable=True),
        }
        @property
        def foo(self):
            return 42
    def test_dict_objects(self):
        """Check get()/in semantics for unset, set and extra fields."""
        obj = self.TestDictObject()
        self.assertNotIn('non_existing', obj)
        self.assertEqual('val', obj.get('abc', 'val'))
        self.assertNotIn('abc', obj)
        obj.abc = 'val2'
        self.assertEqual('val2', obj.get('abc', 'val'))
        self.assertEqual(42, obj.get('foo'))
        self.assertEqual(42, obj.get('foo', None))
        self.assertIn('foo', obj)
        self.assertIn('abc', obj)
        self.assertNotIn('def', obj)
@mock.patch('cinder.objects.base.OBJ_VERSIONS', fake_objects.MyHistory())
class TestCinderObjectSerializer(test_objects.BaseObjectsTestCase):
    """Tests for version capping and backporting in CinderObjectSerializer,
    run against the fake object version history."""
    # message logged every time an object is backported to an older version
    BACKPORT_MSG = ('Backporting %(obj_name)s from version %(src_vers)s to '
                    'version %(dst_vers)s')
    def setUp(self):
        """Build a child/parent/list object tree to serialize in the tests."""
        super(TestCinderObjectSerializer, self).setUp()
        self.obj = fake_objects.ChildObject(scheduled_at=None,
                                            uuid=uuid.uuid4(),
                                            text='text',
                                            integer=1)
        self.parent = fake_objects.ParentObject(uuid=uuid.uuid4(),
                                                child=self.obj,
                                                scheduled_at=None)
        self.parent_list = fake_objects.ParentObjectList(objects=[self.parent])
    def test_serialize_init_current_has_no_manifest(self):
        """Test that pinned to current version we have no manifest."""
        serializer = objects.base.CinderObjectSerializer('1.6')
        # Serializer should not have a manifest
        self.assertIsNone(serializer.manifest)
    def test_serialize_init_no_cap_has_no_manifest(self):
        """Test that without cap we have no manifest."""
        serializer = objects.base.CinderObjectSerializer()
        # Serializer should not have a manifest
        self.assertIsNone(serializer.manifest)
    def test_serialize_init_pinned_has_manifest(self):
        """Test that pinned to older version we have manifest."""
        objs_version = '1.5'
        serializer = objects.base.CinderObjectSerializer(objs_version)
        # Serializer should have the right manifest
        self.assertDictEqual(fake_objects.MyHistory()[objs_version],
                             serializer.manifest)
    def test_serialize_entity_unknown_version(self):
        """Test that bad cap version will prevent serializer creation."""
        self.assertRaises(exception.CappedVersionUnknown,
                          objects.base.CinderObjectSerializer, '0.9')
    @mock.patch('cinder.objects.base.LOG.debug')
    def test_serialize_entity_basic_no_backport(self, log_debug_mock):
        """Test single element serializer with no backport."""
        serializer = objects.base.CinderObjectSerializer('1.6')
        primitive = serializer.serialize_entity(self.context, self.obj)
        self.assertEqual('1.2', primitive['versioned_object.version'])
        data = primitive['versioned_object.data']
        self.assertEqual(1, data['integer'])
        self.assertEqual('text', data['text'])
        log_debug_mock.assert_not_called()
    @mock.patch('cinder.objects.base.LOG.debug')
    def test_serialize_entity_basic_backport(self, log_debug_mock):
        """Test single element serializer with backport."""
        serializer = objects.base.CinderObjectSerializer('1.5')
        primitive = serializer.serialize_entity(self.context, self.obj)
        self.assertEqual('1.1', primitive['versioned_object.version'])
        data = primitive['versioned_object.data']
        self.assertNotIn('integer', data)
        self.assertEqual('text', data['text'])
        log_debug_mock.assert_called_once_with(self.BACKPORT_MSG,
                                               {'obj_name': 'ChildObject',
                                                'src_vers': '1.2',
                                                'dst_vers': '1.1'})
    @mock.patch('cinder.objects.base.LOG.debug')
    def test_serialize_entity_full_no_backport(self, log_debug_mock):
        """Test related elements serialization with no backport."""
        serializer = objects.base.CinderObjectSerializer('1.6')
        primitive = serializer.serialize_entity(self.context, self.parent_list)
        self.assertEqual('1.1', primitive['versioned_object.version'])
        parent = primitive['versioned_object.data']['objects'][0]
        self.assertEqual('1.1', parent['versioned_object.version'])
        child = parent['versioned_object.data']['child']
        self.assertEqual('1.2', child['versioned_object.version'])
        log_debug_mock.assert_not_called()
    @mock.patch('cinder.objects.base.LOG.debug')
    def test_serialize_entity_full_backport_last_children(self,
                                                          log_debug_mock):
        """Test related elements serialization with backport of the last child.
        Test that using the manifest we properly backport a child object even
        when all its parents have not changed their version.
        """
        serializer = objects.base.CinderObjectSerializer('1.5')
        primitive = serializer.serialize_entity(self.context, self.parent_list)
        self.assertEqual('1.1', primitive['versioned_object.version'])
        parent = primitive['versioned_object.data']['objects'][0]
        self.assertEqual('1.1', parent['versioned_object.version'])
        # Only the child has been backported
        child = parent['versioned_object.data']['child']
        self.assertEqual('1.1', child['versioned_object.version'])
        # Check that the backport has been properly done
        data = child['versioned_object.data']
        self.assertNotIn('integer', data)
        self.assertEqual('text', data['text'])
        log_debug_mock.assert_called_once_with(self.BACKPORT_MSG,
                                               {'obj_name': 'ChildObject',
                                                'src_vers': '1.2',
                                                'dst_vers': '1.1'})
    @mock.patch('cinder.objects.base.LOG.debug')
    def test_serialize_entity_full_backport(self, log_debug_mock):
        """Test backport of the whole tree of related elements."""
        serializer = objects.base.CinderObjectSerializer('1.3')
        primitive = serializer.serialize_entity(self.context, self.parent_list)
        # List has been backported
        self.assertEqual('1.0', primitive['versioned_object.version'])
        parent = primitive['versioned_object.data']['objects'][0]
        # Parent has been backported as well
        self.assertEqual('1.0', parent['versioned_object.version'])
        # And the backport has been properly done
        data = parent['versioned_object.data']
        self.assertNotIn('scheduled_at', data)
        # And child as well
        child = parent['versioned_object.data']['child']
        self.assertEqual('1.1', child['versioned_object.version'])
        # Check that the backport has been properly done
        data = child['versioned_object.data']
        self.assertNotIn('integer', data)
        self.assertEqual('text', data['text'])
        log_debug_mock.assert_has_calls([
            mock.call(self.BACKPORT_MSG, {'obj_name': 'ParentObjectList',
                                          'src_vers': '1.1',
                                          'dst_vers': '1.0'}),
            mock.call(self.BACKPORT_MSG, {'obj_name': 'ParentObject',
                                          'src_vers': '1.1',
                                          'dst_vers': '1.0'}),
            mock.call(self.BACKPORT_MSG, {'obj_name': 'ChildObject',
                                          'src_vers': '1.2',
                                          'dst_vers': '1.1'})])
| 43.176932 | 79 | 0.629613 |
8b66632e30ab623529c9398c0b7a6277d01d8a43 | 52,058 | py | Python | python/ccxt/cex.py | dougvanzee/ccxt | ed06a2d180e02d1006f33be6ba65407ecb0a831b | [
"MIT"
] | 2 | 2021-05-27T03:58:42.000Z | 2022-02-21T03:24:08.000Z | python/ccxt/cex.py | dougvanzee/ccxt | ed06a2d180e02d1006f33be6ba65407ecb0a831b | [
"MIT"
] | null | null | null | python/ccxt/cex.py | dougvanzee/ccxt | ed06a2d180e02d1006f33be6ba65407ecb0a831b | [
"MIT"
] | 1 | 2022-01-11T07:39:19.000Z | 2022-01-11T07:39:19.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import NullResponse
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import InvalidNonce
class cex(Exchange):
    def describe(self):
        """Return the static description of the exchange: capabilities,
        endpoint URLs, API routes, fee schedule, error mapping and options.

        Merged on top of the base Exchange description by deep_extend.
        """
        return self.deep_extend(super(cex, self).describe(), {
            'id': 'cex',
            'name': 'CEX.IO',
            'countries': ['GB', 'EU', 'CY', 'RU'],
            'rateLimit': 1500,
            'has': {
                'cancelOrder': True,
                'CORS': False,
                'createOrder': True,
                'editOrder': True,
                'fetchBalance': True,
                'fetchClosedOrders': True,
                'fetchCurrencies': True,
                'fetchDepositAddress': True,
                'fetchMarkets': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrders': True,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTrades': True,
            },
            'timeframes': {
                '1m': '1m',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766442-8ddc33b0-5ed8-11e7-8b98-f786aef0f3c9.jpg',
                'api': 'https://cex.io/api',
                'www': 'https://cex.io',
                'doc': 'https://cex.io/cex-api',
                'fees': [
                    'https://cex.io/fee-schedule',
                    'https://cex.io/limits-commissions',
                ],
                'referral': 'https://cex.io/r/0/up105393824/0/',
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': True,
            },
            'api': {
                'public': {
                    'get': [
                        'currency_profile',
                        'currency_limits/',
                        'last_price/{pair}/',
                        'last_prices/{currencies}/',
                        'ohlcv/hd/{yyyymmdd}/{pair}',
                        'order_book/{pair}/',
                        'ticker/{pair}/',
                        'tickers/{currencies}/',
                        'trade_history/{pair}/',
                    ],
                    'post': [
                        'convert/{pair}',
                        'price_stats/{pair}',
                    ],
                },
                'private': {
                    'post': [
                        'active_orders_status/',
                        'archived_orders/{pair}/',
                        'balance/',
                        'cancel_order/',
                        'cancel_orders/{pair}/',
                        'cancel_replace_order/{pair}/',
                        'close_position/{pair}/',
                        'get_address/',
                        'get_myfee/',
                        'get_order/',
                        'get_order_tx/',
                        'open_orders/{pair}/',
                        'open_orders/',
                        'open_position/{pair}/',
                        'open_positions/{pair}/',
                        'place_order/{pair}/',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': 0.16 / 100,
                    'taker': 0.25 / 100,
                },
                'funding': {
                    'withdraw': {
                        # 'USD': None,
                        # 'EUR': None,
                        # 'RUB': None,
                        # 'GBP': None,
                        'BTC': 0.001,
                        'ETH': 0.01,
                        'BCH': 0.001,
                        'DASH': 0.01,
                        'BTG': 0.001,
                        'ZEC': 0.001,
                        'XRP': 0.02,
                    },
                    'deposit': {
                        # 'USD': amount => amount * 0.035 + 0.25,
                        # 'EUR': amount => amount * 0.035 + 0.24,
                        # 'RUB': amount => amount * 0.05 + 15.57,
                        # 'GBP': amount => amount * 0.035 + 0.2,
                        'BTC': 0.0,
                        'ETH': 0.0,
                        'BCH': 0.0,
                        'DASH': 0.0,
                        'BTG': 0.0,
                        'ZEC': 0.0,
                        'XRP': 0.0,
                        'XLM': 0.0,
                    },
                },
            },
            # maps fragments of exchange error messages to unified exceptions
            'exceptions': {
                'exact': {},
                'broad': {
                    'Insufficient funds': InsufficientFunds,
                    'Nonce must be incremented': InvalidNonce,
                    'Invalid Order': InvalidOrder,
                    'Order not found': OrderNotFound,
                    'limit exceeded': RateLimitExceeded,  # {"error":"rate limit exceeded"}
                    'Invalid API key': AuthenticationError,
                    'There was an error while placing your order': InvalidOrder,
                    'Sorry, too many clients already': DDoSProtection,
                },
            },
            'options': {
                'fetchOHLCVWarning': True,
                'createMarketBuyOrderRequiresPrice': True,
                'order': {
                    # maps exchange order status codes to unified statuses
                    'status': {
                        'c': 'canceled',
                        'd': 'closed',
                        'cd': 'canceled',
                        'a': 'open',
                    },
                },
            },
        })
    def fetch_currencies_from_cache(self, params={}):
        """Return the cached /currency_profile response, refetching it when
        the cached copy is older than the configured expiry(ms)."""
        # self method is now redundant
        # currencies are now fetched before markets
        options = self.safe_value(self.options, 'fetchCurrencies', {})
        timestamp = self.safe_integer(options, 'timestamp')
        expires = self.safe_integer(options, 'expires', 1000)
        now = self.milliseconds()
        if (timestamp is None) or ((now - timestamp) > expires):
            response = self.publicGetCurrencyProfile(params)
            self.options['fetchCurrencies'] = self.extend(options, {
                'response': response,
                'timestamp': now,
            })
        return self.safe_value(self.options['fetchCurrencies'], 'response')
    def fetch_currencies(self, params={}):
        """Fetch the exchange's currencies and return them keyed by unified
        currency code."""
        response = self.fetch_currencies_from_cache(params)
        self.options['currencies'] = {
            'timestamp': self.milliseconds(),
            'response': response,
        }
        #
        #     {
        #         "e":"currency_profile",
        #         "ok":"ok",
        #         "data":{
        #             "symbols":[
        #                 {
        #                     "code":"GHS",
        #                     "contract":true,
        #                     "commodity":true,
        #                     "fiat":false,
        #                     "description":"CEX.IO doesn't provide cloud mining services anymore.",
        #                     "precision":8,
        #                     "scale":0,
        #                     "minimumCurrencyAmount":"0.00000001",
        #                     "minimalWithdrawalAmount":-1
        #                 },
        #                 {
        #                     "code":"BTC",
        #                     "contract":false,
        #                     "commodity":false,
        #                     "fiat":false,
        #                     "description":"",
        #                     "precision":8,
        #                     "scale":0,
        #                     "minimumCurrencyAmount":"0.00000001",
        #                     "minimalWithdrawalAmount":0.002
        #                 },
        #                 {
        #                     "code":"ETH",
        #                     "contract":false,
        #                     "commodity":false,
        #                     "fiat":false,
        #                     "description":"",
        #                     "precision":8,
        #                     "scale":2,
        #                     "minimumCurrencyAmount":"0.00000100",
        #                     "minimalWithdrawalAmount":0.01
        #                 }
        #             ],
        #             "pairs":[
        #                 {
        #                     "symbol1":"BTC",
        #                     "symbol2":"USD",
        #                     "pricePrecision":1,
        #                     "priceScale":"/1000000",
        #                     "minLotSize":0.002,
        #                     "minLotSizeS2":20
        #                 },
        #                 {
        #                     "symbol1":"ETH",
        #                     "symbol2":"USD",
        #                     "pricePrecision":2,
        #                     "priceScale":"/10000",
        #                     "minLotSize":0.1,
        #                     "minLotSizeS2":20
        #                 }
        #             ]
        #         }
        #     }
        #
        data = self.safe_value(response, 'data', [])
        currencies = self.safe_value(data, 'symbols', [])
        result = {}
        for i in range(0, len(currencies)):
            currency = currencies[i]
            id = self.safe_string(currency, 'code')
            code = self.safe_currency_code(id)
            precision = self.safe_integer(currency, 'precision')
            # the endpoint does not report an active flag, assume active
            active = True
            result[code] = {
                'id': id,
                'code': code,
                'name': id,
                'active': active,
                'precision': precision,
                'fee': None,
                'limits': {
                    'amount': {
                        'min': self.safe_number(currency, 'minimumCurrencyAmount'),
                        'max': None,
                    },
                    'price': {
                        'min': None,
                        'max': None,
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                    'withdraw': {
                        'min': self.safe_number(currency, 'minimalWithdrawalAmount'),
                        'max': None,
                    },
                },
                'info': currency,
            }
        return result
    def fetch_markets(self, params={}):
        """Fetch trading pairs and return a list of unified market structures,
        combining /currency_limits with precision data from the cached
        /currency_profile response."""
        currenciesResponse = self.fetch_currencies_from_cache(params)
        currenciesData = self.safe_value(currenciesResponse, 'data', {})
        currencies = self.safe_value(currenciesData, 'symbols', [])
        currenciesById = self.index_by(currencies, 'code')
        pairs = self.safe_value(currenciesData, 'pairs', [])
        response = self.publicGetCurrencyLimits(params)
        #
        #     {
        #         "e":"currency_limits",
        #         "ok":"ok",
        #         "data": {
        #             "pairs":[
        #                 {
        #                     "symbol1":"BTC",
        #                     "symbol2":"USD",
        #                     "minLotSize":0.002,
        #                     "minLotSizeS2":20,
        #                     "maxLotSize":30,
        #                     "minPrice":"1500",
        #                     "maxPrice":"35000"
        #                 },
        #                 {
        #                     "symbol1":"BCH",
        #                     "symbol2":"EUR",
        #                     "minLotSize":0.1,
        #                     "minLotSizeS2":20,
        #                     "maxLotSize":null,
        #                     "minPrice":"25",
        #                     "maxPrice":"8192"
        #                 }
        #             ]
        #         }
        #     }
        #
        result = []
        markets = self.safe_value(response['data'], 'pairs')
        for i in range(0, len(markets)):
            market = markets[i]
            baseId = self.safe_string(market, 'symbol1')
            quoteId = self.safe_string(market, 'symbol2')
            id = baseId + '/' + quoteId
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
            baseCurrency = self.safe_value(currenciesById, baseId, {})
            quoteCurrency = self.safe_value(currenciesById, quoteId, {})
            pricePrecision = self.safe_integer(quoteCurrency, 'precision', 8)
            for j in range(0, len(pairs)):
                pair = pairs[j]
                if (pair['symbol1'] == baseId) and (pair['symbol2'] == quoteId):
                    # we might need to account for `priceScale` here
                    pricePrecision = self.safe_integer(pair, 'pricePrecision', pricePrecision)
            baseCcyPrecision = self.safe_integer(baseCurrency, 'precision', 8)
            baseCcyScale = self.safe_integer(baseCurrency, 'scale', 0)
            amountPrecision = baseCcyPrecision - baseCcyScale
            precision = {
                'amount': amountPrecision,
                'price': pricePrecision,
            }
            result.append({
                'id': id,
                'info': market,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': self.safe_number(market, 'minLotSize'),
                        'max': self.safe_number(market, 'maxLotSize'),
                    },
                    'price': {
                        'min': self.safe_number(market, 'minPrice'),
                        'max': self.safe_number(market, 'maxPrice'),
                    },
                    'cost': {
                        'min': self.safe_number(market, 'minLotSizeS2'),
                        'max': None,
                    },
                },
                'active': None,
            })
        return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostBalance(params)
result = {'info': response}
ommited = ['username', 'timestamp']
balances = self.omit(response, ommited)
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
balance = self.safe_value(balances, currencyId, {})
account = self.account()
account['free'] = self.safe_string(balance, 'available')
# https://github.com/ccxt/ccxt/issues/5484
account['used'] = self.safe_string(balance, 'orders', '0')
code = self.safe_currency_code(currencyId)
result[code] = account
return self.parse_balance(result, False)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'pair': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = self.publicGetOrderBookPair(self.extend(request, params))
timestamp = self.safe_timestamp(response, 'timestamp')
return self.parse_order_book(response, timestamp)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1591403940,
# 0.024972,
# 0.024972,
# 0.024969,
# 0.024969,
# 0.49999900
# ]
#
return [
self.safe_timestamp(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
if since is None:
since = self.milliseconds() - 86400000 # yesterday
else:
if self.options['fetchOHLCVWarning']:
raise ExchangeError(self.id + " fetchOHLCV warning: CEX can return historical candles for a certain date only, self might produce an empty or None reply. Set exchange.options['fetchOHLCVWarning'] = False or add({'options': {'fetchOHLCVWarning': False}}) to constructor params to suppress self warning message.")
ymd = self.ymd(since)
ymd = ymd.split('-')
ymd = ''.join(ymd)
request = {
'pair': market['id'],
'yyyymmdd': ymd,
}
try:
response = self.publicGetOhlcvHdYyyymmddPair(self.extend(request, params))
#
# {
# "time":20200606,
# "data1m":"[[1591403940,0.024972,0.024972,0.024969,0.024969,0.49999900]]",
# }
#
key = 'data' + self.timeframes[timeframe]
data = self.safe_string(response, key)
ohlcvs = json.loads(data)
return self.parse_ohlcvs(ohlcvs, market, timeframe, since, limit)
except Exception as e:
if isinstance(e, NullResponse):
return []
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_timestamp(ticker, 'timestamp')
volume = self.safe_number(ticker, 'volume')
high = self.safe_number(ticker, 'high')
low = self.safe_number(ticker, 'low')
bid = self.safe_number(ticker, 'bid')
ask = self.safe_number(ticker, 'ask')
last = self.safe_number(ticker, 'last')
symbol = None
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': high,
'low': low,
'bid': bid,
'bidVolume': None,
'ask': ask,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': volume,
'quoteVolume': None,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
currencies = list(self.currencies.keys())
request = {
'currencies': '/'.join(currencies),
}
response = self.publicGetTickersCurrencies(self.extend(request, params))
tickers = response['data']
result = {}
for t in range(0, len(tickers)):
ticker = tickers[t]
symbol = ticker['pair'].replace(':', '/')
market = self.markets[symbol]
result[symbol] = self.parse_ticker(ticker, market)
return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
ticker = self.publicGetTickerPair(self.extend(request, params))
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
timestamp = self.safe_timestamp(trade, 'date')
id = self.safe_string(trade, 'tid')
type = None
side = self.safe_string(trade, 'type')
price = self.safe_number(trade, 'price')
amount = self.safe_number(trade, 'amount')
cost = None
if amount is not None:
if price is not None:
cost = amount * price
symbol = None
if market is not None:
symbol = market['symbol']
return {
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'order': None,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = self.publicGetTradeHistoryPair(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
# for market buy it requires the amount of quote currency to spend
if (type == 'market') and (side == 'buy'):
if self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False to supply the cost in the amount argument(the exchange-specific behaviour)")
else:
amount = amount * price
self.load_markets()
request = {
'pair': self.market_id(symbol),
'type': side,
'amount': amount,
}
if type == 'limit':
request['price'] = price
else:
request['order_type'] = type
response = self.privatePostPlaceOrderPair(self.extend(request, params))
#
# {
# "id": "12978363524",
# "time": 1586610022259,
# "type": "buy",
# "price": "0.033934",
# "amount": "0.10722802",
# "pending": "0.10722802",
# "complete": False
# }
#
placedAmount = self.safe_number(response, 'amount')
remaining = self.safe_number(response, 'pending')
timestamp = self.safe_value(response, 'time')
complete = self.safe_value(response, 'complete')
status = 'closed' if complete else 'open'
filled = None
if (placedAmount is not None) and (remaining is not None):
filled = max(placedAmount - remaining, 0)
return {
'id': self.safe_string(response, 'id'),
'info': response,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'type': type,
'side': self.safe_string(response, 'type'),
'symbol': symbol,
'status': status,
'price': self.safe_number(response, 'price'),
'amount': placedAmount,
'cost': None,
'average': None,
'remaining': remaining,
'filled': filled,
'fee': None,
'trades': None,
}
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'id': id,
}
return self.privatePostCancelOrder(self.extend(request, params))
    def parse_order(self, order, market=None):
        """Translate a raw CEX order into the unified order structure,
        reconstructing the per-trade list from the embedded 'vtx'
        transaction log when present."""
        # Depending on the call, 'time' can be a unix int, unix string or ISO string
        # Yes, really
        timestamp = self.safe_value(order, 'time')
        # NOTE(review): 'basestring' is the py2 name -- presumably a py2/py3
        # compatibility alias provided elsewhere in this module; confirm
        if isinstance(timestamp, basestring) and timestamp.find('T') >= 0:
            # ISO8601 string
            timestamp = self.parse8601(timestamp)
        else:
            # either integer or string integer
            timestamp = int(timestamp)
        symbol = None
        if market is None:
            # derive the market from the order's own currency fields
            baseId = self.safe_string(order, 'symbol1')
            quoteId = self.safe_string(order, 'symbol2')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
            if symbol in self.markets:
                market = self.market(symbol)
        status = self.parse_order_status(self.safe_string(order, 'status'))
        price = self.safe_number(order, 'price')
        amount = self.safe_number(order, 'amount')
        # sell orders can have a negative amount
        # https://github.com/ccxt/ccxt/issues/5338
        if amount is not None:
            amount = abs(amount)
        remaining = self.safe_number_2(order, 'pending', 'remains')
        filled = amount - remaining
        fee = None
        cost = None
        if market is not None:
            symbol = market['symbol']
            # 'ta:<quote>' / 'tta:<quote>' carry maker/taker quote turnover
            taCost = self.safe_number(order, 'ta:' + market['quote'])
            ttaCost = self.safe_number(order, 'tta:' + market['quote'])
            cost = self.sum(taCost, ttaCost)
            # fees may be charged either in the base or the quote currency,
            # and either as maker ('fa:') or taker ('tfa:') fields
            baseFee = 'fa:' + market['base']
            baseTakerFee = 'tfa:' + market['base']
            quoteFee = 'fa:' + market['quote']
            quoteTakerFee = 'tfa:' + market['quote']
            feeRate = self.safe_number(order, 'tradingFeeMaker')
            if not feeRate:
                feeRate = self.safe_number(order, 'tradingFeeTaker', feeRate)
            if feeRate:
                feeRate /= 100.0  # convert to mathematically-correct percentage coefficients: 1.0 = 100%
            if (baseFee in order) or (baseTakerFee in order):
                baseFeeCost = self.safe_number_2(order, baseFee, baseTakerFee)
                fee = {
                    'currency': market['base'],
                    'rate': feeRate,
                    'cost': baseFeeCost,
                }
            elif (quoteFee in order) or (quoteTakerFee in order):
                quoteFeeCost = self.safe_number_2(order, quoteFee, quoteTakerFee)
                fee = {
                    'currency': market['quote'],
                    'rate': feeRate,
                    'cost': quoteFeeCost,
                }
        if not cost:
            cost = price * filled
        side = order['type']
        trades = None
        orderId = order['id']
        if 'vtx' in order:
            # rebuild the individual fills from the order's transaction log
            trades = []
            for i in range(0, len(order['vtx'])):
                item = order['vtx'][i]
                tradeSide = self.safe_string(item, 'type')
                if tradeSide == 'cancel':
                    # looks like self might represent the cancelled part of an order
                    #     {id: '4426729543',
                    #      type: 'cancel',
                    #      time: '2017-09-22T00:24:30.476Z',
                    #      user: 'up106404164',
                    #      c: 'user:up106404164:a:BCH',
                    #      d: 'order:4426728375:a:BCH',
                    #      a: '0.09935956',
                    #      amount: '0.09935956',
                    #      balance: '0.42580261',
                    #      symbol: 'BCH',
                    #      order: '4426728375',
                    #      buy: null, sell: null, pair: null, pos: null,
                    #      cs: '0.42580261', ds: 0}
                    continue
                tradePrice = self.safe_number(item, 'price')
                if tradePrice is None:
                    # an entry without a price is the order's own ledger
                    # record (balance movement), not a fill:
                    #     {"a": "0.47000000",
                    #      "c": "user:up106404164:a:EUR",
                    #      "d": "order:6065499239:a:EUR",
                    #      "cs": "1432.93", "ds": "476.72",
                    #      "id": "6065499249",
                    #      "buy": null, "pos": null, "pair": null, "sell": null,
                    #      "time": "2018-04-22T13:07:22.152Z",
                    #      "type": "buy", "user": "up106404164",
                    #      "order": "6065499239",
                    #      "amount": "-715.97000000",
                    #      "symbol": "EUR", "balance": "1432.93000000"}
                    continue
                # todo: deal with these
                if tradeSide == 'costsNothing':
                    continue
                # --
                # if side != tradeSide:
                #     raise Error(json.dumps(order, null, 2))
                # if orderId != item['order']:
                #     raise Error(json.dumps(order, null, 2))
                # --
                # remaining entries are genuine (partial) fills; examples:
                # partial buy trade -- carries 'price', 'amount', 'symbol2'
                # and 'fee_amount'; a trade may also have a zero amount but a
                # non-zero fee, or an amount reported in the quote currency
                # for sells (see the conversion below)
                tradeTimestamp = self.parse8601(self.safe_string(item, 'time'))
                tradeAmount = self.safe_number(item, 'amount')
                feeCost = self.safe_number(item, 'fee_amount')
                absTradeAmount = -tradeAmount if (tradeAmount < 0) else tradeAmount
                tradeCost = None
                if tradeSide == 'sell':
                    # for sells the logged amount is the quote proceeds;
                    # recover the base amount from proceeds + fee at the price
                    tradeCost = absTradeAmount
                    absTradeAmount = self.sum(feeCost, tradeCost) / tradePrice
                else:
                    tradeCost = absTradeAmount * tradePrice
                trades.append({
                    'id': self.safe_string(item, 'id'),
                    'timestamp': tradeTimestamp,
                    'datetime': self.iso8601(tradeTimestamp),
                    'order': orderId,
                    'symbol': symbol,
                    'price': tradePrice,
                    'amount': absTradeAmount,
                    'cost': tradeCost,
                    'side': tradeSide,
                    'fee': {
                        'cost': feeCost,
                        'currency': market['quote'],
                    },
                    'info': item,
                    'type': None,
                    'takerOrMaker': None,
                })
        return {
            'id': orderId,
            'clientOrderId': None,
            'datetime': self.iso8601(timestamp),
            'timestamp': timestamp,
            'lastTradeTimestamp': None,
            'status': status,
            'symbol': symbol,
            'type': 'market' if (price is None) else 'limit',
            'timeInForce': None,
            'postOnly': None,
            'side': side,
            'price': price,
            'stopPrice': None,
            'cost': cost,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'trades': trades,
            'fee': fee,
            'info': order,
            'average': None,
        }
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
method = 'privatePostOpenOrders'
market = None
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
method += 'Pair'
orders = getattr(self, method)(self.extend(request, params))
for i in range(0, len(orders)):
orders[i] = self.extend(orders[i], {'status': 'open'})
return self.parse_orders(orders, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
method = 'privatePostArchivedOrdersPair'
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchClosedOrders() requires a symbol argument')
market = self.market(symbol)
request = {'pair': market['id']}
response = getattr(self, method)(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
    def fetch_order(self, id, symbol=None, params={}):
        """Fetch a single order by *id* via privatePostGetOrderTx.

        The response's 'data' object carries the order fields plus a 'vtx'
        transaction log, from which parse_order() reconstructs the trades.
        """
        self.load_markets()
        request = {
            'id': str(id),
        }
        response = self.privatePostGetOrderTx(self.extend(request, params))
        data = self.safe_value(response, 'data', {})
        #
        # sample 'data' payload (abbreviated -- the 'vtx' array mixes fill
        # entries carrying 'price'/'fee_amount' with plain balance movements):
        #
        #     {
        #         "id": "5442731603",
        #         "type": "sell",
        #         "time": 1516132358071,
        #         "lastTxTime": 1516132378452,
        #         "lastTx": "5442734452",
        #         "user": "up106404164",
        #         "status": "d",
        #         "symbol1": "ETH",
        #         "symbol2": "EUR",
        #         "amount": "0.50000000",
        #         "kind": "api",
        #         "price": "923.3386",
        #         "fa:EUR": "0.55",
        #         "ta:EUR": "369.77",
        #         "remains": "0.00000000",
        #         "tfa:EUR": "0.22",
        #         "tta:EUR": "91.95",
        #         "a:ETH:cds": "0.50000000",
        #         "a:EUR:cds": "461.72",
        #         "f:EUR:cds": "0.77",
        #         "tradingFeeMaker": "0.15",
        #         "tradingFeeTaker": "0.23",
        #         "orderId": "5442731603",
        #         "vtx": [
        #             {
        #                 "id": "5442734452",
        #                 "type": "sell",
        #                 "time": "2018-01-16T19:52:58.452Z",
        #                 "c": "user:up106404164:a:EUR",
        #                 "d": "order:5442731603:a:EUR",
        #                 "amount": "104.53000000",
        #                 "balance": "932.71000000",
        #                 "symbol": "EUR",
        #                 "order": "5442731603",
        #                 "price": 923.3386,
        #                 "symbol2": "ETH",
        #                 "fee_amount": "0.16"
        #             },
        #             {
        #                 "id": "5442731604",
        #                 "type": "sell",
        #                 "time": "2018-01-16T19:52:38.071Z",
        #                 "c": "order:5442731603:a:ETH",
        #                 "d": "user:up106404164:a:ETH",
        #                 "amount": "-0.50000000",
        #                 "balance": "15.80995000",
        #                 "symbol": "ETH",
        #                 "order": "5442731603"
        #             }
        #         ]
        #     }
        #
        return self.parse_order(data)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'limit': limit,
'pair': market['id'],
'dateFrom': since,
}
response = self.privatePostArchivedOrdersPair(self.extend(request, params))
results = []
for i in range(0, len(response)):
# cancelled(unfilled):
# {id: '4005785516',
# type: 'sell',
# time: '2017-07-18T19:08:34.223Z',
# lastTxTime: '2017-07-18T19:08:34.396Z',
# lastTx: '4005785522',
# pos: null,
# status: 'c',
# symbol1: 'ETH',
# symbol2: 'GBP',
# amount: '0.20000000',
# price: '200.5625',
# remains: '0.20000000',
# 'a:ETH:cds': '0.20000000',
# tradingFeeMaker: '0',
# tradingFeeTaker: '0.16',
# tradingFeeUserVolumeAmount: '10155061217',
# orderId: '4005785516'}
# --
# cancelled(partially filled buy):
# {id: '4084911657',
# type: 'buy',
# time: '2017-08-05T03:18:39.596Z',
# lastTxTime: '2019-03-19T17:37:46.404Z',
# lastTx: '8459265833',
# pos: null,
# status: 'cd',
# symbol1: 'BTC',
# symbol2: 'GBP',
# amount: '0.05000000',
# price: '2241.4692',
# tfacf: '1',
# remains: '0.03910535',
# 'tfa:GBP': '0.04',
# 'tta:GBP': '24.39',
# 'a:BTC:cds': '0.01089465',
# 'a:GBP:cds': '112.26',
# 'f:GBP:cds': '0.04',
# tradingFeeMaker: '0',
# tradingFeeTaker: '0.16',
# tradingFeeUserVolumeAmount: '13336396963',
# orderId: '4084911657'}
# --
# cancelled(partially filled sell):
# {id: '4426728375',
# type: 'sell',
# time: '2017-09-22T00:24:20.126Z',
# lastTxTime: '2017-09-22T00:24:30.476Z',
# lastTx: '4426729543',
# pos: null,
# status: 'cd',
# symbol1: 'BCH',
# symbol2: 'BTC',
# amount: '0.10000000',
# price: '0.11757182',
# tfacf: '1',
# remains: '0.09935956',
# 'tfa:BTC': '0.00000014',
# 'tta:BTC': '0.00007537',
# 'a:BCH:cds': '0.10000000',
# 'a:BTC:cds': '0.00007537',
# 'f:BTC:cds': '0.00000014',
# tradingFeeMaker: '0',
# tradingFeeTaker: '0.18',
# tradingFeeUserVolumeAmount: '3466715450',
# orderId: '4426728375'}
# --
# filled:
# {id: '5342275378',
# type: 'sell',
# time: '2018-01-04T00:28:12.992Z',
# lastTxTime: '2018-01-04T00:28:12.992Z',
# lastTx: '5342275393',
# pos: null,
# status: 'd',
# symbol1: 'BCH',
# symbol2: 'BTC',
# amount: '0.10000000',
# kind: 'api',
# price: '0.17',
# remains: '0.00000000',
# 'tfa:BTC': '0.00003902',
# 'tta:BTC': '0.01699999',
# 'a:BCH:cds': '0.10000000',
# 'a:BTC:cds': '0.01699999',
# 'f:BTC:cds': '0.00003902',
# tradingFeeMaker: '0.15',
# tradingFeeTaker: '0.23',
# tradingFeeUserVolumeAmount: '1525951128',
# orderId: '5342275378'}
# --
# market order(buy):
# {"id": "6281946200",
# "pos": null,
# "time": "2018-05-23T11:55:43.467Z",
# "type": "buy",
# "amount": "0.00000000",
# "lastTx": "6281946210",
# "status": "d",
# "amount2": "20.00",
# "orderId": "6281946200",
# "remains": "0.00000000",
# "symbol1": "ETH",
# "symbol2": "EUR",
# "tfa:EUR": "0.05",
# "tta:EUR": "19.94",
# "a:ETH:cds": "0.03764100",
# "a:EUR:cds": "20.00",
# "f:EUR:cds": "0.05",
# "lastTxTime": "2018-05-23T11:55:43.467Z",
# "tradingFeeTaker": "0.25",
# "tradingFeeUserVolumeAmount": "55998097"}
# --
# market order(sell):
# {"id": "6282200948",
# "pos": null,
# "time": "2018-05-23T12:42:58.315Z",
# "type": "sell",
# "amount": "-0.05000000",
# "lastTx": "6282200958",
# "status": "d",
# "orderId": "6282200948",
# "remains": "0.00000000",
# "symbol1": "ETH",
# "symbol2": "EUR",
# "tfa:EUR": "0.07",
# "tta:EUR": "26.49",
# "a:ETH:cds": "0.05000000",
# "a:EUR:cds": "26.49",
# "f:EUR:cds": "0.07",
# "lastTxTime": "2018-05-23T12:42:58.315Z",
# "tradingFeeTaker": "0.25",
# "tradingFeeUserVolumeAmount": "56294576"}
order = response[i]
status = self.parse_order_status(self.safe_string(order, 'status'))
baseId = self.safe_string(order, 'symbol1')
quoteId = self.safe_string(order, 'symbol2')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
side = self.safe_string(order, 'type')
baseAmount = self.safe_number(order, 'a:' + baseId + ':cds')
quoteAmount = self.safe_number(order, 'a:' + quoteId + ':cds')
fee = self.safe_number(order, 'f:' + quoteId + ':cds')
amount = self.safe_number(order, 'amount')
price = self.safe_number(order, 'price')
remaining = self.safe_number(order, 'remains')
filled = amount - remaining
orderAmount = None
cost = None
average = None
type = None
if not price:
type = 'market'
orderAmount = baseAmount
cost = quoteAmount
average = orderAmount / cost
else:
ta = self.safe_number(order, 'ta:' + quoteId, 0)
tta = self.safe_number(order, 'tta:' + quoteId, 0)
fa = self.safe_number(order, 'fa:' + quoteId, 0)
tfa = self.safe_number(order, 'tfa:' + quoteId, 0)
if side == 'sell':
cost = self.sum(self.sum(ta, tta), self.sum(fa, tfa))
else:
cost = self.sum(ta, tta) - self.sum(fa, tfa)
type = 'limit'
orderAmount = amount
average = cost / filled
time = self.safe_string(order, 'time')
lastTxTime = self.safe_string(order, 'lastTxTime')
timestamp = self.parse8601(time)
results.append({
'id': self.safe_string(order, 'id'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastUpdated': self.parse8601(lastTxTime),
'status': status,
'symbol': symbol,
'side': side,
'price': price,
'amount': orderAmount,
'average': average,
'type': type,
'filled': filled,
'cost': cost,
'remaining': remaining,
'fee': {
'cost': fee,
'currency': quote,
},
'info': order,
})
return results
def parse_order_status(self, status):
return self.safe_string(self.options['order']['status'], status, status)
def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
if amount is None:
raise ArgumentsRequired(self.id + ' editOrder() requires a amount argument')
if price is None:
raise ArgumentsRequired(self.id + ' editOrder() requires a price argument')
self.load_markets()
market = self.market(symbol)
# see: https://cex.io/rest-api#/definitions/CancelReplaceOrderRequest
request = {
'pair': market['id'],
'type': side,
'amount': amount,
'price': price,
'order_id': id,
}
response = self.privatePostCancelReplaceOrderPair(self.extend(request, params))
return self.parse_order(response, market)
def fetch_deposit_address(self, code, params={}):
if code == 'XRP' or code == 'XLM':
# https://github.com/ccxt/ccxt/pull/2327#issuecomment-375204856
raise NotSupported(self.id + ' fetchDepositAddress does not support XRP and XLM addresses yet(awaiting docs from CEX.io)')
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.privatePostGetAddress(self.extend(request, params))
address = self.safe_string(response, 'data')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': None,
'info': response,
}
    def nonce(self):
        # millisecond timestamp, used by sign() as the request nonce
        return self.milliseconds()
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the final URL, headers and body for a request.

        Public endpoints send leftover params as a query string; private
        endpoints send a JSON body containing the api key, an uppercased
        HMAC signature over nonce + uid + apiKey, and the nonce itself.
        """
        url = self.urls['api'] + '/' + self.implode_params(path, params)
        # params consumed by the URL path are removed from the payload
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            nonce = str(self.nonce())
            # the signed message is nonce + uid + apiKey, HMAC'd with the secret
            auth = nonce + self.uid + self.apiKey
            signature = self.hmac(self.encode(auth), self.encode(self.secret))
            body = self.json(self.extend({
                'key': self.apiKey,
                'signature': signature.upper(),
                'nonce': nonce,
            }, query))
            headers = {
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Inspect a decoded response and raise a mapped exception on API errors.

        Falling through (returning None or the response) means no error was
        detected and normal processing continues.
        """
        if isinstance(response, list):
            return response  # public endpoints may return []-arrays
        if body == 'true':
            return
        if response is None:
            raise NullResponse(self.id + ' returned ' + self.json(response))
        if 'e' in response:
            if 'ok' in response:
                if response['ok'] == 'ok':
                    return
        if 'error' in response:
            message = self.safe_string(response, 'error')
            feedback = self.id + ' ' + body
            # prefer an exact message match, then a substring match, then a generic error
            self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
            self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
            raise ExchangeError(feedback)
| 40.449106 | 461 | 0.424719 |
cff3ef3ad81e8dd92a99bb4ffbd1d3069799ad35 | 4,458 | py | Python | api_utils.py | fastent/fastent | ce49b250e7c5b2c475b981307f11a0595fc96a89 | [
"MIT"
] | 8 | 2018-04-16T09:12:59.000Z | 2020-12-08T12:35:56.000Z | api_utils.py | fastent/fastent | ce49b250e7c5b2c475b981307f11a0595fc96a89 | [
"MIT"
] | 10 | 2018-01-23T16:03:51.000Z | 2021-03-31T18:35:22.000Z | api_utils.py | fastent/fastent | ce49b250e7c5b2c475b981307f11a0595fc96a89 | [
"MIT"
] | 3 | 2018-06-26T18:19:38.000Z | 2021-08-09T14:16:45.000Z | import subprocess
import sys
import argparse
import requests
import shutil
import lxml.html as LH
import pandas as pd
import urllib.request
from .fast_utils import exact_word_match
class DownloadError(Exception):
    """Raised when a model download finishes without reporting success."""

    def __init__(self, output):
        # bug fix: pass the output to Exception.__init__ as well, so that
        # str(exc) / tracebacks show the captured process output instead of
        # an empty message; the attribute is kept for existing callers
        super().__init__(output)
        self.output = output
def spacy_model_download(model_name, timeout = None):
    """
    Downloads a spacy model with name

    Args:
        model_name (str): The model name for download
        timeout (float, optional): seconds to wait for the download
            subprocess (Python >= 3.5 branch only)

    Returns:
        str or None: the captured subprocess output on success, None on
        failure (the exception is printed, matching the original behaviour)
    """
    try:
        if sys.version_info <= (3, 4):
            # bug fix: python_exec is a function and must be *called* --
            # previously the function object itself was placed in the
            # argument list, which made subprocess fail with a TypeError
            arguments = [python_exec(), '-m', 'spacy', 'download', model_name]
            subprocess.call(arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        else:
            arguments = [python_exec(), '-m', 'spacy', 'download', model_name]
            print("Dowload for model {} started".format(model_name))
            process = subprocess.run(arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=timeout)
            output_cont = process.stdout.decode("ISO-8859-1", "ignore")
            print("Dowload for model {} ended".format(model_name))
            # spacy prints 'Successfully ...' on a completed download
            if not exact_word_match('Successfully', output_cont):
                raise DownloadError(output_cont)
            else:
                return output_cont
    except Exception as e:
        # DownloadError is an Exception subclass, so the previous
        # (DownloadError, Exception) tuple was redundant
        print(e)
def fasttext_list():
    """
    Return a Dictionary of the possible fasttext models

    Args:
        None:

    Returns:
        diction_frac(dict) : Language to Model dictionary, or None when
        scraping the fastText pretrained-vectors page fails
    """
    diction_frac = {}
    try:
        # scrape the language -> vector-file table from the fastText repo page
        content = requests.get("https://github.com/facebookresearch/fastText/blob/master/pretrained-vectors.md").content
        webpage = LH.fromstring(content)
        allRefs = webpage.xpath('//a/@href')
        # keep only the S3 text-vector links (the .zip links are other bundles)
        allRefs = [i for i in allRefs if 'amazonaws' in i and not 'zip' in i]
        # NOTE(review): the next bare expression has no effect -- leftover debug?
        allRefs
        df = pd.read_html(content)
        df = df[-1]
        # the page lays languages out in three columns; the flattened link
        # list must line up with the concatenation of those three columns
        assert(len(allRefs) == len(df['Unnamed: 0']) + len(df['Unnamed: 1'])+len(df['Unnamed: 2']))
        for i in range(len(allRefs)):
            # map flat index i back to its (row, column) cell in the table
            if i%3 == 0:
                diction_frac[df['Unnamed: 0'][int(i/3)]] = allRefs[i]
            if i%3 == 1:
                diction_frac[df['Unnamed: 1'][int(i/3)]] = allRefs[i]
            if i%3 == 2:
                diction_frac[df['Unnamed: 2'][int(i/3)]] = allRefs[i]
    except Exception as e:
        print(e)
        return None
    return diction_frac
def fasttext_dowload(language_name, timeout = None):
    """
    Downloads a fasttext model with language name

    Args:
        language_name (str): The language name for download (matched
            case-insensitively as a substring of the fastText language list)
        timeout (float, optional): socket timeout in seconds for the download

    Returns:
        (void) : download in the designated language model to fastent folder
    """
    try:
        full_lang_dict = fasttext_list()
        # fasttext_list() returns None when scraping the model list failed
        if not full_lang_dict:
            print('Could not retrieve the fastText language list')
            return
        url = ''
        for key in full_lang_dict:
            if language_name.lower() in key.lower():
                url = full_lang_dict[key]
        # bug fix: previously a missing language left url == '' and the code
        # crashed inside urlopen with an unhelpful error
        if not url:
            print('No fastText model found for language {}'.format(language_name))
            return
        file_name = url.split('/')[-1]
        # bug fix: the timeout parameter was accepted but never used
        with urllib.request.urlopen(url, timeout=timeout) as response, open(file_name, 'wb') as out_file:
            shutil.copyfileobj(response, out_file)
    except Exception as e:
        print(e)
def python_exec():
    """Return the python executable name appropriate for the running interpreter."""
    return 'python' if sys.version_info < (3,) else 'python3'
if __name__ == "__main__":
    # CLI entry point, e.g.:  python api_utils.py -l spacy -m en_core_web_sm
    parser = argparse.ArgumentParser(description='API options')
    parser.add_argument('-l', action="store", dest = 'location', help = 'Location of the model, i.e gensim, spacy, fastText etc etc')
    parser.add_argument('-m', action="store", type=str, dest = 'model_name', help ='designated model name')
    parser.add_argument('-t', action="store", type=str, dest = 'timeout', help ='timeout', default = None)
    results = parser.parse_args()
    print(results)
    # dispatch on the requested backend (case-insensitive substring match);
    # NOTE(review): -t arrives as a str, while the download helpers forward
    # it to subprocess/urlopen timeouts that expect a number -- confirm
    if 'spacy' in results.location.lower():
        spacy_model_download(results.model_name, results.timeout)
    if 'fasttext' in results.location.lower():
        fasttext_dowload(results.model_name, results.timeout)
3669666af0d8011cb6caa4f388cfa37c0bd8a35c | 7,588 | py | Python | tests/conftest.py | 722C/django-prices-taxjar | b9d586fda740fe1539620a1950f565987d7597df | [
"BSD-3-Clause"
] | null | null | null | tests/conftest.py | 722C/django-prices-taxjar | b9d586fda740fe1539620a1950f565987d7597df | [
"BSD-3-Clause"
] | null | null | null | tests/conftest.py | 722C/django-prices-taxjar | b9d586fda740fe1539620a1950f565987d7597df | [
"BSD-3-Clause"
] | null | null | null | import os
import django
import pytest
def pytest_configure():
    """Point Django at the test settings and initialize it before tests import models."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
    django.setup()
@pytest.fixture
def json_error():
    """Error payload returned by the API when the request body is not valid JSON."""
    return {'success': False, 'error': {'info': 'Invalid json'}}
@pytest.fixture
def json_success():
    """Successful ``summary_rates`` payload covering three shapes: a US
    state (CA), a Canadian province (BC) and a region-less country (UK),
    each with ``minimum_rate`` and ``average_rate`` objects."""
    data = {
        "summary_rates": [
            {
                "country_code": "US",
                "country": "United States",
                "region_code": "CA",
                "region": "California",
                "minimum_rate": {
                    "label": "State Tax",
                    "rate": 0.065
                },
                "average_rate": {
                    "label": "Tax",
                    "rate": 0.0827
                }
            },
            {
                "country_code": "CA",
                "country": "Canada",
                "region_code": "BC",
                "region": "British Columbia",
                "minimum_rate": {
                    "label": "GST",
                    "rate": 0.05
                },
                "average_rate": {
                    "label": "PST",
                    "rate": 0.12
                }
            },
            {
                "country_code": "UK",
                "country": "United Kingdom",
                "region_code": None,
                "region": None,
                "minimum_rate": {
                    "label": "VAT",
                    "rate": 0.2
                },
                "average_rate": {
                    "label": "VAT",
                    "rate": 0.2
                }
            }
        ]
    }
    return data
@pytest.fixture
def json_types_success():
    """Successful ``categories`` payload: the product tax-code catalogue
    (name, numeric ``product_tax_code`` and description per category)."""
    data = {
        "categories": [
            {
                "name": "Clothing",
                "product_tax_code": "20010",
                "description": " All human wearing apparel suitable for general use"
            },
            {
                "name": "Software as a Service",
                "product_tax_code": "30070",
                "description": "Pre-written software, delivered electronically, but access remotely."
            },
            {
                "name": "Digital Goods",
                "product_tax_code": "31000",
                "description": "Digital products transferred electronically, meaning obtained by the purchaser by means other than tangible storage media."
            },
            {
                "name": "Candy",
                "product_tax_code": "40010",
                "description": "Candy and similar items"
            },
            {
                "name": "Supplements",
                "product_tax_code": "40020",
                "description": "Non-food dietary supplements"
            },
            {
                "name": "Food & Groceries",
                "product_tax_code": "40030",
                "description": "Food for humans consumption, unprepared"
            },
            {
                "name": "Soft Drinks",
                "product_tax_code": "40050",
                "description": "Soft drinks, soda, and other similar beverages. Does not include fruit juices and water."
            },
            {
                "name": "Bottled Water",
                "product_tax_code": "40060",
                "description": "Bottled, drinkable water for human consumption."
            },
            {
                "name": "Prepared Foods",
                "product_tax_code": "41000",
                "description": "Foods intended for on-site consumption. Ex. Restaurant meals."
            },
            {
                "name": "Non-Prescription",
                "product_tax_code": "51010",
                "description": "Drugs for human use without a prescription"
            },
            {
                "name": "Prescription",
                "product_tax_code": "51020",
                "description": "Drugs for human use with a prescription"
            },
            {
                "name": "Books",
                "product_tax_code": "81100",
                "description": "Books, printed"
            },
            {
                "name": "Textbook",
                "product_tax_code": "81110",
                "description": "Textbooks, printed"
            },
            {
                "name": "Religious Books",
                "product_tax_code": "81120",
                "description": "Religious books and manuals, printed"
            },
            {
                "name": "Magazines & Subscriptions",
                "product_tax_code": "81300",
                "description": "Periodicals, printed, sold by subscription"
            },
            {
                "name": "Magazine",
                "product_tax_code": "81310",
                "description": "Periodicals, printed, sold individually"
            },
            {
                "name": "Other Exempt",
                "product_tax_code": "99999",
                "description": "Item is exempt"
            }
        ]
    }
    return data
@pytest.fixture
def json_success_for_address():
    """Successful ``rate`` lookup payload for one US address (Williston, VT),
    with the per-jurisdiction rate breakdown and the combined rate."""
    data = {
        "rate": {
            "zip": "05495-2086",
            "country": "US",
            "country_rate": "0.0",
            "state": "VT",
            "state_rate": "0.06",
            "county": "CHITTENDEN",
            "county_rate": "0.0",
            "city": "WILLISTON",
            "city_rate": "0.0",
            "combined_district_rate": "0.01",
            "combined_rate": "0.07",
            "freight_taxable": True
        }
    }
    return data
@pytest.fixture
def json_success_for_order():
    """Successful ``tax`` calculation payload for an order: totals, the
    jurisdiction-level breakdown, and a per-line-item breakdown list."""
    data = {
        "tax": {
            "order_total_amount": 16.5,
            "shipping": 1.5,
            "taxable_amount": 15,
            "amount_to_collect": 1.35,
            "rate": 0.09,
            "has_nexus": True,
            "freight_taxable": False,
            "tax_source": "destination",
            "breakdown": {
                "taxable_amount": 15,
                "tax_collectable": 1.35,
                "combined_tax_rate": 0.09,
                "state_taxable_amount": 15,
                "state_tax_rate": 0.0625,
                "state_tax_collectable": 0.94,
                "county_taxable_amount": 15,
                "county_tax_rate": 0.0025,
                "county_tax_collectable": 0.04,
                "city_taxable_amount": 0,
                "city_tax_rate": 0,
                "city_tax_collectable": 0,
                "special_district_taxable_amount": 15,
                "special_tax_rate": 0.025,
                "special_district_tax_collectable": 0.38,
                "line_items": [
                    {
                        "id": "1",
                        "taxable_amount": 15,
                        "tax_collectable": 1.35,
                        "combined_tax_rate": 0.09,
                        "state_taxable_amount": 15,
                        "state_sales_tax_rate": 0.0625,
                        "state_amount": 0.94,
                        "county_taxable_amount": 15,
                        "county_tax_rate": 0.0025,
                        "county_amount": 0.04,
                        "city_taxable_amount": 0,
                        "city_tax_rate": 0,
                        "city_amount": 0,
                        "special_district_taxable_amount": 15,
                        "special_tax_rate": 0.025,
                        "special_district_amount": 0.38
                    }
                ]
            }
        }
    }
    return data
3da19c1b66db9ba1cf5db7075cb9b8558f665d75 | 8,810 | py | Python | Projects/DeepLearningTechniques/ShakeNet/imagenet/model.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | 2 | 2020-12-05T07:42:55.000Z | 2021-01-06T23:23:18.000Z | Projects/DeepLearningTechniques/ShakeNet/imagenet/model.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | null | null | null | Projects/DeepLearningTechniques/ShakeNet/imagenet/model.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | null | null | null | import tensorflow.contrib.slim as slim
from Projects.DeepLearningTechniques.ShakeNet.imagenet.constants import *
class Model:
    """MobileNetV2-style image classifier built with the TF1 graph API.

    The entire graph (placeholders, inverted-bottleneck backbone, loss,
    SGD optimizer) is constructed eagerly in ``__init__``; ``train``,
    ``validation`` and ``test`` then push batches through the injected
    ``tf.Session``.
    """

    def __init__(self, sess, width, height, channel, lr, dr, is_training, is_tb_logging, name):
        # sess: externally owned tf.Session used by train/validation/test.
        # width/height/channel: static input image dimensions (NHWC placeholder below).
        # lr: learning rate for the GradientDescentOptimizer.
        # dr: stored but never referenced in this class — presumably a decay
        #     rate; TODO confirm against the caller.
        # is_training: toggles batch-norm statistics and dropout behaviour.
        # is_tb_logging: when truthy, TensorBoard summaries are collected/merged.
        # name: outer variable scope; also used to filter this model's
        #       variables and update ops out of the global collections.
        self.sess = sess
        self.width = width
        self.height = height
        self.channel = channel
        self.lr = lr
        self.dr = dr
        self.is_training = is_training
        self.is_tb_logging = is_tb_logging
        self.name = name
        self.weights_initializers = tf.contrib.layers.xavier_initializer(uniform=False)
        self.weights_regularizers = tf.contrib.layers.l2_regularizer(scale=flags.FLAGS.l2_scale)
        self.summary_values = []
        self._build_graph()

    def _build_graph(self):
        """Build inputs, backbone, logits, loss, metrics and the train op."""
        with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
            with tf.variable_scope(name_or_scope='input_scope'):
                # NHWC image batch and sparse integer labels.
                self.x = tf.placeholder(dtype=tf.float32, shape=[None, self.height, self.width, self.channel], name='x')
                self.y = tf.placeholder(dtype=tf.int64, shape=[None], name='y')

            with tf.variable_scope(name_or_scope='body_scope'):
                # Stem conv, then the MobileNetV2 inverted-bottleneck stack.
                layer = self.conv2d(inputs=self.x, filters=32, kernel_size=3, strides=2, name='conv2d_0')
                layer = self.batch_norm(inputs=layer, name='conv2d_0_batch')
                layer = self.inverted_bottleneck(inputs=layer, filters=16, strides=1, repeat=1, factor=1, name='bottleneck_1')
                layer = self.inverted_bottleneck(inputs=layer, filters=24, strides=2, repeat=2, factor=4, name='bottleneck_2')
                layer = self.inverted_bottleneck(inputs=layer, filters=32, strides=2, repeat=3, factor=4, name='bottleneck_3')
                layer = self.inverted_bottleneck(inputs=layer, filters=64, strides=2, repeat=4, factor=4, name='bottleneck_4')
                layer = self.inverted_bottleneck(inputs=layer, filters=96, strides=1, repeat=1, factor=4, name='bottleneck_5')
                layer = self.inverted_bottleneck(inputs=layer, filters=160, strides=2, repeat=3, factor=6, name='bottleneck_6')
                layer = self.inverted_bottleneck(inputs=layer, filters=320, strides=1, repeat=1, factor=6, name='bottleneck_7')

                if self.is_tb_logging:
                    self.summary_values.append(tf.summary.histogram('bottleneck_module', layer))

                # 1x1 head; cam_layer is kept for class-activation-map style use.
                layer = self.conv2d(inputs=layer, filters=1280, name='conv2d_8')
                layer = self.batch_norm(inputs=layer, name='conv2d_8_batch')
                self.cam_layer = layer
                layer = self.dropout(inputs=layer, rate=flags.FLAGS.dropout_rate, name='conv2d_8_dropout')
                layer = tf.layers.average_pooling2d(inputs=layer, pool_size=7, strides=1, name='conv2d_8_avg_pool')
                layer = self.conv2d(inputs=layer, filters=flags.FLAGS.image_class, name='conv2d_8_output')
                # Drop the 1x1 spatial dims -> [batch, num_classes].
                self.logits = tf.squeeze(input=layer, axis=[1, 2], name='logits')

            with tf.variable_scope(name_or_scope='output_scope'):
                # Only variables created under this model's name scope.
                self.variables = [var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if self.name in var.name]
                self.prob = tf.nn.softmax(logits=self.logits, name='softmax')
                self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y, name='ce_loss'))
                # Total loss = cross entropy + this model's L2 regularization terms.
                self.loss = tf.add_n([self.loss] +
                                     [var for var in tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) if self.name in var.name], name='tot_loss')
                self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.logits, -1), self.y), dtype=tf.float32))

                if self.is_tb_logging:
                    self.summary_values.append(tf.summary.scalar('loss', self.loss))
                    self.summary_values.append(tf.summary.scalar('accuracy', self.accuracy))

                self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr)

                # Run batch-norm moving-average updates before each train step.
                update_opt = [var for var in tf.get_collection(tf.GraphKeys.UPDATE_OPS) if self.name in var.name]
                with tf.control_dependencies(update_opt):
                    self.train_op = self.optimizer.minimize(self.loss, var_list=self.variables)

            if self.is_tb_logging:
                self.summary_merged_values = tf.summary.merge(inputs=self.summary_values)

    def batch_norm(self, inputs, act=tf.nn.relu6, name='batch_norm_layer'):
        '''
        Batch Normalization
        - scale=True, use the scale factor (gamma)
        - center=True, use the shift factor (beta)
        '''
        with tf.variable_scope(name_or_scope=name):
            return tf.contrib.layers.batch_norm(inputs=inputs, decay=0.9, center=True, scale=True, fused=True,
                                                updates_collections=tf.GraphKeys.UPDATE_OPS, activation_fn=act,
                                                is_training=self.is_training, scope='batch_norm')

    def conv2d(self, inputs, filters, kernel_size=1, strides=1, padding='same', act=tf.identity, name='conv2d_layer'):
        # Plain conv; defaults to a linear (identity-activated) 1x1 convolution.
        return tf.layers.conv2d(inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
                                padding=padding, activation=act,
                                kernel_initializer=self.weights_initializers,
                                bias_initializer=self.weights_initializers,
                                kernel_regularizer=self.weights_regularizers,
                                bias_regularizer=self.weights_regularizers,
                                name=name)

    def dropout(self, inputs, rate, name):
        # Active only while is_training is truthy.
        with tf.variable_scope(name_or_scope=name):
            return tf.layers.dropout(inputs=inputs, rate=rate, training=self.is_training, name='dropout')

    def depthwise_conv2d(self, inputs, kernel_size=3, strides=2, padding='SAME', depth_multiplier=1, name=None):
        # num_outputs=None makes slim.separable_conv2d a pure depthwise conv
        # (no pointwise stage); activation is deferred to the following batch norm.
        layer = slim.separable_conv2d(inputs=inputs, num_outputs=None, kernel_size=kernel_size, activation_fn=tf.identity,
                                      weights_initializer=self.weights_initializers, weights_regularizer=self.weights_regularizers,
                                      depth_multiplier=depth_multiplier, stride=strides, padding=padding, scope=name)
        return layer

    def inverted_bottleneck(self, inputs, filters, strides, repeat, factor, name=None):
        """Stack of ``repeat`` MobileNetV2 inverted-bottleneck blocks.

        Each block expands by ``factor``, applies a depthwise conv, then a
        linear 1x1 projection to ``filters`` channels, with residual adds
        between same-stride blocks.
        """
        def _mobilenet_block(inputs, input_filters, output_filters, strides, name):
            # expand (1x1) -> depthwise (3x3) -> linear project (1x1).
            with tf.variable_scope(name_or_scope=name):
                layer = self.conv2d(inputs=inputs, filters=input_filters * factor, name='bottleneck_layer')
                layer = self.batch_norm(inputs=layer, name='bottleneck_batch')
                layer = self.depthwise_conv2d(inputs=layer, strides=strides, name='depthwise_layer')
                layer = self.batch_norm(inputs=layer, name='depthwise_batch')
                layer = self.conv2d(inputs=layer, filters=output_filters, name='linear_layer')
                # Linear bottleneck: no non-linearity after the projection.
                layer = self.batch_norm(inputs=layer, act=tf.identity, name='linear_batch')
            return layer

        prev_layer = inputs
        # NOTE(review): input_filters is computed once from the block input, so
        # for idx > 0 the expansion width is still based on the original input
        # channels rather than `filters` — verify this is intended.
        input_filters = inputs.get_shape().as_list()[-1]

        with tf.variable_scope(name_or_scope=name):
            for idx in range(repeat):
                layer = _mobilenet_block(inputs=prev_layer, input_filters=input_filters, output_filters=filters,
                                         strides=strides, name='mobilenet_block_{}'.format(idx))

                '''Skip the shortcut connection when the first layer inside the inverted_bottleneck uses strides=2.'''
                if idx != 0 and strides != 2:
                    if prev_layer.get_shape().as_list()[-1] != layer.get_shape().as_list()[-1]:
                        # 1x1 conv so the residual branch matches in channels.
                        prev_layer = self.conv2d(inputs=prev_layer, filters=filters, name='residual_match_{}'.format(idx))
                    layer = tf.add(prev_layer, layer, name='residual_add_{}'.format(idx))

                '''Except for the last repeat step: blocks after the first run with stride 1.'''
                if idx != repeat-1:
                    strides = 1

                prev_layer = layer

        return layer

    def train(self, x, y):
        """One optimization step; also fetches merged summaries when TB logging is on."""
        if self.is_tb_logging:
            return self.sess.run([self.accuracy, self.loss, self.summary_merged_values, self.train_op], feed_dict={self.x: x, self.y: y})
        else:
            return self.sess.run([self.accuracy, self.loss, self.train_op], feed_dict={self.x: x, self.y: y})

    def validation(self, x, y):
        """Forward pass only: returns (accuracy, loss, class probabilities)."""
        return self.sess.run([self.accuracy, self.loss, self.prob], feed_dict={self.x: x, self.y: y})

    def test(self, x, y):
        """Same fetches as validation(); kept separate for the caller's phases."""
        return self.sess.run([self.accuracy, self.loss, self.prob], feed_dict={self.x: x, self.y: y})
aadbd65d31ec111b41b3d4bcc38481ac8eacdfa1 | 1,837 | py | Python | config_original_milestone_optimizer_distiller.py | xijiali/BDBNet | e176be588652b1b4951d6f1eec6c434bae6a723b | [
"MIT"
] | null | null | null | config_original_milestone_optimizer_distiller.py | xijiali/BDBNet | e176be588652b1b4951d6f1eec6c434bae6a723b | [
"MIT"
] | null | null | null | config_original_milestone_optimizer_distiller.py | xijiali/BDBNet | e176be588652b1b4951d6f1eec6c434bae6a723b | [
"MIT"
] | null | null | null | # encoding: utf-8
import warnings
import numpy as np
class DefaultConfig(object):
    """Central training/evaluation configuration.

    Attributes are plain class-level defaults. ``_parse`` overrides them from
    a kwargs dict (e.g. parsed command-line options) and then derives
    ``mode``/``datatype`` from the chosen dataset name; ``_state_dict``
    exposes the public settings as a plain dict.
    """
    # Seed
    seed = 0

    # dataset options
    dataset = 'market1501'
    datatype = 'person'
    mode = 'retrieval'

    # optimization options
    loss = 'triplet'
    optim = 'adam'
    max_epoch = 400
    train_batch = 128
    test_batch = 128
    adjust_lr = True
    lr = 1e-3
    gamma = 0.1
    weight_decay = 5e-4
    momentum = 0.9
    random_crop = False  # Why False?
    margin = None
    num_instances = 4
    num_gpu = 2
    evaluate = False
    savefig = None
    re_ranking = False

    # model options
    model_name = 'RBDLF'  # triplet, softmax_triplet, bfe, ide
    last_stride = 1
    pretrained_model = None

    # miscs
    print_freq = 10
    eval_step = 50
    save_dir = './pytorch_ckpt_milestone_optimizer_distiller/market'
    workers = 10
    start_epoch = 0
    best_rank = -np.inf
    paper_setting = 'c'

    def _parse(self, kwargs):
        """Apply ``kwargs`` overrides, then derive ``mode``/``datatype``.

        Unknown keys only emit a warning but are still set on the instance,
        preserving the original permissive behaviour.
        """
        for k, v in kwargs.items():
            if not hasattr(self, k):
                # Fixed garbled message ("opt has not attribut %s").
                warnings.warn("Warning: opt has no attribute %s" % k)
            setattr(self, k, v)

        # Dataset names containing 'cls' denote classification runs.
        if 'cls' in self.dataset:
            self.mode = 'class'

        # Derive the data domain from the dataset name; unmatched names keep
        # the previous datatype.
        if 'market' in self.dataset or 'cuhk' in self.dataset or 'duke' in self.dataset:
            self.datatype = 'person'
        elif 'cub' in self.dataset:
            self.datatype = 'cub'
        elif 'car' in self.dataset:
            self.datatype = 'car'
        elif 'clothes' in self.dataset:
            self.datatype = 'clothes'
        elif 'product' in self.dataset:
            self.datatype = 'product'

    def _state_dict(self):
        """Return all public (non-underscore) config entries as a dict.

        Values are read via ``getattr`` so per-instance overrides from
        ``_parse`` take precedence over the class defaults.
        """
        return {k: getattr(self, k) for k, _ in DefaultConfig.__dict__.items()
                if not k.startswith('_')}
| 25.873239 | 92 | 0.569951 |
f198e95b4347e403bcd37de17191d298bd59d0e6 | 28,362 | py | Python | cms/cms_toolbars.py | netzkolchose/django-cms | 0ab064b802b2463fbe5394fc3891b63dc7226033 | [
"BSD-3-Clause"
] | null | null | null | cms/cms_toolbars.py | netzkolchose/django-cms | 0ab064b802b2463fbe5394fc3891b63dc7226033 | [
"BSD-3-Clause"
] | null | null | null | cms/cms_toolbars.py | netzkolchose/django-cms | 0ab064b802b2463fbe5394fc3891b63dc7226033 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from classytags.utils import flatten_context
from django.conf import settings
from django.contrib import admin
from django.contrib.auth import get_permission_codename, get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse, NoReverseMatch, resolve, Resolver404
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from cms.api import get_page_draft, can_change_page
from cms.constants import TEMPLATE_INHERITANCE_MAGIC, PUBLISHER_STATE_PENDING
from cms.models import CMSPlugin, Title, Page
from cms.toolbar.items import TemplateItem, REFRESH_PAGE
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
from cms.utils.i18n import get_language_tuple, force_language, get_language_dict, get_default_language
from cms.utils.compat.dj import is_installed
from cms.utils import get_cms_setting, get_language_from_request
from cms.utils.permissions import (
get_user_sites_queryset,
has_auth_page_permission,
)
from cms.utils.urlutils import add_url_parameters, admin_reverse
from menus.utils import DefaultLanguageChanger
# Identifiers for search
#
# String constants naming the toolbar menus and menu "break" separators.
# Other toolbars (and tests) use these to look menus up and to position
# items relative to a named break instead of hard-coding indices.
ADMIN_MENU_IDENTIFIER = 'admin-menu'
LANGUAGE_MENU_IDENTIFIER = 'language-menu'
TEMPLATE_MENU_BREAK = 'Template Menu Break'
PAGE_MENU_IDENTIFIER = 'page'
PAGE_MENU_ADD_IDENTIFIER = 'add_page'
PAGE_MENU_FIRST_BREAK = 'Page Menu First Break'
PAGE_MENU_SECOND_BREAK = 'Page Menu Second Break'
PAGE_MENU_THIRD_BREAK = 'Page Menu Third Break'
PAGE_MENU_FOURTH_BREAK = 'Page Menu Fourth Break'
PAGE_MENU_LAST_BREAK = 'Page Menu Last Break'
HISTORY_MENU_IDENTIFIER = 'history'
HISTORY_MENU_BREAK = 'History Menu Break'
MANAGE_PAGES_BREAK = 'Manage Pages Break'
ADMIN_SITES_BREAK = 'Admin Sites Break'
ADMINISTRATION_BREAK = 'Administration Break'
CLIPBOARD_BREAK = 'Clipboard Break'
USER_SETTINGS_BREAK = 'User Settings Break'
ADD_PAGE_LANGUAGE_BREAK = "Add page language Break"
REMOVE_PAGE_LANGUAGE_BREAK = "Remove page language Break"
COPY_PAGE_LANGUAGE_BREAK = "Copy page language Break"
TOOLBAR_DISABLE_BREAK = 'Toolbar disable Break'
@toolbar_pool.register
class PlaceholderToolbar(CMSToolbar):
    """
    Adds placeholder edit buttons if placeholders or static placeholders are
    detected in the template: the "Create" wizard button and the
    Structure/Content mode switcher.
    """

    def init_from_request(self):
        # Always operate on the draft version of the current page.
        self.page = get_page_draft(self.request.current_page)

    def init_placeholders(self):
        renderer = self.toolbar.content_renderer
        self.placeholders = renderer.get_rendered_placeholders()
        self.statics = renderer.get_rendered_static_placeholders()

    def populate(self):
        self.init_from_request()

    def post_template_populate(self):
        # Rendered placeholders are only known after template rendering.
        self.init_placeholders()
        self.add_wizard_button()
        self.add_structure_mode()

    def add_structure_mode(self):
        # Non-apphook page: the page's own change permission decides.
        if self.page and not self.page.application_urls:
            if self.page.has_change_permission(self.request):
                return self.add_structure_mode_item()
        # Otherwise any editable regular placeholder is enough.
        elif any(ph for ph in self.placeholders if ph.has_change_permission(self.request)):
            return self.add_structure_mode_item()
        # Finally, fall back to editable static placeholders.
        for static_placeholder in self.statics:
            if static_placeholder.has_change_permission(self.request):
                return self.add_structure_mode_item()

    def add_structure_mode_item(self, extra_classes=('cms-toolbar-item-cms-mode-switcher',)):
        if not self.request.user.has_perm("cms.use_structure"):
            return
        in_build_mode = self.toolbar.build_mode
        build_url = '?%s' % get_cms_setting('CMS_TOOLBAR_URL__BUILD')
        edit_url = '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
        switcher = self.toolbar.add_button_list(
            'Mode Switcher', side=self.toolbar.RIGHT, extra_classes=extra_classes)
        switcher.add_button(_('Structure'), build_url, active=in_build_mode, disabled=False)
        switcher.add_button(_('Content'), edit_url, active=not in_build_mode, disabled=False)

    def add_wizard_button(self):
        from cms.wizards.wizard_pool import entry_choices

        title = _("Create")
        # '' when there is no current page (keeps the query string valid).
        page_pk = getattr(self.page, 'pk', '')
        user = getattr(self.request, "user", None)
        # Disabled when the user has no wizard entries for this page.
        disabled = user and hasattr(self, "page") and len(
            list(entry_choices(user, self.page))) == 0
        lang = get_language_from_request(self.request, current_page=self.page) or get_default_language()

        url = '%s?page=%s&language=%s&edit' % (reverse("cms_wizard_create"), page_pk, lang)
        self.toolbar.add_modal_button(
            title, url, side=self.toolbar.RIGHT, disabled=disabled, on_close=REFRESH_PAGE)
@toolbar_pool.register
class BasicToolbar(CMSToolbar):
    """
    Basic Toolbar for site and languages menu.

    Builds the site-wide admin menu (users, sites, administration, user
    settings, clipboard, disable-toolbar, logout) and the language switcher.
    """
    page = None
    _language_menu = None
    _admin_menu = None

    def init_from_request(self):
        # Always operate on the draft version of the current page.
        self.page = get_page_draft(self.request.current_page)

    def populate(self):
        # Guard: only build once per request (page is set by init_from_request).
        if not self.page:
            self.init_from_request()

            self.clipboard = self.request.toolbar.user_settings.clipboard
            self.add_admin_menu()
            self.add_language_menu()

    def add_admin_menu(self):
        """Build the main site menu (named after the current site)."""
        if not self._admin_menu:
            self._admin_menu = self.toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER, self.current_site.name)

            # Users button
            self.add_users_button(self._admin_menu)

            # sites menu — restricted to the user's sites when CMS_PERMISSION is on.
            if get_cms_setting('PERMISSION'):
                sites_queryset = get_user_sites_queryset(self.request.user)
            else:
                sites_queryset = Site.objects.all()

            # Only show the submenu when there is actually a choice of sites.
            if len(sites_queryset) > 1:
                sites_menu = self._admin_menu.get_or_create_menu('sites', _('Sites'))
                sites_menu.add_sideframe_item(_('Admin Sites'), url=admin_reverse('sites_site_changelist'))
                sites_menu.add_break(ADMIN_SITES_BREAK)
                for site in sites_queryset:
                    sites_menu.add_link_item(site.name, url='http://%s' % site.domain,
                                             active=site.pk == self.current_site.pk)

            # admin
            self._admin_menu.add_sideframe_item(_('Administration'), url=admin_reverse('index'))
            self._admin_menu.add_break(ADMINISTRATION_BREAK)

            # cms users settings
            self._admin_menu.add_sideframe_item(_('User settings'), url=admin_reverse('cms_usersettings_change'))
            self._admin_menu.add_break(USER_SETTINGS_BREAK)

            # clipboard — only relevant while editing/building.
            if self.toolbar.edit_mode or self.toolbar.build_mode:
                # True if the clipboard exists and there's plugins in it.
                clipboard_is_bound = self.get_clipboard_plugins().exists()

                self._admin_menu.add_link_item(_('Clipboard...'), url='#',
                                               extra_classes=['cms-clipboard-trigger'],
                                               disabled=not clipboard_is_bound)
                self._admin_menu.add_link_item(_('Clear clipboard'), url='#',
                                               extra_classes=['cms-clipboard-empty'],
                                               disabled=not clipboard_is_bound)
                self._admin_menu.add_break(CLIPBOARD_BREAK)

            # Disable toolbar
            self._admin_menu.add_link_item(_('Disable toolbar'), url='?%s' % get_cms_setting('CMS_TOOLBAR_URL__DISABLE'))
            self._admin_menu.add_break(TOOLBAR_DISABLE_BREAK)

            # logout
            self.add_logout_button(self._admin_menu)

    def add_users_button(self, parent):
        """Add a Users changelist entry when the user model is registered in admin and the user may change users."""
        User = get_user_model()

        if User in admin.site._registry:
            opts = User._meta

            if self.request.user.has_perm('%s.%s' % (opts.app_label, get_permission_codename('change', opts))):
                user_changelist_url = admin_reverse('%s_%s_changelist' % (opts.app_label, opts.model_name))
                parent.add_sideframe_item(_('Users'), url=user_changelist_url)

    def add_logout_button(self, parent):
        # If current page is not published or has view restrictions user is redirected to the home page:
        # * published page: no redirect
        # * unpublished page: redirect to the home page
        # * published page with login_required: redirect to the home page
        # * published page with view permissions: redirect to the home page
        if (self.page and self.page.is_published(self.current_lang) and not self.page.login_required and
                self.page.has_view_permission(self.request, AnonymousUser())):
            on_success = self.toolbar.REFRESH_PAGE
        else:
            on_success = '/'

        # We'll show "Logout Joe Bloggs" if the name fields in auth.User are completed, else "Logout jbloggs". If
        # anything goes wrong, it'll just be "Logout".
        user_name = self.get_username()
        logout_menu_text = _('Logout %s') % user_name if user_name else _('Logout')

        parent.add_ajax_item(logout_menu_text, action=admin_reverse('logout'), active=True, on_success=on_success)

    def add_language_menu(self):
        """Add one link per configured language, preferring a request-provided language changer."""
        if settings.USE_I18N and not self._language_menu:
            self._language_menu = self.toolbar.get_or_create_menu(LANGUAGE_MENU_IDENTIFIER, _('Language'), position=-1)
            language_changer = getattr(self.request, '_language_changer', DefaultLanguageChanger(self.request))
            for code, name in get_language_tuple(self.current_site.pk):
                try:
                    url = language_changer(code)
                except NoReverseMatch:
                    # Fall back to the default changer when the custom one
                    # cannot reverse a URL for this language.
                    url = DefaultLanguageChanger(self.request)(code)
                self._language_menu.add_link_item(name, url=url, active=self.current_lang == code)

    def get_username(self, user=None, default=''):
        """Return the user's full name, else username, else ``default`` on any failure."""
        user = user or self.request.user

        try:
            name = user.get_full_name()
            if name:
                return name
            else:
                return user.get_username()
        except (AttributeError, NotImplementedError):
            return default

    def get_clipboard_plugins(self):
        # populate() ensures self.clipboard exists when possible.
        self.populate()

        if not hasattr(self, "clipboard"):
            return CMSPlugin.objects.none()
        return self.clipboard.get_plugins().select_related('placeholder')

    def render_addons(self, context):
        """Render the clipboard template fragment shipped alongside the toolbar."""
        context.push()
        context['local_toolbar'] = self
        clipboard = mark_safe(render_to_string('cms/toolbar/clipboard.html', flatten_context(context)))
        context.pop()
        return [clipboard]
@toolbar_pool.register
class PageToolbar(CMSToolbar):
    """Toolbar for the current page: Page menu, language menu entries,
    the publish button and the Draft/Live switcher."""
    _changed_admin_menu = None
    watch_models = [Page]

    # Helpers

    def init_from_request(self):
        # Cache the draft page, its Title for the current language and the
        # CMS_PERMISSION setting for this request.
        self.page = get_page_draft(self.request.current_page)
        self.title = self.get_title()
        self.permissions_activated = get_cms_setting('PERMISSION')

    def init_placeholders(self):
        content_renderer = self.toolbar.content_renderer
        self.placeholders = content_renderer.get_rendered_placeholders()
        self.statics = content_renderer.get_rendered_static_placeholders()
        # Static placeholders with unpublished changes.
        self.dirty_statics = [sp for sp in self.statics if sp.dirty]

    def get_title(self):
        """Return the draft Title for the current language, or None."""
        try:
            return Title.objects.get(page=self.page, language=self.current_lang, publisher_is_draft=True)
        except Title.DoesNotExist:
            return None

    def has_publish_permission(self):
        """Cached: user may publish the page AND every dirty static placeholder."""
        if not hasattr(self, 'publish_permission'):
            publish_permission = bool(self.page or self.statics)

            if self.page:
                publish_permission = self.page.has_publish_permission(self.request)

            if self.statics:
                publish_permission &= all(sp.has_publish_permission(self.request) for sp in self.dirty_statics)

            self.publish_permission = publish_permission
        return self.publish_permission

    def has_page_change_permission(self):
        """Cached: user may change pages (falls back to plain auth perms)."""
        if not hasattr(self, 'page_change_permission'):
            if not self.page and not get_cms_setting('PERMISSION'):
                # We can't check permissions for an individual page
                # and can't check global cms permissions because
                # user opted out of them.
                # So just check django auth permissions.
                user = self.request.user
                can_change = has_auth_page_permission(user, action='change')
            else:
                can_change = can_change_page(self.request)
            self.page_change_permission = can_change
        return self.page_change_permission

    def page_is_pending(self, page, language):
        # True when the public version exists but is waiting on an
        # unpublished ancestor (pending publisher state).
        return (page.publisher_public_id and
                page.publisher_public.get_publisher_state(language) == PUBLISHER_STATE_PENDING)

    def in_apphook(self):
        """True when the current URL resolves to an apphook view (not cms.views.details)."""
        with force_language(self.toolbar.language):
            try:
                resolver = resolve(self.request.path_info)
            except Resolver404:
                return False
            else:
                from cms.views import details
                return resolver.func != details

    def in_apphook_root(self):
        """
        Returns True if the request is for a page handled by an apphook, but
        is also the page it is attached to.
        :return: Boolean
        """
        page = getattr(self.request, 'current_page', False)
        if page:
            language = get_language_from_request(self.request)
            return self.request.path == page.get_absolute_url(language=language)
        return False

    def get_on_delete_redirect_url(self):
        """URL to land on after deleting the current page."""
        parent, language = self.page.parent, self.current_lang

        # if the current page has a parent in the request's current language redirect to it
        if parent and language in parent.get_languages():
            with force_language(language):
                return parent.get_absolute_url(language=language)

        # else redirect to root, do not redirect to Page.objects.get_home() because user could have deleted the last
        # page, if DEBUG == False this could cause a 404
        return reverse('pages-root')

    # Populate

    def populate(self):
        self.init_from_request()
        self.change_admin_menu()
        self.add_page_menu()
        self.change_language_menu()

    def post_template_populate(self):
        # Placeholders are only known after template rendering.
        self.init_placeholders()
        self.add_draft_live()
        self.add_publish_button()

    # Buttons

    def add_publish_button(self, classes=('cms-btn-action', 'cms-btn-publish',)):
        """Add the right-hand Publish button; enabled only when something is dirty."""
        # only do dirty lookups if publish permission is granted else button isn't added anyway
        if self.toolbar.edit_mode and self.has_publish_permission():
            classes = list(classes or [])
            pk = self.page.pk if self.page else 0

            # Dirty = unpublished static placeholder changes, page changes,
            # or a page stuck in the pending publisher state.
            dirty = (bool(self.dirty_statics) or
                     (self.page and (self.page.is_dirty(self.current_lang) or
                                     self.page_is_pending(self.page, self.current_lang))))

            if dirty:
                classes.append('cms-btn-publish-active')

            if self.dirty_statics or (self.page and self.page.is_published(self.current_lang)):
                title = _('Publish page changes')
            else:
                title = _('Publish page now')
                classes.append('cms-publish-page')

            params = {}

            if self.dirty_statics:
                params['statics'] = ','.join(str(sp.pk) for sp in self.dirty_statics)

            if self.in_apphook():
                # Apphook views: come back to the same URL after publishing.
                params['redirect'] = self.request.path_info

            with force_language(self.current_lang):
                url = admin_reverse('cms_page_publish_page', args=(pk, self.current_lang))

            url = add_url_parameters(url, params)

            self.toolbar.add_button(title, url=url, extra_classes=classes,
                                    side=self.toolbar.RIGHT, disabled=not dirty)

    def add_draft_live(self):
        """Add the Draft/Live switcher when the user can edit something rendered."""
        if self.page:
            if self.toolbar.edit_mode and not self.title:
                # Page has no title in this language yet — offer settings instead.
                self.add_page_settings_button()

            if self.page.has_change_permission(self.request) and self.page.is_published(self.current_lang):
                return self.add_draft_live_item()
        elif self.placeholders:
            return self.add_draft_live_item()

        for sp in self.statics:
            if sp.has_change_permission(self.request):
                return self.add_draft_live_item()

    def add_draft_live_item(self, template='cms/toolbar/items/live_draft.html', extra_context=None):
        context = {'request': self.request}
        context.update(extra_context or {})
        # Appended after any existing right-hand items.
        pos = len(self.toolbar.right_items)
        self.toolbar.add_item(TemplateItem(template, extra_context=context, side=self.toolbar.RIGHT), position=pos)

    def add_page_settings_button(self, extra_classes=('cms-btn-action',)):
        url = '%s?language=%s' % (admin_reverse('cms_page_change', args=[self.page.pk]), self.toolbar.language)
        self.toolbar.add_modal_button(_('Page settings'), url, side=self.toolbar.RIGHT, extra_classes=extra_classes)

    # Menus

    def change_language_menu(self):
        """Extend the language menu with Add/Delete Translation and Copy-plugins entries."""
        if self.toolbar.edit_mode and self.page:
            language_menu = self.toolbar.get_menu(LANGUAGE_MENU_IDENTIFIER)
            if not language_menu:
                return None

            languages = get_language_dict(self.current_site.pk)

            # remove: translations the page already has; add: the rest;
            # copy: existing translations other than the current language.
            remove = [(code, languages.get(code, code)) for code in self.page.get_languages() if code in languages]
            add = [l for l in languages.items() if l not in remove]
            copy = [(code, name) for code, name in languages.items() if code != self.current_lang and (code, name) in remove]

            if add or remove or copy:
                language_menu.add_break(ADD_PAGE_LANGUAGE_BREAK)

            if add:
                add_plugins_menu = language_menu.get_or_create_menu('{0}-add'.format(LANGUAGE_MENU_IDENTIFIER), _('Add Translation'))

                page_change_url = admin_reverse('cms_page_change', args=(self.page.pk,))

                for code, name in add:
                    url = add_url_parameters(page_change_url, language=code)
                    add_plugins_menu.add_modal_item(name, url=url)

            if remove:
                remove_plugins_menu = language_menu.get_or_create_menu('{0}-del'.format(LANGUAGE_MENU_IDENTIFIER), _('Delete Translation'))
                translation_delete_url = admin_reverse('cms_page_delete_translation', args=(self.page.pk,))

                # Cannot delete the only remaining translation.
                disabled = len(remove) == 1

                for code, name in remove:
                    url = add_url_parameters(translation_delete_url, language=code)
                    remove_plugins_menu.add_modal_item(name, url=url, disabled=disabled)

            if copy:
                copy_plugins_menu = language_menu.get_or_create_menu('{0}-copy'.format(LANGUAGE_MENU_IDENTIFIER), _('Copy all plugins'))
                title = _('from %s')
                question = _('Are you sure you want copy all plugins from %s?')

                page_copy_url = admin_reverse('cms_page_copy_language', args=(self.page.pk,))

                for code, name in copy:
                    copy_plugins_menu.add_ajax_item(
                        title % name, action=page_copy_url,
                        data={'source_language': code, 'target_language': self.current_lang},
                        question=question % name, on_success=self.toolbar.REFRESH_PAGE
                    )

    def change_admin_menu(self):
        """Prepend a Pages entry to the site admin menu (once per request)."""
        if not self._changed_admin_menu and self.has_page_change_permission():
            admin_menu = self.toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER)
            url = admin_reverse('cms_page_changelist')  # cms page admin
            params = {'language': self.toolbar.language}
            if self.page:
                params['page_id'] = self.page.pk
            url = add_url_parameters(url, params)
            admin_menu.add_sideframe_item(_('Pages'), url=url, position=0)
            # Used to prevent duplicates
            self._changed_admin_menu = True

    def add_page_menu(self):
        """Build the full Page menu: create, settings, templates, permissions,
        dates, navigation, publish/unpublish, history and delete."""
        if self.page and self.has_page_change_permission():
            edit_mode = self.toolbar.edit_mode
            refresh = self.toolbar.REFRESH_PAGE

            # menu for current page
            # NOTE: disabled if the current path is "deeper" into the
            # application's url patterns than its root. This is because
            # when the Content Manager is at the root of the app-hook,
            # some of the page options still make sense.
            current_page_menu = self.toolbar.get_or_create_menu(
                PAGE_MENU_IDENTIFIER, _('Page'), position=1, disabled=self.in_apphook() and not self.in_apphook_root())

            # page operations menu
            add_page_menu = current_page_menu.get_or_create_menu(PAGE_MENU_ADD_IDENTIFIER, _('Create Page'))
            app_page_url = admin_reverse('cms_page_add')

            new_page_params = {'edit': 1, 'position': 'last-child'}

            # New sibling: target the parent so the page is created next to us.
            if self.page.parent_id:
                new_page_params['target'] = self.page.parent_id

            add_page_menu_modal_items = (
                (_('New Page'), new_page_params),
                (_('New Sub Page'), {'edit': 1, 'position': 'last-child', 'target': self.page.pk}),
                (_('Duplicate this Page'), {'copy_target': self.page.pk})
            )

            for title, params in add_page_menu_modal_items:
                params.update(language=self.toolbar.language)
                add_page_menu.add_modal_item(title, url=add_url_parameters(app_page_url, params))

            # first break
            current_page_menu.add_break(PAGE_MENU_FIRST_BREAK)

            # page edit
            page_edit_url = '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
            current_page_menu.add_link_item(_('Edit this Page'), disabled=edit_mode, url=page_edit_url)

            # page settings
            page_settings_url = admin_reverse('cms_page_change', args=(self.page.pk,))
            page_settings_url = add_url_parameters(page_settings_url, language=self.toolbar.language)
            current_page_menu.add_modal_item(_('Page settings'), url=page_settings_url, disabled=not edit_mode,
                                             on_close=refresh)

            # advanced settings
            advanced_url = admin_reverse('cms_page_advanced', args=(self.page.pk,))
            advanced_url = add_url_parameters(advanced_url, language=self.toolbar.language)
            advanced_disabled = not self.page.has_advanced_settings_permission(self.request) or not edit_mode
            current_page_menu.add_modal_item(_('Advanced settings'), url=advanced_url, disabled=advanced_disabled)

            # templates menu
            if self.toolbar.build_mode or edit_mode:
                templates_menu = current_page_menu.get_or_create_menu('templates', _('Templates'))
                action = admin_reverse('cms_page_change_template', args=(self.page.pk,))

                for path, name in get_cms_setting('TEMPLATES'):
                    active = self.page.template == path
                    # Separate the "inherit" pseudo-template from real ones.
                    if path == TEMPLATE_INHERITANCE_MAGIC:
                        templates_menu.add_break(TEMPLATE_MENU_BREAK)
                    templates_menu.add_ajax_item(name, action=action, data={'template': path}, active=active,
                                                 on_success=refresh)

            # page type
            page_type_url = admin_reverse('cms_page_add_page_type')
            page_type_url = add_url_parameters(page_type_url, copy_target=self.page.pk, language=self.toolbar.language)
            current_page_menu.add_modal_item(_('Save as Page Type'), page_type_url, disabled=not edit_mode)

            # second break
            current_page_menu.add_break(PAGE_MENU_SECOND_BREAK)

            # permissions
            if self.permissions_activated:
                permissions_url = admin_reverse('cms_page_permissions', args=(self.page.pk,))
                permission_disabled = not edit_mode or not self.page.has_change_permissions_permission(self.request)
                current_page_menu.add_modal_item(_('Permissions'), url=permissions_url, disabled=permission_disabled)

            # dates settings
            dates_url = admin_reverse('cms_page_dates', args=(self.page.pk,))
            current_page_menu.add_modal_item(_('Publishing dates'), url=dates_url, disabled=not edit_mode)

            # third break
            current_page_menu.add_break(PAGE_MENU_THIRD_BREAK)

            # navigation toggle
            nav_title = _('Hide in navigation') if self.page.in_navigation else _('Display in navigation')
            nav_action = admin_reverse('cms_page_change_innavigation', args=(self.page.pk,))
            current_page_menu.add_ajax_item(nav_title, action=nav_action, disabled=not edit_mode, on_success=refresh)

            # publisher
            if self.title:
                if self.title.published:
                    publish_title = _('Unpublish page')
                    publish_url = admin_reverse('cms_page_unpublish', args=(self.page.pk, self.current_lang))
                else:
                    publish_title = _('Publish page')
                    publish_url = admin_reverse('cms_page_publish_page', args=(self.page.pk, self.current_lang))

                current_page_menu.add_ajax_item(publish_title, action=publish_url, disabled=not edit_mode,
                                                on_success=refresh)

            # fourth break
            current_page_menu.add_break(PAGE_MENU_FOURTH_BREAK)

            history_menu = current_page_menu.get_or_create_menu(HISTORY_MENU_IDENTIFIER, _('History'))

            # Undo/Redo entries only when django-reversion is installed.
            if is_installed('reversion'):
                from cms.utils.reversion_hacks import reversion, Revision

                versions = reversion.get_for_object(self.page)
                if self.page.revision_id:
                    current_revision = Revision.objects.get(pk=self.page.revision_id)
                    has_undo = versions.filter(revision__pk__lt=current_revision.pk).exists()
                    has_redo = versions.filter(revision__pk__gt=current_revision.pk).exists()
                else:
                    has_redo = False
                    has_undo = versions.count() > 1

                undo_action = admin_reverse('cms_page_undo', args=(self.page.pk,))
                redo_action = admin_reverse('cms_page_redo', args=(self.page.pk,))

                history_menu.add_ajax_item(_('Undo'), action=undo_action, disabled=not has_undo, on_success=refresh)
                history_menu.add_ajax_item(_('Redo'), action=redo_action, disabled=not has_redo, on_success=refresh)

                history_menu.add_break(HISTORY_MENU_BREAK)

            revert_action = admin_reverse('cms_page_revert_page', args=(self.page.pk, self.current_lang))
            revert_question = _('Are you sure you want to revert to live?')

            # Enabled only when there are draft changes and a live version exists.
            is_enabled = self.page.is_dirty(self.current_lang) and self.page.publisher_public

            history_menu.add_ajax_item(_('Revert to live'), action=revert_action, question=revert_question,
                                       disabled=not is_enabled,
                                       on_success=refresh, extra_classes=('cms-toolbar-revert',))
            history_menu.add_modal_item(_('View history'), url=admin_reverse('cms_page_history', args=(self.page.pk,)))

            # last break
            current_page_menu.add_break(PAGE_MENU_LAST_BREAK)

            # delete
            delete_url = admin_reverse('cms_page_delete', args=(self.page.pk,))
            on_delete_redirect_url = self.get_on_delete_redirect_url()
            current_page_menu.add_modal_item(_('Delete page'), url=delete_url, on_close=on_delete_redirect_url,
                                             disabled=not edit_mode)
3ad65b492cd1c1f3d6bc6452fada7def7ebcb975 | 2,902 | py | Python | loan/helpers.py | Casper-Smet/LOAN | 3aabf80cf4314bcba33779329fc6e4971b85e742 | [
"MIT"
] | null | null | null | loan/helpers.py | Casper-Smet/LOAN | 3aabf80cf4314bcba33779329fc6e4971b85e742 | [
"MIT"
] | null | null | null | loan/helpers.py | Casper-Smet/LOAN | 3aabf80cf4314bcba33779329fc6e4971b85e742 | [
"MIT"
] | null | null | null | import json
import operator
from pathlib import Path
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
def graph_from_json(fp: Path, against_weight: int = 6, with_weight: int = 2, name: str = "HumanBody"):
    """Build a weighted directed multigraph from the JSON file at *fp*.

    The file's ``network`` entry is a list of 3-element rows.  The first two
    elements of a row are the edge's from/to vertices; the third flags the
    stream direction: ``0`` means with the stream, ``1`` means against it.
    The flag is replaced by the matching energy cost (*with_weight* /
    *against_weight*) before the edge is added.
    """
    with open(fp) as fh:
        edges = np.array(json.load(fh).get("network"), dtype=int)

    # Translate the direction flag in column 2 into its energy cost.
    edges[:, 2] = np.where(edges[:, 2] == 0, with_weight, against_weight)

    body_graph = nx.MultiDiGraph(name=name)
    body_graph.add_weighted_edges_from(edges.tolist())
    return body_graph
def give_node_positions():
    """Return the fixed (x, y) grid position for every node id (1-14)."""
    return {
        1: (1, 6),
        2: (1, 5),
        3: (2, 4), 4: (1, 4), 5: (0, 4),
        6: (2, 3), 7: (1, 3), 8: (0, 3),
        9: (2, 2), 10: (1, 2),
        11: (2, 1), 12: (1, 1), 13: (0, 1),
        14: (1, 0),
    }
def node_positions_on_canvas(all_pos: dict, w_canvas: int = 600, h_canvas: int = 600):
    """Scale abstract grid positions to pixel coordinates on a canvas.

    Each node's grid coordinate is multiplied by a per-axis step derived
    from the canvas size and the largest coordinate seen (plus a one-step
    offset on every side so no node sits on the canvas edge).

    :param all_pos: mapping of node id -> (x, y) grid coordinate
    :param w_canvas: canvas width in pixels
    :param h_canvas: canvas height in pixels
    :return: mapping of node id -> {"x": px, "y": px}; empty dict for
             empty input (previously raised ValueError)
    """
    if not all_pos:
        return {}

    # Largest coordinate on each axis determines the grid step size.
    x_max = max(x for x, _ in all_pos.values())
    y_max = max(y for _, y in all_pos.values())
    x_mult = w_canvas // (x_max + 1)
    y_mult = h_canvas // (y_max + 1)

    # Flip the y axis: grid coordinates grow upward, canvas pixels downward.
    return {
        node: {"x": (x + 1) * x_mult, "y": h_canvas - (y + 1) * y_mult}
        for node, (x, y) in all_pos.items()
    }
def plot_graph(graph):
    """Draw *graph* with colour-coded node groups and show the figure.

    Blue marks the deoxygenated-blood vertices, red the oxygenated-blood
    vertices, and purple the organ vertices.  Node placement comes from
    :func:`give_node_positions`.
    """
    pos = give_node_positions()
    nx.draw_networkx_edges(graph, pos=pos)
    nx.draw_networkx_labels(graph, pos=pos)
    node_groups = (
        ([5, 8, 13], "tab:blue"),                   # deoxygenated blood
        ([3, 6, 9, 11], "tab:red"),                 # oxygenated blood
        ([1, 2, 4, 7, 10, 12, 14], "tab:purple"),   # organs
    )
    for nodes, colour in node_groups:
        nx.draw_networkx_nodes(graph, pos, nodelist=nodes, node_color=colour, node_size=800)
    plt.title(graph.name)
    plt.axis("off")
    plt.show()
if __name__ == "__main__":
    # Quick manual check: load the body network and display it.
    plot_graph(graph_from_json(Path("./loan/network.json")))
| 36.734177 | 131 | 0.65162 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.