hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
79578191892439bc28c435fba3a9ad54a61413fe | 2,542 | py | Python | tools/misc/InvalidTestDetector.py | v-weiguo/test262 | a66c978c5f1faafebf90d1bb13774b7f0643e2c9 | [
"BSD-3-Clause"
] | null | null | null | tools/misc/InvalidTestDetector.py | v-weiguo/test262 | a66c978c5f1faafebf90d1bb13774b7f0643e2c9 | [
"BSD-3-Clause"
] | 1 | 2020-07-28T04:46:04.000Z | 2020-07-28T04:46:04.000Z | exhibitor/node_modules/6to5/vendor/test262/tools/misc/InvalidTestDetector.py | scharissis/liberator | a5a65ec3254b638078470b72aadef928c09e8fdc | [
"MIT"
] | null | null | null | # Copyright (c) 2012 Ecma International. All rights reserved.
# Ecma International makes this code available under the terms and conditions set
# forth on http://hg.ecmascript.org/tests/test262/raw-file/tip/LICENSE (the
# "Use Terms"). Any redistribution of this code must retain the above
# copyright and this notice and otherwise comply with the Use Terms.
#--Imports---------------------------------------------------------------------
import argparse
import os
import sys
import re
#--Globals---------------------------------------------------------------------
#List of regular expressions covering suspect code snippets which might be
#invalid from an ES5 POV
QUESTIONABLE_RE_LIST = ["window",
                        "document(?!ation)",  # bare "document" is suspect; "documentation" is fine
                        "alert",
                        "setTimeout",
                        "ActiveX",
                        ]
# Compile once, case-insensitively, so handleFile can reuse the patterns on every line.
QUESTIONABLE_RE_LIST = [re.compile(x, re.I) for x in QUESTIONABLE_RE_LIST]
#------------------------------------------------------------------------------
def getAllJSFiles(dirName):
    '''
    Recursively collects every ".js" file found under dirName.

    dirName may itself be a single .js file (returned as a one-element
    list); anything that is neither a .js file nor a directory yields
    an empty list.
    '''
    if os.path.isfile(dirName) and dirName.endswith(".js"):
        return [dirName]
    collected = []
    if os.path.isdir(dirName):
        for entry in os.listdir(dirName):
            collected.extend(getAllJSFiles(os.path.join(dirName, entry)))
    #else: silently ignore non-file/non-dir paths (symlink targets gone, etc.)
    return collected
#------------------------------------------------------------------------------
def handleFile(filePath):
    '''
    Prints filePath and each of its lines that matches one of the
    suspect patterns in QUESTIONABLE_RE_LIST (browser-only APIs that
    would make an ES5 conformance test invalid).
    '''
    with open(filePath, "r") as f:
        origLines = f.readlines()
    # A line is reported once per matching pattern, so a line that
    # mentions both "window" and "alert" is printed twice.
    for line in origLines:
        for tempRe in QUESTIONABLE_RE_LIST:
            if tempRe.search(line)!=None:
                print filePath
                print "\t", line
#--Main------------------------------------------------------------------------
if __name__=="__main__":
    # Command-line entry point: scan a test-suite directory and print
    # every line that looks like it uses a non-ES5 (browser) API.
    __parser = argparse.ArgumentParser(description='Tool used to detect (potentially) invalid test cases')
    __parser.add_argument('tpath', action='store',
                          help='Full path to test cases. E.g., C:\repos\test262-msft\test\suite\ietestcenter')
    ARGS = __parser.parse_args()
    if not os.path.exists(ARGS.tpath):
        print "Cannot examine tests in '%s' when it doesn't exist!" % ARGS.tpath
        sys.exit(1)
    ALL_JS_FILES = getAllJSFiles(ARGS.tpath)
    for fileName in ALL_JS_FILES:
        handleFile(fileName)
    print "Done!"
| 38.515152 | 110 | 0.543273 |
795781ebd80873b6fdc52c32ab6038da0944047a | 5,053 | py | Python | tensorflow_datasets/core/features/audio_feature_test.py | harsh020/datasets | b4ad3617b279ec65356e696c4c860458621976f6 | [
"Apache-2.0"
] | 1 | 2020-12-22T17:05:51.000Z | 2020-12-22T17:05:51.000Z | tensorflow_datasets/core/features/audio_feature_test.py | harsh020/datasets | b4ad3617b279ec65356e696c4c860458621976f6 | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/core/features/audio_feature_test.py | harsh020/datasets | b4ad3617b279ec65356e696c4c860458621976f6 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.features.audio_feature."""
import array
import pathlib
import tempfile
from absl.testing import parameterized
import numpy as np
import pydub
import tensorflow.compat.v2 as tf
from tensorflow_datasets import testing
from tensorflow_datasets.core import features
# Run this test module under TF2 (eager) semantics even on a TF1-style install.
tf.enable_v2_behavior()
class AudioFeatureTest(
    testing.FeatureExpectationsTestCase, parameterized.TestCase
):
  """Tests for the `features.Audio` feature connector.

  Each test is parameterized over mono (1), stereo (2) and 8-channel
  audio; `assertFeature` round-trips the value through encode/decode
  and compares against the expected numpy array.
  """

  @parameterized.parameters([(1,), (2,), (8,)])
  def test_numpy_array(self, num_channels):
    # Raw int64 numpy input should round-trip unchanged.
    np_audio = _create_np_audio(num_channels)
    self.assertFeature(
        feature=features.Audio(
            sample_rate=1000,
            shape=_shape_for_channels(num_channels)
        ),
        shape=_shape_for_channels(num_channels),
        dtype=tf.int64,
        tests=[
            testing.FeatureExpectationItem(
                value=np_audio,
                expected=np_audio,
            ),
        ],
        test_attributes=dict(
            _file_format=None,
            sample_rate=1000,
        )
    )

  @parameterized.parameters([(1,), (2,), (8,)])
  def test_numpy_array_float(self, num_channels):
    # Same round-trip, but with an explicit float32 dtype.
    np_audio = _create_np_audio(num_channels).astype(np.float32)
    self.assertFeature(
        feature=features.Audio(
            dtype=tf.float32, shape=_shape_for_channels(num_channels)
        ),
        shape=_shape_for_channels(num_channels),
        dtype=tf.float32,
        tests=[
            testing.FeatureExpectationItem(
                value=np_audio,
                expected=np_audio,
            ),
        ],
    )

  @parameterized.parameters([(1,), (2,), (8,)])
  def test_wav_file(self, num_channels):
    # A path (str or pathlib.Path) to a .wav file should decode to the
    # original samples.
    np_audio = _create_np_audio(num_channels)
    _, tmp_file = tempfile.mkstemp()
    _write_wave_file(np_audio, tmp_file)
    self.assertFeature(
        feature=features.Audio(
            file_format='wav', shape=_shape_for_channels(num_channels)
        ),
        shape=_shape_for_channels(num_channels),
        dtype=tf.int64,
        tests=[
            testing.FeatureExpectationItem(
                value=tmp_file,
                expected=np_audio,
            ),
            testing.FeatureExpectationItem(
                value=pathlib.Path(tmp_file),
                expected=np_audio,
            ),
        ],
        test_attributes=dict(_file_format='wav',)
    )

  @parameterized.parameters([(1,), (2,), (8,)])
  def test_file_object(self, num_channels):
    # An open file object should also be accepted as input.
    np_audio = _create_np_audio(num_channels)
    _, tmp_file = tempfile.mkstemp()
    _write_wave_file(np_audio, tmp_file)

    class GFileWithSeekOnRead(tf.io.gfile.GFile):
      """Wrapper around GFile which is reusable across multiple read() calls.

      This is needed because assertFeature reuses the same
      FeatureExpectationItem several times.
      """

      def read(self, *args, **kwargs):
        # Rewind after each full read so the next encode sees the data again.
        data_read = super(GFileWithSeekOnRead, self).read(*args, **kwargs)
        self.seek(0)
        return data_read

    with GFileWithSeekOnRead(tmp_file, 'rb') as file_obj:
      self.assertFeature(
          feature=features.Audio(
              file_format='wav', shape=_shape_for_channels(num_channels)
          ),
          shape=_shape_for_channels(num_channels),
          dtype=tf.int64,
          tests=[
              testing.FeatureExpectationItem(
                  value=file_obj,
                  expected=np_audio,
              ),
          ],
      )

  def test_sample_rate_property(self):
    # sample_rate given at construction must be exposed as an attribute.
    self.assertEqual(features.Audio(sample_rate=1600).sample_rate, 1600)
def _shape_for_channels(num_channels, *, length=None):
"""Returns the shape."""
return (length,) if num_channels == 1 else (length, num_channels)
def _create_np_audio(num_channels: int) -> np.ndarray:
  """Returns 10 samples of random int64 audio with the given channel count."""
  target_shape = _shape_for_channels(num_channels, length=10)
  # Values stay well inside 16-bit range so the WAV writer (sample_width=2)
  # can represent them.
  return np.random.randint(-2**10, 2**10, size=target_shape, dtype=np.int64)
def _write_wave_file(np_audio, path):
  """Writes `np_audio` (int samples, mono or multi-channel) to a 16-bit WAV at `path`."""
  # 2-D array => (length, channels); 1-D array => mono.
  num_channels = np_audio.shape[1] if len(np_audio.shape) == 2 else 1
  audio = pydub.AudioSegment(
      b'',
      sample_width=2,
      channels=num_channels,
      frame_rate=1,
  )
  # See documentation for _spawn usage:
  # https://github.com/jiaaro/pydub/blob/master/API.markdown#audiosegmentget_array_of_samples
  audio = audio._spawn(array.array(audio.array_type, np_audio.reshape((-1,))))
  audio.export(path, format='wav')
| 30.439759 | 93 | 0.654067 |
795782db8000790f073c54c80c4133492ded6912 | 704 | py | Python | baekjoon/python/increasing_number_11057.py | yskang/AlgorithmPractice | 31b76e38b4c2f1e3e29fb029587662a745437912 | [
"MIT"
] | null | null | null | baekjoon/python/increasing_number_11057.py | yskang/AlgorithmPractice | 31b76e38b4c2f1e3e29fb029587662a745437912 | [
"MIT"
] | 1 | 2019-11-04T06:44:04.000Z | 2019-11-04T06:46:55.000Z | baekjoon/python/increasing_number_11057.py | yskang/AlgorithmPractice | 31b76e38b4c2f1e3e29fb029587662a745437912 | [
"MIT"
] | null | null | null | # Title: 오르막 수
# Link: https://www.acmicpc.net/problem/11057
import sys
sys.setrecursionlimit(10 ** 6)  # headroom for deep recursion (the DP itself is iterative)
# Reads one line from stdin and parses it as an int.
read_single_int = lambda: int(sys.stdin.readline().strip())
def solution(n: int) -> int:
    """Count length-n "ascending" numbers (digits non-decreasing, leading
    zeros allowed), modulo 10007. Assumes n >= 1.

    counts[d] = number of valid length-k numbers whose last digit is d.
    A length-(k+1) number ending in digit d can extend any length-k number
    ending in a digit <= d, so each step the table becomes the prefix sum
    of the previous one. This replaces the original triple loop (and its
    redundant n == 1 special case) with an O(n * 10) scan; results are
    identical for every n >= 1.
    """
    counts = [1] * 10  # length 1: exactly one number per digit 0..9
    for _ in range(n - 1):
        running = 0
        nxt = []
        for c in counts:
            running += c
            nxt.append(running)  # prefix sum = extensions ending in this digit
        counts = nxt
    return sum(counts) % 10007
def main():
    """Reads N from stdin and prints the count of length-N ascending numbers."""
    print(solution(read_single_int()))


if __name__ == '__main__':
    main()
795782f8df28af1a815f6a729a3a817fd32e9ed2 | 80,499 | py | Python | zerver/tests/test_events.py | roberthoenig/zulip | 5d6724345a8ba4896d21478be2e33e624f8ac8ab | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_events.py | roberthoenig/zulip | 5d6724345a8ba4896d21478be2e33e624f8ac8ab | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_events.py | roberthoenig/zulip | 5d6724345a8ba4896d21478be2e33e624f8ac8ab | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# See http://zulip.readthedocs.io/en/latest/events-system.html for
# high-level documentation on how this system works.
from __future__ import absolute_import
from __future__ import print_function
from typing import Any, Callable, Dict, List, Optional, Union, Text, Tuple
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.test import TestCase
from django.utils.timezone import now as timezone_now
from zerver.models import (
get_client, get_realm, get_recipient, get_stream, get_user_profile_by_email,
Message, RealmDomain, Recipient, UserMessage, UserPresence, UserProfile,
Realm,
)
from zerver.lib.actions import (
bulk_add_subscriptions,
bulk_remove_subscriptions,
do_add_alert_words,
check_add_realm_emoji,
check_send_typing_notification,
notify_realm_custom_profile_fields,
do_add_realm_filter,
do_add_reaction,
do_remove_reaction,
do_change_avatar_fields,
do_change_default_all_public_streams,
do_change_default_events_register_stream,
do_change_default_sending_stream,
do_change_full_name,
do_change_bot_owner,
do_change_is_admin,
do_change_stream_description,
do_change_subscription_property,
do_create_user,
do_deactivate_stream,
do_deactivate_user,
do_mark_hotspot_as_read,
do_reactivate_user,
do_refer_friend,
do_regenerate_api_key,
do_remove_alert_words,
do_remove_realm_emoji,
do_remove_realm_filter,
do_rename_stream,
do_add_default_stream,
do_remove_default_stream,
do_set_muted_topics,
do_set_realm_property,
do_set_realm_authentication_methods,
do_set_realm_message_editing,
do_update_embedded_data,
do_update_message,
do_update_message_flags,
do_update_muted_topic,
do_update_pointer,
do_update_user_presence,
do_set_user_display_setting,
do_change_enable_stream_desktop_notifications,
do_change_enable_stream_sounds,
do_change_enable_desktop_notifications,
do_change_enable_sounds,
do_change_enable_offline_email_notifications,
do_change_enable_offline_push_notifications,
do_change_enable_online_push_notifications,
do_change_pm_content_in_desktop_notifications,
do_change_enable_digest_emails,
do_add_realm_domain,
do_change_realm_domain,
do_remove_realm_domain,
do_change_icon_source,
)
from zerver.lib.events import (
apply_events,
fetch_initial_state_data,
)
from zerver.lib.message import render_markdown
from zerver.lib.test_helpers import POSTRequestMock, get_subscription
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.validator import (
check_bool, check_dict, check_dict_only, check_float, check_int, check_list, check_string,
equals, check_none_or, Validator
)
from zerver.views.events_register import _default_all_public_streams, _default_narrow
from zerver.tornado.event_queue import allocate_client_descriptor, EventQueue
from zerver.tornado.views import get_events_backend
from collections import OrderedDict
import mock
import time
import ujson
from six.moves import range
class EventsEndpointTest(ZulipTestCase):
    """Coverage-oriented tests for the /json/register and /notify_tornado URLs."""

    def test_events_register_endpoint(self):
        # type: () -> None
        # This test is intended to get minimal coverage on the
        # events_register code paths
        email = 'hamlet@zulip.com'
        with mock.patch('zerver.views.events_register.do_events_register', return_value={}):
            result = self.client_post('/json/register', **self.api_auth(email))
        self.assert_json_success(result)

        # Queue allocation failure should surface as a JSON error.
        with mock.patch('zerver.lib.events.request_event_queue', return_value=None):
            result = self.client_post('/json/register', **self.api_auth(email))
        self.assert_json_error(result, "Could not allocate event queue")

        # Fresh queue with no pending events: last_event_id stays -1.
        with mock.patch('zerver.lib.events.request_event_queue', return_value='15:11'):
            with mock.patch('zerver.lib.events.get_user_events',
                            return_value=[]):
                result = self.client_post('/json/register', dict(event_types=ujson.dumps(['pointer'])),
                                          **self.api_auth(email))
        self.assert_json_success(result)
        result_dict = ujson.loads(result.content)
        self.assertEqual(result_dict['last_event_id'], -1)
        self.assertEqual(result_dict['queue_id'], '15:11')

        # A pending pointer event is applied to the initial state.
        with mock.patch('zerver.lib.events.request_event_queue', return_value='15:12'):
            with mock.patch('zerver.lib.events.get_user_events',
                            return_value=[{
                                'id': 6,
                                'type': 'pointer',
                                'pointer': 15,
                            }]):
                result = self.client_post('/json/register', dict(event_types=ujson.dumps(['pointer'])),
                                          **self.api_auth(email))
        self.assert_json_success(result)
        result_dict = ujson.loads(result.content)
        self.assertEqual(result_dict['last_event_id'], 6)
        self.assertEqual(result_dict['pointer'], 15)
        self.assertEqual(result_dict['queue_id'], '15:12')

        # Now test with `fetch_event_types` not matching the event
        with mock.patch('zerver.lib.events.request_event_queue', return_value='15:13'):
            with mock.patch('zerver.lib.events.get_user_events',
                            return_value=[{
                                'id': 6,
                                'type': 'pointer',
                                'pointer': 15,
                            }]):
                result = self.client_post('/json/register',
                                          dict(event_types=ujson.dumps(['pointer']),
                                               fetch_event_types=ujson.dumps(['message'])),
                                          **self.api_auth(email))
        self.assert_json_success(result)
        result_dict = ujson.loads(result.content)
        self.assertEqual(result_dict['last_event_id'], 6)
        # Check that the message event types data is in there
        self.assertIn('max_message_id', result_dict)
        # Check that the pointer event types data is not in there
        self.assertNotIn('pointer', result_dict)
        self.assertEqual(result_dict['queue_id'], '15:13')

        # Now test with `fetch_event_types` matching the event
        with mock.patch('zerver.lib.events.request_event_queue', return_value='15:13'):
            with mock.patch('zerver.lib.events.get_user_events',
                            return_value=[{
                                'id': 6,
                                'type': 'pointer',
                                'pointer': 15,
                            }]):
                result = self.client_post('/json/register',
                                          dict(fetch_event_types=ujson.dumps(['pointer']),
                                               event_types=ujson.dumps(['message'])),
                                          **self.api_auth(email))
        self.assert_json_success(result)
        result_dict = ujson.loads(result.content)
        self.assertEqual(result_dict['last_event_id'], 6)
        # Check that we didn't fetch the messages data
        self.assertNotIn('max_message_id', result_dict)
        # Check that the pointer data is in there, and is correctly
        # updated (preserving our atomicity guarantee), though of
        # course any future pointer events won't be distributed
        self.assertIn('pointer', result_dict)
        self.assertEqual(result_dict['pointer'], 15)
        self.assertEqual(result_dict['queue_id'], '15:13')

    def test_tornado_endpoint(self):
        # type: () -> None
        # This test is mostly intended to get minimal coverage on
        # the /notify_tornado endpoint, so we can have 100% URL coverage,
        # but it does exercise a little bit of the codepath.
        post_data = dict(
            data=ujson.dumps(
                dict(
                    event=dict(
                        type='other'
                    ),
                    users=[get_user_profile_by_email('hamlet@zulip.com').id],
                ),
            ),
        )
        # Without the shared secret the endpoint must refuse the request.
        req = POSTRequestMock(post_data, user_profile=None)
        req.META['REMOTE_ADDR'] = '127.0.0.1'
        result = self.client_post_request('/notify_tornado', req)
        self.assert_json_error(result, 'Access denied', status_code=403)

        # With the shared secret the notification is accepted.
        post_data['secret'] = settings.SHARED_SECRET
        req = POSTRequestMock(post_data, user_profile=None)
        req.META['REMOTE_ADDR'] = '127.0.0.1'
        result = self.client_post_request('/notify_tornado', req)
        self.assert_json_success(result)
class GetEventsTest(ZulipTestCase):
    """Tests for the Tornado get_events_backend long-polling view."""

    def tornado_call(self, view_func, user_profile, post_data):
        # type: (Callable[[HttpRequest, UserProfile], HttpResponse], UserProfile, Dict[str, Any]) -> HttpResponse
        # Invokes a Tornado view directly with a mocked POST request.
        request = POSTRequestMock(post_data, user_profile)
        return view_func(request, user_profile)

    def test_get_events(self):
        # type: () -> None
        email = "hamlet@zulip.com"
        recipient_email = "othello@zulip.com"
        user_profile = get_user_profile_by_email(email)
        recipient_user_profile = get_user_profile_by_email(recipient_email)
        self.login(email)

        # Register a "message"-only event queue for the sender ...
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"apply_markdown": ujson.dumps(True),
                                    "event_types": ujson.dumps(["message"]),
                                    "user_client": "website",
                                    "dont_block": ujson.dumps(True),
                                    })
        self.assert_json_success(result)
        queue_id = ujson.loads(result.content)["queue_id"]

        # ... and one for the recipient.
        recipient_result = self.tornado_call(get_events_backend, recipient_user_profile,
                                             {"apply_markdown": ujson.dumps(True),
                                              "event_types": ujson.dumps(["message"]),
                                              "user_client": "website",
                                              "dont_block": ujson.dumps(True),
                                              })
        self.assert_json_success(recipient_result)
        recipient_queue_id = ujson.loads(recipient_result.content)["queue_id"]

        # No events yet.
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 0)

        # Sending with sender_queue_id should echo the local_id back to
        # the sender's own queue.
        local_id = 10.01
        self.send_message(email, recipient_email, Recipient.PERSONAL, "hello", local_id=local_id, sender_queue_id=queue_id)

        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["sender_email"], email)
        self.assertEqual(events[0]["local_message_id"], local_id)
        self.assertEqual(events[0]["message"]["display_recipient"][0]["is_mirror_dummy"], False)
        self.assertEqual(events[0]["message"]["display_recipient"][1]["is_mirror_dummy"], False)

        last_event_id = events[0]["id"]
        local_id += 0.01

        # Polling from last_event_id should only return the new message.
        self.send_message(email, recipient_email, Recipient.PERSONAL, "hello", local_id=local_id, sender_queue_id=queue_id)

        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": last_event_id,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["sender_email"], email)
        self.assertEqual(events[0]["local_message_id"], local_id)

        # Test that the received message in the receiver's event queue
        # exists and does not contain a local id
        recipient_result = self.tornado_call(get_events_backend, recipient_user_profile,
                                             {"queue_id": recipient_queue_id,
                                              "user_client": "website",
                                              "last_event_id": -1,
                                              "dont_block": ujson.dumps(True),
                                              })
        recipient_events = ujson.loads(recipient_result.content)["events"]
        self.assert_json_success(recipient_result)
        self.assertEqual(len(recipient_events), 2)
        self.assertEqual(recipient_events[0]["type"], "message")
        self.assertEqual(recipient_events[0]["message"]["sender_email"], email)
        self.assertTrue("local_message_id" not in recipient_events[0])
        self.assertEqual(recipient_events[1]["type"], "message")
        self.assertEqual(recipient_events[1]["message"]["sender_email"], email)
        self.assertTrue("local_message_id" not in recipient_events[1])

    def test_get_events_narrow(self):
        # type: () -> None
        # A queue narrowed to a stream must only receive that stream's messages.
        email = "hamlet@zulip.com"
        user_profile = get_user_profile_by_email(email)
        self.login(email)

        result = self.tornado_call(get_events_backend, user_profile,
                                   {"apply_markdown": ujson.dumps(True),
                                    "event_types": ujson.dumps(["message"]),
                                    "narrow": ujson.dumps([["stream", "denmark"]]),
                                    "user_client": "website",
                                    "dont_block": ujson.dumps(True),
                                    })
        self.assert_json_success(result)
        queue_id = ujson.loads(result.content)["queue_id"]

        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 0)

        # The PM should be filtered out; only the Denmark message arrives.
        self.send_message(email, "othello@zulip.com", Recipient.PERSONAL, "hello")
        self.send_message(email, "Denmark", Recipient.STREAM, "hello")

        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["display_recipient"], "Denmark")
class EventsRegisterTest(ZulipTestCase):
user_profile = get_user_profile_by_email("hamlet@zulip.com")
maxDiff = None # type: Optional[int]
    def create_bot(self, email):
        # type: (str) -> UserProfile
        # Creates a default bot owned by self.user_profile in the zulip realm.
        return do_create_user(email, '123',
                              get_realm('zulip'), 'Test Bot', 'test',
                              bot_type=UserProfile.DEFAULT_BOT, bot_owner=self.user_profile)
    def realm_bot_schema(self, field_name, check):
        # type: (str, Validator) -> Validator
        # Builds the validator for a realm_bot/update event that changes
        # a single bot field (field_name, checked by `check`).
        return self.check_events_dict([
            ('type', equals('realm_bot')),
            ('op', equals('update')),
            ('bot', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                (field_name, check),
            ])),
        ])
    def do_test(self, action, event_types=None, include_subscribers=True, state_change_expected=True,
                num_events=1):
        # type: (Callable[[], Any], Optional[List[str]], bool, bool, int) -> List[Dict[str, Any]]
        """Runs `action`, captures the events it generates, and verifies
        that applying those events to a pre-action state snapshot yields
        exactly the state a fresh fetch would produce. Returns the events."""
        client = allocate_client_descriptor(
            dict(user_profile_id = self.user_profile.id,
                 user_profile_email = self.user_profile.email,
                 realm_id = self.user_profile.realm_id,
                 event_types = event_types,
                 client_type_name = "website",
                 apply_markdown = True,
                 all_public_streams = False,
                 queue_timeout = 600,
                 last_connection_time = time.time(),
                 narrow = [])
        )
        # hybrid_state = initial fetch state + re-applying events triggered by our action
        # normal_state = do action then fetch at the end (the "normal" code path)
        hybrid_state = fetch_initial_state_data(self.user_profile, event_types, "", include_subscribers=include_subscribers)
        action()
        events = client.event_queue.contents()
        self.assertTrue(len(events) == num_events)

        before = ujson.dumps(hybrid_state)
        apply_events(hybrid_state, events, self.user_profile, include_subscribers=include_subscribers)
        after = ujson.dumps(hybrid_state)

        # Guard against tests that don't actually exercise apply_events:
        # the state must change iff the caller said it would.
        if state_change_expected:
            if before == after:
                print(events)  # nocoverage
                raise AssertionError('Test does not exercise enough code -- events do not change state.')
        else:
            if before != after:
                raise AssertionError('Test is invalid--state actually does change here.')

        normal_state = fetch_initial_state_data(self.user_profile, event_types, "", include_subscribers=include_subscribers)
        self.match_states(hybrid_state, normal_state)
        return events
def assert_on_error(self, error):
# type: (Optional[str]) -> None
if error:
raise AssertionError(error)
    def match_states(self, state1, state2):
        # type: (Dict[str, Any], Dict[str, Any]) -> None
        """Asserts two state dicts are equal after normalizing
        order-insensitive fields (users/subscriptions keyed by name,
        subscriber lists sorted)."""
        def normalize(state):
            # type: (Dict[str, Any]) -> None
            state['realm_users'] = {u['email']: u for u in state['realm_users']}
            for u in state['subscriptions']:
                if 'subscribers' in u:
                    u['subscribers'].sort()
            state['subscriptions'] = {u['name']: u for u in state['subscriptions']}
            state['unsubscribed'] = {u['name']: u for u in state['unsubscribed']}
            if 'realm_bots' in state:
                state['realm_bots'] = {u['email']: u for u in state['realm_bots']}
        normalize(state1)
        normalize(state2)
        self.assertEqual(state1, state2)
def check_events_dict(self, required_keys):
# type: (List[Tuple[str, Validator]]) -> Validator
required_keys.append(('id', check_int))
return check_dict_only(required_keys)
    def test_send_message_events(self):
        # type: () -> None
        """Validates the event payloads for sending, editing, and
        embedded-data updates of a stream message."""
        schema_checker = self.check_events_dict([
            ('type', equals('message')),
            ('flags', check_list(None)),
            ('message', self.check_events_dict([
                ('avatar_url', check_string),
                ('client', check_string),
                ('content', check_string),
                ('content_type', equals('text/html')),
                ('display_recipient', check_string),
                ('gravatar_hash', check_string),
                ('is_mentioned', check_bool),
                ('reactions', check_list(None)),
                ('recipient_id', check_int),
                ('sender_realm_str', check_string),
                ('sender_email', check_string),
                ('sender_full_name', check_string),
                ('sender_id', check_int),
                ('sender_short_name', check_string),
                ('stream_id', check_int),
                ('subject', check_string),
                ('subject_links', check_list(None)),
                ('timestamp', check_int),
                ('type', check_string),
            ])),
        ])
        events = self.do_test(
            lambda: self.send_message("hamlet@zulip.com", "Verona", Recipient.STREAM, "hello"),
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

        # Verify message editing
        schema_checker = self.check_events_dict([
            ('type', equals('update_message')),
            ('flags', check_list(None)),
            ('content', check_string),
            ('edit_timestamp', check_int),
            ('flags', check_list(None)),
            ('message_id', check_int),
            ('message_ids', check_list(check_int)),
            ('orig_content', check_string),
            ('orig_rendered_content', check_string),
            ('orig_subject', check_string),
            ('prev_rendered_content_version', check_int),
            ('propagate_mode', check_string),
            ('rendered_content', check_string),
            ('sender', check_string),
            ('stream_id', check_int),
            ('subject', check_string),
            ('subject_links', check_list(None)),
            ('user_id', check_int),
        ])
        message = Message.objects.order_by('-id')[0]
        topic = 'new_topic'
        propagate_mode = 'change_all'
        content = 'new content'
        rendered_content = render_markdown(message, content)
        events = self.do_test(
            lambda: do_update_message(self.user_profile, message, topic,
                                      propagate_mode, content, rendered_content),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

        # Verify do_update_embedded_data
        schema_checker = self.check_events_dict([
            ('type', equals('update_message')),
            ('flags', check_list(None)),
            ('content', check_string),
            ('flags', check_list(None)),
            ('message_id', check_int),
            ('message_ids', check_list(check_int)),
            ('rendered_content', check_string),
            ('sender', check_string),
        ])
        events = self.do_test(
            lambda: do_update_embedded_data(self.user_profile, message,
                                            u"embed_content", "<p>embed_content</p>"),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_update_message_flags(self):
        # type: () -> None
        # Test message flag update events
        schema_checker = self.check_events_dict([
            ('all', check_bool),
            ('type', equals('update_message_flags')),
            ('flag', check_string),
            ('messages', check_list(check_int)),
            ('operation', equals("add")),
        ])
        message = self.send_message("cordelia@zulip.com", "hamlet@zulip.com", Recipient.PERSONAL, "hello")
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        # Starring a message produces an "add" flag event ...
        events = self.do_test(
            lambda: do_update_message_flags(user_profile, 'add', 'starred',
                                            [message], False, None, None),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

        # ... and unstarring produces the matching "remove" event.
        schema_checker = self.check_events_dict([
            ('all', check_bool),
            ('type', equals('update_message_flags')),
            ('flag', check_string),
            ('messages', check_list(check_int)),
            ('operation', equals("remove")),
        ])
        events = self.do_test(
            lambda: do_update_message_flags(user_profile, 'remove', 'starred',
                                            [message], False, None, None),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_send_reaction(self):
        # type: () -> None
        # Adding a reaction emits a reaction/add event with the reacting user.
        schema_checker = self.check_events_dict([
            ('type', equals('reaction')),
            ('op', equals('add')),
            ('message_id', check_int),
            ('emoji_name', check_string),
            ('user', check_dict_only([
                ('email', check_string),
                ('full_name', check_string),
                ('user_id', check_int)
            ])),
        ])
        message_id = self.send_message("hamlet@zulip.com", "Verona", Recipient.STREAM, "hello")
        message = Message.objects.get(id=message_id)
        events = self.do_test(
            lambda: do_add_reaction(
                self.user_profile, message, "tada"),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_remove_reaction(self):
        # type: () -> None
        # Removing a reaction emits the matching reaction/remove event.
        schema_checker = self.check_events_dict([
            ('type', equals('reaction')),
            ('op', equals('remove')),
            ('message_id', check_int),
            ('emoji_name', check_string),
            ('user', check_dict_only([
                ('email', check_string),
                ('full_name', check_string),
                ('user_id', check_int)
            ])),
        ])
        message_id = self.send_message("hamlet@zulip.com", "Verona", Recipient.STREAM, "hello")
        message = Message.objects.get(id=message_id)
        events = self.do_test(
            lambda: do_remove_reaction(
                self.user_profile, message, "tada"),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_typing_events(self):
        # type: () -> None
        # A typing "start" notification carries the sender and all recipients.
        schema_checker = self.check_events_dict([
            ('type', equals('typing')),
            ('op', equals('start')),
            ('sender', check_dict_only([
                ('email', check_string),
                ('user_id', check_int)])),
            ('recipients', check_list(check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
            ]))),
        ])
        events = self.do_test(
            lambda: check_send_typing_notification(
                self.user_profile, ["cordelia@zulip.com"], "start"),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_custom_profile_fields_events(self):
        # type: () -> None
        # Notifying about custom profile fields sends the full field list.
        schema_checker = self.check_events_dict([
            ('type', equals('custom_profile_fields')),
            ('fields', check_list(check_dict_only([
                ('type', check_int),
                ('name', check_string),
            ]))),
        ])
        events = self.do_test(
            lambda: notify_realm_custom_profile_fields(
                self.user_profile.realm),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_presence_events(self):
        # type: () -> None
        # Updating presence from the website client produces a presence
        # event keyed by that client name.
        schema_checker = self.check_events_dict([
            ('type', equals('presence')),
            ('email', check_string),
            ('server_timestamp', check_float),
            ('presence', check_dict_only([
                ('website', check_dict_only([
                    ('status', equals('active')),
                    ('timestamp', check_int),
                    ('client', check_string),
                    ('pushable', check_bool),
                ])),
            ])),
        ])
        events = self.do_test(lambda: do_update_user_presence(
            self.user_profile, get_client("website"), timezone_now(), UserPresence.ACTIVE))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_presence_events_multiple_clients(self):
        # type: () -> None
        """Validate presence events when a second (Android) client reports status."""
        schema_checker_android = self.check_events_dict([
            ('type', equals('presence')),
            ('email', check_string),
            ('server_timestamp', check_float),
            ('presence', check_dict_only([
                # Only the Android client's presence appears in its own event.
                ('ZulipAndroid/1.0', check_dict_only([
                    ('status', equals('idle')),
                    ('timestamp', check_int),
                    ('client', check_string),
                    ('pushable', check_bool),
                ])),
            ])),
        ])
        # Register the Android client via the API so it exists in presence data.
        self.client_post("/api/v1/users/me/presence", {'status': 'idle'},
                         HTTP_USER_AGENT="ZulipAndroid/1.0",
                         **self.api_auth(self.user_profile.email))
        # First event (website/active) is intentionally not schema-checked here.
        self.do_test(lambda: do_update_user_presence(
            self.user_profile, get_client("website"), timezone_now(), UserPresence.ACTIVE))
        events = self.do_test(lambda: do_update_user_presence(
            self.user_profile, get_client("ZulipAndroid/1.0"), timezone_now(), UserPresence.IDLE))
        error = schema_checker_android('events[0]', events[0])
        self.assert_on_error(error)
def test_pointer_events(self):
# type: () -> None
schema_checker = self.check_events_dict([
('type', equals('pointer')),
('pointer', check_int)
])
events = self.do_test(lambda: do_update_pointer(self.user_profile, 1500))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_referral_events(self):
        # type: () -> None
        """Validate the 'referral' event schema when a user refers a friend."""
        schema_checker = self.check_events_dict([
            ('type', equals('referral')),
            ('referrals', check_dict_only([
                ('granted', check_int),
                ('used', check_int),
            ])),
        ])
        events = self.do_test(lambda: do_refer_friend(self.user_profile, "friend@example.com"))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_register_events(self):
        # type: () -> None
        """Validate the realm_user 'add' event triggered by a new registration."""
        realm_user_add_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('add')),
            ('person', check_dict_only([
                ('user_id', check_int),
                ('email', check_string),
                ('avatar_url', check_string),
                ('full_name', check_string),
                ('is_admin', check_bool),
                ('is_bot', check_bool),
                ('timezone', check_string),
            ])),
        ])
        events = self.do_test(lambda: self.register("test1@zulip.com", "test1"))
        # Registration should produce exactly one event.
        self.assert_length(events, 1)
        error = realm_user_add_checker('events[0]', events[0])
        self.assert_on_error(error)
def test_alert_words_events(self):
# type: () -> None
alert_words_checker = self.check_events_dict([
('type', equals('alert_words')),
('alert_words', check_list(check_string)),
])
events = self.do_test(lambda: do_add_alert_words(self.user_profile, ["alert_word"]))
error = alert_words_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_remove_alert_words(self.user_profile, ["alert_word"]))
error = alert_words_checker('events[0]', events[0])
self.assert_on_error(error)
def test_default_streams_events(self):
# type: () -> None
default_streams_checker = self.check_events_dict([
('type', equals('default_streams')),
('default_streams', check_list(check_dict_only([
('description', check_string),
('invite_only', check_bool),
('name', check_string),
('stream_id', check_int),
]))),
])
stream = get_stream("Scotland", self.user_profile.realm)
events = self.do_test(lambda: do_add_default_stream(stream))
error = default_streams_checker('events[0]', events[0])
events = self.do_test(lambda: do_remove_default_stream(stream))
error = default_streams_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_muted_topics_events(self):
        # type: () -> None
        """Validate the 'muted_topics' event for set, add, and remove operations."""
        muted_topics_checker = self.check_events_dict([
            ('type', equals('muted_topics')),
            # Each entry is a [stream_name, topic_name] pair.
            ('muted_topics', check_list(check_list(check_string, 2))),
        ])
        events = self.do_test(lambda: do_set_muted_topics(self.user_profile, [[u"Denmark", u"topic"]]))
        error = muted_topics_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_update_muted_topic(
            self.user_profile, "Denmark", "topic", "add"))
        error = muted_topics_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_update_muted_topic(
            self.user_profile, "Denmark", "topic", "remove"))
        error = muted_topics_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_change_avatar_fields(self):
        # type: () -> None
        """Validate the realm_user 'update' event sent when a user's avatar changes."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                ('avatar_url', check_string),
            ])),
        ])
        events = self.do_test(
            lambda: do_change_avatar_fields(self.user_profile, UserProfile.AVATAR_FROM_USER),
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
def test_change_full_name(self):
# type: () -> None
schema_checker = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict_only([
('email', check_string),
('full_name', check_string),
('user_id', check_int),
])),
])
events = self.do_test(lambda: do_change_full_name(self.user_profile, 'Sir Hamlet'))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def do_set_realm_property_test(self, name):
        # type: (str) -> None
        """Exercise the realm 'update' event for a single realm property.

        Picks a validator based on the property's declared type, sets an
        initial value, then applies the remaining values and checks the
        resulting events against the schema.
        """
        bool_tests = [True, False, True]  # type: List[bool]
        # Candidate values per property name; each list must contain at
        # least two entries so we always trigger at least one transition.
        test_values = dict(
            add_emoji_by_admins_only=bool_tests,
            create_stream_by_admins_only=bool_tests,
            default_language=[u'es', u'de', u'en'],
            description=[u'Realm description', u'New description'],
            email_changes_disabled=bool_tests,
            invite_required=bool_tests,
            invite_by_admins_only=bool_tests,
            inline_image_preview=bool_tests,
            inline_url_embed_preview=bool_tests,
            message_retention_days=[10, 20],
            name=[u'Zulip', u'New Name'],
            name_changes_disabled=bool_tests,
            restricted_to_domain=bool_tests,
            waiting_period_threshold=[10, 20],
        )  # type: Dict[str, Any]

        # Map the property's declared Python type to a schema validator.
        property_type = Realm.property_types[name]
        if property_type is bool:
            validator = check_bool
        elif property_type is Text:
            validator = check_string
        elif property_type is int:
            validator = check_int
        elif property_type == (int, type(None)):
            # Optional int properties still emit an int value here.
            validator = check_int
        else:
            raise AssertionError("Unexpected property type %s" % (property_type,))
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals(name)),
            ('value', validator),
        ])

        vals = test_values.get(name)
        if vals is None:
            # Fail loudly if a new realm property lacks test coverage.
            raise AssertionError('No test created for %s' % (name))
        # Set the baseline outside do_test, then verify each transition.
        do_set_realm_property(self.user_profile.realm, name, vals[0])
        for val in vals[1:]:
            events = self.do_test(
                lambda: do_set_realm_property(self.user_profile.realm, name, val))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
def test_change_realm_property(self):
# type: () -> None
for prop in Realm.property_types:
self.do_set_realm_property_test(prop)
    def test_change_realm_authentication_methods(self):
        # type: () -> None
        """Validate the realm 'update_dict' event for authentication-method changes."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update_dict')),
            ('property', equals('default')),
            ('data', check_dict_only([
                ('authentication_methods', check_dict([]))
            ])),
        ])

        def fake_backends():
            # type: () -> Any
            # Temporarily enable all auth backends so any combination of
            # method flags is representable.
            backends = (
                'zproject.backends.DevAuthBackend',
                'zproject.backends.EmailAuthBackend',
                'zproject.backends.GitHubAuthBackend',
                'zproject.backends.GoogleMobileOauth2Backend',
                'zproject.backends.ZulipLDAPAuthBackend',
            )
            return self.settings(AUTHENTICATION_BACKENDS=backends)

        # Test transitions; any new backends should be tested with T/T/T/F/T
        for (auth_method_dict) in \
                ({'Google': True, 'Email': True, 'GitHub': True, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': True, 'GitHub': False, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': False, 'GitHub': False, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': False, 'GitHub': True, 'LDAP': False, 'Dev': False},
                 {'Google': False, 'Email': False, 'GitHub': False, 'LDAP': False, 'Dev': True},
                 {'Google': False, 'Email': False, 'GitHub': True, 'LDAP': False, 'Dev': True},
                 {'Google': False, 'Email': True, 'GitHub': True, 'LDAP': True, 'Dev': False}):
            with fake_backends():
                events = self.do_test(
                    lambda: do_set_realm_authentication_methods(
                        self.user_profile.realm,
                        auth_method_dict))

            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_pin_stream(self):
        # type: () -> None
        """Validate the subscription 'update' event for pinning/unpinning a stream."""
        schema_checker = self.check_events_dict([
            ('type', equals('subscription')),
            ('op', equals('update')),
            ('property', equals('pin_to_top')),
            ('stream_id', check_int),
            ('value', check_bool),
            ('name', check_string),
            ('email', check_string),
        ])
        stream = get_stream("Denmark", self.user_profile.realm)
        sub = get_subscription(stream.name, self.user_profile)
        # Start from a known unpinned state, then toggle both ways.
        do_change_subscription_property(self.user_profile, sub, stream, "pin_to_top", False)
        for pinned in (True, False):
            events = self.do_test(lambda: do_change_subscription_property(self.user_profile, sub, stream, "pin_to_top", pinned))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_realm_message_edit_settings(self):
        # type: () -> None
        """Validate the realm 'update_dict' event for message-editing settings."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update_dict')),
            ('property', equals('default')),
            ('data', check_dict_only([
                ('allow_message_editing', check_bool),
                ('message_content_edit_limit_seconds', check_int),
            ])),
        ])
        # Test every transition among the four possibilities {T,F} x {0, non-0}
        for (allow_message_editing, message_content_edit_limit_seconds) in \
            ((True, 0), (False, 0), (True, 0), (False, 1234), (True, 0), (True, 1234), (True, 0),
             (False, 0), (False, 1234), (False, 0), (True, 1234), (False, 0),
             (True, 1234), (True, 600), (False, 600), (False, 1234), (True, 600)):
            events = self.do_test(
                lambda: do_set_realm_message_editing(self.user_profile.realm,
                                                     allow_message_editing,
                                                     message_content_edit_limit_seconds))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_is_admin(self):
        # type: () -> None
        """Validate the realm_user 'update' event when admin status is toggled."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('email', check_string),
                ('is_admin', check_bool),
                ('user_id', check_int),
            ])),
        ])
        # Start from a known non-admin state, then toggle both ways.
        do_change_is_admin(self.user_profile, False)
        for is_admin in [True, False]:
            events = self.do_test(lambda: do_change_is_admin(self.user_profile, is_admin))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
def do_set_user_display_settings_test(self, setting_name, values_list):
# type: (str, List[Union[bool, Text]]) -> None
property_type = UserProfile.property_types[setting_name]
if property_type is bool:
validator = check_bool
elif property_type is Text:
validator = check_string
else:
raise AssertionError("Unexpected property type %s" % (property_type,))
num_events = 1
if setting_name == "timezone":
num_events = 2
if property_type == bool:
do_set_user_display_setting(self.user_profile, setting_name, False)
for value in values_list:
events = self.do_test(lambda: do_set_user_display_setting(
self.user_profile, setting_name, value), num_events=num_events)
schema_checker = self.check_events_dict([
('type', equals('update_display_settings')),
('setting_name', equals(setting_name)),
('user', check_string),
('setting', validator),
])
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
timezone_schema_checker = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict_only([
('email', check_string),
('user_id', check_int),
('timezone', check_string),
])),
])
if setting_name == "timezone":
error = timezone_schema_checker('events[1]', events[1])
    def test_change_twenty_four_hour_time(self):
        # type: () -> None
        """Exercise display-settings events for the 24-hour-time toggle."""
        self.do_set_user_display_settings_test("twenty_four_hour_time", [True, False])
    def test_change_left_side_userlist(self):
        # type: () -> None
        """Exercise display-settings events for the left-side-userlist toggle."""
        self.do_set_user_display_settings_test("left_side_userlist", [True, False])
    def test_change_emoji_alt_code(self):
        # type: () -> None
        """Exercise display-settings events for the emoji-alt-code toggle."""
        self.do_set_user_display_settings_test("emoji_alt_code", [True, False])
    def test_change_emojiset(self):
        # type: () -> None
        """Exercise display-settings events for switching emoji sets."""
        self.do_set_user_display_settings_test("emojiset", [u'apple', u'twitter'])
    def test_change_default_language(self):
        # type: () -> None
        """Exercise display-settings events for changing the default language."""
        self.do_set_user_display_settings_test("default_language", [u'de', u'es', u'en'])
    def test_change_timezone(self):
        # type: () -> None
        """Exercise display-settings events for timezone changes (incl. empty)."""
        self.do_set_user_display_settings_test("timezone", [u'US/Mountain', u'US/Samoa', u'Pacific/Galapagos', u''])
    def test_change_enable_stream_desktop_notifications(self):
        # type: () -> None
        """Validate the update_global_notifications event for stream desktop notifications."""
        schema_checker = self.check_events_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_stream_desktop_notifications')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # Baseline False, then toggle both ways and check each event.
        do_change_enable_stream_desktop_notifications(self.user_profile, False)
        for setting_value in [True, False]:
            events = self.do_test(lambda: do_change_enable_stream_desktop_notifications(self.user_profile, setting_value, log=False))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_enable_stream_sounds(self):
        # type: () -> None
        """Validate the update_global_notifications event for stream sounds."""
        schema_checker = self.check_events_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_stream_sounds')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # Baseline False, then toggle both ways and check each event.
        do_change_enable_stream_sounds(self.user_profile, False)
        for setting_value in [True, False]:
            events = self.do_test(lambda: do_change_enable_stream_sounds(self.user_profile, setting_value, log=False))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_enable_desktop_notifications(self):
        # type: () -> None
        """Validate the update_global_notifications event for desktop notifications."""
        schema_checker = self.check_events_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_desktop_notifications')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # Baseline False, then toggle both ways and check each event.
        do_change_enable_desktop_notifications(self.user_profile, False)
        for setting_value in [True, False]:
            events = self.do_test(lambda: do_change_enable_desktop_notifications(self.user_profile, setting_value, log=False))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_enable_sounds(self):
        # type: () -> None
        """Validate the update_global_notifications event for notification sounds."""
        schema_checker = self.check_events_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_sounds')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # Baseline False, then toggle both ways and check each event.
        do_change_enable_sounds(self.user_profile, False)
        for setting_value in [True, False]:
            events = self.do_test(lambda: do_change_enable_sounds(self.user_profile, setting_value, log=False))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_enable_offline_email_notifications(self):
        # type: () -> None
        """Validate the update_global_notifications event for offline email notifications."""
        schema_checker = self.check_events_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_offline_email_notifications')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # Baseline False, then toggle both ways and check each event.
        do_change_enable_offline_email_notifications(self.user_profile, False)
        for setting_value in [True, False]:
            events = self.do_test(lambda: do_change_enable_offline_email_notifications(self.user_profile, setting_value, log=False))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_enable_offline_push_notifications(self):
        # type: () -> None
        """Validate the update_global_notifications event for offline push notifications."""
        schema_checker = self.check_events_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_offline_push_notifications')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # Baseline False, then toggle both ways and check each event.
        do_change_enable_offline_push_notifications(self.user_profile, False)
        for setting_value in [True, False]:
            events = self.do_test(lambda: do_change_enable_offline_push_notifications(self.user_profile, setting_value, log=False))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_enable_online_push_notifications(self):
        # type: () -> None
        """Validate the update_global_notifications event for online push notifications."""
        schema_checker = self.check_events_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_online_push_notifications')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # Baseline False, then toggle both ways and check each event.
        do_change_enable_online_push_notifications(self.user_profile, False)
        for setting_value in [True, False]:
            events = self.do_test(lambda: do_change_enable_online_push_notifications(self.user_profile, setting_value, log=False))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_pm_content_in_desktop_notifications(self):
        # type: () -> None
        """Validate the update_global_notifications event for PM content in desktop notifications."""
        schema_checker = self.check_events_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('pm_content_in_desktop_notifications')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # Baseline False, then toggle both ways and check each event.
        do_change_pm_content_in_desktop_notifications(self.user_profile, False)
        for setting_value in [True, False]:
            events = self.do_test(
                lambda: do_change_pm_content_in_desktop_notifications(self.user_profile,
                                                                      setting_value,
                                                                      log=False),
            )
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_enable_digest_emails(self):
        # type: () -> None
        """Validate the update_global_notifications event for digest emails."""
        schema_checker = self.check_events_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals('enable_digest_emails')),
            ('user', check_string),
            ('setting', check_bool),
        ])
        # Baseline False, then toggle both ways and check each event.
        do_change_enable_digest_emails(self.user_profile, False)
        for setting_value in [True, False]:
            events = self.do_test(lambda: do_change_enable_digest_emails(self.user_profile, setting_value, log=False))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_realm_emoji_events(self):
        # type: () -> None
        """Validate the realm_emoji 'update' event for adding and removing an emoji."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_emoji')),
            ('op', equals('update')),
            # The payload is the full emoji dict; keys are emoji names.
            ('realm_emoji', check_dict([])),
        ])
        events = self.do_test(lambda: check_add_realm_emoji(get_realm("zulip"), "my_emoji",
                                                            "https://realm.com/my_emoji"))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Removal produces the same event shape with the emoji gone.
        events = self.do_test(lambda: do_remove_realm_emoji(get_realm("zulip"), "my_emoji"))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
def test_realm_filter_events(self):
# type: () -> None
schema_checker = self.check_events_dict([
('type', equals('realm_filters')),
('realm_filters', check_list(None)), # TODO: validate tuples in the list
])
events = self.do_test(lambda: do_add_realm_filter(get_realm("zulip"), "#(?P<id>[123])",
"https://realm.com/my_realm_filter/%(id)s"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
self.do_test(lambda: do_remove_realm_filter(get_realm("zulip"), "#(?P<id>[123])"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_realm_domain_events(self):
        # type: () -> None
        """Validate the realm_domains events for add, change, and remove ops."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_domains')),
            ('op', equals('add')),
            ('realm_domain', check_dict_only([
                ('domain', check_string),
                ('allow_subdomains', check_bool),
            ])),
        ])
        realm = get_realm('zulip')
        events = self.do_test(lambda: do_add_realm_domain(realm, 'zulip.org', False))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

        # The 'change' event echoes the updated domain record.
        schema_checker = self.check_events_dict([
            ('type', equals('realm_domains')),
            ('op', equals('change')),
            ('realm_domain', check_dict_only([
                ('domain', equals('zulip.org')),
                ('allow_subdomains', equals(True)),
            ])),
        ])
        test_domain = RealmDomain.objects.get(realm=realm, domain='zulip.org')
        events = self.do_test(lambda: do_change_realm_domain(test_domain, True))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

        # The 'remove' event carries only the domain name.
        schema_checker = self.check_events_dict([
            ('type', equals('realm_domains')),
            ('op', equals('remove')),
            ('domain', equals('zulip.org')),
        ])
        events = self.do_test(lambda: do_remove_realm_domain(test_domain))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_create_bot(self):
        # type: () -> None
        """Validate the realm_bot 'add' event emitted when a bot is created."""
        bot_created_checker = self.check_events_dict([
            ('type', equals('realm_bot')),
            ('op', equals('add')),
            ('bot', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                ('full_name', check_string),
                ('is_active', check_bool),
                ('api_key', check_string),
                ('default_sending_stream', check_none_or(check_string)),
                ('default_events_register_stream', check_none_or(check_string)),
                ('default_all_public_streams', check_bool),
                ('avatar_url', check_string),
                ('owner', check_string),
            ])),
        ])
        action = lambda: self.create_bot('test-bot@zulip.com')
        # Bot creation emits two events; the realm_bot event is the second.
        events = self.do_test(action, num_events=2)
        error = bot_created_checker('events[1]', events[1])
        self.assert_on_error(error)
    def test_change_bot_full_name(self):
        # type: () -> None
        """Validate the realm_bot update event when a bot's full name changes."""
        bot = self.create_bot('test-bot@zulip.com')
        action = lambda: do_change_full_name(bot, 'New Bot Name')
        # Two events fire; the realm_bot update is the second.
        events = self.do_test(action, num_events=2)
        error = self.realm_bot_schema('full_name', check_string)('events[1]', events[1])
        self.assert_on_error(error)
    def test_regenerate_bot_api_key(self):
        # type: () -> None
        """Validate the realm_bot update event when a bot's API key is regenerated."""
        bot = self.create_bot('test-bot@zulip.com')
        action = lambda: do_regenerate_api_key(bot, self.user_profile)
        events = self.do_test(action)
        error = self.realm_bot_schema('api_key', check_string)('events[0]', events[0])
        self.assert_on_error(error)
    def test_change_bot_avatar_source(self):
        # type: () -> None
        """Validate events emitted when a bot's avatar source changes."""
        bot = self.create_bot('test-bot@zulip.com')
        action = lambda: do_change_avatar_fields(bot, bot.AVATAR_FROM_USER)
        events = self.do_test(action, num_events=2)
        # First event: realm_bot avatar_url update; second: realm_user update.
        error = self.realm_bot_schema('avatar_url', check_string)('events[0]', events[0])
        self.assertEqual(events[1]['type'], 'realm_user')
        self.assert_on_error(error)
    def test_change_realm_icon_source(self):
        # type: () -> None
        """Validate the realm 'update_dict' event when the realm icon source changes."""
        realm = get_realm('zulip')
        action = lambda: do_change_icon_source(realm, realm.ICON_FROM_GRAVATAR)
        # Icon source is not part of the tracked client state here.
        events = self.do_test(action, state_change_expected=False)
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update_dict')),
            ('property', equals('icon')),
            ('data', check_dict_only([
                ('icon_url', check_string),
                ('icon_source', check_string),
            ])),
        ])
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_change_bot_default_all_public_streams(self):
        # type: () -> None
        """Validate the realm_bot update event for default_all_public_streams."""
        bot = self.create_bot('test-bot@zulip.com')
        action = lambda: do_change_default_all_public_streams(bot, True)
        events = self.do_test(action)
        error = self.realm_bot_schema('default_all_public_streams', check_bool)('events[0]', events[0])
        self.assert_on_error(error)
    def test_change_bot_default_sending_stream(self):
        # type: () -> None
        """Validate the realm_bot update event when setting/clearing a bot's default sending stream."""
        bot = self.create_bot('test-bot@zulip.com')
        stream = get_stream("Rome", bot.realm)

        action = lambda: do_change_default_sending_stream(bot, stream)
        events = self.do_test(action)
        error = self.realm_bot_schema('default_sending_stream', check_string)('events[0]', events[0])
        self.assert_on_error(error)

        # Clearing the stream emits the same event with a None value.
        action = lambda: do_change_default_sending_stream(bot, None)
        events = self.do_test(action)
        error = self.realm_bot_schema('default_sending_stream', equals(None))('events[0]', events[0])
        self.assert_on_error(error)
    def test_change_bot_default_events_register_stream(self):
        # type: () -> None
        """Validate the realm_bot update event when setting/clearing a bot's default events-register stream."""
        bot = self.create_bot('test-bot@zulip.com')
        stream = get_stream("Rome", bot.realm)

        action = lambda: do_change_default_events_register_stream(bot, stream)
        events = self.do_test(action)
        error = self.realm_bot_schema('default_events_register_stream', check_string)('events[0]', events[0])
        self.assert_on_error(error)

        # Clearing the stream emits the same event with a None value.
        action = lambda: do_change_default_events_register_stream(bot, None)
        events = self.do_test(action)
        error = self.realm_bot_schema('default_events_register_stream', equals(None))('events[0]', events[0])
        self.assert_on_error(error)
    def test_change_bot_owner(self):
        # type: () -> None
        """Validate the realm_bot update event when a bot's owner changes."""
        change_bot_owner_checker = self.check_events_dict([
            ('type', equals('realm_bot')),
            ('op', equals('update')),
            ('bot', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                ('owner_id', check_int),
            ])),
        ])
        # Act as an admin (iago) transferring the bot to hamlet.
        self.user_profile = get_user_profile_by_email('iago@zulip.com')
        owner = get_user_profile_by_email('hamlet@zulip.com')
        bot = self.create_bot('test-bot@zulip.com')
        action = lambda: do_change_bot_owner(bot, owner, self.user_profile)
        events = self.do_test(action)
        error = change_bot_owner_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_do_deactivate_user(self):
        # type: () -> None
        """Validate the realm_bot 'remove' event when a bot user is deactivated."""
        bot_deactivate_checker = self.check_events_dict([
            ('type', equals('realm_bot')),
            ('op', equals('remove')),
            ('bot', check_dict_only([
                ('email', check_string),
                ('full_name', check_string),
                ('user_id', check_int),
            ])),
        ])
        bot = self.create_bot('foo-bot@zulip.com')
        action = lambda: do_deactivate_user(bot)
        # Deactivation emits two events; the realm_bot removal is the second.
        events = self.do_test(action, num_events=2)
        error = bot_deactivate_checker('events[1]', events[1])
        self.assert_on_error(error)
    def test_do_reactivate_user(self):
        # type: () -> None
        """Validate the realm_bot 'add' event when a deactivated bot is reactivated."""
        bot_reactivate_checker = self.check_events_dict([
            ('type', equals('realm_bot')),
            ('op', equals('add')),
            ('bot', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                ('full_name', check_string),
                ('is_active', check_bool),
                ('api_key', check_string),
                ('default_sending_stream', check_none_or(check_string)),
                ('default_events_register_stream', check_none_or(check_string)),
                ('default_all_public_streams', check_bool),
                ('avatar_url', check_string),
                # Unlike creation, owner may be None after reactivation.
                ('owner', check_none_or(check_string)),
            ])),
        ])
        bot = self.create_bot('foo-bot@zulip.com')
        do_deactivate_user(bot)
        action = lambda: do_reactivate_user(bot)
        # Reactivation emits two events; the realm_bot add is the second.
        events = self.do_test(action, num_events=2)
        error = bot_reactivate_checker('events[1]', events[1])
        self.assert_on_error(error)
    def test_do_mark_hotspot_as_read(self):
        # type: () -> None
        """Validate the 'hotspots' event when a hotspot is marked as read."""
        schema_checker = self.check_events_dict([
            ('type', equals('hotspots')),
            ('hotspots', check_list(check_string)),
        ])
        events = self.do_test(lambda: do_mark_hotspot_as_read(self.user_profile, 'welcome'))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_rename_stream(self):
        # type: () -> None
        """Validate both stream 'update' events emitted by a stream rename
        (email_address change followed by name change).
        """
        stream = self.make_stream('old_name')
        new_name = u'stream with a brand new name'
        self.subscribe_to_stream(self.user_profile.email, stream.name)
        action = lambda: do_rename_stream(stream, new_name)
        events = self.do_test(action, num_events=2)

        # First event: the stream's email address changes with its name.
        schema_checker = self.check_events_dict([
            ('type', equals('stream')),
            ('op', equals('update')),
            ('property', equals('email_address')),
            ('value', check_string),
            ('stream_id', check_int),
            ('name', equals('old_name')),
        ])
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

        # Second event: the actual name change, still keyed by the old name.
        schema_checker = self.check_events_dict([
            ('type', equals('stream')),
            ('op', equals('update')),
            ('property', equals('name')),
            ('value', equals(new_name)),
            ('name', equals('old_name')),
            ('stream_id', check_int),
        ])
        error = schema_checker('events[1]', events[1])
        self.assert_on_error(error)
    def test_deactivate_stream_neversubscribed(self):
        # type: () -> None
        """Validate the stream 'delete' event for a stream with no subscribers."""
        stream = self.make_stream('old_name')
        action = lambda: do_deactivate_stream(stream)
        events = self.do_test(action)
        schema_checker = self.check_events_dict([
            ('type', equals('stream')),
            ('op', equals('delete')),
            ('streams', check_list(check_dict([]))),
        ])
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_subscribe_other_user_never_subscribed(self):
        # type: () -> None
        """Validate the subscription 'peer_add' event when another user subscribes."""
        action = lambda: self.subscribe_to_stream("othello@zulip.com", u"test_stream")
        # Two events fire; the peer_add event is the second.
        events = self.do_test(action, num_events=2)
        peer_add_schema_checker = self.check_events_dict([
            ('type', equals('subscription')),
            ('op', equals('peer_add')),
            ('user_id', check_int),
            ('subscriptions', check_list(check_string)),
        ])
        error = peer_add_schema_checker('events[1]', events[1])
        self.assert_on_error(error)
    def test_subscribe_events(self):
        # type: () -> None
        """Run the subscription-event suite with subscriber lists included."""
        self.do_test_subscribe_events(include_subscribers=True)
    def test_subscribe_events_no_include_subscribers(self):
        # type: () -> None
        """Run the subscription-event suite with subscriber lists omitted."""
        self.do_test_subscribe_events(include_subscribers=False)
def do_test_subscribe_events(self, include_subscribers):
# type: (bool) -> None
subscription_fields = [
('color', check_string),
('description', check_string),
('email_address', check_string),
('invite_only', check_bool),
('in_home_view', check_bool),
('name', check_string),
('desktop_notifications', check_bool),
('audible_notifications', check_bool),
('stream_id', check_int),
]
if include_subscribers:
subscription_fields.append(('subscribers', check_list(check_int))) # type: ignore
subscription_schema_checker = check_list(
check_dict(subscription_fields), # TODO: Can this be converted to check_dict_only?
)
stream_create_schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('create')),
('streams', check_list(check_dict_only([
('name', check_string),
('stream_id', check_int),
('invite_only', check_bool),
('description', check_string),
]))),
])
add_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('add')),
('subscriptions', subscription_schema_checker),
])
remove_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('remove')),
('subscriptions', check_list(
check_dict_only([
('name', equals('test_stream')),
('stream_id', check_int),
]),
)),
])
peer_add_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('peer_add')),
('user_id', check_int),
('subscriptions', check_list(check_string)),
])
peer_remove_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('peer_remove')),
('user_id', check_int),
('subscriptions', check_list(check_string)),
])
stream_update_schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('description')),
('value', check_string),
('stream_id', check_int),
('name', check_string),
])
# Subscribe to a totally new stream, so it's just Hamlet on it
action = lambda: self.subscribe_to_stream("hamlet@zulip.com", "test_stream") # type: Callable
events = self.do_test(action, event_types=["subscription", "realm_user"],
include_subscribers=include_subscribers)
error = add_schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Add another user to that totally new stream
action = lambda: self.subscribe_to_stream("othello@zulip.com", "test_stream")
events = self.do_test(action,
include_subscribers=include_subscribers,
state_change_expected=include_subscribers,
)
error = peer_add_schema_checker('events[0]', events[0])
self.assert_on_error(error)
stream = get_stream("test_stream", self.user_profile.realm)
# Now remove the first user, to test the normal unsubscribe flow
action = lambda: bulk_remove_subscriptions(
[get_user_profile_by_email("othello@zulip.com")],
[stream])
events = self.do_test(action,
include_subscribers=include_subscribers,
state_change_expected=include_subscribers,
)
error = peer_remove_schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Now remove the second user, to test the 'vacate' event flow
action = lambda: bulk_remove_subscriptions(
[get_user_profile_by_email("hamlet@zulip.com")],
[stream])
events = self.do_test(action,
include_subscribers=include_subscribers,
num_events=2)
error = remove_schema_checker('events[1]', events[1])
self.assert_on_error(error)
# Now resubscribe a user, to make sure that works on a vacated stream
action = lambda: self.subscribe_to_stream("hamlet@zulip.com", "test_stream")
events = self.do_test(action,
include_subscribers=include_subscribers,
num_events=2)
error = add_schema_checker('events[1]', events[1])
self.assert_on_error(error)
action = lambda: do_change_stream_description(stream, u'new description')
events = self.do_test(action,
include_subscribers=include_subscribers)
error = stream_update_schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Subscribe to a totally new invite-only stream, so it's just Hamlet on it
stream = self.make_stream("private", get_realm("zulip"), invite_only=True)
user_profile = get_user_profile_by_email("hamlet@zulip.com")
action = lambda: bulk_add_subscriptions([stream], [user_profile])
events = self.do_test(action, include_subscribers=include_subscribers,
num_events=2)
error = stream_create_schema_checker('events[0]', events[0])
error = add_schema_checker('events[1]', events[1])
self.assert_on_error(error)
class FetchInitialStateDataTest(ZulipTestCase):
    """Tests for the initial state payload returned by fetch_initial_state_data."""

    # Non-admin users don't have access to all bots
    def test_realm_bots_non_admin(self):
        # type: () -> None
        """Non-admins see no realm bots and no bot API keys in the payload."""
        email = 'cordelia@zulip.com'
        user_profile = get_user_profile_by_email(email)
        self.assertFalse(user_profile.is_realm_admin)
        result = fetch_initial_state_data(user_profile, None, "")
        self.assert_length(result['realm_bots'], 0)

        # additionally the API key for a random bot is not present in the data
        api_key = get_user_profile_by_email('notification-bot@zulip.com').api_key
        self.assertNotIn(api_key, str(result))

    # Admin users have access to all bots in the realm_bots field
    def test_realm_bots_admin(self):
        # type: () -> None
        """Admins see the realm's bots in the realm_bots field."""
        email = 'hamlet@zulip.com'
        user_profile = get_user_profile_by_email(email)
        do_change_is_admin(user_profile, True)
        self.assertTrue(user_profile.is_realm_admin)
        result = fetch_initial_state_data(user_profile, None, "")
        self.assertTrue(len(result['realm_bots']) > 5)

    def test_max_message_id_with_no_history(self):
        # type: () -> None
        """max_message_id is -1 for a user with no message history."""
        email = 'aaron@zulip.com'
        user_profile = get_user_profile_by_email(email)
        # Delete all historical messages for this user
        UserMessage.objects.filter(user_profile=user_profile).delete()
        result = fetch_initial_state_data(user_profile, None, "")
        self.assertEqual(result['max_message_id'], -1)
class EventQueueTest(TestCase):
    """Unit tests for EventQueue's id assignment and collapsing rules.
    The expected literals below show that: ids count pushes in order;
    consecutive pointer events collapse to the most recent one; and
    update_message_flags events with matching flag/operation merge their
    message lists.  Event types the queue does not understand are kept
    verbatim.
    """
    def test_one_event(self):
        # type: () -> None
        queue = EventQueue("1")
        queue.push({"type": "pointer",
                    "pointer": 1,
                    "timestamp": "1"})
        self.assertFalse(queue.empty())
        # A lone event is returned unchanged, with id 0 (first push).
        self.assertEqual(queue.contents(),
                         [{'id': 0,
                           'type': 'pointer',
                           "pointer": 1,
                           "timestamp": "1"}])
    def test_event_collapsing(self):
        # type: () -> None
        queue = EventQueue("1")
        for pointer_val in range(1, 10):
            queue.push({"type": "pointer",
                        "pointer": pointer_val,
                        "timestamp": str(pointer_val)})
        # Nine pointer events collapse to the last one; its id (8) reflects
        # that it was the ninth push overall.
        self.assertEqual(queue.contents(),
                         [{'id': 8,
                           'type': 'pointer',
                           "pointer": 9,
                           "timestamp": "9"}])
        queue = EventQueue("2")
        for pointer_val in range(1, 10):
            queue.push({"type": "pointer",
                        "pointer": pointer_val,
                        "timestamp": str(pointer_val)})
        queue.push({"type": "unknown"})
        queue.push({"type": "restart", "server_generation": "1"})
        for pointer_val in range(11, 20):
            queue.push({"type": "pointer",
                        "pointer": pointer_val,
                        "timestamp": str(pointer_val)})
        queue.push({"type": "restart", "server_generation": "2"})
        # The unknown event survives untouched; both pointer runs collapse;
        # the second restart replaces the first.
        self.assertEqual(queue.contents(),
                         [{"type": "unknown",
                           "id": 9},
                          {'id': 19,
                           'type': 'pointer',
                           "pointer": 19,
                           "timestamp": "19"},
                          {"id": 20,
                           "type": "restart",
                           "server_generation": "2"}])
        for pointer_val in range(21, 23):
            queue.push({"type": "pointer",
                        "pointer": pointer_val,
                        "timestamp": str(pointer_val)})
        # New pointer events after the restart collapse into a fresh entry
        # rather than merging with the pre-restart pointer event.
        self.assertEqual(queue.contents(),
                         [{"type": "unknown",
                           "id": 9},
                          {'id': 19,
                           'type': 'pointer',
                           "pointer": 19,
                           "timestamp": "19"},
                          {"id": 20,
                           "type": "restart",
                           "server_generation": "2"},
                          {'id': 22,
                           'type': 'pointer',
                           "pointer": 22,
                           "timestamp": "22"},
                          ])
    def test_flag_add_collapsing(self):
        # type: () -> None
        queue = EventQueue("1")
        queue.push({"type": "update_message_flags",
                    "flag": "read",
                    "operation": "add",
                    "all": False,
                    "messages": [1, 2, 3, 4],
                    "timestamp": "1"})
        queue.push({"type": "update_message_flags",
                    "flag": "read",
                    "all": False,
                    "operation": "add",
                    "messages": [5, 6],
                    "timestamp": "1"})
        # Two "add read" events merge into one with the combined message list.
        self.assertEqual(queue.contents(),
                         [{'id': 1,
                           'type': 'update_message_flags',
                           "all": False,
                           "flag": "read",
                           "operation": "add",
                           "messages": [1, 2, 3, 4, 5, 6],
                           "timestamp": "1"}])
    def test_flag_remove_collapsing(self):
        # type: () -> None
        queue = EventQueue("1")
        queue.push({"type": "update_message_flags",
                    "flag": "collapsed",
                    "operation": "remove",
                    "all": False,
                    "messages": [1, 2, 3, 4],
                    "timestamp": "1"})
        queue.push({"type": "update_message_flags",
                    "flag": "collapsed",
                    "all": False,
                    "operation": "remove",
                    "messages": [5, 6],
                    "timestamp": "1"})
        # "remove" operations merge the same way "add" operations do.
        self.assertEqual(queue.contents(),
                         [{'id': 1,
                           'type': 'update_message_flags',
                           "all": False,
                           "flag": "collapsed",
                           "operation": "remove",
                           "messages": [1, 2, 3, 4, 5, 6],
                           "timestamp": "1"}])
    def test_collapse_event(self):
        # type: () -> None
        queue = EventQueue("1")
        queue.push({"type": "pointer",
                    "pointer": 1,
                    "timestamp": "1"})
        queue.push({"type": "unknown",
                    "timestamp": "1"})
        # Events of differing types never merge with each other.
        self.assertEqual(queue.contents(),
                         [{'id': 0,
                           'type': 'pointer',
                           "pointer": 1,
                           "timestamp": "1"},
                          {'id': 1,
                           'type': 'unknown',
                           "timestamp": "1"}])
class TestEventsRegisterAllPublicStreamsDefaults(TestCase):
    """_default_all_public_streams(): an explicitly passed value always wins;
    the profile-level default only fills in when the caller passes None."""

    def setUp(self):
        # type: () -> None
        self.email = 'hamlet@zulip.com'
        self.user_profile = get_user_profile_by_email(self.email)

    def _resolve(self, profile_default, passed):
        # Persist the profile-level default, then resolve it against the
        # explicitly passed value exactly as the register endpoint would.
        self.user_profile.default_all_public_streams = profile_default
        self.user_profile.save()
        return _default_all_public_streams(self.user_profile, passed)

    def test_use_passed_all_public_true_default_false(self):
        # type: () -> None
        self.assertTrue(self._resolve(False, True))

    def test_use_passed_all_public_true_default(self):
        # type: () -> None
        self.assertTrue(self._resolve(True, True))

    def test_use_passed_all_public_false_default_false(self):
        # type: () -> None
        self.assertFalse(self._resolve(False, False))

    def test_use_passed_all_public_false_default_true(self):
        # type: () -> None
        self.assertFalse(self._resolve(True, False))

    def test_use_true_default_for_none(self):
        # type: () -> None
        self.assertTrue(self._resolve(True, None))

    def test_use_false_default_for_none(self):
        # type: () -> None
        self.assertFalse(self._resolve(False, None))
class TestEventsRegisterNarrowDefaults(TestCase):
    """_default_narrow(): a non-empty narrow always wins; the profile's
    default stream only applies when the caller passes an empty narrow."""

    def setUp(self):
        # type: () -> None
        self.email = 'hamlet@zulip.com'
        self.user_profile = get_user_profile_by_email(self.email)
        self.stream = get_stream('Verona', self.user_profile.realm)

    def _resolve(self, default_stream_id, narrow):
        # Persist the default events_register stream, then compute the
        # effective narrow the way the register endpoint does.
        self.user_profile.default_events_register_stream_id = default_stream_id
        self.user_profile.save()
        return _default_narrow(self.user_profile, narrow)

    def test_use_passed_narrow_no_default(self):
        # type: () -> None
        result = self._resolve(None, [[u'stream', u'my_stream']])
        self.assertEqual(result, [[u'stream', u'my_stream']])

    def test_use_passed_narrow_with_default(self):
        # type: () -> None
        result = self._resolve(self.stream.id, [[u'stream', u'my_stream']])
        self.assertEqual(result, [[u'stream', u'my_stream']])

    def test_use_default_if_narrow_is_empty(self):
        # type: () -> None
        self.assertEqual(self._resolve(self.stream.id, []),
                         [[u'stream', u'Verona']])

    def test_use_narrow_if_default_is_none(self):
        # type: () -> None
        self.assertEqual(self._resolve(None, []), [])
| 43.372306 | 133 | 0.571038 |
795783a4216aab9423b51d9926f5f9d4c39cf17d | 1,138 | py | Python | kanon_api/routes/calendars.py | legau/kanon-api | bae8fcba11caefa2f6715247852f853bb52fb9a6 | [
"BSD-3-Clause"
] | null | null | null | kanon_api/routes/calendars.py | legau/kanon-api | bae8fcba11caefa2f6715247852f853bb52fb9a6 | [
"BSD-3-Clause"
] | 80 | 2021-04-21T16:02:03.000Z | 2022-03-28T00:48:58.000Z | kanon_api/routes/calendars.py | legau/kanon-api | bae8fcba11caefa2f6715247852f853bb52fb9a6 | [
"BSD-3-Clause"
] | null | null | null | import dataclasses
from fastapi.param_functions import Depends
from fastapi.routing import APIRouter
from kanon.calendars.calendars import Calendar, hours_to_day
from kanon_api.utils import DateParams, safe_calendar, safe_date
router = APIRouter(prefix="/calendars", tags=["calendars"])
@router.get("/{calendar}/to_jdn/")
def get_to_jdn(
    calendar: Calendar = Depends(safe_calendar),
    date_params: DateParams = Depends(),
):
    """Convert a calendar date into its Julian Day Number."""
    resolved = safe_date(calendar, date_params)
    return {"jdn": resolved.jdn, "date": str(resolved)}
@router.get("/{calendar}/from_jdn/")
def get_from_jdn(jdn: float, calendar: Calendar = Depends(safe_calendar)):
    """Convert a Julian Day Number into a date of the requested calendar."""
    converted = calendar.from_julian_days(jdn)
    return {
        "date": str(converted),
        "ymd": converted.ymd,
        "frac": hours_to_day(converted.hours),
    }
@router.get("/{calendar}/infos")
def get_infos(calendar: Calendar = Depends(safe_calendar)):
    """Describe the structural properties of a calendar."""
    month_dicts = [dataclasses.asdict(month) for month in calendar.months]
    return {
        "common_year": calendar.common_year,
        "leap_year": calendar.leap_year,
        "months": month_dicts,
        "name": calendar.name,
        "cycle": calendar.cycle,
        "era": calendar.era.epoch,
    }
| 27.095238 | 81 | 0.699473 |
795784107a2af5a80883f455d84274dcf5b36d65 | 5,609 | py | Python | src/nimbro_vis/contrib/rqt/rqt_gui/src/rqt_gui/rospkg_plugin_provider.py | nvtienanh/UXA_OP | a06a3f1113721627e7d384f89718369036e8028e | [
"BSD-3-Clause"
] | 2 | 2018-11-22T08:15:43.000Z | 2020-07-22T07:18:50.000Z | src/nimbro_vis/contrib/rqt/rqt_gui/src/rqt_gui/rospkg_plugin_provider.py | nvtienanh/UXA_OP | a06a3f1113721627e7d384f89718369036e8028e | [
"BSD-3-Clause"
] | 1 | 2018-11-22T08:34:34.000Z | 2018-11-22T08:34:34.000Z | src/nimbro_vis/contrib/rqt/rqt_gui/src/rqt_gui/rospkg_plugin_provider.py | nvtienanh/UXA_OP | a06a3f1113721627e7d384f89718369036e8028e | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2011, Dirk Thomas, Dorian Scholz, TU Darmstadt
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the TU Darmstadt nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
from python_qt_binding.QtCore import qDebug, qWarning
from rospkg.common import MANIFEST_FILE, PACKAGE_FILE
from rospkg.manifest import parse_manifest_file, InvalidManifest
from .ros_plugin_provider import RosPluginProvider
class RospkgPluginProvider(RosPluginProvider):
    """`RosPluginProvider` using rospkg: discovers plugin descriptions
    exported by ROS packages, optionally caching crawl results in the
    provided discovery settings."""
    # Shared, lazily created RosPack instance (class-level cache).
    rospack = None
    def __init__(self, export_tag, base_class_type):
        super(RospkgPluginProvider, self).__init__(export_tag, base_class_type)
        self.setObjectName('RospkgPluginProvider')
        if RospkgPluginProvider.rospack is None:
            from rospkg import RosPack
            RospkgPluginProvider.rospack = RosPack()
    def _find_plugins(self, export_tag, discovery_data):
        """Return [package_name, plugin_description] pairs for every plugin
        exported under `export_tag`.
        If `discovery_data` already holds a cached crawl for this tag the
        cache is used; otherwise every ROS package is crawled (rosbuild
        manifest.xml first, then catkin package.xml) and the result is
        written back to the cache."""
        crawl = True
        if discovery_data:
            data = discovery_data.get_settings('rqt_gui.RospkgPluginProvider')
            export_data = data.get_settings(export_tag)
            # A child group for this tag means a previous crawl was cached.
            crawl = export_tag not in data.child_groups()
        plugins = []
        if crawl:
            qDebug("RospkgPluginProvider._find_plugins() crawling for plugins of type '%s'" % export_tag)
            r = RospkgPluginProvider.rospack
            for package_name in r.list():
                package_path = r.get_path(package_name)
                manifest_file_path = os.path.join(package_path, MANIFEST_FILE)
                if os.path.isfile(manifest_file_path):
                    # rosbuild-style package: read exports from manifest.xml.
                    try:
                        manifest = parse_manifest_file(package_path, MANIFEST_FILE)
                    except InvalidManifest as e:
                        qWarning('Could not parse manifest "%s":\n%s' % (manifest_file_path, e))
                        continue
                    exports = manifest.get_export(export_tag, 'plugin')
                    for export in exports:
                        plugins.append([package_name, str(export)])
                    continue
                package_file_path = os.path.join(package_path, PACKAGE_FILE)
                if os.path.isfile(package_file_path):
                    # only try to import catkin if a PACKAGE_FILE is found
                    try:
                        from catkin_pkg.package import parse_package, InvalidPackage
                    except ImportError as e:
                        qWarning('Package "%s" has a package file, but import of parser failed:\n%s' % (package_path, e))
                        continue
                    try:
                        package = parse_package(package_file_path)
                    except InvalidPackage as e:
                        qWarning('Could not parse package file "%s":\n%s' % (package_file_path, e))
                        continue
                    for export in package.exports:
                        if export.tagname != export_tag or 'plugin' not in export.attributes:
                            continue
                        plugin_xml_path = export.attributes['plugin']
                        # Expand the catkin ${prefix} placeholder in place.
                        plugin_xml_path = plugin_xml_path.replace('${prefix}', package_path)
                        plugins.append([package_name, plugin_xml_path])
                    continue
            # write crawling information to cache
            if discovery_data:
                plugins_by_package = {}
                for (package_name, export) in plugins:
                    if package_name not in plugins_by_package:
                        plugins_by_package[package_name] = []
                    plugins_by_package[package_name].append(export)
                for package_name, exports in plugins_by_package.items():
                    export_data.set_value(package_name, os.pathsep.join([str(e) for e in exports]))
        else:
            # use cached information
            # (crawl can only be False when discovery_data was truthy, so
            # export_data is guaranteed to be bound here.)
            for package_name in export_data.all_keys():
                exports = export_data.value(package_name)
                if exports:
                    for export in exports.split(os.pathsep):
                        plugins.append([package_name, export])
        return plugins
| 46.741667 | 121 | 0.635051 |
7957848ee5e5f36c726dc835c58233a87d159e0c | 4,282 | py | Python | pdf-txt.py | JamesRunnalls/pdf-txt | 238223d548bf231a9fb76cec8823cec47fad7853 | [
"MIT"
] | null | null | null | pdf-txt.py | JamesRunnalls/pdf-txt | 238223d548bf231a9fb76cec8823cec47fad7853 | [
"MIT"
] | null | null | null | pdf-txt.py | JamesRunnalls/pdf-txt | 238223d548bf231a9fb76cec8823cec47fad7853 | [
"MIT"
] | null | null | null |
# coding: utf-8
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.pdfpage import PDFPage
from pdfminer.layout import LTTextBoxHorizontal
from functools import reduce
import pandas as pd
import re
import os
def pdf_to_txt(pdf_name):
    """Extract the text of a PDF and write it to a sibling ``.txt`` file.

    Text fragments reported by pdfminer are re-assembled into reading order:
    fragments whose baselines are within ``sensitivity`` points are merged
    into one visual line, and anything above ``header`` or below ``footer``
    is dropped as page furniture.

    :param pdf_name: path to the source PDF; the output path is the same
        name with ``.pdf`` replaced by ``.txt``.
    """
    # Tunables (in PDF user-space points).
    sensitivity = 3   # max vertical distance for fragments to count as one line
    header = 750      # drop text above this height
    footer = 80       # drop text below this height

    def split_dataframe_list(df, target_column, separator):
        """Explode multi-line text boxes into one row per line, assigning a
        per-line vertical position (y1_new/y2_new) by dividing the box
        height evenly among its lines."""
        def split_list_to_rows(row, row_accumulator, target_column, separator):
            split_row = row[target_column].split(separator)
            # Drop the empty element after the trailing separator.  (Assumes
            # each box's text ends with '\n', which matches pdfminer output;
            # a box not ending in a newline would lose its last line.)
            del split_row[-1]
            if not split_row:
                return
            # Hoisted out of the loop: constant per source row.  Guarded
            # above so an empty box cannot divide by zero.
            line_height = (row['y2'] - row['y1']) / len(split_row)
            for i, s in enumerate(split_row):
                new_row = row.to_dict()
                new_row[target_column] = s
                new_row['y2_new'] = new_row['y2'] - (i * line_height)
                new_row['y1_new'] = new_row['y2'] - ((i + 1) * line_height)
                row_accumulator.append(new_row)
        new_rows = []
        df.apply(split_list_to_rows, axis=1, args=(new_rows, target_column, separator))
        return pd.DataFrame(new_rows)

    def order_pdf_textboxes(pdf_data, sensitivity, header, footer):
        """Sort raw fragments into reading order, merge fragments that sit on
        the same visual line, and return the page's lines top-to-bottom."""
        df = pd.DataFrame(pdf_data)
        df.columns = ['x1', 'y1', 'x2', 'y2', 'Text']
        for col in ('x1', 'y1', 'x2', 'y2'):
            df[col] = pd.to_numeric(df[col])
        df = split_dataframe_list(df, 'Text', '\n')
        df = df.sort_values(['y2_new'], ascending=False).reset_index(drop=True)
        # Start each row in its own (unique, negative) group, then merge
        # vertically adjacent rows into shared positive groups.
        df.insert(0, 'Group', range(-1, -1 - len(df), -1))
        for index in range(len(df) - 1):
            if abs(df.iloc[index]['y2_new'] - df.iloc[index + 1]['y2_new']) < sensitivity:
                # BUG FIX: DataFrame.set_value() was removed in pandas 1.0;
                # .at is the supported scalar setter.  The iteration bound
                # also replaces the bare `except: pass` that previously
                # swallowed the IndexError on the last row (and any other
                # error).
                df.at[index, 'Group'] = index + 1
                df.at[index + 1, 'Group'] = index + 1
        # Left-to-right order within each merged line.
        df = df.sort_values(['x1'], ascending=True).reset_index(drop=True)
        df1 = df.groupby('Group', as_index=False).agg({'y2_new': 'first', 'x1': 'first'})
        df = df.groupby(['Group'])['Text'].apply(lambda x: ' '.join(x.astype(str))).reset_index()
        # Both groupbys sort by the same key, so rows align positionally.
        df['y2_new'] = df1['y2_new']
        df = df.sort_values(['y2_new'], ascending=False)
        # Strip running headers/footers.
        df = df[df.y2_new > footer]
        df = df[df.y2_new < header]
        return df['Text'].tolist()

    def extract_from_element(x):
        """Return [x1, y1, x2, y2, text] for a pdfminer layout element, with
        problem characters normalised to ASCII-friendly equivalents."""
        text = str(x.get_text()).replace('"', "'")
        # Smart quotes, dashes, private-use bullets, and stray glyphs seen
        # in practice.  (The original listed "\uf06c" twice; once suffices.)
        reps = (
            ("\u201c", '"'), ("\u201d", '"'), ("\u2013", '-'), ("\u2019", "'"),
            ("\uf06c", '-'), ("\u2122", '(TM)'), ("\uf0b7", '-'),
            ("\u01b7", '3'), ("\u0e00", ' '), ("(cid:149)", 'x'),
        )
        for old, new in reps:
            text = text.replace(old, new)
        # Assumes the second space-separated token of the element repr is the
        # comma-separated bounding box -- TODO confirm against the pinned
        # pdfminer version.
        dims = str(x).split(' ')[1].split(',')
        return dims + [text]

    def list_to_txt(lines, fname):
        """Write one element per line.  Non-ASCII characters come out as the
        escaped repr of their UTF-8 bytes, matching the original output."""
        # BUG FIX: the output handle was never closed; `with` guarantees it.
        with open(fname.replace(".pdf", ".txt"), 'w') as out:
            for item in lines:
                out.write("%s\n" % str(item.encode("utf-8"))[2:-1])

    # PDF extraction pipeline.
    rsrcmgr = PDFResourceManager()
    device = PDFPageAggregator(rsrcmgr, laparams=LAParams())
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    pdf_full = []
    # BUG FIX: the source PDF handle was never closed either.
    with open(pdf_name, 'rb') as document:
        for page in PDFPage.get_pages(document):
            interpreter.process_page(page)
            layout = device.get_result()
            pdf_data = [
                extract_from_element(element)
                for element in layout
                if "LTTextBoxHorizontal" in str(element)
            ]
            # Guard: a page with no text boxes would otherwise crash when
            # assigning column names to an empty DataFrame.
            if pdf_data:
                pdf_full.extend(order_pdf_textboxes(pdf_data, sensitivity, header, footer))
    list_to_txt(pdf_full, pdf_name)
| 38.576577 | 254 | 0.581738 |
795785616752ccd327d8d39169088b51c4ed2358 | 12,040 | py | Python | bentoml/service/inference_api.py | d3m0n-r00t/BentoML | e5c53b821369f5391de9ab3a20ecad5db9e77202 | [
"Apache-2.0"
] | 1 | 2021-04-21T17:59:35.000Z | 2021-04-21T17:59:35.000Z | bentoml/service/inference_api.py | d3m0n-r00t/BentoML | e5c53b821369f5391de9ab3a20ecad5db9e77202 | [
"Apache-2.0"
] | 1 | 2021-01-22T08:23:40.000Z | 2021-01-22T08:23:40.000Z | bentoml/service/inference_api.py | d3m0n-r00t/BentoML | e5c53b821369f5391de9ab3a20ecad5db9e77202 | [
"Apache-2.0"
] | 2 | 2021-01-06T14:09:21.000Z | 2021-03-31T16:04:31.000Z | # Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import functools
import inspect
import itertools
import logging
import sys
from typing import Iterable, Iterator, Sequence
import flask
from bentoml.adapters import BaseInputAdapter, BaseOutputAdapter
from bentoml.exceptions import BentoMLConfigException
from bentoml.server import trace
from bentoml.types import HTTPRequest, InferenceResult, InferenceTask
from bentoml.utils import cached_property
logger = logging.getLogger(__name__)
prediction_logger = logging.getLogger("bentoml.prediction")
class InferenceAPI(object):
    """
    InferenceAPI defines an inference call to the underlying model, including its input
    and output adapter, the user-defined API callback function, and configurations for
    working with the BentoML adaptive micro-batching mechanism
    """
    def __init__(
        self,
        service,
        name,
        doc,
        input_adapter: BaseInputAdapter,
        user_func: callable,
        output_adapter: BaseOutputAdapter,
        mb_max_latency=10000,
        mb_max_batch_size=1000,
        batch=False,
    ):
        """
        :param service: ref to service containing this API
        :param name: API name
        :param doc: the user facing document of this inference API, default to the
            docstring of the inference API function
        :param input_adapter: A InputAdapter that transforms HTTP Request and/or
            CLI options into parameters for API func
        :param user_func: the user-defined API callback function, this is
            typically the 'predict' method on a model
        :param output_adapter: A OutputAdapter is an layer between result of user
            defined API callback function
            and final output in a variety of different forms,
            such as HTTP response, command line stdout or AWS Lambda event object.
        :param mb_max_latency: The latency goal of this inference API in milliseconds.
            Default: 10000.
        :param mb_max_batch_size: The maximum size of requests batch accepted by this
            inference API. This parameter governs the throughput/latency trade off, and
            avoids having large batches that exceed some resource constraint (e.g. GPU
            memory to hold the entire batch's data). Default: 1000.
        :param batch: If true, the user API functool would take a batch of input data
            a time.
        """
        self._service = service
        self._name = name
        self._input_adapter = input_adapter
        self._user_func = user_func
        self._output_adapter = output_adapter
        self.mb_max_latency = mb_max_latency
        self.mb_max_batch_size = mb_max_batch_size
        self.batch = batch
        # Fail fast when the chosen input adapter cannot serve the requested
        # batch/single mode.
        if not self.input_adapter.BATCH_MODE_SUPPORTED and batch:
            raise BentoMLConfigException(
                f"{input_adapter.__class__.__name__} does not support `batch=True`"
            )
        if not self.input_adapter.SINGLE_MODE_SUPPORTED and not batch:
            raise BentoMLConfigException(
                f"{input_adapter.__class__.__name__} does not support `batch=False`, "
                "its output passed to API functions could only be a batch of data."
            )
        if doc is None:
            # generate a default doc string for this inference API
            doc = (
                f"BentoService inference API '{self.name}', input: "
                f"'{type(input_adapter).__name__}', output: "
                f"'{type(output_adapter).__name__}'"
            )
        self._doc = doc
    @property
    def service(self):
        """
        :return: a reference to the BentoService serving this inference API
        """
        return self._service
    @property
    def name(self):
        """
        :return: the name of this inference API
        """
        return self._name
    @property
    def doc(self):
        """
        :return: user facing documentation of this inference API
        """
        return self._doc
    @property
    def input_adapter(self) -> BaseInputAdapter:
        """
        :return: the input adapter of this inference API
        """
        return self._input_adapter
    @property
    def output_adapter(self) -> BaseOutputAdapter:
        """
        :return: the output adapter of this inference API
        """
        return self._output_adapter
    @cached_property
    def user_func(self):
        """
        :return: user-defined inference API callback function, wrapped with
            tracing and with a safety net that discards the in-flight tasks
            (HTTP 500) when the callback raises.
        """
        # allow user to define handlers without 'tasks' kwargs
        _sig = inspect.signature(self._user_func)
        if self.batch:
            append_arg = "tasks"
        else:
            append_arg = "task"
        # If the user function itself accepts the 'tasks'/'task' keyword,
        # bind_partial succeeds and append_arg is cleared so the kwarg is
        # forwarded untouched; otherwise append_arg stays set and the kwarg
        # is popped before the call below.
        try:
            _sig.bind_partial(**{append_arg: None})
            append_arg = None
        except TypeError:
            pass
        @functools.wraps(self._user_func)
        def wrapped_func(*args, **kwargs):
            with trace(
                service_name=self.__class__.__name__,
                span_name="user defined inference api callback function",
            ):
                # NOTE(review): the `elif append_arg in kwargs` branch looks
                # unreachable -- append_arg is either truthy (handled by the
                # first branch) or None, and None is never a kwargs key.
                # Confirm before removing.
                if append_arg and append_arg in kwargs:
                    tasks = kwargs.pop(append_arg)
                elif append_arg in kwargs:
                    tasks = kwargs[append_arg]
                else:
                    tasks = []
                try:
                    return self._user_func(*args, **kwargs)
                except Exception as e:  # pylint: disable=broad-except
                    logger.error("Error caught in API function:", exc_info=1)
                    # Discard all not-yet-discarded tasks with HTTP 500 and
                    # pad the return with Nones so downstream unpacking keeps
                    # one slot per input sample.
                    if self.batch:
                        for task in tasks:
                            if not task.is_discarded:
                                task.discard(
                                    http_status=500,
                                    err_msg=f"Exception happened in API function: {e}",
                                )
                        return [None] * sum(
                            1 if t.batch is None else t.batch for t in tasks
                        )
                    else:
                        task = tasks
                        if not task.is_discarded:
                            task.discard(
                                http_status=500,
                                err_msg=f"Exception happened in API function: {e}",
                            )
                        return [None] * (1 if task.batch is None else task.batch)
        return wrapped_func
    @property
    def request_schema(self):
        """
        :return: the HTTP API request schema in OpenAPI/Swagger format
        """
        schema = self.input_adapter.request_schema
        if schema.get('application/json'):
            # Inject the adapter's example payload into the JSON media type.
            schema.get('application/json')[
                'example'
            ] = self.input_adapter._http_input_example
        return schema
    def _filter_tasks(
        self, inf_tasks: Iterable[InferenceTask]
    ) -> Iterator[InferenceTask]:
        # Yield only tasks that pass the adapter's validation; invalid tasks
        # are discarded with HTTP 400 and skipped.
        for task in inf_tasks:
            if task.is_discarded:
                continue
            try:
                self.input_adapter.validate_task(task)
                yield task
            except AssertionError as e:
                task.discard(http_status=400, err_msg=str(e))
    def infer(self, inf_tasks: Iterable[InferenceTask]) -> Sequence[InferenceResult]:
        """Run the user callback over `inf_tasks` and return one
        InferenceResult per input task (discarded tasks included)."""
        inf_tasks = tuple(inf_tasks)
        # extract args
        user_args = self.input_adapter.extract_user_func_args(inf_tasks)
        filtered_tasks = tuple(t for t in inf_tasks if not t.is_discarded)
        # call user function
        if not self.batch:  # For single inputs
            # Feed tasks one at a time; skip results of tasks the callback
            # discarded mid-call.
            user_return = []
            for task, legacy_user_args in zip(
                filtered_tasks,
                self.input_adapter.iter_batch_args(user_args, tasks=filtered_tasks),
            ):
                ret = self.user_func(*legacy_user_args, task=task)
                if task.is_discarded:
                    continue
                else:
                    user_return.append(ret)
            if (
                isinstance(user_return, (list, tuple))
                and len(user_return)
                and isinstance(user_return[0], InferenceResult)
            ):
                # The callback already produced InferenceResults; use as-is.
                inf_results = user_return
            else:
                # pack return value
                # (re-filter: the callback may have discarded more tasks)
                filtered_tasks = tuple(t for t in inf_tasks if not t.is_discarded)
                inf_results = self.output_adapter.pack_user_func_return_value(
                    user_return, tasks=filtered_tasks
                )
        else:
            user_return = self.user_func(*user_args, tasks=filtered_tasks)
            if (
                isinstance(user_return, (list, tuple))
                and len(user_return)
                and isinstance(user_return[0], InferenceResult)
            ):
                inf_results = user_return
            else:
                # pack return value
                filtered_tasks = tuple(t for t in inf_tasks if not t.is_discarded)
                inf_results = self.output_adapter.pack_user_func_return_value(
                    user_return, tasks=filtered_tasks
                )
        full_results = InferenceResult.complete_discarded(inf_tasks, inf_results)
        log_data = dict(
            service_name=self.service.name if self.service else "",
            service_version=self.service.version if self.service else "",
            api=self.name,
        )
        # NOTE(review): inf_results excludes discarded tasks while inf_tasks
        # does not, so this zip can pair a task with another task's result
        # whenever anything was discarded; zipping inf_tasks with
        # full_results would keep them aligned.  Confirm before changing the
        # log format.
        for task, result in zip(inf_tasks, inf_results):
            prediction_logger.info(
                dict(
                    log_data,
                    task=task.to_json(),
                    result=result.to_json(),
                    request_id=task.task_id,
                )
            )
        return tuple(full_results)
    def handle_request(self, request: flask.Request):
        """Serve a single flask request through this inference API."""
        req = HTTPRequest.from_flask_request(request)
        inf_task = self.input_adapter.from_http_request(req)
        results = self.infer((inf_task,))
        result = next(iter(results))
        response = self.output_adapter.to_http_response(result)
        return response.to_flask_response()
    def handle_batch_request(self, requests: Sequence[HTTPRequest]):
        """Serve a micro-batch of HTTP requests; returns one response per
        request, in order."""
        with trace(
            service_name=self.__class__.__name__,
            span_name=f"call `{self.input_adapter.__class__.__name__}`",
        ):
            inf_tasks = map(self.input_adapter.from_http_request, requests)
        results = self.infer(inf_tasks)
        return map(self.output_adapter.to_http_response, results)
    def handle_cli(self, cli_args: Sequence[str]) -> int:
        """Run this API over CLI-provided inputs in chunks of
        --max-batch-size; returns the first non-zero exit code, else 0."""
        parser = argparse.ArgumentParser()
        parser.add_argument("--max-batch-size", default=sys.maxsize, type=int)
        parsed_args, _ = parser.parse_known_args(cli_args)
        exit_code = 0
        tasks_iter = self.input_adapter.from_cli(tuple(cli_args))
        while True:
            tasks = tuple(itertools.islice(tasks_iter, parsed_args.max_batch_size))
            if not len(tasks):
                break
            results = self.infer(tasks)
            # Keep the first non-zero exit code across chunks.
            exit_code = exit_code or self.output_adapter.to_cli(results)
        return exit_code
    def handle_aws_lambda_event(self, event):
        """Serve a single AWS Lambda event through this inference API."""
        inf_task = self.input_adapter.from_aws_lambda_event(event)
        result = next(iter(self.infer((inf_task,))))
        return self.output_adapter.to_aws_lambda_event(result)
| 37.275542 | 87 | 0.593522 |
795785640167fa175a30f68940c33818aa3df01a | 12,996 | py | Python | mapss/static/packages/arches/arches/app/datatypes/concept_types.py | MPI-MAPSS/MAPSS | 3a5c0109758801717aaa8de1125ca5e98f83d3b4 | [
"CC0-1.0"
] | null | null | null | mapss/static/packages/arches/arches/app/datatypes/concept_types.py | MPI-MAPSS/MAPSS | 3a5c0109758801717aaa8de1125ca5e98f83d3b4 | [
"CC0-1.0"
] | null | null | null | mapss/static/packages/arches/arches/app/datatypes/concept_types.py | MPI-MAPSS/MAPSS | 3a5c0109758801717aaa8de1125ca5e98f83d3b4 | [
"CC0-1.0"
] | null | null | null | import uuid
import csv
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
from arches.app.models import models
from arches.app.models import concept
from arches.app.models.system_settings import settings
from arches.app.datatypes.base import BaseDataType
from arches.app.datatypes.datatypes import DataTypeFactory, get_value_from_jsonld
from arches.app.models.concept import get_preflabel_from_valueid, get_preflabel_from_conceptid, get_valueids_from_concept_label
from arches.app.search.elasticsearch_dsl_builder import Bool, Match, Range, Term, Nested, Exists, Terms
from arches.app.utils.date_utils import ExtendedDateFormat
# for the RDF graph export helper functions
from rdflib import Namespace, URIRef, Literal, BNode
from rdflib import ConjunctiveGraph as Graph
from rdflib.namespace import RDF, RDFS, XSD, DC, DCTERMS, SKOS
from arches.app.models.concept import ConceptValue
archesproject = Namespace(settings.ARCHES_NAMESPACE_FOR_DATA_EXPORT)
cidoc_nm = Namespace("http://www.cidoc-crm.org/cidoc-crm/")
class BaseConceptDataType(BaseDataType):
    """Shared behavior for concept-valued datatypes: caches concept Value
    lookups, resolves export labels, and indexes concept values for search."""
    def __init__(self, model=None):
        super(BaseConceptDataType, self).__init__(model=model)
        # Per-instance cache of valueid -> models.Value, avoiding repeated
        # database hits while indexing many tiles.
        self.value_lookup = {}
    def get_value(self, valueid):
        """Return the models.Value for `valueid`, caching hits.  An empty
        Value is returned (and not cached) when the id does not exist."""
        try:
            return self.value_lookup[valueid]
        except KeyError:  # was a bare `except:`; only a cache miss is expected
            try:
                self.value_lookup[valueid] = models.Value.objects.get(pk=valueid)
                return self.value_lookup[valueid]
            except ObjectDoesNotExist:
                return models.Value()
    def get_concept_export_value(self, valueid, concept_export_value_type=None):
        """Render `valueid` for export as its label (default), its id, or
        "id|label" when concept_export_value_type == "both"."""
        ret = ""
        if valueid is None or valueid.strip() == "":
            pass
        elif concept_export_value_type is None or concept_export_value_type == "" or concept_export_value_type == "label":
            ret = self.get_value(valueid).value
        elif concept_export_value_type == "both":
            ret = valueid + "|" + self.get_value(valueid).value
        elif concept_export_value_type == "id":
            ret = valueid
        return ret
    def get_concept_dates(self, concept):
        """Return {"min_year": ..., "max_year": ...} when the concept carries
        both date values, else None."""
        date_range = {}
        values = models.Value.objects.filter(concept=concept)
        for value in values:
            # BUG FIX: the original tested membership in ("min_year" "max_year"),
            # i.e. the implicitly concatenated string "min_yearmax_year", which
            # is a substring test and also matched spurious values like "year".
            if value.valuetype.valuetype in ("min_year", "max_year"):
                date_range[value.valuetype.valuetype] = value.value
        if "min_year" in date_range and "max_year" in date_range:
            return date_range
        return None
    def append_to_document(self, document, nodevalue, nodeid, tile, provisional=False):
        """Append domain/string (and, when present, date-range) entries to the
        search `document` for each concept value in `nodevalue`."""
        # Normalize scalar values to a list.  (The original used
        # assert/AssertionError for this, which silently breaks under
        # `python -O` where asserts are stripped.)
        if not isinstance(nodevalue, (list, tuple)):
            nodevalue = [nodevalue]
        for valueid in nodevalue:
            value = self.get_value(valueid)
            date_range = self.get_concept_dates(value.concept)
            if date_range is not None:
                min_date = ExtendedDateFormat(date_range["min_year"]).lower
                max_date = ExtendedDateFormat(date_range["max_year"]).upper
                # Avoid indexing duplicate ranges for the same document.
                if {"gte": min_date, "lte": max_date} not in document["date_ranges"]:
                    document["date_ranges"].append(
                        {"date_range": {"gte": min_date, "lte": max_date}, "nodegroup_id": tile.nodegroup_id, "provisional": provisional}
                    )
            document["domains"].append(
                {
                    "label": value.value,
                    "conceptid": value.concept_id,
                    "valueid": valueid,
                    "nodegroup_id": tile.nodegroup_id,
                    "provisional": provisional,
                }
            )
            document["strings"].append({"string": value.value, "nodegroup_id": tile.nodegroup_id, "provisional": provisional})
    def append_search_filters(self, value, node, query, request):
        """Translate a search-UI filter `value` ({"op": ..., "val": ...}) into
        Elasticsearch clauses on `query`; filters missing op/val are ignored."""
        try:
            if value["op"] == "null" or value["op"] == "not_null":
                self.append_null_search_filters(value, node, query, request)
            elif value["val"] != "":
                match_query = Match(field="tiles.data.%s" % (str(node.pk)), type="phrase", query=value["val"])
                if "!" in value["op"]:
                    # Negated match: exclude matches but require the field to exist.
                    query.must_not(match_query)
                    query.filter(Exists(field="tiles.data.%s" % (str(node.pk))))
                else:
                    query.must(match_query)
        except KeyError:
            pass
class ConceptDataType(BaseConceptDataType):
    """Datatype for a single concept, stored on the tile as a Value UUID string."""

    def validate(self, value, row_number=None, source="", node=None, nodeid=None):
        """Check that *value* is a valid UUID naming an existing concept Value.

        Returns a list of error dicts (empty when valid); ``None`` is valid.
        """
        errors = []
        # first check to see if the validator has been passed a valid UUID,
        # which should be the case at this point. return error if not.
        if value is not None:
            # A list here means the wrong widget wrote the data.
            if isinstance(value, list):
                message = _("The widget used to save this data appears to be incorrect for this datatype. Contact system admin to resolve")
                error_message = self.create_error_message(value, source, row_number, message)
                errors.append(error_message)
                return errors
            # Must parse as a UUID...
            try:
                uuid.UUID(str(value))
            except ValueError:
                message = _("This is an invalid concept prefLabel, or an incomplete UUID")
                error_message = self.create_error_message(value, source, row_number, message)
                errors.append(error_message)
                return errors
            # ...and must reference an existing Value row.
            try:
                models.Value.objects.get(pk=value)
            except ObjectDoesNotExist:
                message = _("This UUID is not an available concept value")
                error_message = self.create_error_message(value, source, row_number, message)
                errors.append(error_message)
                return errors
        return errors

    def transform_value_for_tile(self, value):
        """Strip surrounding whitespace from the incoming value id."""
        return value.strip()

    def transform_export_values(self, value, *args, **kwargs):
        """Export the value as id, label or both (see get_concept_export_value)."""
        concept_export_value_type = kwargs.get("concept_export_value_type", None)
        return self.get_concept_export_value(value, concept_export_value_type)

    def get_pref_label(self, nodevalue, lang="en-US"):
        """Return the preferred label string for a value id."""
        return get_preflabel_from_valueid(nodevalue, lang)["value"]

    def get_display_value(self, tile, node):
        """Return the concept label for this node's value, or '' when unset."""
        data = self.get_tile_data(tile)
        if data[str(node.nodeid)] is None or data[str(node.nodeid)].strip() == "":
            return ""
        else:
            return self.get_value(uuid.UUID(data[str(node.nodeid)])).value

    def get_rdf_uri(self, node, data, which="r"):
        """Return a URI for the concept: a preferred external identifier when
        one matches PREFERRED_CONCEPT_SCHEMES, otherwise the local concepts URI.
        """
        if not data:
            return None
        c = ConceptValue(str(data))
        assert c.value is not None, "Null or blank concept value"
        ext_ids = [
            ident.value for ident in models.Value.objects.all().filter(concept_id__exact=c.conceptid, valuetype__category="identifiers")
        ]
        for p in settings.PREFERRED_CONCEPT_SCHEMES:
            for id_uri in ext_ids:
                if str(id_uri).startswith(p):
                    return URIRef(id_uri)
        return URIRef(archesproject[f"concepts/{c.conceptid}"])

    def to_rdf(self, edge_info, edge):
        """Serialize this edge/tile value to an RDF graph."""
        g = Graph()
        myuri = self.get_rdf_uri(None, edge_info["range_tile_data"])
        if edge_info["r_uri"] == myuri:
            c = ConceptValue(str(edge_info["range_tile_data"]))
            g.add((edge_info["r_uri"], RDF.type, URIRef(edge.rangenode.ontologyclass)))
            g.add((edge_info["d_uri"], URIRef(edge.ontologyproperty), edge_info["r_uri"]))
            g.add((edge_info["r_uri"], URIRef(RDFS.label), Literal(c.value)))
        return g

    def from_rdf(self, json_ld_node):
        """Resolve a JSON-LD node (concept URI and/or label) to a Value id.

        Returns the value id as a string, or None when the concept cannot be
        found in the RDM.
        """
        # Expects a label and a concept URI within the json_ld_node, might not always get them both
        try:
            # assume a list, and as this is a ConceptDataType, assume a single entry
            json_ld_node = json_ld_node[0]
        except KeyError:
            pass

        concept_uri = json_ld_node.get("@id")
        label_node = json_ld_node.get(str(RDFS.label))
        concept_id = lang = None
        import re

        # FIXME: This should use settings for host and check for UUID
        p = re.compile(r"(http|https)://(?P<host>[^/]*)/concepts/(?P<concept_id>[A-Fa-f0-9\-]*)/?$")
        m = p.match(concept_uri)
        if m is not None:
            concept_id = m.groupdict().get("concept_id")
        else:
            # could be an external id, rather than an Arches only URI
            hits = [ident for ident in models.Value.objects.all().filter(value__exact=str(concept_uri), valuetype__category="identifiers")]
            if len(hits) == 1:
                concept_id = hits[0].concept_id
            elif hits:
                print("ERROR: Multiple hits for {0} external identifier in RDM:".format(concept_uri))
                for hit in hits:
                    print("ConceptValue {0}, Concept {1} - '{2}'".format(hit.valueid, hit.conceptid, hit.value))
                # Just try the first one and hope
                concept_id = hits[0].concept_id
            else:
                # BUGFIX: an empty hit list previously fell into the branch
                # above and crashed with IndexError on hits[0]; leave
                # concept_id unset so the concept is reported as not in RDM.
                print("ERROR: No hits for {0} external identifier in RDM".format(concept_uri))

        if label_node:
            label, lang = get_value_from_jsonld(label_node)
            if label:
                values = get_valueids_from_concept_label(label, concept_id, lang)
                if values:
                    return values[0]["id"]
                else:
                    if concept_id:
                        hits = [ident for ident in models.Value.objects.all().filter(value__exact=label)]
                        if hits and len(hits) == 1:
                            return str(hits[0].pk)
                        label = None
                    else:
                        print("No Concept ID URI supplied for rdf")
            else:
                label = None

        if concept_id and label is None:
            value = get_preflabel_from_conceptid(concept_id, lang=lang)
            if value["id"]:
                return value["id"]
            else:
                hits = [ident for ident in models.Value.objects.all()]
                if hits:
                    return str(hits[0].pk)
                else:
                    print(f"No labels for concept: {concept_id}!")
                    return None
        else:
            # No concept_id means not in RDM at all
            return None

    def ignore_keys(self):
        """JSON-LD keys already handled here that generic processing should skip."""
        return ["http://www.w3.org/2000/01/rdf-schema#label http://www.w3.org/2000/01/rdf-schema#Literal"]
class ConceptListDataType(BaseConceptDataType):
    """Datatype for a list of concepts, stored as a list of Value UUID strings."""

    def validate(self, value, row_number=None, source="", node=None, nodeid=None):
        """Run the single-concept validator over every entry in *value*."""
        errors = []
        if value is not None:
            concept_datatype = DataTypeFactory().get_instance("concept")
            for entry in value:
                errors += concept_datatype.validate(entry.strip(), row_number)
        return errors

    def transform_value_for_tile(self, value):
        """Split a comma-separated (optionally quoted) string into stripped ids."""
        rows = csv.reader([value], delimiter=",", quotechar='"')
        return [cell.strip() for row in rows for cell in row]

    def transform_export_values(self, value, *args, **kwargs):
        """Export every id as id/label/both, joined with commas."""
        export_type = kwargs.get("concept_export_value_type", None)
        return ",".join(self.get_concept_export_value(v, export_type) for v in value)

    def get_display_value(self, tile, node):
        """Return the comma-joined labels of all concepts stored on this node."""
        labels = []
        data = self.get_tile_data(tile)
        if data[str(node.nodeid)]:
            labels = [self.get_value(uuid.UUID(valueid)).value for valueid in data[str(node.nodeid)]]
        return ",".join(labels)

    def get_rdf_uri(self, node, data, which="r"):
        """Return one RDF URI per stored concept id."""
        single = ConceptDataType()
        if not data:
            print(f"concept-list got data without values: {node}, {data}")
            return []
        return [single.get_rdf_uri(node, item, which) for item in data]

    def to_rdf(self, edge_info, edge):
        """Serialize each stored concept with the single-concept datatype."""
        graph = Graph()
        single = ConceptDataType()
        if edge_info["range_tile_data"]:
            for valueid in edge_info["range_tile_data"]:
                per_value_info = edge_info.copy()
                per_value_info["range_tile_data"] = valueid
                graph += single.to_rdf(per_value_info, edge)
        return graph

    def from_rdf(self, json_ld_node):
        """Return a list of concept value ids resolved from *json_ld_node*."""
        single = ConceptDataType()
        if isinstance(json_ld_node, list):
            return [single.from_rdf(item) for item in json_ld_node]
        return [single.from_rdf(json_ld_node)]

    def collects_multiple_values(self):
        """This datatype holds multiple values per tile."""
        return True

    def ignore_keys(self):
        """JSON-LD keys already handled here that generic processing should skip."""
        return ["http://www.w3.org/2000/01/rdf-schema#label http://www.w3.org/2000/01/rdf-schema#Literal"]
79578568d0cf8421cd881294ddf582d3b8a48162 | 577 | py | Python | book/code/streamspeedtest.py | columbia-applied-data-science/lecturenotes | 047b0cdc6ce70c441526f26e5516337e395feb84 | [
"CC0-1.0"
] | null | null | null | book/code/streamspeedtest.py | columbia-applied-data-science/lecturenotes | 047b0cdc6ce70c441526f26e5516337e395feb84 | [
"CC0-1.0"
] | null | null | null | book/code/streamspeedtest.py | columbia-applied-data-science/lecturenotes | 047b0cdc6ce70c441526f26e5516337e395feb84 | [
"CC0-1.0"
] | null | null | null | import numpy as np
import pandas
from numpy.random import rand
def generate_arrays(N):
rand(N).tofile('/tmp/x.data', sep='\n')
rand(N).tofile('/tmp/y.data', sep='\n')
def myfun(xfile, yfile):
fx = open(xfile, 'r')
fy = open(yfile, 'r')
retval = 0.0
for x in fx:
y = fy.next()
retval += float(x) * float(y)
fx.close()
fy.close()
return retval
def myfun_pandas(xfile, yfile):
x = pandas.read_csv(xfile, header=None)
y = pandas.read_csv(yfile, header=None)
retval = x.T.dot(y).values
return retval
| 18.03125 | 43 | 0.590988 |
7957858cbab20ad6f4c34f3744c2929720a79b89 | 1,067 | py | Python | src/livestreamer/stream/http.py | rzr/livestreamer | 96e3c70977901f203ef27272459465c2005cc789 | [
"BSD-2-Clause",
"MIT"
] | 1 | 2016-01-15T09:03:17.000Z | 2016-01-15T09:03:17.000Z | src/livestreamer/stream/http.py | huangbong/livestreamer | 14d242614362208cc66023ada68d71262448a69a | [
"BSD-2-Clause",
"MIT"
] | null | null | null | src/livestreamer/stream/http.py | huangbong/livestreamer | 14d242614362208cc66023ada68d71262448a69a | [
"BSD-2-Clause",
"MIT"
] | null | null | null | from .stream import Stream
from .wrappers import StreamIOWrapper
from ..exceptions import StreamError
from ..utils import urlget
from requests import Request
class HTTPStream(Stream):
    """Plain HTTP stream.

    Attributes:
        url: URL of the stream.
        args: Keyword arguments forwarded to :meth:`requests.request`.
    """

    __shortname__ = "http"

    def __init__(self, session, url, **args):
        Stream.__init__(self, session)

        self.url = url
        self.args = args

    def __repr__(self):
        return "<HTTPStream({0!r})>".format(self.url)

    def __json__(self):
        # Prepare the request without sending it, so the serialized form
        # reflects exactly what would go over the wire.
        prepared = Request(url=self.url, **self.args).prepare()
        return {
            "type": HTTPStream.shortname(),
            "url": prepared.url,
            "headers": prepared.headers,
            "body": prepared.body,
            "method": prepared.method or "GET",
        }

    def open(self):
        response = urlget(self.url, stream=True,
                          exception=StreamError,
                          **self.args)

        return StreamIOWrapper(response.raw)
| 23.195652 | 73 | 0.602624 |
795785b712ddd90fefae8c5aebdeae9e2d51058c | 9,362 | py | Python | packages/skills/tac_negotiation/transactions.py | 8ball030/agents-aea | fcf470e3daa9bd8e272ca66542c6003feb0fd7a8 | [
"Apache-2.0"
] | 1 | 2021-07-25T18:50:18.000Z | 2021-07-25T18:50:18.000Z | packages/skills/tac_negotiation/transactions.py | 8ball030/agents-aea | fcf470e3daa9bd8e272ca66542c6003feb0fd7a8 | [
"Apache-2.0"
] | null | null | null | packages/skills/tac_negotiation/transactions.py | 8ball030/agents-aea | fcf470e3daa9bd8e272ca66542c6003feb0fd7a8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains a class to manage transactions."""
import datetime
import logging
from collections import defaultdict, deque
from typing import Dict, Tuple, Deque, cast
from aea.decision_maker.base import OwnershipState
from aea.decision_maker.messages.transaction import TransactionMessage, TransactionId
from aea.helpers.dialogue.base import DialogueLabel
from aea.skills.base import SharedClass
logger = logging.getLogger("aea.tac_negotiation_skill")
MESSAGE_ID = int
class Transactions(SharedClass):
"""Class to handle pending transaction proposals/acceptances and locked transactions."""
def __init__(self, **kwargs) -> None:
"""Initialize the transactions."""
self._pending_transaction_timeout = kwargs.pop('pending_transaction_timeout') if 'pending_transaction_timeout' in kwargs.keys() else 30
super().__init__(**kwargs)
self._pending_proposals = defaultdict(lambda: {}) # type: Dict[DialogueLabel, Dict[MESSAGE_ID, TransactionMessage]]
self._pending_initial_acceptances = defaultdict(lambda: {}) # type: Dict[DialogueLabel, Dict[MESSAGE_ID, TransactionMessage]]
self._locked_txs = {} # type: Dict[TransactionId, TransactionMessage]
self._locked_txs_as_buyer = {} # type: Dict[TransactionId, TransactionMessage]
self._locked_txs_as_seller = {} # type: Dict[TransactionId, TransactionMessage]
self._last_update_for_transactions = deque() # type: Deque[Tuple[datetime.datetime, TransactionId]]
@property
def pending_proposals(self) -> Dict[DialogueLabel, Dict[MESSAGE_ID, TransactionMessage]]:
"""Get the pending proposals."""
return self._pending_proposals
@property
def pending_initial_acceptances(self) -> Dict[DialogueLabel, Dict[MESSAGE_ID, TransactionMessage]]:
"""Get the pending initial acceptances."""
return self._pending_initial_acceptances
def cleanup_pending_transactions(self) -> None:
"""
Remove all the pending messages (i.e. either proposals or acceptances) that have been stored for an amount of time longer than the timeout.
:return: None
"""
queue = self._last_update_for_transactions
timeout = datetime.timedelta(0, self._pending_transaction_timeout)
if len(queue) == 0:
return
next_date, next_item = queue[0]
while datetime.datetime.now() - next_date > timeout:
# remove the element from the queue
queue.popleft()
# extract dialogue label and message id
transaction_id = next_item
logger.debug("Removing transaction: {}".format(transaction_id))
# remove (safely) the associated pending proposal (if present)
self._locked_txs.pop(transaction_id, None)
self._locked_txs_as_buyer.pop(transaction_id, None)
self._locked_txs_as_seller.pop(transaction_id, None)
# check the next transaction, if present
if len(queue) == 0:
break
next_date, next_item = queue[0]
def add_pending_proposal(self, dialogue_label: DialogueLabel, proposal_id: int, transaction_msg: TransactionMessage) -> None:
"""
Add a proposal (in the form of a transaction) to the pending list.
:param dialogue_label: the dialogue label associated with the proposal
:param proposal_id: the message id of the proposal
:param transaction_msg: the transaction message
:raise AssertionError: if the pending proposal is already present.
:return: None
"""
assert dialogue_label not in self._pending_proposals and proposal_id not in self._pending_proposals[dialogue_label]
self._pending_proposals[dialogue_label][proposal_id] = transaction_msg
def pop_pending_proposal(self, dialogue_label: DialogueLabel, proposal_id: int) -> TransactionMessage:
"""
Remove a proposal (in the form of a transaction) from the pending list.
:param dialogue_label: the dialogue label associated with the proposal
:param proposal_id: the message id of the proposal
:raise AssertionError: if the pending proposal is not present.
:return: the transaction message
"""
assert dialogue_label in self._pending_proposals and proposal_id in self._pending_proposals[dialogue_label]
transaction_msg = self._pending_proposals[dialogue_label].pop(proposal_id)
return transaction_msg
def add_pending_initial_acceptance(self, dialogue_label: DialogueLabel, proposal_id: int, transaction_msg: TransactionMessage) -> None:
"""
Add an acceptance (in the form of a transaction) to the pending list.
:param dialogue_label: the dialogue label associated with the proposal
:param proposal_id: the message id of the proposal
:param transaction_msg: the transaction message
:raise AssertionError: if the pending acceptance is already present.
:return: None
"""
assert dialogue_label not in self._pending_initial_acceptances and proposal_id not in self._pending_initial_acceptances[dialogue_label]
self._pending_initial_acceptances[dialogue_label][proposal_id] = transaction_msg
def pop_pending_initial_acceptance(self, dialogue_label: DialogueLabel, proposal_id: int) -> TransactionMessage:
"""
Remove an acceptance (in the form of a transaction) from the pending list.
:param dialogue_label: the dialogue label associated with the proposal
:param proposal_id: the message id of the proposal
:raise AssertionError: if the pending acceptance is not present.
:return: the transaction message
"""
assert dialogue_label in self._pending_initial_acceptances and proposal_id in self._pending_initial_acceptances[dialogue_label]
transaction_msg = self._pending_initial_acceptances[dialogue_label].pop(proposal_id)
return transaction_msg
def _register_transaction_with_time(self, transaction_id: TransactionId) -> None:
"""
Register a transaction with a creation datetime.
:param transaction_id: the transaction id
:return: None
"""
now = datetime.datetime.now()
self._last_update_for_transactions.append((now, transaction_id))
def add_locked_tx(self, transaction_msg: TransactionMessage, as_seller: bool) -> None:
"""
Add a lock (in the form of a transaction).
:param transaction_msg: the transaction message
:param as_seller: whether the agent is a seller or not
:raise AssertionError: if the transaction is already present.
:return: None
"""
transaction_id = cast(TransactionId, transaction_msg.get("transaction_id"))
assert transaction_id not in self._locked_txs
self._register_transaction_with_time(transaction_id)
self._locked_txs[transaction_id] = transaction_msg
if as_seller:
self._locked_txs_as_seller[transaction_id] = transaction_msg
else:
self._locked_txs_as_buyer[transaction_id] = transaction_msg
def pop_locked_tx(self, transaction_msg: TransactionMessage) -> TransactionMessage:
"""
Remove a lock (in the form of a transaction).
:param transaction_msg: the transaction message
:raise AssertionError: if the transaction with the given transaction id has not been found.
:return: the transaction
"""
transaction_id = cast(TransactionId, transaction_msg.get("transaction_id"))
assert transaction_id in self._locked_txs
transaction_msg = self._locked_txs.pop(transaction_id)
self._locked_txs_as_buyer.pop(transaction_id, None)
self._locked_txs_as_seller.pop(transaction_id, None)
return transaction_msg
def ownership_state_after_locks(self, is_seller: bool) -> OwnershipState:
"""
Apply all the locks to the current ownership state of the agent.
This assumes, that all the locked transactions will be successful.
:param is_seller: Boolean indicating the role of the agent.
:return: the agent state with the locks applied to current state
"""
transaction_msgs = list(self._locked_txs_as_seller.values()) if is_seller else list(self._locked_txs_as_buyer.values())
ownership_state_after_locks = self.context.agent_ownership_state.apply(transaction_msgs)
return ownership_state_after_locks
| 44.160377 | 147 | 0.699423 |
795788424ef06d0225e692e2a417f0c0085c11df | 897 | py | Python | setup.py | alt-dima/aws-quota-checker | 8a2af974fce01149856c99e5d34a29244a3ba067 | [
"MIT"
] | 43 | 2021-02-25T00:53:24.000Z | 2022-02-25T17:38:24.000Z | setup.py | alt-dima/aws-quota-checker | 8a2af974fce01149856c99e5d34a29244a3ba067 | [
"MIT"
] | 25 | 2021-02-24T22:47:29.000Z | 2022-02-14T21:04:26.000Z | setup.py | alt-dima/aws-quota-checker | 8a2af974fce01149856c99e5d34a29244a3ba067 | [
"MIT"
] | 9 | 2021-02-26T21:01:33.000Z | 2022-01-18T08:25:33.000Z | from os import path
from setuptools import setup, find_packages
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='aws-quota-checker',
version='1.10.0',
description='A CLI tool that checks your AWS quota utilization',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(),
install_requires=[
'boto3',
'click',
'tabulate',
'cachetools'
],
extras_require={
'dev':{
'autopep8',
'pylint',
'keepachangelog',
'wheel'
},
'prometheus':{
'prometheus-client'
}
},
entry_points='''
[console_scripts]
aws-quota-checker=aws_quota.cli:cli
''',
)
| 24.243243 | 73 | 0.590858 |
79578a2af373bac8565453360a0efecc8c6b4bf0 | 3,134 | py | Python | gui/qt/qrwindow.py | JeremyRand/electrum-nmc | 52b3522924122b91e2be9923a6e106b35650ebdf | [
"MIT"
] | null | null | null | gui/qt/qrwindow.py | JeremyRand/electrum-nmc | 52b3522924122b91e2be9923a6e106b35650ebdf | [
"MIT"
] | null | null | null | gui/qt/qrwindow.py | JeremyRand/electrum-nmc | 52b3522924122b91e2be9923a6e106b35650ebdf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import platform
from PyQt5.QtCore import Qt
from PyQt5.QtGui import *
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QLabel, QWidget
from electrum_nmc_gui.qt.qrcodewidget import QRCodeWidget
from electrum_nmc.i18n import _
# Pick a platform-appropriate monospace font name.
_MONOSPACE_FONTS = {
    'Windows': 'Lucida Console',
    'Darwin': 'Monaco',
}
MONOSPACE_FONT = _MONOSPACE_FONTS.get(platform.system(), 'monospace')

column_index = 4
class QR_Window(QWidget):
    """Detached window showing a payment request as a QR code plus labels."""

    def __init__(self, win):
        QWidget.__init__(self)
        self.win = win
        self.setWindowTitle('Electrum-NMC - '+_('Payment Request'))
        self.setMinimumSize(800, 250)
        self.address = ''
        self.label = ''
        self.amount = 0
        self.setFocusPolicy(Qt.NoFocus)

        # QR code on the left, textual details on the right.
        outer_layout = QHBoxLayout()
        self.qrw = QRCodeWidget()
        outer_layout.addWidget(self.qrw, 1)

        text_column = QVBoxLayout()
        outer_layout.addLayout(text_column)

        self.address_label = QLabel("")
        text_column.addWidget(self.address_label)

        self.label_label = QLabel("")
        text_column.addWidget(self.label_label)

        self.amount_label = QLabel("")
        text_column.addWidget(self.amount_label)
        text_column.addStretch(1)

        self.setLayout(outer_layout)

    def set_content(self, address, amount, message, url):
        """Fill the labels with the request details and refresh the QR code."""
        if address:
            self.address_label.setText("<span style='font-size: 18pt'>%s</span>" % address)
        else:
            self.address_label.setText("")
        if amount:
            formatted = self.win.format_amount(amount)
            self.amount_label.setText(
                "<span style='font-size: 21pt'>%s</span> <span style='font-size: 16pt'>%s</span> "
                % (formatted, self.win.base_unit()))
        else:
            self.amount_label.setText('')
        if message:
            self.label_label.setText("<span style='font-size: 21pt'>%s</span>" % message)
        else:
            self.label_label.setText("")
        self.qrw.setData(url)
| 34.822222 | 141 | 0.692087 |
79578a67831da6d4729fd07bf521f1c9aa7f404b | 3,906 | py | Python | scripts/modules/tests/test_reweighting.py | andrrizzi/tfep-revisited-2021 | 9a9aff61286be3111c4e70136620d0e3aac31318 | [
"MIT"
] | 7 | 2021-07-22T00:53:37.000Z | 2022-03-11T07:29:36.000Z | scripts/modules/tests/test_reweighting.py | andrrizzi/tfep-revisited-2021 | 9a9aff61286be3111c4e70136620d0e3aac31318 | [
"MIT"
] | 2 | 2021-08-24T07:54:55.000Z | 2021-09-14T08:51:55.000Z | scripts/modules/tests/test_reweighting.py | andrrizzi/tfep-revisited-2021 | 9a9aff61286be3111c4e70136620d0e3aac31318 | [
"MIT"
] | 1 | 2021-07-22T00:53:56.000Z | 2021-07-22T00:53:56.000Z | #!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Test objects and function in the module reweighting.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import os
import tempfile
import numpy as np
from numpy.random import RandomState
import pint
from ..reweighting import DatasetReweighting
# =============================================================================
# GLOBAL VARIABLES
# =============================================================================
# Seeded RNG shared by the test helpers so random test cases are deterministic.
_random_state = RandomState(0)
# Shared pint unit registry used to attach units to the fake potentials.
_ureg = pint.UnitRegistry()
# =============================================================================
# TEST UTILITIES
# =============================================================================
class DummyStdReweighting(DatasetReweighting):
    """Minimal DatasetReweighting implementation used by the tests below."""

    # Constant offset added to the fake potentials; tests reassign this.
    U0 = 0.0

    def compute_potentials(self, batch_positions):
        kJ_mol = _ureg.kJ / _ureg.mol
        noise = _random_state.rand(len(batch_positions))
        return (self.U0 + noise) * kJ_mol

    def get_traj_info(self):
        kJ_mol = _ureg.kJ / _ureg.mol
        n_frames = len(self.dataset)
        cvs = np.arange(n_frames)
        reference_potentials = _random_state.rand(n_frames) * kJ_mol
        metad_rbias = np.zeros(n_frames) * kJ_mol
        return cvs, reference_potentials, metad_rbias
# =============================================================================
# TESTS
# =============================================================================
def test_standard_reweighting_potentials_cache():
    """Test that DatasetReweighting caches and reuses the potentials correctly."""
    import MDAnalysis.coordinates

    from ..data import TrajectoryDataset, TrajectorySubset

    def _get_potentials(dataset, file_path, u0, indices, batch_size, write_interval):
        # Build a reweighting object over a subset of frames; U0 is a class
        # attribute, so reassigning it changes what compute_potentials returns
        # for all subsequent (non-cached) evaluations.
        subset = TrajectorySubset(dataset, indices=indices)
        DummyStdReweighting.U0 = u0
        reweighting = DummyStdReweighting(
            subset, n_bins=len(subset), temperature=300*_ureg.kelvin,
            potentials_file_path=file_path)
        return reweighting.compute_dataset_potentials(
            batch_size=batch_size, write_interval=write_interval)

    # Load the test PDB file.
    pdb_file_path = os.path.join(os.path.dirname(__file__), 'data', 'chloro-fluoromethane.pdb')
    with MDAnalysis.coordinates.PDB.PDBReader(pdb_file_path) as trajectory:
        dataset = TrajectoryDataset(trajectory, return_batch_index=True)

        # Cache the potentials in a temporary file.
        with tempfile.TemporaryDirectory() as tmp_dir:
            file_path = os.path.join(tmp_dir, 'potentials.npz')

            # Cache a first value for the potentials of some of the frames.
            # The dummy potential is U0 plus uniform noise in [0, 1), so the
            # offset check below identifies which U0 produced each value.
            u1 = 10
            potentials1 = _get_potentials(dataset, file_path, u1, indices=[0, 2, 4],
                                          batch_size=1, write_interval=2)
            assert np.all((0 <= potentials1.magnitude - u1) & (potentials1.magnitude - u1 < 1))

            # Check that what we have just computed does not get re-computed.
            # Frame 4 overlaps with the first call, so its potential must come
            # from the cache (u1 offset), while frames 1 and 3 are fresh (u2).
            u2 = 20
            potentials2 = _get_potentials(dataset, file_path, u2, indices=[1, 3, 4],
                                          batch_size=5, write_interval=2)
            assert potentials1[-1] == potentials2[-1]
            assert np.all((0 <= potentials2.magnitude[:-1] - u2) & (potentials2.magnitude[:-1] - u2 < 1))

            # The cache should be up-to-date.
            times, potentials = DummyStdReweighting.load_cached_potentials_from_file(file_path)
            assert not np.isnan(potentials).any()
| 38.673267 | 106 | 0.534306 |
79578b3702886246708a2f00a3d0605a1346e4e5 | 2,002 | py | Python | tests/helpers.py | oarepo/invenio-oarepo-oai-pmh-harvester | 399ef743ac9da23d36e655e072aa72ee1b332372 | [
"MIT"
] | null | null | null | tests/helpers.py | oarepo/invenio-oarepo-oai-pmh-harvester | 399ef743ac9da23d36e655e072aa72ee1b332372 | [
"MIT"
] | 13 | 2020-11-04T13:47:55.000Z | 2021-04-15T17:56:33.000Z | tests/helpers.py | oarepo/oarepo-oai-pmh-harvester | 399ef743ac9da23d36e655e072aa72ee1b332372 | [
"MIT"
] | 1 | 2020-05-14T07:59:12.000Z | 2020-05-14T07:59:12.000Z | # TODO: otestovat celý kód, zde je Mock harvestu
# https://github.com/mloesch/sickle/blob/master/sickle/tests/test_harvesting.py
import os
from sickle import OAIResponse
from sickle._compat import to_unicode
class MockResponse(object):
    """Stand-in for the response object returned by HTTP requests."""

    def __init__(self, text):
        # requests exposes the decoded body as ``text`` (unicode) and the
        # raw body as ``content`` (bytes); mirror both attributes.
        self.text = text
        self.content = text.encode('utf-8')
def mock_harvest(*args, **kwargs):
    """Read test data from files instead of from an OAI interface.

    The file served from the ``data`` directory is chosen from the OAI
    arguments: the ``resumptionToken`` value is used verbatim as a file name
    when present; otherwise ``<error>.xml`` when an ``error`` argument is
    given; otherwise ``<verb>.xml``. For example::

        mock_harvest(verb='ListRecords', metadataPrefix='oai_dc')

    serves ``ListRecords.xml``, and::

        mock_harvest(verb='ListRecords', error='badArgument')

    serves ``badArgument.xml``.

    :param kwargs: OAI arguments that would normally be passed to
                   :meth:`sickle.app.Sickle.harvest`.
    :rtype: :class:`sickle.response.OAIResponse`.
    """
    test_dir = os.path.dirname(__file__)
    resumption_token = kwargs.get('resumptionToken')
    error = kwargs.get('error')
    if resumption_token is not None:
        filename = resumption_token
    elif error is not None:
        filename = '%s.xml' % error
    else:
        filename = '%s.xml' % kwargs.get('verb')

    with open(os.path.join(test_dir, 'data', filename), 'r') as fp:
        response = MockResponse(to_unicode(fp.read()))
    return OAIResponse(response, kwargs)
79578b4bac5897314a41a2ea6cdde85312c69ce6 | 1,782 | py | Python | plotposterior.py | JohannesBuchner/massivedatans | bf13e90048a3a1bb945e25e0a8848c08fa8a80f8 | [
"BSD-2-Clause"
] | 10 | 2017-07-17T20:59:29.000Z | 2019-09-09T01:57:05.000Z | plotposterior.py | JohannesBuchner/massivedatans | bf13e90048a3a1bb945e25e0a8848c08fa8a80f8 | [
"BSD-2-Clause"
] | 1 | 2020-04-23T13:59:35.000Z | 2020-04-23T13:59:35.000Z | plotposterior.py | JohannesBuchner/massivedatans | bf13e90048a3a1bb945e25e0a8848c08fa8a80f8 | [
"BSD-2-Clause"
] | 3 | 2017-07-18T01:41:04.000Z | 2018-06-05T16:42:10.000Z | from __future__ import print_function, division
import json
import numpy
from numpy import log, log10
import sys
import matplotlib.pyplot as plt
import h5py
import scipy.stats
# Posterior plotting script: reads a nested-sampling HDF5 results file (path
# given on the command line) and writes two summary figures.
# (The unused xx/yy accumulators from the original were removed.)

filename = sys.argv[1]

# Highlight colours for the first four objects; the rest are grey-scaled by
# how well their line position is constrained.
colors = ['yellow', 'pink', 'cyan', 'magenta']
cmap = plt.cm.gray

zs = []
plt.figure(figsize=(6, 4))
with h5py.File(filename, 'r') as f:
    # ``Dataset.value`` was removed in h5py 3.0; ``[()]`` reads the dataset.
    logZ = f['logZ'][()]
    for i in range(len(logZ)):
        # Posterior weights: log prior weight plus log likelihood.
        w = f['w'][:, i] + f['L'][:, i]
        mask = numpy.isfinite(w)
        jparent = numpy.where(mask)[0]
        w = w[jparent]
        # Normalise to probabilities and draw an equally-weighted sample.
        w = numpy.exp(w - w.max())
        w = w / w.sum()
        j = numpy.random.choice(jparent, size=1000, p=w)
        mu = f['x'][:, i, 1][j]
        # Treat lines whose position is constrained to <50 (same units as mu)
        # as detections and record their implied redshift.
        if mu.std() < 50:
            zs.append(mu.mean() / 440 - 1)
        A = f['x'][:, i, 0][j] * 100
        if i < 4:
            color = colors[i]
        else:
            color = cmap(0.8 * min(50, mu.std()) / 50.)
        plt.errorbar(x=numpy.mean(mu), xerr=mu.std(),
            y=A.mean(), yerr=A.std(),
            capsize=0, color=color,
            elinewidth=4 if i < 4 else 1)

plt.xlabel('Wavelength [nm]')
plt.ylabel('Line amplitude')
plt.xlim(400, 800)
plt.ylim(1, 20)
plt.yticks([1, 2, 10], [1, 2, 10])
plt.yscale('log')
plt.savefig('plotposterior.pdf', bbox_inches='tight')
plt.close()

plt.figure(figsize=(5, 1.5))
# ``normed`` was removed in matplotlib 3.1; ``density`` is the replacement.
plt.hist(zs, bins=10, histtype='step', label='Well-constrained lines', density=True)
alpha, beta = 2., 7.
x = numpy.linspace(0, 2, 1000)
plt.plot(x, scipy.stats.beta(alpha, beta).pdf(x), '-', color='k', label='Input redshift distribution')
plt.ylabel('Frequency')
plt.xlabel('Redshift')
plt.xlim(0, 1)
plt.savefig('plotposteriorz.pdf', bbox_inches='tight')
plt.close()
79578b7a5bdbd06bd6c391eaa54f606c1eda2334 | 8,626 | py | Python | test/albumentations_on_real_set.py | GorkemP/EndoCV2021-EfficientDet-Pytorch | 2ca3140d50a07e503850cad101deb0887eace9c7 | [
"MIT"
] | 4 | 2021-05-29T19:02:19.000Z | 2021-12-17T13:53:45.000Z | test/albumentations_on_real_set.py | GorkemP/EndoCV2021-EfficientDet-Pytorch | 2ca3140d50a07e503850cad101deb0887eace9c7 | [
"MIT"
] | null | null | null | test/albumentations_on_real_set.py | GorkemP/EndoCV2021-EfficientDet-Pytorch | 2ca3140d50a07e503850cad101deb0887eace9c7 | [
"MIT"
] | null | null | null | # Created by Gorkem Polat at 23.02.2021
# contact: polatgorkem@gmail.com
# import argparse
# import os
# import numpy as np
# import yaml
# import matplotlib.pyplot as plt
# from torchvision import transforms
# from efficientdet.dataset import CocoDataset, Resizer, Normalizer
# from utils.sync_batchnorm import patch_replication_callback
# from utils.utils import replace_w_sync_bn, CustomDataParallel, get_last_weights, init_weights, boolean_string
# from utils.augmentations import CustomAugmenter
import argparse
import os
import traceback
import numpy as np
import torch
import yaml
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm.autonotebook import tqdm
from backbone import EfficientDetBackbone
from efficientdet.dataset import CocoDataset, Resizer, Normalizer, Augmenter, collater
from efficientdet.loss import FocalLoss
from utils.sync_batchnorm import patch_replication_callback
from utils.utils import replace_w_sync_bn, CustomDataParallel, get_last_weights, init_weights, boolean_string
import albumentations as A
import cv2
# import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from utils.augmentations import CustomAugmenter, CustomAugmenter_experimental
# Training configuration for the EfficientDet polyp-detection experiment.
# These module-level constants feed the argparse defaults in get_args() below.
project_name = "polyps"
efficientdet_version = 0  # compound coefficient d0..d8 of EfficientDet
num_worker = 8
batch_size = 10
lr = 0.01
num_epochs = 100
head_only = False  # if True, only the regressor/classifier heads are fine-tuned
weights_file = "weights/efficientdet-d" + str(efficientdet_version) + ".pth"
early_stopping_patience = 12
lr_scheduler_patience = 5
mAP_interval = 5
def show_torch_data(img):
    """Render a torch tensor image with matplotlib and block until closed."""
    plt.imshow(img.numpy())
    plt.show()
def draw_bbox_on_image_augmented_numpy(img, annotations):
    """Show *img* with one yellow pascal-voc box (x1, y1, x2, y2) per annotation row."""
    fig, ax = plt.subplots()
    ax.imshow(img)
    for box in annotations:
        x_min, y_min, x_max, y_max = box[0], box[1], box[2], box[3]
        bbox_patch = patches.Rectangle((x_min, y_min),
                                       x_max - x_min,
                                       y_max - y_min,
                                       linewidth=2,
                                       edgecolor="yellow",
                                       facecolor='none')
        ax.add_patch(bbox_patch)
        # Class label just above the box's top-left corner.
        plt.text(x_min, y_min - 3, "polyp", color="yellow")
    plt.tight_layout()
    # plt.axis("off")
    plt.show()
def draw_bbox_on_image_augmented_torch(image, annotations):
    """Show a torch tensor image with its bounding boxes.

    Converts the tensor to a numpy array and delegates to
    draw_bbox_on_image_augmented_numpy — the previous body was a
    line-for-line duplicate of that function except for this conversion.
    """
    draw_bbox_on_image_augmented_numpy(image.numpy(), annotations)
class Params:
    """Read-only attribute view over a YAML config file.

    Missing keys resolve to None instead of raising AttributeError, so
    optional config entries can be probed with plain attribute access.
    """
    def __init__(self, project_file):
        # FIX: use a context manager so the file handle is closed promptly
        # (the original `open(...)` relied on the GC to close it).
        with open(project_file) as f:
            self.params = yaml.safe_load(f.read())

    def __getattr__(self, item):
        # Only invoked for attributes not found through normal lookup.
        return self.params.get(item, None)
def get_args():
    """Build and parse the command-line arguments for training.

    Defaults come from the module-level constants defined above, so the
    script can be run without any flags.
    """
    parser = argparse.ArgumentParser('EfficientDet Pytorch: GorkemP')
    parser.add_argument('-p', '--project', type=str, default=project_name, help='project file that contains parameters')
    parser.add_argument('-c', '--compound_coef', type=int, default=efficientdet_version,
                        help='coefficients of efficientdet')
    parser.add_argument('-n', '--num_workers', type=int, default=num_worker, help='num_workers of dataloader')
    parser.add_argument('--batch_size', type=int, default=batch_size,
                        help='The number of images per batch among all devices')
    parser.add_argument('--head_only', type=boolean_string, default=head_only,
                        help='whether finetunes only the regressor and the classifier, '
                             'useful in early stage convergence or small/easy dataset')
    parser.add_argument('--lr', type=float, default=lr)
    parser.add_argument('--optim', type=str, default='adamw', help='select optimizer for training, '
                                                                   'suggest using \'admaw\' until the'
                                                                   ' very final stage then switch to \'sgd\'')
    parser.add_argument('--num_epochs', type=int, default=num_epochs)
    parser.add_argument('--val_interval', type=int, default=1, help='Number of epoches between valing phases')
    parser.add_argument('--save_interval', type=int, default=100, help='Number of steps between saving')
    parser.add_argument('--es_min_delta', type=float, default=0.0,
                        help='Early stopping\'s parameter: minimum change loss to qualify as an improvement')
    parser.add_argument('--es_patience', type=int, default=early_stopping_patience,
                        help='Early stopping\'s parameter: number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique.')
    parser.add_argument('--data_path', type=str, default='datasets/', help='the root folder of dataset')
    parser.add_argument('--log_path', type=str, default='logs/')
    parser.add_argument('-w', '--load_weights', type=str, default=weights_file,
                        help='whether to load weights from a checkpoint, set None to initialize, set \'last\' to load last checkpoint')
    parser.add_argument('--saved_path', type=str, default='logs/')
    parser.add_argument('--debug', type=boolean_string, default=False,
                        help='whether visualize the predicted boxes of training, '
                             'the output images will be in test/')
    args = parser.parse_args()
    return args
# --- Script body: build the (mostly disabled) augmentation pipeline, load one
# sample from the COCO-format training set and display it with its boxes. ---
opt = get_args()
params = Params(f'projects/{opt.project}.yml')
# Input resolution per EfficientDet compound coefficient d0..d8.
input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536, 1536]
# Normalizer(mean=params.mean, std=params.std),
# training_set = CocoDataset(root_dir=os.path.join(opt.data_path, params.project_name), set=params.train_set)
# NOTE(review): almost all albumentations transforms below are commented out,
# so CustomAugmenter currently applies only the (empty) bbox-aware Compose —
# this script is an experimentation scratchpad for augmentation settings.
training_set = CocoDataset(root_dir=os.path.join(opt.data_path, params.project_name), set=params.train_set,
                           transform=transforms.Compose([
                               # Normalizer(mean=params.mean, std=params.std),
                               CustomAugmenter(
                                       A.Compose([
                                           # A.OneOf([
                                           #     A.ColorJitter(brightness=0.0, contrast=0.0, saturation=0.2, hue=0.1,
                                           #                   p=0.5),
                                           #     A.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.0, hue=0.1,
                                           #                   p=0.5),
                                           # ], p=0.9),
                                           # A.IAAPerspective(),
                                           # A.ShiftScaleRotate(shift_limit=0, rotate_limit=0, scale_limit=(-0.8, 1.0),
                                           #                    border_mode=cv2.BORDER_CONSTANT, p=1),
                                           # A.RandomScale(0.5, p=1)
                                           # A.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.0, hue=0.0, p=1),
                                           # A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3),
                                           # A.Rotate((180), border_mode=cv2.BORDER_CONSTANT),
                                           # A.HorizontalFlip(),
                                           # A.Cutout(num_holes=8, max_h_size=128, max_w_size=128, fill_value=0, p=1)
                                           # A.VerticalFlip()
                                       ], bbox_params=A.BboxParams(format="pascal_voc", min_visibility=0.5))
                               ),
                               Resizer(input_sizes[3])
                           ]))
# Pull a single sample and visualise it with its ground-truth boxes.
selected_sample = training_set[4]
image_selected = selected_sample["img"]
annotations_selected = selected_sample["annot"]
print(image_selected.shape)
draw_bbox_on_image_augmented_numpy(image_selected, annotations_selected)
# draw_bbox_on_image_augmented_solo(image_selected, annotations_selected)
| 45.162304 | 171 | 0.589961 |
79578c4dc78c59e92af79eb8e8beb0fb81fdaa5f | 3,390 | py | Python | airflow/providers/google/marketing_platform/example_dags/example_analytics.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | [
"Apache-2.0"
] | 1 | 2019-05-07T06:46:55.000Z | 2019-05-07T06:46:55.000Z | airflow/providers/google/marketing_platform/example_dags/example_analytics.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | [
"Apache-2.0"
] | 9 | 2020-07-28T15:07:03.000Z | 2022-03-29T22:27:52.000Z | airflow/providers/google/marketing_platform/example_dags/example_analytics.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | [
"Apache-2.0"
] | 1 | 2019-06-15T08:38:53.000Z | 2019-06-15T08:38:53.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows how to use Google Analytics 360.
"""
import os
from airflow import models
from airflow.providers.google.marketing_platform.operators.analytics import (
GoogleAnalyticsDataImportUploadOperator, GoogleAnalyticsDeletePreviousDataUploadsOperator,
GoogleAnalyticsGetAdsLinkOperator, GoogleAnalyticsListAccountsOperator,
GoogleAnalyticsModifyFileHeadersDataImportOperator, GoogleAnalyticsRetrieveAdsLinksListOperator,
)
from airflow.utils import dates
# Environment-driven configuration with test-friendly defaults.
ACCOUNT_ID = os.environ.get("GA_ACCOUNT_ID", "123456789")
BUCKET = os.environ.get("GMP_ANALYTICS_BUCKET", "test-airflow-analytics-bucket")
BUCKET_FILENAME = "data.csv"
WEB_PROPERTY_ID = os.environ.get("GA_WEB_PROPERTY", "UA-12345678-1")
WEB_PROPERTY_AD_WORDS_LINK_ID = os.environ.get(
    "GA_WEB_PROPERTY_AD_WORDS_LINK_ID", "rQafFTPOQdmkx4U-fxUfhj"
)
DATA_ID = "kjdDu3_tQa6n8Q1kXFtSmg"
# Example DAG demonstrating the Google Analytics 360 operators.
with models.DAG(
    "example_google_analytics",
    schedule_interval=None,  # Override to match your needs,
    start_date=dates.days_ago(1),
) as dag:
    # [START howto_marketing_platform_list_accounts_operator]
    list_account = GoogleAnalyticsListAccountsOperator(task_id="list_account")
    # [END howto_marketing_platform_list_accounts_operator]
    # [START howto_marketing_platform_get_ads_link_operator]
    get_ad_words_link = GoogleAnalyticsGetAdsLinkOperator(
        web_property_ad_words_link_id=WEB_PROPERTY_AD_WORDS_LINK_ID,
        web_property_id=WEB_PROPERTY_ID,
        account_id=ACCOUNT_ID,
        task_id="get_ad_words_link",
    )
    # [END howto_marketing_platform_get_ads_link_operator]
    # [START howto_marketing_platform_retrieve_ads_links_list_operator]
    list_ad_words_link = GoogleAnalyticsRetrieveAdsLinksListOperator(
        task_id="list_ad_link", account_id=ACCOUNT_ID, web_property_id=WEB_PROPERTY_ID
    )
    # [END howto_marketing_platform_retrieve_ads_links_list_operator]
    # Push a CSV from GCS into a GA custom data source.
    upload = GoogleAnalyticsDataImportUploadOperator(
        task_id="upload",
        storage_bucket=BUCKET,
        storage_name_object=BUCKET_FILENAME,
        account_id=ACCOUNT_ID,
        web_property_id=WEB_PROPERTY_ID,
        custom_data_source_id=DATA_ID,
    )
    # Remove earlier uploads from the same custom data source.
    delete = GoogleAnalyticsDeletePreviousDataUploadsOperator(
        task_id="delete",
        account_id=ACCOUNT_ID,
        web_property_id=WEB_PROPERTY_ID,
        custom_data_source_id=DATA_ID,
    )
    # Rewrite the CSV headers to the GA data-import naming scheme.
    transform = GoogleAnalyticsModifyFileHeadersDataImportOperator(
        task_id="transform",
        storage_bucket=BUCKET,
        storage_name_object=BUCKET_FILENAME,
    )
    # delete and transform both run after upload; the list/get tasks are independent.
    upload >> [delete, transform]
79578c95fc6ffdd414c52bf0c13ed9f11baa76ba | 2,577 | py | Python | webapp/general/models.py | DzenMorm/team_work_tattoo | a4a618eb2f703c8515efa3ecc25259c414594cff | [
"MIT"
] | null | null | null | webapp/general/models.py | DzenMorm/team_work_tattoo | a4a618eb2f703c8515efa3ecc25259c414594cff | [
"MIT"
] | null | null | null | webapp/general/models.py | DzenMorm/team_work_tattoo | a4a618eb2f703c8515efa3ecc25259c414594cff | [
"MIT"
] | null | null | null | from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from webapp.db import db
class Auth(db.Model, UserMixin):
__tablename__ = 'auth'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String, nullable=False, unique=True)
password = db.Column(db.String, nullable=False)
def set_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password, password)
def __repr__(self):
return f'<Auth {self.id}, {self.email}>'
class City(db.Model):
    """Lookup table of city names."""
    __tablename__ = 'city'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, nullable=False)

    def __repr__(self):
        return '<City {0}, {1}>'.format(self.name, self.id)
class Review(db.Model):
    """A user's review of a master or a salon.

    Both target FKs are nullable; presumably exactly one of master/salon is
    set per row — confirm against the application logic that creates reviews.
    """
    __tablename__ = 'review'
    id = db.Column(db.Integer, primary_key=True)
    date = db.Column(db.Date, nullable=False)
    text = db.Column(db.Text, nullable=False)
    # Author of the review (required).
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'),
                        nullable=False)
    user = db.relationship('User', backref='reviews')
    master_id = db.Column(db.Integer, db.ForeignKey('master.id'),
                          nullable=True)
    master = db.relationship('Master', backref='reviews')
    salon_id = db.Column(db.Integer, db.ForeignKey('salon.id'),
                         nullable=True)
    salon = db.relationship('Salon', backref='reviews')

    def __repr__(self):
        # FIX: previously printed "<Reviews ...>", inconsistent with the class name.
        return f'<Review {self.date}, {self.text}>'
class Image(db.Model):
    """An uploaded drawing/photo, optionally attached to a master, salon,
    review and/or user — all owner FKs are nullable."""
    __tablename__ = 'image'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, nullable=False)
    # Filesystem/storage path of the image file.
    path = db.Column(db.Text, nullable=False)
    description = db.Column(db.Text, nullable=True)
    master_id = db.Column(db.Integer, db.ForeignKey('master.id'),
                          nullable=True)
    master = db.relationship('Master', backref='images')
    salon_id = db.Column(db.Integer, db.ForeignKey('salon.id'),
                         nullable=True)
    salon = db.relationship('Salon', backref='images')
    review_id = db.Column(db.Integer, db.ForeignKey(Review.id),
                          nullable=True)
    review = db.relationship('Review', backref='images')
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=True)
    user = db.relationship('User', backref='images')

    def __repr__(self):
        # NOTE(review): repr label does not match the class name — confirm intended.
        return f'<Drawings and photos {self.name}, {self.description}>'
79578ca6b93d8e5c21bb1b2cc3a7213418d99e64 | 24,571 | py | Python | run.py | leeyy2020/LM-BFF | 2c80b2ea3987c403c4d4abc6e202d280ea846210 | [
"MIT"
] | null | null | null | run.py | leeyy2020/LM-BFF | 2c80b2ea3987c403c4d4abc6e202d280ea846210 | [
"MIT"
] | null | null | null | run.py | leeyy2020/LM-BFF | 2c80b2ea3987c403c4d4abc6e202d280ea846210 | [
"MIT"
] | null | null | null | """Finetuning the library models for sequence classification on GLUE."""
import dataclasses
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Callable, Dict, Optional
import torch
import numpy as np
import transformers
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, EvalPrediction
from transformers import GlueDataTrainingArguments as DataTrainingArguments
from transformers import HfArgumentParser, TrainingArguments, set_seed
from src.dataset import FewShotDataset
from src.models import BertForPromptFinetuning, RobertaForPromptFinetuning, resize_token_type_embeddings
from src.trainer import Trainer
from src.processors import processors_mapping, num_labels_mapping, output_modes_mapping, compute_metrics_mapping, bound_mapping
from filelock import FileLock
from datetime import datetime
from copy import deepcopy
from tqdm import tqdm
import json
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    # Few-shot type
    #  - finetune: standard fine-tuning
    #  - prompt: prompt-based fine-tuning
    #  - prompt-demo: prompt-based fine-tuning with demonstrations
    few_shot_type: str = field(
        default='prompt-demo',
        metadata={"help": "Few-shot learning model type. Choice: finetune, prompt, prompt-demo"}
    )
    # Only for BERT-type model
    random_segment: bool = field(
        default=False,
        metadata={"help": "Whether to reinitialize the token type embeddings (only for BERT)."}
    )
@dataclass
class DynamicDataTrainingArguments(DataTrainingArguments):
    """
    Arguments for dynamic training.

    Extends the GLUE data arguments with few-shot sampling sizes, prompt
    template/mapping sources, demonstration filtering and GPT-3-style
    in-context learning switches. The trailing `prompt`/`template_list`
    fields are filled in by main() at runtime and must not be set manually.
    """
    num_k: Optional[int] = field(
        default=16,
        metadata={"help": "Number of training instances per class"}
    )
    num_sample: Optional[int] = field(
        default=16,
        metadata={"help": "Number of samples (for inference) in fine-tuning with demonstrations"}
    )
    num_demo: Optional[int] = field(
        default=1,
        metadata={"help": "Number of demonstrations from each class"}
    )
    auto_demo: bool = field(
        default=True,
        metadata={"help": "Automatically generate template for using demonstrations"}
    )
    # For prompting
    template: str = field(
        default=None,
        metadata={"help": "Template"}
    )
    mapping: str = field(
        default=None,
        metadata={"help": "Label word mapping"}
    )
    template_path: str = field(
        default=None,
        metadata={"help": "Path to a txt file that stores all the templates, one per line. Do not set this when prompt_path is used"}
    )
    mapping_path: str = field(
        default=None,
        metadata={"help": "Path to a txt file that stores all the label word mappings, one per line. Do not set this when prompt_path is used"}
    )
    prompt_path: str = field(
        default=None,
        metadata={"help": "Path to a txt file that stores all the prompts (templates and mappings), one per line"}
    )
    template_id: int = field(
        default=None,
        metadata={"help": "Template id if using template_path"}
    )
    mapping_id: int = field(
        default=None,
        metadata={"help": "Mapping id if using template_path"}
    )
    prompt_id: int = field(
        default=None,
        metadata={"help": "Prompt id if using prompt_path"}
    )
    top_n_template: int = field(
        default=None,
        metadata={"help": "Use top-n template in the template path"}
    )
    # For logging
    tag: str = field(
        default='',
        metadata={"help": "Set the tag and find the result easier in the log."}
    )
    # For filtering when using demonstrations
    demo_filter: bool = field(
        default=False,
        metadata={"help": "Only use similar instances in demonstrations"}
    )
    demo_filter_rate: float = field(
        default=0.5,
        metadata={"help": "Only use top-x\% similar instances in demonstrations"}
    )
    demo_filter_model: str = field(
        default=None,
        metadata={"help": "Model name for demonstration filter embeddings. Will load embeddings based on the model name."}
    )
    debug_mode: bool = field(
        default=False,
        metadata={"help": "Debug mode"}
    )
    # For max length
    double_demo: bool = field(
        default=False,
        metadata={"help": "Use double length for using demonstrations"}
    )
    first_sent_limit: int = field(
        default=None,
        metadata={"help": "Limit the length of the first sentence (i.e., sent_0)"}
    )
    other_sent_limit: int = field(
        default=None,
        metadata={"help": "Limit the length of sentences other than the first sentence"}
    )
    use_full_length: bool = field(
        default=None,
        metadata={"help": "Use the full length (512)"}
    )
    # GPT-3's in-context learning
    gpt3_in_context_head: bool = field(
        default=False,
        metadata={"help": "GPT-3's in-context learning (context at the beginning)"}
    )
    gpt3_in_context_tail: bool = field(
        default=False,
        metadata={"help": "GPT-3's in-context learning (context at the end)"}
    )
    gpt3_in_context_num: int = field(
        default=32,
        metadata={"help": "Number of context examples"}
    )
    truncate_head: bool = field(
        default=False,
        metadata={"help": "When exceeding the maximum length, truncate the head instead of the tail."}
    )
    # Do not set up the following fields. They are set up automatically.
    prompt: bool = field(
        default=False,
        metadata={"help": "Whether to use prompt-based fine-tuning"}
    )
    template_list: list = field(
        default=None,
        # FIX: the original help string was garbled ("(DO NOT List of templates
        # (only initialized after the program starts.").
        metadata={"help": "(DO NOT SET) List of templates; only initialized after the program starts."}
    )
@dataclass
class DynamicTrainingArguments(TrainingArguments):
    """Training arguments extended with ensemble bookkeeping, logit saving,
    layer freezing, checkpointing policy and train/test on/off switches."""
    # For ensemble
    array_id: int = field(
        default=-1,
        metadata={"help": "Array ID (contains seed and hyper-paramter search) to idenfity the model"}
    )
    model_id: int = field(
        default=-1,
        metadata={"help": "Model ID (contains template information) to identify the model"}
    )
    save_logit: bool = field(
        default=False,
        metadata={"help": "Save test file logit with name $TASK-$MODEL_ID-$ARRAY_ID.npy"}
    )
    save_logit_dir: str = field(
        default=None,
        metadata={"help": "Where to save the prediction result"}
    )
    # Regularization
    fix_layers: int = field(
        default=0,
        metadata={"help": "Fix bottom-n layers when optimizing"}
    )
    # Training
    save_at_last: bool = field(
        default=False,
        metadata={"help": "Instead of saving the best (dev performance) checkpoint, save the last checkpoint"}
    )
    # Turn off train/test
    no_train: bool = field(
        default=False,
        metadata={"help": "No training"}
    )
    no_predict: bool = field(
        default=False,
        metadata={"help": "No test"}
    )
    alpha: float = field(
        default=0,
        # FIX: the original help text ("Number of context examples") was
        # copy-pasted from gpt3_in_context_num; alpha is forwarded to
        # Trainer.train(..., alpha=...) in main().
        metadata={"help": "Alpha hyper-parameter forwarded to Trainer.train()"}
    )
def main():
    """End-to-end LM-BFF pipeline: parse arguments, resolve prompt
    templates/mappings, build few-shot datasets, train (optionally),
    evaluate on dev/test, and append the run summary to the `log` file.

    Returns the dict of dev-set metrics from the final evaluation.
    """
    parser = HfArgumentParser((ModelArguments, DynamicDataTrainingArguments, DynamicTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # 'prompt' and 'prompt-demo' few-shot types both enable prompting.
    if 'prompt' in model_args.few_shot_type:
        data_args.prompt = True
    if training_args.no_train:
        training_args.do_train = False
    if training_args.no_predict:
        training_args.do_predict = False
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    # Load prompt/template/mapping file.
    # Precedence: prompt_path (template+mapping in one TSV file) wins over
    # separate template_path/mapping_path files.
    if data_args.prompt:
        if data_args.prompt_path is not None:
            assert data_args.prompt_id is not None
            prompt_list = []
            with open(data_args.prompt_path) as f:
                for line in f:
                    line = line.strip()
                    template, mapping = line.split('\t')
                    prompt_list.append((template, mapping))
            data_args.template, data_args.mapping = prompt_list[data_args.prompt_id]
            logger.info("Specify load the %d-th prompt: %s | %s" % (data_args.prompt_id, data_args.template, data_args.mapping))
        else:
            if data_args.template_path is not None:
                with open(data_args.template_path) as f:
                    data_args.template_list = []
                    for line in f:
                        line = line.strip()
                        if len(line) > 0:
                            data_args.template_list.append(line)
                # Load top-n templates
                if data_args.top_n_template is not None:
                    data_args.template_list = data_args.template_list[:data_args.top_n_template]
                logger.info("Load top-%d templates from %s" % (len(data_args.template_list), data_args.template_path))
                # ... or load i-th template
                if data_args.template_id is not None:
                    data_args.template = data_args.template_list[data_args.template_id]
                    data_args.template_list = None
                    logger.info("Specify load the %d-th template: %s" % (data_args.template_id, data_args.template))
            if data_args.mapping_path is not None:
                assert data_args.mapping_id is not None # Only can use one label word mapping
                with open(data_args.mapping_path) as f:
                    mapping_list = []
                    for line in f:
                        line = line.strip()
                        mapping_list.append(line)
                data_args.mapping = mapping_list[data_args.mapping_id]
                logger.info("Specify using the %d-th mapping: %s" % (data_args.mapping_id, data_args.mapping))
    # Check save path
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(f"Output directory ({training_args.output_dir}) already exists.")
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    try:
        num_labels = num_labels_mapping[data_args.task_name]
        output_mode = output_modes_mapping[data_args.task_name]
        logger.info("Task name: {}, number of labels: {}, output mode: {}".format(data_args.task_name, num_labels, output_mode))
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
    # Automatically generate template for using demonstrations.
    # Templates encode sentence slots as *sent_i* and the prediction slot as
    # *mask*; the rewrites below append one copy of the template per
    # demonstration (GPT-3 style) or per class label.
    if data_args.auto_demo and model_args.few_shot_type == 'prompt-demo':
        # GPT-3's in-context learning
        if data_args.gpt3_in_context_head or data_args.gpt3_in_context_tail:
            logger.info("Automatically convert the template to GPT-3's in-context learning.")
            assert data_args.template_list is None
            old_template = data_args.template
            new_template = old_template + ''
            old_template = old_template.replace('*cls*', '')
            # Single sentence or sentence pair?
            sent_num = 1
            if "_1" in old_template:
                sent_num = 2
            for instance_id in range(data_args.gpt3_in_context_num):
                sub_template = old_template + ''
                # Replace sent_id
                for sent_id in range(sent_num):
                    sub_template = sub_template.replace("_{}*".format(sent_id), "_{}*".format(sent_num + sent_num * instance_id + sent_id))
                # Replace mask
                sub_template = sub_template.replace("*mask*", "*labelx_{}*".format(instance_id))
                if data_args.gpt3_in_context_tail:
                    new_template = new_template + sub_template # Put context at the end
                else:
                    new_template = sub_template + new_template # Put context at the beginning
            logger.info("| {} => {}".format(data_args.template, new_template))
            data_args.template = new_template
        else:
            logger.info("Automatically convert the template to using demonstrations.")
            if data_args.template_list is not None:
                for i in range(len(data_args.template_list)):
                    old_template = data_args.template_list[i]
                    new_template = old_template + ''
                    old_template = old_template.replace('*cls*', '')
                    # Single sentence or sentence pair?
                    sent_num = 1
                    if "_1" in old_template:
                        sent_num = 2
                    for label_id in range(num_labels):
                        sub_template = old_template + ''
                        # Replace sent id
                        for sent_id in range(sent_num):
                            sub_template = sub_template.replace("_{}*".format(sent_id), "_{}*".format(sent_num + sent_num * label_id + sent_id))
                        # Replace mask
                        sub_template = sub_template.replace("*mask*", "*label_{}*".format(label_id))
                        new_template = new_template + sub_template
                    logger.info("| {} => {}".format(data_args.template_list[i], new_template))
                    data_args.template_list[i] = new_template
            else:
                old_template = data_args.template
                new_template = old_template + ''
                old_template = old_template.replace('*cls*', '')
                # Single sentence or sentence pair?
                sent_num = 1
                if "_1" in old_template:
                    sent_num = 2
                for label_id in range(num_labels):
                    sub_template = old_template + ''
                    # Replace sent id
                    for sent_id in range(sent_num):
                        sub_template = sub_template.replace("_{}".format(sent_id), "_{}".format(sent_num + sent_num * label_id + sent_id))
                    # Replace mask
                    sub_template = sub_template.replace("*mask*", "*label_{}*".format(label_id))
                    new_template = new_template + sub_template
                logger.info("| {} => {}".format(data_args.template, new_template))
                data_args.template = new_template
    # Create config
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    # Pick the model class: prompt fine-tuning uses the custom LM-head models,
    # standard fine-tuning uses a plain sequence classifier.
    if 'prompt' in model_args.few_shot_type:
        if config.model_type == 'roberta':
            model_fn = RobertaForPromptFinetuning
        elif config.model_type == 'bert':
            model_fn = BertForPromptFinetuning
        else:
            raise NotImplementedError
    elif model_args.few_shot_type == 'finetune':
        model_fn = AutoModelForSequenceClassification
    else:
        raise NotImplementedError
    special_tokens = []
    # Create tokenizer
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        additional_special_tokens=special_tokens,
        cache_dir=model_args.cache_dir,
    )
    # Get our special datasets.
    train_dataset = (
        FewShotDataset(data_args, tokenizer=tokenizer, mode="train", use_demo=("demo" in model_args.few_shot_type))
    )
    eval_dataset = (
        FewShotDataset(data_args, tokenizer=tokenizer, mode="dev", use_demo=("demo" in model_args.few_shot_type))
        if training_args.do_eval
        else None
    )
    test_dataset = (
        FewShotDataset(data_args, tokenizer=tokenizer, mode="test", use_demo=("demo" in model_args.few_shot_type))
        if training_args.do_predict
        else None
    )
    set_seed(training_args.seed)
    model = model_fn.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # For BERT, increase the size of the segment (token type) embeddings
    if config.model_type == 'bert':
        model.resize_token_embeddings(len(tokenizer))
        resize_token_type_embeddings(model, new_num_types=10, random_segment=model_args.random_segment)
    # Pass dataset and argument information to the model
    if data_args.prompt:
        model.label_word_list = torch.tensor(train_dataset.label_word_list).long().cuda()
    if output_modes_mapping[data_args.task_name] == 'regression':
        # lower / upper bounds
        model.lb, model.ub = bound_mapping[data_args.task_name]
    model.model_args = model_args
    model.data_args = data_args
    model.tokenizer = tokenizer
    # Build metric
    def build_compute_metrics_fn(task_name: str) -> Callable[[EvalPrediction], Dict]:
        def compute_metrics_fn(p: EvalPrediction):
            # Note: the eval dataloader is sequential, so the examples are in order.
            # We average the logits over each sample for using demonstrations.
            predictions = p.predictions
            num_logits = predictions.shape[-1]
            logits = predictions.reshape([eval_dataset.num_sample, -1, num_logits])
            logits = logits.mean(axis=0)
            if num_logits == 1:
                preds = np.squeeze(logits)
            else:
                preds = np.argmax(logits, axis=1)
            # Just for sanity, assert label ids are the same.
            label_ids = p.label_ids.reshape([eval_dataset.num_sample, -1])
            label_ids_avg = label_ids.mean(axis=0)
            label_ids_avg = label_ids_avg.astype(p.label_ids.dtype)
            assert (label_ids_avg - label_ids[0]).mean() < 1e-2
            label_ids = label_ids[0]
            return compute_metrics_mapping[task_name](task_name, preds, label_ids)
        return compute_metrics_fn
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=build_compute_metrics_fn(data_args.task_name)
    )
    # Training
    if training_args.do_train:
        trainer.train(model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None, alpha = training_args.alpha)
        # Use the early stop, so do not save the model in the end (unless specify save_at_last)
        if training_args.save_at_last:
            trainer.save_model(training_args.output_dir)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
            torch.save(model_args, os.path.join(training_args.output_dir, "model_args.bin"))
            torch.save(data_args, os.path.join(training_args.output_dir, "data_args.bin"))
        # Reload the best checkpoint (for eval)
        model = model_fn.from_pretrained(training_args.output_dir)
        model = model.to(training_args.device)
        trainer.model = model
        if data_args.prompt:
            model.label_word_list = torch.tensor(train_dataset.label_word_list).long().cuda()
            if output_modes_mapping[data_args.task_name] == 'regression':
                # lower / upper bounds
                model.lb, model.ub = bound_mapping[data_args.task_name]
        model.model_args = model_args
        model.data_args = data_args
        model.tokenizer = tokenizer
    # Evaluation
    final_result = {
        'time': str(datetime.today()),
    }
    eval_results = {}
    if training_args.do_eval:
        logger.info("*** Validate ***")
        eval_datasets = [eval_dataset]
        for eval_dataset in eval_datasets:
            trainer.compute_metrics = build_compute_metrics_fn(eval_dataset.args.task_name)
            output = trainer.evaluate(eval_dataset=eval_dataset)
            eval_result = output.metrics
            output_eval_file = os.path.join(
                training_args.output_dir, f"eval_results_{eval_dataset.args.task_name}.txt"
            )
            if trainer.is_world_master():
                with open(output_eval_file, "w") as writer:
                    logger.info("***** Eval results {} *****".format(eval_dataset.args.task_name))
                    for key, value in eval_result.items():
                        logger.info("  %s = %s", key, value)
                        writer.write("%s = %s\n" % (key, value))
                        final_result[eval_dataset.args.task_name + '_dev_' + key] = value
            eval_results.update(eval_result)
    test_results = {}
    if training_args.do_predict:
        logging.info("*** Test ***")
        test_datasets = [test_dataset]
        # MNLI has a second, mismatched test split.
        if data_args.task_name == "mnli":
            mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
            test_datasets.append(
                FewShotDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="test", use_demo=('demo' in model_args.few_shot_type))
            )
        for test_dataset in test_datasets:
            trainer.compute_metrics = build_compute_metrics_fn(test_dataset.args.task_name)
            output = trainer.evaluate(eval_dataset=test_dataset)
            test_result = output.metrics
            output_test_file = os.path.join(
                training_args.output_dir, f"test_results_{test_dataset.args.task_name}.txt"
            )
            if trainer.is_world_master():
                with open(output_test_file, "w") as writer:
                    logger.info("***** Test results {} *****".format(test_dataset.args.task_name))
                    for key, value in test_result.items():
                        logger.info("  %s = %s", key, value)
                        writer.write("%s = %s\n" % (key, value))
                        final_result[test_dataset.args.task_name + '_test_' + key] = value
                if training_args.save_logit:
                    predictions = output.predictions
                    num_logits = predictions.shape[-1]
                    logits = predictions.reshape([test_dataset.num_sample, -1, num_logits]).mean(axis=0)
                    np.save(os.path.join(training_args.save_logit_dir, "{}-{}-{}.npy".format(test_dataset.task_name, training_args.model_id, training_args.array_id)), logits)
            test_results.update(test_result)
    # Append the full run summary (args + metrics) to a shared log file;
    # FileLock serialises concurrent array jobs writing to the same file.
    with FileLock('log.lock'):
        with open('log', 'a') as f:
            final_result.update(vars(model_args))
            final_result.update(vars(training_args))
            final_result.update(vars(data_args))
            if 'evaluation_strategy' in final_result:
                final_result.pop('evaluation_strategy')
            f.write(str(final_result) + '\n')
    return eval_results
# Standard CLI entry point.
if __name__ == "__main__":
    main()
| 38.633648 | 174 | 0.622807 |
79578ce0164e3c1b493d9208e1608e2683884d77 | 4,661 | py | Python | synthesize.wave.py | f2re/FCH-TTS | 54ddee710694929d978943356fe913609ed0aab5 | [
"MIT"
] | 80 | 2021-04-02T09:24:18.000Z | 2021-09-18T08:51:19.000Z | synthesize.wave.py | f2re/FCH-TTS | 54ddee710694929d978943356fe913609ed0aab5 | [
"MIT"
] | 5 | 2021-04-26T06:51:49.000Z | 2021-08-19T09:52:27.000Z | synthesize.wave.py | f2re/FCH-TTS | 54ddee710694929d978943356fe913609ed0aab5 | [
"MIT"
] | 14 | 2021-04-02T00:04:50.000Z | 2021-09-03T00:58:28.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : train-duration.py
@Date : 2021/01/05, Tue
@Author : Atomicoo
@Version : 1.0
@Contact : atomicoo95@gmail.com
@License : (C)Copyright 2020-2021, ShiGroup-NLP-XMU
@Desc    :   Synthesize sentences into speech.
'''
__author__ = 'Atomicoo'
import argparse
import os
import os.path as osp
import time
from scipy.io.wavfile import write
import torch
from utils.hparams import HParam
from utils.transform import StandardNorm
from helpers.synthesizer import Synthesizer
import vocoder.models
from vocoder.layers import PQMF
from utils.audio import dynamic_range_decompression
from datasets.dataset import TextProcessor
from models import ParallelText2Mel
from utils.utils import select_device, get_last_chkpt_path
# Optional GPU auto-selection helper; fall back to gm=None (device picked
# manually or CPU) when the helper cannot be imported.
try:
    from helpers.manager import GPUManager
except ImportError as err:
    print(err); gm = None
else:
    gm = GPUManager()
if __name__ == '__main__':
    # ---- Command-line arguments -------------------------------------------
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--batch_size", default=8, type=int, help="Batch size")
    parser.add_argument("--checkpoint", default=None, type=str, help="Checkpoint file path")
    parser.add_argument("--melgan_checkpoint", default=None, type=str, help="Checkpoint file path of melgan")
    parser.add_argument("--input_texts", default=None, type=str, help="Input text file path")
    parser.add_argument("--outputs_dir", default=None, type=str, help="Output wave file directory")
    parser.add_argument("--device", default=None, help="cuda device or cpu")
    parser.add_argument("--name", default="parallel", type=str, help="Append to logdir name")
    parser.add_argument("--config", default=None, type=str, help="Config file path")
    args = parser.parse_args()
    # Pick a CUDA device index (auto-chosen by GPUManager when available),
    # otherwise run on CPU.
    if torch.cuda.is_available():
        index = args.device if args.device else str(0 if gm is None else gm.auto_choice())
    else:
        index = 'cpu'
    device = select_device(index)
    # Hyper-parameters: explicit --config wins, else config/default.yaml.
    hparams = HParam(args.config) \
        if args.config else HParam(osp.join(osp.abspath(os.getcwd()), "config", "default.yaml"))
    # NOTE(review): the f-prefix below is redundant; this is %-formatting.
    logdir = osp.join(hparams.trainer.logdir, f"%s-%s" % (hparams.data.dataset, args.name))
    checkpoint = args.checkpoint or get_last_chkpt_path(logdir)
    # ---- Text-to-mel model -------------------------------------------------
    normalizer = StandardNorm(hparams.audio.spec_mean, hparams.audio.spec_std)
    processor = TextProcessor(hparams.text)
    text2mel = ParallelText2Mel(hparams.parallel)
    text2mel.eval()
    synthesizer = Synthesizer(
        model=text2mel,
        checkpoint=checkpoint,
        processor=processor,
        normalizer=normalizer,
        device=device
    )
    print('Synthesizing...')
    since = time.time()
    # One input sentence per line of the text file.
    text_file = args.input_texts or hparams.synthesizer.inputs_file_path
    with open(text_file, 'r', encoding='utf-8') as fr:
        texts = fr.read().strip().split('\n')
    melspecs = synthesizer.inference(texts)
    print(f"Inference {len(texts)} spectrograms, total elapsed {time.time()-since:.3f}s. Done.")
    # ---- Vocoder -----------------------------------------------------------
    vocoder_checkpoint = args.melgan_checkpoint or \
        osp.join(hparams.trainer.logdir, f"{hparams.data.dataset}-melgan", hparams.melgan.checkpoint)
    ckpt = torch.load(vocoder_checkpoint, map_location=device)
    # Re-normalize the mel spectrograms with the vocoder's own statistics.
    # Ref: https://github.com/kan-bayashi/ParallelWaveGAN/issues/169
    decompressed = dynamic_range_decompression(melspecs)
    decompressed_log10 = torch.log10(decompressed)
    mu = torch.tensor(ckpt['stats']['mu']).to(device).unsqueeze(1)
    var = torch.tensor(ckpt['stats']['var']).to(device).unsqueeze(1)
    sigma = torch.sqrt(var)
    melspecs = (decompressed_log10 - mu) / sigma
    # NOTE(review): the local `vocoder` assigned below shadows the imported
    # `vocoder` package; `vocoder.models` is only read before the rebinding,
    # so this works, but renaming the local would be safer.
    Generator = getattr(vocoder.models, ckpt['gtype'])
    vocoder = Generator(**ckpt['config']).to(device)
    # NOTE(review): weight norm is removed before load_state_dict(); this
    # presumably matches how the checkpoint was saved -- confirm.
    vocoder.remove_weight_norm()
    if ckpt['config']['out_channels'] > 1:
        # Multi-band model: attach a PQMF synthesis filter bank.
        vocoder.pqmf = PQMF().to(device)
    vocoder.load_state_dict(ckpt['model'])
    if ckpt['config']['out_channels'] > 1:
        waves = vocoder.pqmf.synthesis(vocoder(melspecs)).squeeze(1)
    else:
        waves = vocoder(melspecs).squeeze(1)
    print(f"Generate {len(texts)} audios, total elapsed {time.time()-since:.3f}s. Done.")
    # ---- Write wav files ---------------------------------------------------
    print('Saving audio...')
    outputs_dir = args.outputs_dir or hparams.synthesizer.outputs_dir
    os.makedirs(outputs_dir, exist_ok=True)
    for i, wav in enumerate(waves, start=1):
        wav = wav.cpu().detach().numpy()
        filename = osp.join(outputs_dir, f"{time.strftime('%Y-%m-%d')}_{i:03d}.wav")
        write(filename, hparams.audio.sampling_rate, wav)
    print(f"Audios saved to {outputs_dir}. Done.")
    print(f'Done. ({time.time()-since:.3f}s)')
| 38.204918 | 113 | 0.700493 |
79578d274fb930885e4a47e5d609aefed900826b | 130 | py | Python | setup.py | tamos/real_prices | 9f14e8e10341eccd1fdd5c726be216afa62ee326 | [
"MIT"
] | null | null | null | setup.py | tamos/real_prices | 9f14e8e10341eccd1fdd5c726be216afa62ee326 | [
"MIT"
] | null | null | null | setup.py | tamos/real_prices | 9f14e8e10341eccd1fdd5c726be216afa62ee326 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Minimal package definition; pandas is the only runtime dependency.
setup(name="real_prices", packages=find_packages(),
      install_requires=['pandas'])
| 26 | 51 | 0.776923 |
79578d5f8b22847b84fb5d8a29b67371186ccb9c | 10,416 | py | Python | src/tensorpack_cpu/tensorpack/RL/simulator.py | deepsense-ai/Distributed-BA3C | f5195ae83121746bd449e1a5eb2897000bbd12df | [
"Apache-2.0"
] | 62 | 2017-10-04T12:17:15.000Z | 2021-12-31T06:22:23.000Z | src/tensorpack_cpu/tensorpack/RL/simulator.py | deepsense-ai/Distributed-BA3C | f5195ae83121746bd449e1a5eb2897000bbd12df | [
"Apache-2.0"
] | 19 | 2018-01-28T23:05:33.000Z | 2022-03-11T23:14:57.000Z | src/tensorpack_cpu/tensorpack/RL/simulator.py | deepsense-ai/Distributed-BA3C | f5195ae83121746bd449e1a5eb2897000bbd12df | [
"Apache-2.0"
] | 10 | 2017-10-06T16:36:04.000Z | 2018-12-02T09:11:09.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# File: simulator.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import sys
import os
import signal
import time
import tensorflow as tf
import multiprocessing as mp
import time
import threading
import weakref
from abc import abstractmethod, ABCMeta
from collections import defaultdict, namedtuple
import numpy as np
import six
from six.moves import queue
from ..models._common import disable_layer_logging
from ..callbacks import Callback
from ..tfutils.varmanip import SessionUpdate
from ..predict import OfflinePredictor
from ..utils import logger
from ..utils.timer import *
from ..utils.serialize import *
from ..utils.concurrency import *
__all__ = ['SimulatorProcess', 'SimulatorMaster',
'SimulatorProcessStateExchange', 'SimulatorProcessSharedWeight',
'TransitionExperience', 'WeightSync']
try:
import zmq
except ImportError:
logger.warn("Error in 'import zmq'. RL simulator won't be available.")
__all__ = []
class TransitionExperience(object):
    """One transition of the environment: (state, action, reward).

    Any extra keyword arguments are attached verbatim as attributes so
    callers can stash auxiliary data (e.g. value estimates) on the record.
    """
    def __init__(self, state, action, reward, **kwargs):
        """ kwargs: whatever other attribute you want to save"""
        self.state = state
        self.action = action
        self.reward = reward
        # Plain dict iteration works on both Python 2 and 3; this removes the
        # previous dependency on six.iteritems for this class.
        for k, v in kwargs.items():
            setattr(self, k, v)
class SimulatorProcessBase(mp.Process):
    """Base class for simulator worker processes; subclasses supply the player."""
    __metaclass__ = ABCMeta
    def __init__(self, idx):
        super(SimulatorProcessBase, self).__init__()
        # Numeric worker index plus a stable ZMQ identity derived from it.
        self.idx = int(idx)
        self.identity = u'simulator-{}'.format(self.idx).encode('utf-8')
    @abstractmethod
    def _build_player(self):
        pass
class SimulatorProcessStateExchange(SimulatorProcessBase):
    """
    A process that simulates a player and communicates to master to
    send states and receive the next action
    """
    __metaclass__ = ABCMeta
    def __init__(self, idx, pipe_c2s, pipe_s2c):
        """
        :param idx: idx of this process
        """
        super(SimulatorProcessStateExchange, self).__init__(idx)
        # ZMQ endpoint addresses: client->server (states) and
        # server->client (actions).
        self.c2s = pipe_c2s
        self.s2c = pipe_s2c
    def run(self):
        player = self._build_player()
        context = zmq.Context()
        # PUSH socket: ship (state, reward, isOver, ts) tuples to the master.
        c2s_socket = context.socket(zmq.PUSH)
        c2s_socket.setsockopt(zmq.IDENTITY, self.identity)
        c2s_socket.set_hwm(2)
        c2s_socket.connect(self.c2s)
        # DEALER socket: receive the chosen action back from the master.
        s2c_socket = context.socket(zmq.DEALER)
        s2c_socket.setsockopt(zmq.IDENTITY, self.identity)
        #s2c_socket.set_hwm(5)
        s2c_socket.connect(self.s2c)
        state = player.current_state()
        reward, isOver = 0, False
        ts = 0
        while True:
            c2s_socket.send(dumps(
                (self.identity, state, reward, isOver, ts, True)),
                copy=False)
            # The master replies with (action, timestep, isAlive).
            (action, ts, isAlive) = loads(s2c_socket.recv(copy=False).bytes)
            if not isAlive:
                # Master asked us to stop: acknowledge with a final
                # "not alive" message and leave the loop.
                c2s_socket.send(dumps(
                    (self.identity, 0, 0, 0, 0, False)),
                    copy=False)
                print("closing thread : {}".format(self.identity))
                break
            reward, isOver = player.action(action)
            state = player.current_state()
# Backwards compatibility: keep the old public name as an alias of the
# state-exchange implementation.
SimulatorProcess = SimulatorProcessStateExchange
class SimulatorMaster(threading.Thread):
    """ A base thread to communicate with all StateExchangeSimulatorProcess.
    It should produce action for each simulator, as well as
    defining callbacks when a transition or an episode is finished.
    """
    __metaclass__ = ABCMeta
    class ClientState(object):
        def __init__(self):
            self.memory = []    # list of Experience
    def __init__(self, pipe_c2s, pipe_s2c, simulator_procs, pid):
        super(SimulatorMaster, self).__init__()
        self.daemon = True
        self.context = zmq.Context()
        # PULL socket: states arriving from all simulator processes.
        self.c2s_socket = self.context.socket(zmq.PULL)
        self.c2s_socket.bind(pipe_c2s)
        self.c2s_socket.set_hwm(10)
        # ROUTER socket: actions routed back to individual simulators.
        self.s2c_socket = self.context.socket(zmq.ROUTER)
        self.s2c_socket.bind(pipe_s2c)
        self.s2c_socket.set_hwm(10)
        # queueing messages to client
        self.send_queue = queue.Queue(maxsize=1)
        self.simulator_procs = simulator_procs
        self.killed_threads = 0
        self.pid = pid
        def f():
            msg = self.send_queue.get()
            self.s2c_socket.send_multipart(msg, copy=False)
        self.send_thread = LoopThread(f)
        self.send_thread.daemon = True
        self.send_thread.start()
        # make sure socket get closed at the end
        def clean_context(soks, context):
            for s in soks:
                s.close()
            context.term()
        import atexit
        atexit.register(clean_context, [self.c2s_socket, self.s2c_socket], self.context)
    def run(self):
        self.clients = defaultdict(self.ClientState)
        while True:
            # NOTE(review): `bytes` shadows the builtin inside this loop.
            bytes = self.c2s_socket.recv(copy=False).bytes
            msg = loads(bytes)
            ident, state, reward, isOver, ts, isAlive = msg
            client = self.clients[ident]
            if not isAlive:
                # A worker announced shutdown; exit once all have done so.
                self.killed_threads += 1
                print("killed : {}, waiting for {}".format(self.killed_threads, self.simulator_procs))
                if self.killed_threads == self.simulator_procs:
                    # NOTE(review): `self.M` is never set in this class -- it
                    # appears to be assigned externally by the owner of this
                    # thread; confirm before relying on it.
                    self.M.isDone = True
                    break
                continue
            # check if reward&isOver is valid
            # in the first message, only state is valid
            if len(client.memory) > 0:
                client.memory[-1].reward = reward
                if isOver:
                    self._on_episode_over((ident, ts))
                else:
                    self._on_datapoint((ident, ts))
            # feed state and return action
            self._on_state(state, (ident, ts))
        print("MasterSimulator is out, peace")
        time.sleep(10)
        os.kill(self.pid, signal.SIGKILL)
    @abstractmethod
    def _on_state(self, state, ident):
        """response to state sent by ident. Preferrably an async call"""
    @abstractmethod
    def _on_episode_over(self, client):
        """ callback when the client just finished an episode.
        You may want to clear the client's memory in this callback.
        """
    def _on_datapoint(self, client):
        """ callback when the client just finished a transition
        """
    def __del__(self):
        self.context.destroy(linger=0)
class SimulatorProcessDF(SimulatorProcessBase):
    """ A simulator which contains a forward model itself, allowing
    it to produce data points directly """
    def __init__(self, idx, pipe_c2s):
        super(SimulatorProcessDF, self).__init__(idx)
        self.pipe_c2s = pipe_c2s
    def run(self):
        self.player = self._build_player()
        # PUSH socket: stream finished data points straight to the master.
        self.ctx = zmq.Context()
        self.c2s_socket = self.ctx.socket(zmq.PUSH)
        self.c2s_socket.setsockopt(zmq.IDENTITY, self.identity)
        self.c2s_socket.set_hwm(5)
        self.c2s_socket.connect(self.pipe_c2s)
        self._prepare()
        for dp in self.get_data():
            self.c2s_socket.send(dumps(dp), copy=False)
    @abstractmethod
    def _prepare(self):
        # Hook: one-time setup before data generation starts.
        pass
    @abstractmethod
    def get_data(self):
        # Hook: generator yielding data points to send to the master.
        pass
class SimulatorProcessSharedWeight(SimulatorProcessDF):
    """ A simulator process with an extra thread waiting for event,
    and take shared weight from shm.
    Start me under some CUDA_VISIBLE_DEVICES set!
    """
    def __init__(self, idx, pipe_c2s, condvar, shared_dic, pred_config):
        super(SimulatorProcessSharedWeight, self).__init__(idx, pipe_c2s)
        # condvar: notified by WeightSync when fresh weights are published.
        # shared_dic: shared mapping carrying the latest weights ('params').
        self.condvar = condvar
        self.shared_dic = shared_dic
        self.pred_config = pred_config
    def _prepare(self):
        disable_layer_logging()
        self.predictor = OfflinePredictor(self.pred_config)
        with self.predictor.graph.as_default():
            vars_to_update = self._params_to_update()
            self.sess_updater = SessionUpdate(
                self.predictor.session, vars_to_update)
        # TODO setup callback for explore?
        self.predictor.graph.finalize()
        self.weight_lock = threading.Lock()
        # start a thread to wait for notification
        def func():
            self.condvar.acquire()
            while True:
                self.condvar.wait()
                self._trigger_evt()
        self.evt_th = threading.Thread(target=func)
        self.evt_th.daemon = True
        self.evt_th.start()
    def _trigger_evt(self):
        # Serialize weight updates against concurrent use of the session.
        with self.weight_lock:
            self.sess_updater.update(self.shared_dic['params'])
            logger.info("Updated.")
    def _params_to_update(self):
        # can be overwritten to update more params
        return tf.trainable_variables()
class WeightSync(Callback):
    """ Sync weight from main process to shared_dic and notify"""
    def __init__(self, condvar, shared_dic):
        self.condvar = condvar
        self.shared_dic = shared_dic
    def _setup_graph(self):
        self.vars = self._params_to_update()
    def _params_to_update(self):
        # can be overwritten to update more params
        return tf.trainable_variables()
    def _before_train(self):
        self._sync()
    def _trigger_epoch(self):
        self._sync()
    def _sync(self):
        logger.info("Updating weights ...")
        # Evaluate all tracked variables, publish them, then wake every
        # waiting SimulatorProcessSharedWeight.
        dic = {v.name: v.eval() for v in self.vars}
        self.shared_dic['params'] = dic
        self.condvar.acquire()
        self.condvar.notify_all()
        self.condvar.release()
if __name__ == '__main__':
    # Minimal manual smoke-test of the simulator plumbing.
    import random
    from tensorpack.RL import NaiveRLEnvironment
    class NaiveSimulator(SimulatorProcess):
        def _build_player(self):
            return NaiveRLEnvironment()
    # NOTE(review): `SimulatorActioner` is not defined anywhere in this
    # module; this demo looks stale and would raise NameError if executed.
    class NaiveActioner(SimulatorActioner):
        def _get_action(self, state):
            time.sleep(1)
            return random.randint(1, 12)
        def _on_episode_over(self, client):
            #print("Over: ", client.memory)
            client.memory = []
            client.state = 0
    name = 'ipc://whatever'
    procs = [NaiveSimulator(k, name) for k in range(10)]
    [k.start() for k in procs]
    th = NaiveActioner(name)
    ensure_proc_terminate(procs)
    th.start()
    # NOTE(review): `time` is already imported at module level; this late
    # import is redundant.
    import time
    time.sleep(100)
| 31.468278 | 102 | 0.627592 |
79578d7e079216ce6b788118f92dccdc9c6e380a | 1,795 | py | Python | conans/test/functional/toolchains/test_msbuild_toolchain.py | fanStefan/conan | 7e9baff749ded0656e19d343a6483bf33cf32086 | [
"MIT"
] | 6,205 | 2015-12-01T13:40:05.000Z | 2022-03-31T07:30:25.000Z | conans/test/functional/toolchains/test_msbuild_toolchain.py | fanStefan/conan | 7e9baff749ded0656e19d343a6483bf33cf32086 | [
"MIT"
] | 8,747 | 2015-12-01T16:28:48.000Z | 2022-03-31T23:34:53.000Z | conans/test/functional/toolchains/test_msbuild_toolchain.py | fanStefan/conan | 7e9baff749ded0656e19d343a6483bf33cf32086 | [
"MIT"
] | 961 | 2015-12-01T16:56:43.000Z | 2022-03-31T13:50:52.000Z | import platform
import textwrap
import pytest
from parameterized import parameterized
from conans.test.utils.tools import TestClient
@parameterized.expand([("msvc", "19.0", "dynamic"),
                       ("msvc", "19.1", "static")]
                      )
@pytest.mark.tool_visual_studio
@pytest.mark.skipif(platform.system() != "Windows", reason="Only for windows")
def test_toolchain_win(compiler, version, runtime):
    """MSBuildToolchain must emit a .props file matching the profile settings."""
    client = TestClient(path_with_spaces=False)
    profile = {
        "compiler": compiler,
        "compiler.version": version,
        "compiler.cppstd": "17",
        "compiler.runtime": runtime,
        "build_type": "Release",
        "arch": "x86_64",
    }
    # Turn the settings mapping into `-s key="value"` CLI flags.
    settings_flags = " ".join(
        '-s {}="{}"'.format(key, value) for key, value in profile.items() if value
    )
    conanfile = textwrap.dedent("""
        from conans import ConanFile
        from conan.tools.microsoft import MSBuildToolchain
        class Pkg(ConanFile):
            settings = "os", "compiler", "build_type", "arch"
            def generate(self):
                msbuild = MSBuildToolchain(self)
                msbuild.generate()
        """)
    client.save({"conanfile.py": conanfile})
    client.run("install . {}".format(settings_flags))
    props = client.load("conantoolchain_release_x64.props")
    assert "<LanguageStandard>stdcpp17</LanguageStandard>" in props
    expected_toolset = "v140" if version == "19.0" else "v141"
    assert "<PlatformToolset>{}</PlatformToolset>".format(expected_toolset) in props
    expected_runtime = "MultiThreadedDLL" if runtime == "dynamic" else "MultiThreaded"
    assert "<RuntimeLibrary>{}</RuntimeLibrary>".format(expected_runtime) in props
| 37.395833 | 80 | 0.62507 |
79578d7eee013a56f9a0f839c364721bf229f1b7 | 728 | py | Python | src/dials/algorithms/image/filter/__init__.py | dials-src/dials | 25055c1f6164dc33e672e7c5c6a9c5a35e870660 | [
"BSD-3-Clause"
] | 1 | 2021-12-10T17:28:16.000Z | 2021-12-10T17:28:16.000Z | src/dials/algorithms/image/filter/__init__.py | dials-src/dials | 25055c1f6164dc33e672e7c5c6a9c5a35e870660 | [
"BSD-3-Clause"
] | null | null | null | src/dials/algorithms/image/filter/__init__.py | dials-src/dials | 25055c1f6164dc33e672e7c5c6a9c5a35e870660 | [
"BSD-3-Clause"
] | 1 | 2021-12-07T12:39:04.000Z | 2021-12-07T12:39:04.000Z | from __future__ import annotations
from dials_algorithms_image_filter_ext import * # noqa: F403; lgtm
# Public API of this package; all names are re-exported from the compiled
# extension imported above (hence the F403/F405 suppressions).
__all__ = (  # noqa: F405
    "IndexOfDispersionFilterDouble",
    "IndexOfDispersionFilterFloat",
    "IndexOfDispersionFilterMaskedDouble",
    "IndexOfDispersionFilterMaskedFloat",
    "MeanAndVarianceFilterDouble",
    "MeanAndVarianceFilterFloat",
    "MeanAndVarianceFilterMaskedDouble",
    "MeanAndVarianceFilterMaskedFloat",
    "anisotropic_diffusion",
    "chebyshev_distance",
    "convolve",
    "convolve_col",
    "convolve_row",
    "index_of_dispersion_filter",
    "manhattan_distance",
    "mean_and_variance_filter",
    "mean_filter",
    "median_filter",
    "summed_area",
    "summed_area_table",
)
| 26.962963 | 67 | 0.736264 |
79578df083e4af3fb4ac2410c6c7c48912460aa7 | 1,160 | py | Python | Relue/Eu76.py | jialing3/corner_cases | 54a316518fcf4b43ae96ed9935b4cf91ade1eed9 | [
"Apache-2.0"
] | 1 | 2015-05-29T08:40:48.000Z | 2015-05-29T08:40:48.000Z | Relue/Eu76.py | jialing3/corner_cases | 54a316518fcf4b43ae96ed9935b4cf91ade1eed9 | [
"Apache-2.0"
] | null | null | null | Relue/Eu76.py | jialing3/corner_cases | 54a316518fcf4b43ae96ed9935b4cf91ade1eed9 | [
"Apache-2.0"
] | null | null | null | class Solution:
    def __init__(self):
        # Memoization cache: (n, tuple(candidates)) -> decomposition count.
        self.memo = {}
def break_down(self, n, to_use):
if type(n) != int or type(to_use) != list:
return 0
else:
if len(to_use) == 0:
return 1 if n == 0 else 0
elif len(to_use) == 1 and to_use[0] == n:
return 1
elif (n, tuple(to_use)) in self.memo:
return self.memo[n, tuple(to_use)]
else:
not_used = self.break_down(n, to_use[:-1])
used = self.break_down(n - to_use[-1], list(filter(lambda x: x <= n - to_use[-1], to_use)))
self.memo[n, tuple(to_use)] = not_used + used
#print(n, to_use[-1], not_used, used)
return not_used + used
def break_down_wrapper(self, n):
return self.break_down(n, list(range(1, n)))
# Smoke tests: for n >= 1, break_down_wrapper(n) equals p(n) - 1 where p is
# the integer partition function (the single-part decomposition [n] is
# excluded since candidates only run up to n - 1); n == 0 yields 1 by the
# empty-sum convention.
sol = Solution()
assert sol.break_down_wrapper(0) == 1
assert sol.break_down_wrapper(1) == 0
assert sol.break_down_wrapper(2) == 1
assert sol.break_down_wrapper(3) == 2
assert sol.break_down_wrapper(4) == 4
assert sol.break_down_wrapper(5) == 6
sol.break_down_wrapper(100) # 190569291
| 34.117647 | 107 | 0.562069 |
79578e2775f3dd600b92fb3dbe033e834dd9a0f9 | 1,045 | py | Python | models/model_factory.py | yangzonglin1994/yangzl-deep-text-matching | 2beadd1c2ebf2b169558b9978e0cbc66d1d25fc6 | [
"MIT"
] | 2 | 2018-08-10T20:02:44.000Z | 2018-08-10T20:02:50.000Z | models/model_factory.py | yangzonglin1994/yangzl-text-matching | 2beadd1c2ebf2b169558b9978e0cbc66d1d25fc6 | [
"MIT"
] | 1 | 2018-07-30T08:54:35.000Z | 2018-07-30T08:54:35.000Z | models/model_factory.py | yangzonglin1994/yangzl-text-matching | 2beadd1c2ebf2b169558b9978e0cbc66d1d25fc6 | [
"MIT"
] | null | null | null | from configs.net_conf import available_models
from models.avg_seq_dense import AvgSeqDenseModel
from models.basic_model import BasicModel
from models.rnmt_encoder_bilstm_dense import RNMTPlusEncoderBiLSTMDenseModel
from models.stacked_bilstm_dense import StackedBiLSTMDenseModel
from models.transformer_encoder_bilstm_dense import TransformerEncoderBiLSTMDenseModel
from models.multiheadattn_avg_dense import MultiHeadAttnAvgDenseModel
class ModelFactory:
    """Static factory that maps a model name to its network class."""

    @staticmethod
    def make_model(model_name):
        """Instantiate the model registered under ``model_name``.

        Names are matched against ``available_models`` in declaration order;
        unknown names fall back to :class:`BasicModel`.
        """
        registry = (
            AvgSeqDenseModel,
            StackedBiLSTMDenseModel,
            TransformerEncoderBiLSTMDenseModel,
            RNMTPlusEncoderBiLSTMDenseModel,
            MultiHeadAttnAvgDenseModel,
        )
        for idx, model_cls in enumerate(registry):
            if model_name == available_models[idx]:
                return model_cls()
        return BasicModel()
| 40.192308 | 86 | 0.757895 |
79578e2af56fa0ba23da75a791ad8f847acd5b4d | 327 | py | Python | PBO_18099/Latihan_7.1.Class1.py | viviyanti/PBO | 2f9ab22a10f94d007bbe5407c91287c53f10a435 | [
"MIT"
] | null | null | null | PBO_18099/Latihan_7.1.Class1.py | viviyanti/PBO | 2f9ab22a10f94d007bbe5407c91287c53f10a435 | [
"MIT"
] | null | null | null | PBO_18099/Latihan_7.1.Class1.py | viviyanti/PBO | 2f9ab22a10f94d007bbe5407c91287c53f10a435 | [
"MIT"
] | 1 | 2020-03-25T03:48:40.000Z | 2020-03-25T03:48:40.000Z | class Mahasiswa :
def __init__(self):
self.nama = "vivi"
self.nilai = (90, 70, 90, 80)
def hitung_nilai(self):
return sum(self.nilai)/len(self.nilai)
# Example usage: instantiate the class and print its attributes.
mahasiswa = Mahasiswa()
print("Nama : ", mahasiswa.nama)
print("Total Nilai : ", mahasiswa.hitung_nilai()) | 25.153846 | 49 | 0.620795 |
79578ea873d9537abe931006f970d96dddb98194 | 3,481 | py | Python | Porosity in selective laser sintering/powderGeneration.py | zwang586/MICNN | 3d27a7f624ed03502fd500628b8e5136cb3f0730 | [
"MIT"
] | null | null | null | Porosity in selective laser sintering/powderGeneration.py | zwang586/MICNN | 3d27a7f624ed03502fd500628b8e5136cb3f0730 | [
"MIT"
] | null | null | null | Porosity in selective laser sintering/powderGeneration.py | zwang586/MICNN | 3d27a7f624ed03502fd500628b8e5136cb3f0730 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
##Add one powder to the lowest point of the input structure - s_eta
##Return the new structure with the added powder and other updated information.
import numpy as np
import matplotlib.pyplot as plt
def powder(s_eta,nx,ny,layer,t,ymax):
    """Add one powder particle at the lowest point of the current surface.

    Parameters
    ----------
    s_eta : 2-D numpy array of shape (nx, ny); the structure field, filled
        with 1.0 inside particles. Mutated in place.
    nx, ny : grid dimensions.
    layer : 1-based number of the layer being deposited.
    t : layer thickness in grid cells.
    ymax : per-column surface height of ``s_eta``; mutated in place when the
        particle is placed (the returned array aliases this argument).

    Returns
    -------
    tuple ``(s_eta, top, ymax1)`` where ``top = yc00 + radius00`` is the top
    height of the candidate particle. When the particle would not fit below
    the layer ceiling, ``s_eta`` and ``ymax1`` are returned unchanged.
    """
    # Vertical band in which this layer's powder may sit.
    nyMin = int(t)*(layer-1)-15
    if nyMin < 0:
        nyMin = 0
    nyMax = int(t)*layer
    # Particle radius ~ N(12.5, 1.25); re-drawn once if degenerate.
    # NOTE(review): the second draw can still come out < 2; powder2 clamps
    # to 2 instead -- confirm which behaviour is intended.
    rad_mean = 12.5
    rad_std = 1.25
    radius00 = np.random.normal(rad_mean,rad_std)
    if radius00 < 2:
        radius00 = np.random.normal(rad_mean,rad_std)
    # phi_top marks, for every column i, the cells a particle resting on
    # ymax[i] would occupy (restricted to this layer's band).
    phi_top = np.zeros((nx,ny))
    for i in range(0,nx): #loop over all ymax[i]
        for j in range( max( ([0,i-int(radius00)-2]) ) ,min( ([nx,i+int(radius00)+2]) ) ):
            for k in range( max( ([nyMin,int(ymax[i]-radius00)-2]) ) ,min( ([nyMax,int(ymax[i]+radius00)+2]) ) ):
                if ( (j-i)**2+(k-ymax[i])**2 <= radius00**2):
                    phi_top[j,k] = 1.0
    # yc[i]: highest marked cell in column i; the drop column is the one
    # whose top is lowest (a radius-wide margin at each edge is excluded).
    yc = np.zeros(nx)
    for i in range(int(radius00)-1,nx-int(radius00)-1):
        for j in range(nyMin,nyMax):
            if(phi_top[i,j] == 1.0):
                yc[i] = j
    yc00 = min(yc[int(radius00)-1:nx-int(radius00)-1])
    # BUGFIX: give xc00 a defined value in case no index in range(1, nx)
    # matches (possible when radius00 < 2 makes the scanned slice start at 0).
    xc00 = 0
    for i in range(1,nx):
        if (yc[i] == yc00):
            xc00 = i
    # BUGFIX: bind ymax1 before the fit test; previously it was assigned only
    # inside the `if`, so the return raised UnboundLocalError whenever the
    # particle did not fit below the layer ceiling.
    ymax1 = ymax
    if (yc00+radius00 < t*layer):
        # Rasterize the particle into the structure field.
        for i in range(0,nx):
            for j in range(0,ny):
                if( (i-xc00)**2 + (j-yc00)**2 <= radius00**2):
                    s_eta[i,j] = 1.0
        # Refresh the surface height around the new particle.
        for i in range(max(0,int(xc00-radius00-2)),min(int(xc00+radius00+2),nx)):
            for j in range(nyMin,nyMax):
                if (s_eta[i,j] > 0.9):
                    ymax1[i] = j
    return(s_eta,yc00+radius00,ymax1)
def powder2(s_eta,nx,ny,layer,t,ymax,rad_std): ##For use in full-component sintering simulation. Include powder radius_std as input.
    """Variant of :func:`powder` with a caller-supplied radius spread.

    Differences from ``powder``: the search band reaches 40 cells below the
    layer base (instead of 15), the mean radius is 6.5 (instead of 12.5) and
    degenerate radii are clamped to 2 rather than re-drawn.
    Returns ``(s_eta, yc00 + radius00, ymax1)`` exactly as :func:`powder`.
    """
    nyMin = int(t)*(layer-1)-40
    if nyMin < 0:
        nyMin = 0
    nyMax = int(t)*layer
    rad_mean = 6.5
    radius00 = np.random.normal(rad_mean,rad_std)
    if radius00 < 2:
        radius00 = 2
    # Mark the cells a particle resting on ymax[i] would occupy.
    phi_top = np.zeros((nx,ny))
    for i in range(0,nx): #loop over all ymax[i]
        for j in range( max( ([0,i-int(radius00)-2]) ) ,min( ([nx,i+int(radius00)+2]) ) ):
            for k in range( max( ([nyMin,int(ymax[i]-radius00)-2]) ) ,min( ([nyMax,int(ymax[i]+radius00)+2]) ) ):
                if ( (j-i)**2+(k-ymax[i])**2 <= radius00**2):
                    phi_top[j,k] = 1.0
    # Locate the lowest landing column (edges excluded by a radius margin).
    yc = np.zeros(nx)
    for i in range(int(radius00)-1,nx-int(radius00)-1):
        for j in range(nyMin,nyMax):
            if(phi_top[i,j] == 1.0):
                yc[i] = j
    yc00 = min(yc[int(radius00)-1:nx-int(radius00)-1])
    xc00 = 0
    for i in range(1,nx):
        if (yc[i] == yc00):
            xc00 = i
    # BUGFIX: bind ymax1 before the fit test so the return below cannot raise
    # UnboundLocalError when the particle does not fit below the ceiling.
    ymax1 = ymax
    if (yc00+radius00 < t*layer):
        for i in range(0,nx):
            for j in range(0,ny):
                if( (i-xc00)**2 + (j-yc00)**2 <= radius00**2):
                    s_eta[i,j] = 1.0
        for i in range(max(0,int(xc00-radius00-2)),min(int(xc00+radius00+2),nx)):
            for j in range(nyMin,nyMax):
                if (s_eta[i,j] > 0.9):
                    ymax1[i] = j
    return(s_eta,yc00+radius00,ymax1)
| 34.127451 | 134 | 0.494398 |
79578f4b889bf7bbc2e60e212272b9dd9fc9aa25 | 550 | py | Python | test/test_issues/test_issue223.py | jjon/rdflib | 4c2ab7b392b353bf3c6088017ec9351ce8ac3db6 | [
"BSD-3-Clause"
] | null | null | null | test/test_issues/test_issue223.py | jjon/rdflib | 4c2ab7b392b353bf3c6088017ec9351ce8ac3db6 | [
"BSD-3-Clause"
] | 6 | 2021-11-22T19:10:32.000Z | 2022-01-31T19:16:37.000Z | test/test_issues/test_issue223.py | jjon/rdflib | 4c2ab7b392b353bf3c6088017ec9351ce8ac3db6 | [
"BSD-3-Clause"
] | null | null | null | from rdflib.graph import Graph
from rdflib.term import URIRef
# Turtle fixture: the list (:a :b :a) deliberately contains a duplicate
# member to exercise issue #223.
ttl = """
@prefix : <http://example.org/>.
:s :p (:a :b :a).
"""
def test_collection_with_duplicates():
    """An RDF collection must preserve duplicate members (issue #223)."""
    graph = Graph().parse(data=ttl, format="turtle")
    subject = URIRef("http://example.org/s")
    predicate = URIRef("http://example.org/p")
    # Grab the object of the first matching triple (the list head node).
    for _, _, node in graph.triples((subject, predicate, None)):
        break
    members = graph.collection(node)
    expected = [URIRef("http://example.org/" + name) for name in ["a", "b", "a"]]
    assert list(members) == expected
    assert len(members) == 3
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_collection_with_duplicates()
| 22.916667 | 86 | 0.598182 |
79578f4d7a3021993ef096aa79e0cb402461f587 | 272 | py | Python | 2019/day-1/part-2.py | joaopalmeiro/advent-of-code-2019 | c42f907c7338968eaa41afd3daccca3ec0e1284b | [
"MIT"
] | null | null | null | 2019/day-1/part-2.py | joaopalmeiro/advent-of-code-2019 | c42f907c7338968eaa41afd3daccca3ec0e1284b | [
"MIT"
] | null | null | null | 2019/day-1/part-2.py | joaopalmeiro/advent-of-code-2019 | c42f907c7338968eaa41afd3daccca3ec0e1284b | [
"MIT"
] | null | null | null | from math import floor
# Advent of Code 2019, day 1, part 2: fuel for each module mass, plus the
# fuel required by that fuel, iterated until no more is needed.
with open('input.txt', 'r') as reader:
    masses = [int(line) for line in reader]
total_fuel = 0
remaining = masses
while sum(remaining) > 0:
    remaining = [max(floor(mass / 3) - 2, 0) for mass in remaining]
    total_fuel += sum(remaining)
print(total_fuel)
| 19.428571 | 51 | 0.595588 |
79579054f0bccfe8bb10d3c18aae6bcb6dc2b47e | 7,548 | py | Python | tests/python/pants_test/backend/python/tasks/test_select_interpreter.py | ghthor/pants | 450de702414f87f563081ddefaefd8a554de07a3 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/backend/python/tasks/test_select_interpreter.py | ghthor/pants | 450de702414f87f563081ddefaefd8a554de07a3 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/backend/python/tasks/test_select_interpreter.py | ghthor/pants | 450de702414f87f563081ddefaefd8a554de07a3 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from builtins import str
from textwrap import dedent
import mock
from pex.interpreter import PythonInterpreter
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.backend.python.subsystems.python_setup import PythonSetup
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.tasks.select_interpreter import SelectInterpreter
from pants.base.exceptions import TaskError
from pants.util.dirutil import chmod_plus_x, safe_mkdtemp
from pants_test.task_test_base import TaskTestBase
class SelectInterpreterTest(TaskTestBase):
@classmethod
def task_type(cls):
return SelectInterpreter
def setUp(self):
super(SelectInterpreterTest, self).setUp()
self.set_options(interpreter=['IronPython>=2.55'])
self.set_options_for_scope(PythonSetup.options_scope)
# We're tied tightly to pex implementation details here faking out a python binary that outputs
# only one value no matter what arguments, environment or input stream it has attached. That
# value is the interpreter identity which is - minimally, one line containing:
# <impl> <abi> <impl_version> <major> <minor> <patch>
def fake_interpreter(id_str):
interpreter_dir = safe_mkdtemp()
binary = os.path.join(interpreter_dir, 'binary')
with open(binary, 'w') as fp:
fp.write(dedent("""
#!{}
from __future__ import print_function
print({!r})
""".format(PythonInterpreter.get().binary, id_str)).strip())
chmod_plus_x(binary)
return PythonInterpreter.from_binary(binary)
# impl, abi, impl_version, major, minor, patch
self.fake_interpreters = [
fake_interpreter('ip ip2 2 2 77 777'),
fake_interpreter('ip ip2 2 2 88 888'),
fake_interpreter('ip ip2 2 2 99 999')
]
self.reqtgt = self.make_target(
spec='req',
target_type=PythonRequirementLibrary,
requirements=[],
)
self.tgt1 = self._fake_target('tgt1')
self.tgt2 = self._fake_target('tgt2', compatibility=['IronPython>2.77.777'])
self.tgt3 = self._fake_target('tgt3', compatibility=['IronPython>2.88.888'])
self.tgt4 = self._fake_target('tgt4', compatibility=['IronPython<2.99.999'])
self.tgt20 = self._fake_target('tgt20', dependencies=[self.tgt2])
self.tgt30 = self._fake_target('tgt30', dependencies=[self.tgt3])
self.tgt40 = self._fake_target('tgt40', dependencies=[self.tgt4])
def _fake_target(self, spec, compatibility=None, sources=None, dependencies=None):
return self.make_target(spec=spec, target_type=PythonLibrary, sources=sources or [],
dependencies=dependencies, compatibility=compatibility)
def _select_interpreter(self, target_roots, should_invalidate=None):
context = self.context(target_roots=target_roots)
task = self.create_task(context)
if should_invalidate is not None:
task._create_interpreter_path_file = mock.MagicMock(wraps=task._create_interpreter_path_file)
# Mock out the interpreter cache setup, so we don't actually look for real interpreters
# on the filesystem.
with mock.patch.object(PythonInterpreterCache, 'setup', autospec=True) as mock_resolve:
def se(me, *args, **kwargs):
me._interpreters = self.fake_interpreters
return self.fake_interpreters
mock_resolve.side_effect = se
task.execute()
if should_invalidate is not None:
if should_invalidate:
task._create_interpreter_path_file.assert_called_once()
else:
task._create_interpreter_path_file.assert_not_called()
return context.products.get_data(PythonInterpreter)
def _select_interpreter_and_get_version(self, target_roots, should_invalidate=None):
"""Return the version string of the interpreter selected for the target roots."""
interpreter = self._select_interpreter(target_roots, should_invalidate)
self.assertTrue(isinstance(interpreter, PythonInterpreter))
return interpreter.version_string
def test_interpreter_selection(self):
self.assertIsNone(self._select_interpreter([]))
self.assertEquals('IronPython-2.77.777', self._select_interpreter_and_get_version([self.reqtgt]))
self.assertEquals('IronPython-2.77.777', self._select_interpreter_and_get_version([self.tgt1]))
self.assertEquals('IronPython-2.88.888', self._select_interpreter_and_get_version([self.tgt2]))
self.assertEquals('IronPython-2.99.999', self._select_interpreter_and_get_version([self.tgt3]))
self.assertEquals('IronPython-2.77.777', self._select_interpreter_and_get_version([self.tgt4]))
self.assertEquals('IronPython-2.88.888', self._select_interpreter_and_get_version([self.tgt20]))
self.assertEquals('IronPython-2.99.999', self._select_interpreter_and_get_version([self.tgt30]))
self.assertEquals('IronPython-2.77.777', self._select_interpreter_and_get_version([self.tgt40]))
self.assertEquals('IronPython-2.99.999', self._select_interpreter_and_get_version([self.tgt2, self.tgt3]))
self.assertEquals('IronPython-2.88.888', self._select_interpreter_and_get_version([self.tgt2, self.tgt4]))
with self.assertRaises(TaskError) as cm:
self._select_interpreter_and_get_version([self.tgt3, self.tgt4])
self.assertIn('Unable to detect a suitable interpreter for compatibilities: '
'IronPython<2.99.999 && IronPython>2.88.888', str(cm.exception))
def test_interpreter_selection_invalidation(self):
  """Selection invalidates on new compatibility constraints but not on new sources."""
  tgta = self._fake_target('tgta', compatibility=['IronPython>2.77.777'],
                           dependencies=[self.tgt3])
  self.assertEquals('IronPython-2.99.999',
                    self._select_interpreter_and_get_version([tgta], should_invalidate=True))
  # A new target with different sources, but identical compatibility, shouldn't invalidate.
  self.create_file('tgtb/foo/bar/baz.py', 'fake content')
  tgtb = self._fake_target('tgtb', compatibility=['IronPython>2.77.777'],
                           dependencies=[self.tgt3], sources=['foo/bar/baz.py'])
  self.assertEquals('IronPython-2.99.999',
                    self._select_interpreter_and_get_version([tgtb], should_invalidate=False))

def test_compatibility_AND(self):
  """A comma inside one constraint string means AND of the two version bounds."""
  tgt = self._fake_target('tgt5', compatibility=['IronPython>2.77.777,<2.99.999'])
  self.assertEquals('IronPython-2.88.888', self._select_interpreter_and_get_version([tgt]))

def test_compatibility_AND_impossible(self):
  """An AND constraint excluding every known interpreter raises."""
  tgt = self._fake_target('tgt5', compatibility=['IronPython>2.77.777,<2.88.888'])
  with self.assertRaises(PythonInterpreterCache.UnsatisfiableInterpreterConstraintsError):
    self._select_interpreter_and_get_version([tgt])

def test_compatibility_OR(self):
  """Separate list entries mean OR: any satisfiable branch is enough."""
  tgt = self._fake_target('tgt6', compatibility=['IronPython>2.88.888', 'IronPython<2.7'])
  self.assertEquals('IronPython-2.99.999', self._select_interpreter_and_get_version([tgt]))

def test_compatibility_OR_impossible(self):
  """An OR constraint with no satisfiable branch raises."""
  tgt = self._fake_target('tgt6', compatibility=['IronPython>2.99.999', 'IronPython<2.77.777'])
  with self.assertRaises(PythonInterpreterCache.UnsatisfiableInterpreterConstraintsError):
    self._select_interpreter_and_get_version([tgt])
| 48.384615 | 110 | 0.744966 |
795791d22d5a690416135b0b1b48c4eb2da05e91 | 15,008 | py | Python | jina/peapods/pods/config/k8s.py | liusy182/jina | eb1b605f5c635f7db83e6adf9d77e52747ea7a8d | [
"Apache-2.0"
] | 1 | 2021-04-11T08:06:05.000Z | 2021-04-11T08:06:05.000Z | jina/peapods/pods/config/k8s.py | sthagen/jina | c272bbddef733167804c5a68d5f41ec789fa1732 | [
"Apache-2.0"
] | 1 | 2021-07-16T17:36:22.000Z | 2021-09-22T13:48:18.000Z | jina/peapods/pods/config/k8s.py | sthagen/jina | c272bbddef733167804c5a68d5f41ec789fa1732 | [
"Apache-2.0"
] | null | null | null | import copy
from argparse import Namespace
from typing import Dict, Union, List, Optional, Tuple
from jina import __default_executor__
from jina.enums import PeaRoleType
from jina.peapods.pods.config.k8slib import kubernetes_deployment
from jina.peapods.pods.config.helper import (
get_image_name,
to_compatible_name,
get_base_executor_version,
)
from jina.peapods.networking import K8sGrpcConnectionPool
from jina.peapods.pods import BasePod
class K8sPodConfig:
    """
    Class that implements the output of configuration files for Kubernetes for a given Pod.
    """

    class _K8sDeployment:
        """One Kubernetes Deployment belonging to a Jina Pod (head, gateway, or worker shard)."""

        def __init__(
            self,
            name: str,
            version: str,
            pea_type: PeaRoleType,
            jina_pod_name: str,
            shard_id: Optional[int],
            common_args: Union['Namespace', Dict],
            deployment_args: Union['Namespace', Dict],
            k8s_namespace: str,
            k8s_connection_pool: bool = True,
            k8s_pod_addresses: Optional[Dict[str, List[str]]] = None,
        ):
            self.name = name
            # Kubernetes object names must be DNS-compatible
            self.dns_name = to_compatible_name(name)
            self.version = version
            self.pea_type = pea_type
            self.jina_pod_name = jina_pod_name
            self.shard_id = shard_id
            self.common_args = common_args
            self.deployment_args = deployment_args
            # defaults to a single replica when 'replicas' is absent
            self.num_replicas = getattr(self.deployment_args, 'replicas', 1)
            self.k8s_namespace = k8s_namespace
            self.k8s_connection_pool = k8s_connection_pool
            self.k8s_pod_addresses = k8s_pod_addresses
def get_gateway_yamls(
    self,
) -> List[Dict]:
    """Generate the Kubernetes deployment configs (as dicts) for the gateway."""
    import os

    # JINA_K8S_USE_TEST_PIP switches to the pip-based test image
    test_pip = os.getenv('JINA_K8S_USE_TEST_PIP') is not None
    image_name = (
        'jinaai/jina:test-pip'
        if test_pip
        else f'jinaai/jina:{self.version}-py38-standard'
    )
    cargs = copy.copy(self.deployment_args)
    cargs.env = None
    cargs.pods_addresses = self.k8s_pod_addresses
    from jina.helper import ArgNamespace
    from jina.parsers import set_gateway_parser

    # pass only arguments that differ from the gateway parser defaults
    non_defaults = ArgNamespace.get_non_defaults_args(
        cargs,
        set_gateway_parser(),
    )
    _args = ArgNamespace.kwargs2list(non_defaults)
    container_args = ['gateway'] + _args
    if not cargs.k8s_connection_pool:
        container_args.append('--k8s-disable-connection-pool')
    return kubernetes_deployment.get_deployment_yamls(
        self.dns_name,
        namespace=self.k8s_namespace,
        image_name=image_name,
        container_cmd='["jina"]',
        container_args=f'{container_args}',
        replicas=1,
        pull_policy='IfNotPresent',
        jina_pod_name='gateway',
        pea_type=self.pea_type,
        port_expose=self.common_args.port_expose,
        env=cargs.env,  # always None here; cleared above
    )
@staticmethod
def _construct_runtime_container_args(cargs, uses_metas, uses_with, pea_type):
    """Build the CLI argument list for an executor container.

    :param cargs: argparse Namespace of this deployment's arguments.
    :param uses_metas: metas dict serialized into ``--uses-metas`` (skipped if None).
    :param uses_with: with-config dict serialized into ``--uses-with`` (skipped if None).
    :param pea_type: role of the Pea; only HEAD gets the connection-pool toggle.
    :return: list of CLI tokens, starting with ``executor``.
    """
    import json

    from jina.helper import ArgNamespace
    from jina.parsers import set_pea_parser

    # only forward non-default args; uses_* and volumes are handled separately
    non_defaults = ArgNamespace.get_non_defaults_args(
        cargs,
        set_pea_parser(),
        taboo={
            'uses_with',
            'uses_metas',
            'volumes',
            'uses_before',
            'uses_after',
        },
    )
    _args = ArgNamespace.kwargs2list(non_defaults)
    container_args = ['executor'] + _args
    # only the head runtime manages connections, so only it gets the flag
    if not cargs.k8s_connection_pool and pea_type == PeaRoleType.HEAD:
        container_args.append('--k8s-disable-connection-pool')
    if uses_metas is not None:
        container_args.extend(['--uses-metas', json.dumps(uses_metas)])
    if uses_with is not None:
        container_args.extend(['--uses-with', json.dumps(uses_with)])
    container_args.append('--native')
    return container_args
def _get_image_name(self, uses: Optional[str]):
    """Resolve the container image for *uses*; default executors use the Jina base image."""
    import os

    # a custom executor brings its own image
    if uses is not None and uses != __default_executor__:
        return get_image_name(uses)
    # default executor: Jina base image, honouring the test-pip override
    if os.getenv('JINA_K8S_USE_TEST_PIP') is not None:
        return 'jinaai/jina:test-pip'
    return f'jinaai/jina:{self.version}-py38-perf'
def _get_container_args(self, cargs, pea_type):
    """Assemble executor container args, injecting the shard id and uses_with/metas."""
    uses_metas = cargs.uses_metas or {}
    if self.shard_id is not None:
        uses_metas['pea_id'] = self.shard_id
    uses_with = self.deployment_args.uses_with
    if cargs.uses != __default_executor__:
        # custom executors are configured through the mounted config.yml
        cargs.uses = 'config.yml'
    return self._construct_runtime_container_args(
        cargs, uses_metas, uses_with, pea_type
    )
def get_runtime_yamls(
    self,
) -> List[Dict]:
    """Generate the Kubernetes deployment configs (as dicts) for an executor deployment,
    including optional uses_before/uses_after sidecar containers.
    """
    cargs = copy.copy(self.deployment_args)
    image_name = self._get_image_name(cargs.uses)
    # sidecar images only when uses_before/uses_after are configured
    image_name_uses_before = (
        self._get_image_name(cargs.uses_before)
        if hasattr(cargs, 'uses_before') and cargs.uses_before
        else None
    )
    image_name_uses_after = (
        self._get_image_name(cargs.uses_after)
        if hasattr(cargs, 'uses_after') and cargs.uses_after
        else None
    )
    container_args = self._get_container_args(cargs, pea_type=self.pea_type)
    container_args_uses_before = None
    if getattr(cargs, 'uses_before', False):
        # clone args and strip everything not relevant to the uses_before worker
        uses_before_cargs = copy.copy(cargs)
        uses_before_cargs.uses = cargs.uses_before
        uses_before_cargs.name = f'{self.common_args.name}/uses-before'
        uses_before_cargs.port_in = K8sGrpcConnectionPool.K8S_PORT_USES_BEFORE
        uses_before_cargs.uses_before_address = None
        uses_before_cargs.uses_after_address = None
        uses_before_cargs.uses_before = None
        uses_before_cargs.uses_after = None
        uses_before_cargs.uses_with = None
        uses_before_cargs.uses_metas = None
        uses_before_cargs.env = None
        uses_before_cargs.connection_list = None
        uses_before_cargs.runtime_cls = 'WorkerRuntime'
        uses_before_cargs.pea_role = PeaRoleType.WORKER
        uses_before_cargs.polling = None
        container_args_uses_before = self._get_container_args(
            uses_before_cargs, PeaRoleType.WORKER
        )
    container_args_uses_after = None
    if getattr(cargs, 'uses_after', False):
        # mirror of the uses_before handling above
        uses_after_cargs = copy.copy(cargs)
        uses_after_cargs.uses = cargs.uses_after
        uses_after_cargs.name = f'{self.common_args.name}/uses-after'
        uses_after_cargs.port_in = K8sGrpcConnectionPool.K8S_PORT_USES_AFTER
        uses_after_cargs.uses_before_address = None
        uses_after_cargs.uses_after_address = None
        uses_after_cargs.uses_before = None
        uses_after_cargs.uses_after = None
        uses_after_cargs.uses_with = None
        uses_after_cargs.uses_metas = None
        uses_after_cargs.env = None
        uses_after_cargs.connection_list = None
        uses_after_cargs.runtime_cls = 'WorkerRuntime'
        uses_after_cargs.pea_role = PeaRoleType.WORKER
        uses_after_cargs.polling = None
        container_args_uses_after = self._get_container_args(
            uses_after_cargs, PeaRoleType.WORKER
        )
    return kubernetes_deployment.get_deployment_yamls(
        self.dns_name,
        namespace=self.k8s_namespace,
        image_name=image_name,
        image_name_uses_after=image_name_uses_after,
        image_name_uses_before=image_name_uses_before,
        container_cmd='["jina"]',
        container_cmd_uses_before='["jina"]',
        container_cmd_uses_after='["jina"]',
        container_args=f'{container_args}',
        container_args_uses_before=container_args_uses_before,
        container_args_uses_after=container_args_uses_after,
        replicas=self.num_replicas,
        pull_policy='IfNotPresent',
        jina_pod_name=self.jina_pod_name,
        pea_type=self.pea_type,
        shard_id=self.shard_id,
        env=cargs.env,
        gpus=cargs.gpus if hasattr(cargs, 'gpus') else None,
    )
def __init__(
    self,
    args: Union['Namespace', Dict],
    k8s_namespace: Optional[str] = None,
    k8s_connection_pool: bool = True,
    k8s_pod_addresses: Optional[Dict[str, List[str]]] = None,
):
    """Build the per-deployment configuration (head + worker shards, or gateway) for a Pod."""
    self.k8s_namespace = k8s_namespace
    self.k8s_connection_pool = k8s_connection_pool
    self.k8s_pod_addresses = k8s_pod_addresses
    # explicit addresses are only needed when the k8s connection pool is disabled
    if self.k8s_connection_pool is True:
        self.k8s_pod_addresses = None
    self.head_deployment = None
    self.args = copy.copy(args)
    if k8s_namespace is not None:
        # otherwise it will remain with the one from the original Pod
        self.args.k8s_namespace = k8s_namespace
    self.args.k8s_connection_pool = k8s_connection_pool
    self.name = self.args.name
    self.deployment_args = self._get_deployment_args(self.args)
    # non-gateway Pods get a head deployment routing to their shards
    if self.deployment_args['head_deployment'] is not None:
        self.head_deployment = self._K8sDeployment(
            name=self.deployment_args['head_deployment'].name,
            version=get_base_executor_version(),
            shard_id=None,
            jina_pod_name=self.name,
            common_args=self.args,
            deployment_args=self.deployment_args['head_deployment'],
            pea_type=PeaRoleType.HEAD,
            k8s_namespace=self.k8s_namespace,
            k8s_connection_pool=self.k8s_connection_pool,
            k8s_pod_addresses=self.k8s_pod_addresses,
        )
    self.worker_deployments = []
    deployment_args = self.deployment_args['deployments']
    for i, args in enumerate(deployment_args):
        # only suffix the name with the shard index when there is more than one shard
        name = f'{self.name}-{i}' if len(deployment_args) > 1 else f'{self.name}'
        self.worker_deployments.append(
            self._K8sDeployment(
                name=name,
                version=get_base_executor_version(),
                shard_id=i,
                common_args=self.args,
                deployment_args=args,
                pea_type=PeaRoleType.WORKER
                if name != 'gateway'
                else PeaRoleType.GATEWAY,
                jina_pod_name=self.name,
                k8s_namespace=self.k8s_namespace,
                k8s_connection_pool=self.k8s_connection_pool,
                # only the gateway needs the full pod address map
                k8s_pod_addresses=self.k8s_pod_addresses
                if name == 'gateway'
                else None,
            )
        )
def _get_deployment_args(self, args):
    """Split the Pod arguments into head-deployment args and one args set per shard.

    :param args: the Pod's argparse Namespace.
    :return: dict with keys ``head_deployment`` (Namespace or None for the gateway)
        and ``deployments`` (one Namespace per shard).
    """
    parsed_args = {
        'head_deployment': None,
        'deployments': [],
    }
    shards = getattr(args, 'shards', 1)
    uses_before = getattr(args, 'uses_before', None)
    uses_after = getattr(args, 'uses_after', None)
    if args.name != 'gateway':
        # the head is a stripped copy of the Pod args: no executor config of its own
        parsed_args['head_deployment'] = BasePod._copy_to_head_args(self.args)
        parsed_args['head_deployment'].port_in = K8sGrpcConnectionPool.K8S_PORT_IN
        parsed_args['head_deployment'].uses = None
        parsed_args['head_deployment'].uses_metas = None
        parsed_args['head_deployment'].uses_with = None
        parsed_args['head_deployment'].env = None
        # if the k8s connection pool is disabled, the connection pool is managed manually
        if not self.k8s_connection_pool:
            import json

            # map each shard index to its in-cluster service address
            connection_list = {}
            for i in range(shards):
                name = (
                    f'{to_compatible_name(self.name)}-{i}'
                    if shards > 1
                    else f'{to_compatible_name(self.name)}'
                )
                connection_list[
                    str(i)
                ] = f'{name}.{self.k8s_namespace}.svc:{K8sGrpcConnectionPool.K8S_PORT_IN}'
            parsed_args['head_deployment'].connection_list = json.dumps(
                connection_list
            )
        # uses_before/uses_after sidecars run in the same pod, reachable on localhost
        if uses_before:
            parsed_args[
                'head_deployment'
            ].uses_before_address = (
                f'127.0.0.1:{K8sGrpcConnectionPool.K8S_PORT_USES_BEFORE}'
            )
        if uses_after:
            parsed_args[
                'head_deployment'
            ].uses_after_address = (
                f'127.0.0.1:{K8sGrpcConnectionPool.K8S_PORT_USES_AFTER}'
            )
    for i in range(shards):
        cargs = copy.deepcopy(args)
        cargs.shard_id = i
        cargs.uses_before = None
        cargs.uses_after = None
        cargs.port_in = K8sGrpcConnectionPool.K8S_PORT_IN
        cargs.uses_before_address = None
        cargs.uses_after_address = None
        if shards > 1:
            cargs.name = f'{cargs.name}-{i}'
        if args.name == 'gateway':
            cargs.pea_role = PeaRoleType.GATEWAY
            # the worker runtimes do not care
        else:
            cargs.k8s_connection_pool = False
        parsed_args['deployments'].append(cargs)
    return parsed_args
def to_k8s_yaml(
    self,
) -> List[Tuple[str, List[Dict]]]:
    """
    Return a list of dictionary configurations. One for each deployment in this Pod

    .. # noqa: DAR201
    .. # noqa: DAR101
    """
    if self.name == 'gateway':
        # the gateway Pod has exactly one deployment and no head
        return [
            (
                'gateway',
                self.worker_deployments[0].get_gateway_yamls(),
            )
        ]
    else:
        # head first, then each worker shard
        deployments = [self.head_deployment]
        deployments.extend(self.worker_deployments)
        return [
            (
                deployment.dns_name,
                deployment.get_runtime_yamls(),
            )
            for deployment in deployments
        ]
| 40.128342 | 94 | 0.567764 |
79579226606f93317586d77801582dc5014e6c84 | 2,230 | py | Python | jwt_auth/utils.py | vwrobel/django-jwt-auth | 12b14645cc29f0b2e5824ff7ff7911026de81cdc | [
"MIT"
] | 1 | 2019-08-27T09:38:45.000Z | 2019-08-27T09:38:45.000Z | jwt_auth/utils.py | vwrobel/django-jwt-auth | 12b14645cc29f0b2e5824ff7ff7911026de81cdc | [
"MIT"
] | null | null | null | jwt_auth/utils.py | vwrobel/django-jwt-auth | 12b14645cc29f0b2e5824ff7ff7911026de81cdc | [
"MIT"
] | 2 | 2016-10-19T11:38:47.000Z | 2019-08-27T09:38:48.000Z | from __future__ import unicode_literals
from datetime import datetime
import importlib
import jwt
def jwt_payload_handler(user):
    """Build the default JWT claim set for *user* (id, email, username, expiry)."""
    from jwt_auth import settings

    try:
        # Django >= 1.5 custom user models expose get_username()
        username = user.get_username()
    except AttributeError:
        username = user.username

    expiry = datetime.utcnow() + settings.JWT_EXPIRATION_DELTA
    payload = {
        'user_id': user.pk,
        'email': user.email,
        'username': username,
        'exp': expiry,
    }
    return payload
def jwt_get_user_id_from_payload_handler(payload):
    """
    Override this function if user_id is formatted differently in payload
    """
    # missing key yields None, matching dict.get semantics
    return payload.get('user_id')
def jwt_encode_handler(payload):
    """Serialize and sign *payload* as a JWT, returning a text (unicode) token.

    PyJWT < 2.0 returns ``bytes`` from ``jwt.encode`` while PyJWT >= 2.0
    returns ``str``; the unconditional ``.decode('utf-8')`` of the original
    raised ``AttributeError`` on modern PyJWT, so normalise both cases here.
    """
    from jwt_auth import settings

    token = jwt.encode(
        payload,
        settings.JWT_SECRET_KEY,
        settings.JWT_ALGORITHM
    )
    # PyJWT 2.x already returns str; only decode the legacy bytes result.
    if isinstance(token, bytes):
        token = token.decode('utf-8')
    return token
def jwt_decode_handler(token):
    """Verify *token* against the configured secret/audience and return its payload."""
    from jwt_auth import settings

    # expiry verification is toggled via settings; all other knobs pass straight through
    return jwt.decode(
        token,
        settings.JWT_SECRET_KEY,
        settings.JWT_VERIFY,
        options={'verify_exp': settings.JWT_VERIFY_EXPIRATION},
        leeway=settings.JWT_LEEWAY,
        audience=settings.JWT_AUDIENCE
    )
def import_from_string(val):
    """
    Attempt to import a class from a string representation.

    From: https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/settings.py

    Raises ``ImportError`` with a descriptive message for *any* failure mode:
    the original only caught ``ImportError``, so a missing attribute
    (``AttributeError``) or a dotless path (``ValueError`` from
    ``import_module('')``) escaped with an unhelpful traceback.
    """
    try:
        # Nod to tastypie's use of importlib.
        module_path, class_name = val.rsplit('.', 1)
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except (ImportError, AttributeError, ValueError) as e:
        msg = "Could not import '%s' for setting. %s: %s." % (val, e.__class__.__name__, e)
        raise ImportError(msg)
def get_authorization_header(request):
    """
    Return request's 'Authorization:' header, as a bytestring.

    From: https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/authentication.py
    """
    header = request.META.get('HTTP_AUTHORIZATION', b'')
    if not isinstance(header, type('')):
        return header
    # Work around django test client oddness: text headers are re-encoded to bytes.
    return header.encode('iso-8859-1')
| 25.340909 | 107 | 0.662332 |
7957931e05f7ccb7e59239d281025ac9977b6650 | 24,271 | py | Python | PyCORe_main.py | zili1010/LLE-Simulation | 0faf51ec32b99e388b05311b39bc6349da966e87 | [
"MIT"
] | null | null | null | PyCORe_main.py | zili1010/LLE-Simulation | 0faf51ec32b99e388b05311b39bc6349da966e87 | [
"MIT"
] | null | null | null | PyCORe_main.py | zili1010/LLE-Simulation | 0faf51ec32b99e388b05311b39bc6349da966e87 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import complex_ode,solve_ivp
import matplotlib.ticker as ticker
import matplotlib.colors as mcolors
from scipy.constants import pi, c, hbar
from matplotlib.widgets import Slider, Button, TextBox
from matplotlib.animation import FuncAnimation
import matplotlib.image as mpimg
from scipy.optimize import curve_fit
import time
from scipy.sparse import block_diag,identity,diags
class Resonator:
    """Single Kerr microresonator model for Lugiato-Lefever-equation (LLE) simulations."""

    def __init__(self, resonator_parameters):
        # Physical parameters initialization
        self.n0 = resonator_parameters['n0']            # linear refractive index
        self.n2 = resonator_parameters['n2']            # Kerr (nonlinear) index
        self.FSR = resonator_parameters['FSR']          # free spectral range [Hz]
        self.w0 = resonator_parameters['w0']            # pump angular frequency
        self.width = resonator_parameters['width']      # waveguide width
        self.height = resonator_parameters['height']    # waveguide height
        self.kappa_0 = resonator_parameters['kappa_0']  # intrinsic loss rate
        self.kappa_ex = resonator_parameters['kappa_ex']  # external coupling rate
        # integrated dispersion per mode, shifted to FFT ordering
        self.Dint = np.fft.fftshift(resonator_parameters['Dint'])
        # Auxiliary physical parameters
        self.Tr = 1/self.FSR #round trip time
        self.Aeff = self.width*self.height              # effective mode area
        self.Leff = c/self.n0*self.Tr                   # effective cavity length
        self.Veff = self.Aeff*self.Leff                 # effective mode volume
        self.g0 = hbar*self.w0**2*c*self.n2/self.n0**2/self.Veff  # per-photon Kerr shift
        self.gamma = self.n2*self.w0/c/self.Aeff        # nonlinear coefficient
        self.kappa = self.kappa_0 + self.kappa_ex       # total loss rate
        self.N_points = len(self.Dint)
        mu = np.fft.fftshift(np.arange(-self.N_points/2, self.N_points/2))

        # cubic polynomial fit of Dint(mu) to extract D2 (GVD) and D3 terms
        def func(x, a, b, c, d):
            return a + x*b + c*x**2/2 + d*x**3/6

        popt, pcov = curve_fit(func, mu, self.Dint)
        self.D2 = popt[2]
        self.D3 = popt[3]
def noise(self, a):
# return a*np.exp(1j*np.random.uniform(-1,1,self.N_points)*np.pi)
return a*(np.random.uniform(-1,1,self.N_points) + 1j*np.random.uniform(-1,1,self.N_points))
# Propagate Using the Step Adaptive Method
def Propagate_SAM(self, simulation_parameters, Seed, Pump):
    """Integrate the normalized LLE with an adaptive-step ODE solver over a detuning scan.

    :param simulation_parameters: dict of solver settings (slow_time, tolerances,
        detuning_array, noise_level, coupling_strength, coupling_distance, output, ...).
    :param Seed: initial intracavity field (mode amplitudes).
    :param Pump: pump power per mode.
    :return: 2D array (detuning x mode) when output=='map', last row for 'fin_res'.
    """
    start_time = time.time()
    # pump photon flux and field normalized to sqrt(2*g0/kappa) units
    pump = np.sqrt(Pump/(hbar*self.w0))
    seed = Seed*np.sqrt(2*self.g0/self.kappa)
    T = simulation_parameters['slow_time']
    abtol = simulation_parameters['absolute_tolerance']
    reltol = simulation_parameters['relative_tolerance']
    out_param = simulation_parameters['output']
    nmax = simulation_parameters['max_internal_steps']
    detuning = simulation_parameters['detuning_array']
    eps = simulation_parameters['noise_level']
    coupling = simulation_parameters['coupling_strength']
    s = simulation_parameters['coupling_distance']
    ### renormalization to dimensionless LLE units (time in units of 2/kappa)
    T_rn = (self.kappa/2)*T
    J = coupling*2/self.kappa
    f0 = pump*np.sqrt(8*self.g0*self.kappa_ex/self.kappa**3)
    print('f0^2 = ' + str(np.round(max(abs(f0)**2), 2)))
    print('xi [' + str(detuning[0]*2/self.kappa) + ',' +str(detuning[-1]*2/self.kappa)+ ']')
    noise_const = self.noise(eps) # set the noise level
    nn = len(detuning)
    mu = np.fft.fftshift(np.arange(-self.N_points/2, self.N_points/2,dtype=int))

    ### define the rhs function
    def LLE_1d(Time, A):
        # NOTE(review): in-place subtraction mutates the solver's state array at
        # every RHS evaluation, re-injecting noise continuously — confirm intended.
        A -= noise_const
        A_dir = np.fft.ifft(A)*len(A)## in the direct space
        # damping + detuning/dispersion, Kerr nonlinearity, and pump
        dAdT = -1*(1 + 1j*(self.Dint + dOm_curr)*2/self.kappa)*A + 1j*np.fft.fft(A_dir*np.abs(A_dir)**2)/len(A) + f0
        # coupling to modes shifted by +/- s (phase-rotating frame)
        dAdT[mu] += 1j*J/2*(A[mu+s]*np.exp(-1j/2*self.D2*s*(2*mu+s)*Time ) + A[mu-s]*np.exp(1j/2*self.D2*s*(2*mu-s)*Time ))
        return dAdT

    t_st = float(T_rn)/len(detuning)
    r = complex_ode(LLE_1d).set_integrator('dop853', atol=abtol, rtol=reltol,nsteps=nmax)# set the solver
    r.set_initial_value(seed, 0)# seed the cavity
    sol = np.ndarray(shape=(len(detuning), self.N_points), dtype='complex') # define an array to store the data
    sol[0,:] = seed
    for it in range(1,len(detuning)):
        self.printProgressBar(it + 1, nn, prefix = 'Progress:', suffix = 'Complete,', time='elapsed time = ' + '{:04.1f}'.format(time.time() - start_time) + ' s', length = 50)
        dOm_curr = detuning[it] # detuning value
        sol[it] = r.integrate(r.t+t_st)
    if out_param == 'map':
        return sol
    elif out_param == 'fin_res':
        return sol[-1, :]
    else:
        print ('wrong parameter')
def Propagate_SplitStep(self, simulation_parameters, Seed, Pump):
    """Integrate the normalized LLE with a fixed-step split-step scheme over a detuning scan.

    Nonlinear + pump and coupling terms are stepped explicitly (Euler), the
    linear loss/detuning/dispersion part is applied exactly via an exponential.
    """
    start_time = time.time()
    pump = np.sqrt(Pump/(hbar*self.w0))
    seed = Seed*np.sqrt(2*self.g0/self.kappa)
    T = simulation_parameters['slow_time']
    abtol = simulation_parameters['absolute_tolerance']   # unused by this fixed-step scheme
    reltol = simulation_parameters['relative_tolerance']  # unused by this fixed-step scheme
    out_param = simulation_parameters['output']
    nmax = simulation_parameters['max_internal_steps']    # unused by this fixed-step scheme
    detuning = simulation_parameters['detuning_array']
    eps = simulation_parameters['noise_level']
    coupling = simulation_parameters['coupling_strength']
    s = simulation_parameters['coupling_distance']
    ### renormalization to dimensionless LLE units
    J = coupling*2/self.kappa
    T_rn = (self.kappa/2)*T
    f0 = pump*np.sqrt(8*self.g0*self.kappa_ex/self.kappa**3)
    print('f0^2 = ' + str(np.round(max(abs(f0)**2), 2)))
    print('xi [' + str(detuning[0]*2/self.kappa) + ',' +str(detuning[-1]*2/self.kappa)+ ']')
    noise_const = self.noise(eps) # set the noise level
    nn = len(detuning)
    mu = np.fft.fftshift(np.arange(-self.N_points/2, self.N_points/2,dtype=int))
    t_st = float(T_rn)/len(detuning)
    dt=1e-3 # fixed (normalized) time step
    sol = np.ndarray(shape=(len(detuning), self.N_points), dtype='complex') # define an array to store the data
    sol[0,:] = (seed)
    self.printProgressBar(0, nn, prefix = 'Progress:', suffix = 'Complete', length = 50)
    for it in range(1,len(detuning)):
        self.printProgressBar(it + 1, nn, prefix = 'Progress:', suffix = 'Complete,', time='elapsed time = ' + '{:04.1f}'.format(time.time() - start_time) + ' s', length = 50)
        dOm_curr = detuning[it] # detuning value
        t=0
        # NOTE(review): ``buf`` is a *view* of sol[it-1]; the in-place subtraction
        # below mutates the stored previous row — confirm intended.
        buf = sol[it-1]
        buf-=noise_const
        while t<t_st:
            buf_dir = np.fft.ifft(buf)*len(buf)## in the direct space
            # First step: Kerr nonlinearity + pump (explicit Euler)
            buf =buf + dt*(1j/len(buf)*np.fft.fft(buf_dir*np.abs(buf_dir)**2) + f0)
            # coupling term; NOTE(review): applied to the already-updated buf of the
            # previous line, so the two explicit sub-steps see different states.
            buf +=dt*1j*J/2*(buf[mu+s]*np.exp(-1j/2*self.D2*s*(2*mu+s)*t ) + buf[mu-s]*np.exp(1j/2*self.D2*s*(2*mu-s)*t ))
            # second step: exact propagation of the linear loss/detuning/dispersion part
            buf = np.exp(-dt *(1+1j*(self.Dint + dOm_curr)*2/self.kappa )) * buf
            t+=dt
        sol[it] = buf
    if out_param == 'map':
        return sol
    elif out_param == 'fin_res':
        return sol[-1, :]
    else:
        print ('wrong parameter')
def seed_level (self, pump, detuning):
    """Return a flat (CW) intracavity seed matching the stationary LLE solution.

    Solves the stationary cubic for the normalized intensity and converts the
    (assumed single) real root back to physical field units in mode 0.
    """
    f_norm = np.sqrt(pump/(hbar*self.w0))*np.sqrt(8*self.g0*self.kappa_ex/self.kappa**3)
    detuning_norm = detuning*2/self.kappa
    # cubic in |a|^2: x^3 - 2*xi*x^2 + (xi^2+1)*x - f^2 = 0
    stat_roots = np.roots([1, -2*detuning_norm, (detuning_norm**2+1), -abs(f_norm[0])**2])
    ind_roots = [np.imag(ii)==0 for ii in stat_roots]
    res_seed = np.zeros_like(f_norm)
    # NOTE(review): assumes exactly one real root; in the bistable regime the
    # boolean mask selects several roots and this scalar assignment would fail.
    res_seed[0] = abs(stat_roots[ind_roots])**.5/np.sqrt(2*self.g0/self.kappa)
    return res_seed
def seed_soliton(self, pump, detuning):
    """Return the spectrum of an analytical single-soliton ansatz (CW background + sech pulse)."""
    # fast-time grid over one round trip, scaled to dispersion-normalized units
    fast_t = np.linspace(-pi,pi,len(pump))*np.sqrt(self.kappa/2/self.D2)
    f_norm = np.sqrt(pump/(hbar*self.w0))*np.sqrt(8*self.g0*self.kappa_ex/self.kappa**3)
    detuning_norm = detuning*2/self.kappa
    # stationary cubic for the CW background intensity
    stat_roots = np.roots([1, -2*detuning_norm, (detuning_norm**2+1), -abs(f_norm[0])**2])
    ind_roots = [np.imag(ii)==0 for ii in stat_roots]
    # soliton amplitude from the detuning (B = sqrt(2*xi))
    B = np.sqrt(2*detuning_norm)
    # lowest-intensity real root as background + B*sech soliton with fixed phase,
    # transformed to the mode basis and rescaled to physical units
    return np.fft.fft(np.min(np.abs(stat_roots[ind_roots]))**.5 + B*np.exp(1j*np.arccos(2*B/np.pi/f_norm[0])*2)*np.cosh(B*fast_t)**-1)/np.sqrt(2*self.g0/self.kappa)/len(pump)
def NeverStopSAM (self, T_step, detuning_0=-1, Pump_P=2., nmax=1000, abtol=1e-10, reltol=1e-9, out_param='fin_res'):
    """Interactively propagate the LLE forever, with live detuning/pump controls.

    Shows a phase-space picture (clickable to pick detuning/f^2), the field
    intensity, and its spectrum; a 'Stop' button ends the loop.

    NOTE(review): relies on attributes (``self.Pump``, ``self.seed``, ``self.mu``,
    ``self.TimeStep``, ``self.L``, ``self.Traman``) that are not set in
    ``__init__`` — confirm they are attached externally before calling.
    """
    self.Pump = self.Pump/abs(self.Pump)

    def deriv_1(dt, field_in):
        # computes the first-order derivative of field_in
        field_fft = np.fft.fft(field_in)
        omega = 2.*np.pi*np.fft.fftfreq(len(field_in),dt)
        out_field = np.fft.ifft(-1j*omega*field_fft)
        return out_field

    def deriv_2(dt, field_in):
        # computes the second-order derivative of field_in
        field_fft = np.fft.fft(field_in)
        omega = 2.*np.pi*np.fft.fftfreq(len(field_in),dt)
        field_fft *= -omega**2
        out_field = np.fft.ifft(field_fft)
        return out_field

    def disp(field_in, Dint_in):
        # computes the dispersion term in Fourier space
        field_fft = np.fft.fft(field_in)
        out_field = np.fft.ifft(Dint_in*field_fft)
        return out_field

    ### define the rhs function
    def LLE_1d(Z, A):
        # Dint == +-1 selects normalized anomalous/normal dispersion shortcuts
        if np.size(self.Dint)==1 and self.Dint == 1:
            dAdt2 = deriv_2(self.TimeStep, A)
            dAdT = 1j*dAdt2/2 + 1j*self.gamma*self.L/self.Tr*np.abs(A)**2*A - (self.kappa/2+1j*dOm_curr)*A + np.sqrt(self.kappa/2/self.Tr)*self.Pump*Pump_P**.5
        elif np.size(self.Dint)==1 and self.Dint == -1:
            dAdt2 = deriv_2(self.TimeStep, A)
            dAdT = -1j*dAdt2/2 + 1j*self.gamma*self.L/self.Tr*np.abs(A)**2*A - (self.kappa/2+1j*dOm_curr)*A + np.sqrt(self.kappa/2/self.Tr)*self.Pump*Pump_P**.5
        else:
            # full integrated-dispersion operator, without Raman
            Disp_int = disp(A, self.Dint)
            if self.Traman==0:
                dAdT = -1j*Disp_int + 1j*self.gamma*self.L/self.Tr*np.abs(A)**2*A - (self.kappa/2+1j*dOm_curr)*A + np.sqrt(self.kappa/2/self.Tr)*self.Pump*Pump_P**.5
            else:
                # with Raman self-frequency-shift term
                dAAdt = deriv_1(self.TimeStep,abs(A)**2)
                dAdT = -1j*Disp_int + 1j*self.gamma*self.L/self.Tr*np.abs(A)**2*A - (self.kappa/2+1j*dOm_curr)*A -1j*self.gamma*self.Traman*dAAdt*A + np.sqrt(self.kappa/2/self.Tr)*self.Pump*Pump_P**.5
        return dAdT

    r = complex_ode(LLE_1d).set_integrator('dopri5', atol=abtol, rtol=reltol,nsteps=nmax)# set the solver
    r.set_initial_value(self.seed, 0)# seed the cavity

    # static phase-space background, clickable to choose (detuning, f^2)
    img = mpimg.imread('phase_space.png')
    xx = np.linspace(-1,5,np.size(img,axis=1))
    yy = np.linspace(11,0,np.size(img,axis=0))
    XX,YY = np.meshgrid(xx,yy)

    fig = plt.figure(figsize=(11,7))
    plt.subplots_adjust(top=0.95,bottom=0.1,left=0.06,right=0.986,hspace=0.2,wspace=0.16)
    ax1 = plt.subplot(221)
    ax1.pcolormesh(XX,YY,img[:,:,1])
    plt.xlabel('Detuning')
    plt.ylabel('f^2')
    plt.title('Choose the region')
    plt.xlim(min(xx),max(xx))
    dot = plt.plot(detuning_0, Pump_P,'rx')

    ax2 = plt.subplot(222)
    line, = plt.plot(abs(self.seed)**2)
    plt.ylim(0,1.1)
    plt.ylabel('$|\Psi|^2$')

    ax3 = plt.subplot(224)
    line2, = plt.semilogy(self.mu, np.abs(np.fft.fft(self.seed))**2)
    plt.ylabel('PSD')
    plt.xlabel('mode number')

    ### widgets
    axcolor = 'lightgoldenrodyellow'
    resetax = plt.axes([0.4, 0.025, 0.1, 0.04])
    button = Button(resetax, 'Stop', color=axcolor, hovercolor='0.975')
    axboxf = plt.axes([0.1, 0.35, 0.1, 0.075])
    text_box_f = TextBox(axboxf, 'f^2', initial=str(Pump_P))
    axboxd = plt.axes([0.1, 0.25, 0.1, 0.075])
    text_box_d = TextBox(axboxd, 'Detuning', initial=str(detuning_0))

    # BUG FIX: the original declared ``global Run`` inside the callback while the
    # loop below read a *local* ``Run`` — the Stop button could never terminate
    # the loop. A nonlocal flag makes the callback mutate the loop condition.
    run = True

    def setup(event):
        nonlocal run
        run = False

    button.on_clicked(setup)

    def onclick(event):
        # clicking inside the phase-space panel updates the control text boxes
        if event.inaxes == ax1:
            ix, iy = event.xdata, event.ydata
            text_box_d.set_val(np.round(ix,4))
            text_box_f.set_val(np.round(iy,4))
            ax1.plot([ix],[iy],'rx')

    fig.canvas.mpl_connect('button_press_event', onclick)

    while run:
        dOm_curr = float(text_box_d.text) # get the detuning value
        Pump_P = float(text_box_f.text)
        Field = r.integrate(r.t+T_step)
        F_mod_sq = np.abs(Field)**2
        F_sp = np.abs(np.fft.fft(Field))**2
        line.set_ydata(F_mod_sq)
        line2.set_ydata(F_sp)
        ax2.set_ylim(0, max(F_mod_sq))
        ax3.set_ylim(min(F_sp),max(F_sp))
        plt.pause(1e-10)
def printProgressBar (self, iteration, total, prefix = '', suffix = '', time = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
        printEnd    - Optional  : end character (e.g. "\r", "\r\n") (Str)
    """
    percent = '{0:.{1}f}'.format(100 * (iteration / float(total)), decimals)
    done = int(length * iteration // total)
    bar = '{}{}'.format(fill * done, '-' * (length - done))
    print('\r{} |{}| {}% {} {}'.format(prefix, bar, percent, suffix, time), end = printEnd)
    # Print New Line on Complete
    if iteration == total:
        print()
class CROW(Resonator):
    """Chain of coupled resonators (Coupled-Resonator Optical Waveguide)."""

    def __init__(self, resonator_parameters):
        # Physical parameters initialization
        self.n0 = resonator_parameters['n0']
        self.J = np.array(resonator_parameters['J'])          # ring-to-ring coupling rates
        self.n2 = resonator_parameters['n2']
        self.FSR = resonator_parameters['FSR']
        self.w0 = resonator_parameters['w0']
        self.width = resonator_parameters['width']
        self.height = resonator_parameters['height']
        self.kappa_0 = resonator_parameters['kappa_0']
        self.kappa_ex = np.array(resonator_parameters['kappa_ex'])  # one value per ring
        self.N_CROW = len(self.kappa_ex)                      # number of rings in the chain
        # per-ring dispersion, fftshifted along the mode axis
        self.Dint = np.fft.fftshift(np.array(resonator_parameters['Dint']),axes=1)
        # Auxiliary physical parameters
        self.Tr = 1/self.FSR #round trip time
        self.Aeff = self.width*self.height
        self.Leff = c/self.n0*self.Tr
        self.Veff = self.Aeff*self.Leff
        self.g0 = hbar*self.w0**2*c*self.n2/self.n0**2/self.Veff
        self.gamma = self.n2*self.w0/c/self.Aeff
        self.kappa = self.kappa_0 + self.kappa_ex
        self.N_points = len(self.Dint[0])
        mu = np.fft.fftshift(np.arange(-self.N_points/2, self.N_points/2))
        ### linear part matrix: block-diagonal dispersion + identity, flattened per ring
        DINT = np.reshape(np.multiply(self.Dint.T,2/self.kappa).T,(-1,self.Dint.size))[0]
        self.L = diags(1j*DINT,0,dtype=complex)+identity(self.Dint.size,dtype=complex)
        ### coupling: off-diagonal blocks with the (-1)^mu phase between adjacent rings
        JJ_up = np.reshape(np.multiply(np.multiply(self.J,np.exp(1j*mu*np.pi)).T,2/self.kappa[1:]).T,(-1,self.Dint.size-self.Dint[0].size))[0]
        J_down = np.reshape(np.multiply(np.multiply(self.J,np.exp(-1j*mu*np.pi)).T,2/self.kappa[:-1]).T,(-1,self.Dint.size-self.Dint[0].size))[0]
        self.C = diags(JJ_up, 1, dtype=complex) + diags(J_down, -1, dtype=complex)
        # NOTE(review): debug print left in — consider removing for library use.
        print(self.C)
def SAM_CROW(self, simulation_parameters, Seed, Pump):
    """Integrate the coupled-resonator (CROW) LLE system over a detuning scan.

    Fields of all rings are stacked into one flat state vector of size
    ``N_CROW * N_points``; ``self.L`` and ``self.C`` act on that vector.

    BUG FIX: ``np.fft.ifft`` takes ``axis=``, not ``axes=`` — the original
    ``axes=1`` raised ``TypeError`` on every RHS evaluation.

    NOTE(review): further suspect lines are flagged inline below and left
    untouched pending confirmation of the intended physics.
    """
    start_time = time.time()
    pump = np.sqrt(Pump/(hbar*self.w0))
    # normalize the per-ring seed fields and flatten into one vector
    seed = np.reshape(np.multiply(np.reshape(Seed,(self.N_CROW,-1)).T, np.sqrt(self.g0*2/self.kappa)).T,(-1,self.Dint.size))[0]
    T = simulation_parameters['slow_time']
    abtol = simulation_parameters['absolute_tolerance']
    reltol = simulation_parameters['relative_tolerance']
    out_param = simulation_parameters['output']
    nmax = simulation_parameters['max_internal_steps']
    detuning = simulation_parameters['detuning_array']
    eps = simulation_parameters['noise_level']
    ### renormalization
    T_rn = (self.kappa/2)*T
    f0 = np.reshape(np.multiply(np.reshape(pump,(self.N_CROW,-1)).T,np.sqrt(8*self.g0*self.kappa_ex/self.kappa**3)).T, (-1,self.Dint.size))[0]
    print('f0^2 = ' + str(np.round(max(abs(f0)**2), 2)))
    print('xi [' + str(detuning[0]*2/self.kappa) + ',' +str(detuning[-1]*2/self.kappa)+ ']')
    noise_const = self.noise(eps) # set the noise level
    nn = len(detuning)

    ### define the rhs function
    def LLE_1d(Time, A):
        A -= noise_const
        # per-ring inverse FFT (was axes=1, an invalid keyword for np.fft.ifft)
        A_dir = np.reshape(np.fft.ifft(np.reshape(A, (-1, self.N_points)), axis=1), (1,-1))*self.N_points# in the direct space
        # NOTE(review): precedence makes the second term dOm_curr*2/(kappa·A), and
        # the linear operator carries +identity rather than a damping sign —
        # both look suspect against the single-ring RHS; confirm before changing.
        dAdT = self.L.dot(A) + dOm_curr*2/self.kappa.dot(A) + self.C.dot(A)+ np.abs(A_dir)**2*A + f0 ### apply repeat to kappa
        return dAdT

    t_st = float(T_rn)/len(detuning)
    r = complex_ode(LLE_1d).set_integrator('dop853', atol=abtol, rtol=reltol,nsteps=nmax)# set the solver
    r.set_initial_value(seed, 0)# seed the cavity
    # NOTE(review): for N_CROW > 1 the state has N_CROW*N_points entries, so this
    # (len(detuning), N_points) array cannot hold it — confirm intended shape.
    sol = np.ndarray(shape=(len(detuning), self.N_points), dtype='complex') # define an array to store the data
    sol[0,:] = seed
    for it in range(1,len(detuning)):
        self.printProgressBar(it + 1, nn, prefix = 'Progress:', suffix = 'Complete,', time='elapsed time = ' + '{:04.1f}'.format(time.time() - start_time) + ' s', length = 50)
        dOm_curr = detuning[it] # detuning value
        sol[it] = r.integrate(r.t+t_st)
    if out_param == 'map':
        return sol
    elif out_param == 'fin_res':
        return sol[-1, :]
    else:
        print ('wrong parameter')
class Lattice(Resonator):
    """Placeholder for a lattice of coupled resonators; no behavior implemented yet."""
    pass
def Plot_Map(map_data,dt=1,dz=1,colormap = 'cubehelix',z0=0):
    # Interactive viewer for a complex field stored as map_data[z_index, t_index].
    # Top panel: |field|^2 pseudocolor over (distance, time); clicking on it
    # (handled by onclick) redraws the power/phase slice panels at that distance.
    # dt, dz are grid steps used only for axis scaling; z0 is the initial slice.
    def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
        '''
        Function to offset the "center" of a colormap. Useful for
        data with a negative min and positive max and you want the
        middle of the colormap's dynamic range to be at zero

        Input
        -----
        cmap : The matplotlib colormap to be altered
        start : Offset from lowest point in the colormap's range.
            Defaults to 0.0 (no lower offset). Should be between
            0.0 and `midpoint`.
        midpoint : The new center of the colormap. Defaults to
            0.5 (no shift). Should be between 0.0 and 1.0. In
            general, this should be 1 - vmax/(vmax + abs(vmin))
            For example if your data range from -15.0 to +5.0 and
            you want the center of the colormap at 0.0, `midpoint`
            should be set to 1 - 5/(5 + 15)) or 0.75
        stop : Offset from highest point in the colormap's range.
            Defaults to 1.0 (no upper offset). Should be between
            `midpoint` and 1.0.
        '''
        cdict = {
            'red': [],
            'green': [],
            'blue': [],
            'alpha': []
        }
        # regular index to compute the colors
        reg_index = np.linspace(start, stop, 257)
        # shifted index to match the data
        shift_index = np.hstack([
            np.linspace(0.0, midpoint, 128, endpoint=False),
            np.linspace(midpoint, 1.0, 129, endpoint=True)
        ])
        # Sample the source cmap at the regular index but anchor each colour at
        # the shifted position, so `midpoint` lands on the cmap centre.
        for ri, si in zip(reg_index, shift_index):
            r, g, b, a = cmap(ri)
            cdict['red'].append((si, r, r))
            cdict['green'].append((si, g, g))
            cdict['blue'].append((si, b, b))
            cdict['alpha'].append((si, a, a))
        newcmap = mcolors.LinearSegmentedColormap(name, cdict)
        plt.register_cmap(cmap=newcmap)
        return newcmap
    def onclick(event):
        # Mouse handler: map the clicked x (distance) to a row of map_data and
        # redraw the slice panels at that distance.
        ix, iy = event.xdata, event.ydata
        x = int(np.floor(ix/dz))
        plt.suptitle('Chosen distance z = %f km'%ix, fontsize=20)
        # Drop the previous red marker line and draw one at the new position.
        ax.lines.pop(0)
        ax.plot([ix,ix], [0, dt*np.size(map_data,1)],'r')
        # Power slice |A(t)|^2 at the chosen distance.
        ax2 = plt.subplot2grid((4, 1), (2, 0))
        ax2.plot(np.arange(0,dt*np.size(map_data,1),dt), abs(map_data[x,:])**2, 'r')
        ax2.set_ylabel('Power (W)')
        ax2.set_xlim(0, dt*np.size(map_data,1))
        # Phase slice; additionally show the unwrapped phase (green) when its
        # total excursion is small enough (< 10*pi) to stay readable.
        ax3 = plt.subplot2grid((4, 1), (3, 0))
        ax3.plot(np.arange(0,dt*np.size(map_data,1),dt), np.angle(map_data[x,:])/(np.pi),'b')
        if max( np.unwrap(np.angle(map_data[x,:]))/(np.pi)) - min( np.unwrap(np.angle(map_data[x,:]))/(np.pi))<10:
            ax3.plot(np.arange(0,dt*np.size(map_data,1),dt), np.unwrap(np.angle(map_data[x,:]))/(np.pi),'g')
        ax3.set_xlabel('Time (ps)')
        ax3.set_ylabel('Phase (rad)')
        ax3.set_xlim(0, dt*np.size(map_data,1))
        ax3.yaxis.set_major_locator(ticker.MultipleLocator(base=1.0))
        ax3.yaxis.set_major_formatter(ticker.FormatStrFormatter('%g $\pi$'))
        ax3.grid(True)
        plt.show()
        f.canvas.draw()
    # --- initial figure layout ---
    f = plt.figure()
    ax = plt.subplot2grid((4, 1), (0, 0), rowspan=2)
    plt.suptitle('Choose the coordinate', fontsize=20)
    f.set_size_inches(10,8)
    Z,T = np.meshgrid( np.arange(0,dz*np.size(map_data,0),dz), np.arange(0, dt*np.size(map_data,1),dt))
    # orig_cmap = plt.get_cmap('viridis')
    # colormap = shiftedColorMap(orig_cmap, start=0., midpoint=.5, stop=1., name='shrunk')
    pc = ax.pcolormesh(Z, T, abs(np.transpose(map_data))**2, cmap=colormap)
    ax.plot([0, 0], [0, dt*np.size(map_data,1)-dt], 'r')
    ax.set_xlabel('Distance (km)')
    ax.set_ylabel('Time (ps)')
    ax.set_ylim(0, dt*np.size(map_data,1))
    ax.set_xlim(0, dz*np.size(map_data,0)-5*dz)
    # Draw the initial slice at z0, duplicating the onclick logic so the lower
    # panels are populated before any mouse interaction happens.
    ix=z0
    x = int(np.floor(ix/dz))
    plt.suptitle('Chosen distance z = %f km'%ix, fontsize=20)
    ax.lines.pop(0)
    ax.plot([ix,ix], [0, dt*np.size(map_data,1)],'r')
    ax2 = plt.subplot2grid((4, 1), (2, 0))
    ax2.plot(np.arange(0,dt*np.size(map_data,1),dt), abs(map_data[x,:])**2, 'r')
    ax2.set_ylabel('Power (W)')
    ax2.set_xlim(0, dt*np.size(map_data,1))
    ax3 = plt.subplot2grid((4, 1), (3, 0))
    ax3.plot(np.arange(0,dt*np.size(map_data,1),dt), np.angle(map_data[x,:])/(np.pi),'b')
    if max( np.unwrap(np.angle(map_data[x,:]))/(np.pi)) - min( np.unwrap(np.angle(map_data[x,:]))/(np.pi))<10:
        ax3.plot(np.arange(0,dt*np.size(map_data,1),dt), np.unwrap(np.angle(map_data[x,:]))/(np.pi),'g')
    ax3.set_xlabel('Time (ps)')
    ax3.set_ylabel('Phase (rad)')
    ax3.set_xlim(0, dt*np.size(map_data,1))
    ax3.yaxis.set_major_locator(ticker.MultipleLocator(base=1.0))
    ax3.yaxis.set_major_formatter(ticker.FormatStrFormatter('%g $\pi$'))
    ax3.grid(True)
    # f.colorbar(pc)
    plt.subplots_adjust(left=0.07, bottom=0.07, right=0.95, top=0.93, wspace=None, hspace=0.4)
    f.canvas.mpl_connect('button_press_event', onclick)
"""
here is a set of useful standard functions
"""
if __name__ == '__main__':
    # Smoke entry point: just report the package name when run directly.
    print('PyCORe')
| 47.497065 | 205 | 0.579787 |
7957931ecf7d30fd7b4bc0773fdc6c1009bb7684 | 6,248 | py | Python | src/dataloaders/mnist.py | maharshi95/GANTree | 5541c5fb0ba3d856081c03f37870a85fdd654681 | [
"MIT"
] | 16 | 2019-09-17T00:25:49.000Z | 2021-11-04T06:49:05.000Z | src/dataloaders/mnist.py | maharshi95/GANTree | 5541c5fb0ba3d856081c03f37870a85fdd654681 | [
"MIT"
] | 6 | 2020-01-28T22:55:51.000Z | 2022-02-10T00:20:41.000Z | src/dataloaders/mnist.py | maharshi95/GANTree | 5541c5fb0ba3d856081c03f37870a85fdd654681 | [
"MIT"
] | 3 | 2019-08-21T06:47:05.000Z | 2020-07-19T00:33:13.000Z | from __future__ import absolute_import
import torch as tr
from base.dataloader import BaseDataLoader
from torchvision.datasets import MNIST, FashionMNIST
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import numpy as np
class MnistDataLoader(BaseDataLoader):
    """Loader for the MNIST digit dataset, optionally restricted to a
    subset of digit classes via ``classes``."""

    def __init__(self, img_size=2, train_batch_size=64, test_batch_size=64, get_tensor=True, supervised=True, classes=None):
        super(MnistDataLoader, self).__init__(img_size, None, train_batch_size, test_batch_size, get_tensor,
                                              supervised, classes)

    def get_data(self):
        """Download MNIST if needed and return
        (train_data, test_data, train_labels, test_labels) as ndarrays."""
        preprocess = transforms.Compose([
            transforms.Resize(self.img_size),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        splits = {}
        for split_name, is_train in (('train', True), ('test', False)):
            dataset = MNIST('../data/mnist', train=is_train, download=True, transform=preprocess)
            images = np.array([sample[0].numpy() for sample in dataset])
            labels = np.array([sample[1].numpy() for sample in dataset])
            if self.classes:
                # Keep only samples whose label belongs to the requested classes.
                keep = np.where(np.isin(labels, self.classes))
                images = images[keep]
                labels = labels[keep]
            splits[split_name] = (images, labels)
        train_data, train_labels = splits['train']
        test_data, test_labels = splits['test']
        return train_data, test_data, train_labels, test_labels
class FashionMnistDataLoader(BaseDataLoader):
    # Loader for FashionMNIST; unlike MnistDataLoader it reads the processed
    # tensor files directly from disk instead of iterating the dataset object.
    def __init__(self, train_batch_size=32, test_batch_size=32, get_tensor=True):
        # Fixed 28x28 image size; this loader is always supervised.
        super(FashionMnistDataLoader, self).__init__((28, 28), None, train_batch_size, test_batch_size, get_tensor,
                                                     supervised=True)
    def get_data(self):
        # Instantiating FashionMNIST triggers the download/processing step that
        # creates the .pt files loaded below.
        FashionMNIST('../data/fashion', download=True)
        train_data, train_labels = tr.load('../data/fashion/processed/training.pt')
        test_data, test_labels = tr.load('../data/fashion/processed/test.pt')
        # NOTE(review): normalize_mnist_images is neither imported nor defined
        # in this module as shown -- this looks like a NameError at runtime;
        # confirm where it is meant to come from.
        train_data = normalize_mnist_images(train_data)
        test_data = normalize_mnist_images(test_data)
        return train_data, test_data, train_labels, test_labels
class MixedMnistDataLoader(BaseDataLoader):
    """Loader that mixes MNIST digits and FashionMNIST into one 20-class
    dataset: labels 0-9 are digits, labels 10-19 are fashion categories
    (fashion labels are shifted by +10 to avoid collisions)."""

    def __init__(self, img_size=2, train_batch_size=64, test_batch_size=64, get_tensor=True, supervised=True, classes=None):
        super(MixedMnistDataLoader, self).__init__(img_size, None, train_batch_size, test_batch_size, get_tensor,
                                                   supervised, classes)

    def get_data(self):
        """Download both datasets if needed and return
        (train_data, test_data, train_labels, test_labels) ndarrays.

        Refactored: the four previously duplicated transform pipelines and
        array-conversion loops are factored into one shared transform and a
        local helper; behaviour is unchanged.
        """
        preprocess = transforms.Compose([
            transforms.Resize(self.img_size),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        def _to_arrays(dataset):
            # Materialize a torchvision dataset as (images, labels) ndarrays.
            data = np.array([x[0].numpy() for x in dataset])
            labels = np.array([x[1].numpy() for x in dataset])
            return data, labels

        mnist_train_data, mnist_train_labels = _to_arrays(
            MNIST('../data/mnist', train=True, download=True, transform=preprocess))
        mnist_test_data, mnist_test_labels = _to_arrays(
            MNIST('../data/mnist', train=False, download=True, transform=preprocess))
        fashion_train_data, fashion_train_labels = _to_arrays(
            FashionMNIST('../data/fashion', train=True, download=True, transform=preprocess))
        fashion_test_data, fashion_test_labels = _to_arrays(
            FashionMNIST('../data/fashion', train=False, download=True, transform=preprocess))

        # Offset fashion labels by 10 so the two label spaces do not collide.
        train_data = np.concatenate((mnist_train_data, fashion_train_data))
        train_labels = np.concatenate((mnist_train_labels, 10 + fashion_train_labels))
        test_data = np.concatenate((mnist_test_data, fashion_test_data))
        test_labels = np.concatenate((mnist_test_labels, 10 + fashion_test_labels))
        return train_data, test_data, train_labels, test_labels
7957932eded2f3cf164eccf231970c3f3336f74d | 4,141 | py | Python | webproctor/settings.py | mrabhi05/webproctor | a5da4d909f71b3f7b50b00727edbdf52483451d1 | [
"MIT"
] | null | null | null | webproctor/settings.py | mrabhi05/webproctor | a5da4d909f71b3f7b50b00727edbdf52483451d1 | [
"MIT"
] | null | null | null | webproctor/settings.py | mrabhi05/webproctor | a5da4d909f71b3f7b50b00727edbdf52483451d1 | [
"MIT"
] | null | null | null | """
Django settings for webproctor project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,'templates')
STATIC_DIR=os.path.join(BASE_DIR,'static')
MEDIA_ROOT=os.path.join(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = '@k0#p3kidu)yaaa3u1hplxz)f@^6xiy384*(+n@@s5x#1bx@m5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must list the served hostnames before deploying with DEBUG = False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'quiz',
'teacher',
'student',
'widget_tweaks',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # NOTE(review): CSRF protection middleware is commented out below and
    # CSRF_COOKIE_SECURE is False -- POST endpoints are exposed to CSRF.
    # Re-enable before any production deployment.
    #'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CSRF_COOKIE_SECURE=False
ROOT_URLCONF = 'webproctor.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webproctor.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[
STATIC_DIR,
]
LOGIN_REDIRECT_URL='/afterlogin'
#for contact us give your gmail id and password
# NOTE(review): real Gmail credentials are hard-coded and committed here --
# move EMAIL_HOST_USER / EMAIL_HOST_PASSWORD to environment variables and
# revoke this password.
EMAIL_BACKEND ='django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST_USER = 'aajminiproject@gmail.com' # this email will be used to send emails
EMAIL_HOST_PASSWORD = 'aajminiproject321' # host email password required
# now sign in with your host gmail account in your browser
# open following link and turn it ON
# https://myaccount.google.com/lesssecureapps
# otherwise you will get SMTPAuthenticationError at /contactus
# this process is required because google blocks apps authentication by default
EMAIL_RECEIVING_USER = ['aajminiproject@gmail.com'] # email on which you will receive messages sent from website
| 27.97973 | 112 | 0.719391 |
795793318a6ae2d56f6b7b082a6739b58462a18b | 974 | py | Python | start_instances.py | danilo-lopes/terra-ec2-cron | 47d92b71f30d740da8d4ca1fd3a3c47b33aa3d5c | [
"MIT"
] | null | null | null | start_instances.py | danilo-lopes/terra-ec2-cron | 47d92b71f30d740da8d4ca1fd3a3c47b33aa3d5c | [
"MIT"
] | null | null | null | start_instances.py | danilo-lopes/terra-ec2-cron | 47d92b71f30d740da8d4ca1fd3a3c47b33aa3d5c | [
"MIT"
] | null | null | null | import boto3
def lambda_handler(event, context):
    """AWS Lambda entry point: start every stopped EC2 instance tagged
    poweron=true and system=unitetech.  Prints the outcome; returns None."""
    ec2 = boto3.resource('ec2')
    # Match only stopped instances carrying both expected tags.
    tag_filters = [
        {
            'Name': 'tag:poweron',
            'Values': ['true']
        },
        {
            'Name': 'tag:system',
            'Values': ['unitetech']
        },
        {
            'Name': 'instance-state-name',
            'Values': ['stopped']
        }
    ]
    matching = ec2.instances.filter(Filters=tag_filters)
    instance_ids = [inst.id for inst in matching]
    # Guard clause: nothing to start.
    if not instance_ids:
        print('None Existent Instances to Poweron')
        return
    try:
        ec2.instances.filter(InstanceIds=instance_ids).start()
        print(f'Powered on Instances: {instance_ids}')
    except Exception as erro:
        print(f'Couldnt Poweron the Instances: {erro}')
    return
| 22.651163 | 71 | 0.550308 |
79579381707eee4ab5c265ff5b7cd29ab23ec347 | 384 | py | Python | SIA Model 50/SIA_PICKLE_DUMPER.py | SergentLime/SIA---Smart-Interactive-App | e7afedf01519bca4750101a67f12e2081d4cf171 | [
"Apache-2.0"
] | null | null | null | SIA Model 50/SIA_PICKLE_DUMPER.py | SergentLime/SIA---Smart-Interactive-App | e7afedf01519bca4750101a67f12e2081d4cf171 | [
"Apache-2.0"
] | null | null | null | SIA Model 50/SIA_PICKLE_DUMPER.py | SergentLime/SIA---Smart-Interactive-App | e7afedf01519bca4750101a67f12e2081d4cf171 | [
"Apache-2.0"
] | 1 | 2019-05-12T09:50:19.000Z | 2019-05-12T09:50:19.000Z | import pickle
# Dump data in pickle files of S.I.R.
# Created by GreenGames
# Place name of file in between quote marks
FILE = ".pkl"
# Place Temporary Code Here
# Dumping: write the (placeholder) payload with the highest pickle protocol.
# Fix: removed the redundant fileObject.close() -- the with-statement already
# closes the file on exit.
with open(FILE, 'wb') as fileObject:
    pickle.dump('', fileObject, pickle.HIGHEST_PROTOCOL)
# Loading to Check: read the file back and echo its contents.
with open(FILE, 'rb') as fileObject:
    print(pickle.load(fileObject))
| 21.333333 | 56 | 0.716146 |
795795f6bc769dca7fc2a583f145579be82b9043 | 2,762 | py | Python | quex/engine/state_machine/algorithm/TEST/test-nfa-to-dfa.py | smmckay/quex-mirror | 7d75ed560e9f3a591935e59243188676eecb112a | [
"MIT"
] | null | null | null | quex/engine/state_machine/algorithm/TEST/test-nfa-to-dfa.py | smmckay/quex-mirror | 7d75ed560e9f3a591935e59243188676eecb112a | [
"MIT"
] | null | null | null | quex/engine/state_machine/algorithm/TEST/test-nfa-to-dfa.py | smmckay/quex-mirror | 7d75ed560e9f3a591935e59243188676eecb112a | [
"MIT"
] | null | null | null | #! /usr/bin/env python
import sys
import os
sys.path.insert(0, os.environ["QUEX_PATH"])
from quex.engine.state_machine.TEST_help.some_dfas import *
from quex.engine.state_machine.core import *
import quex.engine.state_machine.construction.repeat as repeat
import quex.engine.state_machine.algorithm.nfa_to_dfa as nfa_to_dfa
# Self-test scenarios for quex's NFA -> DFA subset construction (Python 2).
if "--hwut-info" in sys.argv:
    # HWUT harness handshake: print the test title and quit.
    print "NFA: Conversion to DFA (subset construction)"
    sys.exit(0)
print "_______________________________________________________________________________"
print "Example A:"
# One-or-more repetition of 'a' (a+).
sm = DFA()
n0 = sm.init_state_index
n1 = sm.add_transition(n0, ord('a'), AcceptanceF=True)
sm = repeat.do(sm, 1)
dfa = nfa_to_dfa.do(sm)
print dfa
print "_______________________________________________________________________________"
print "Example B:"
# Unbounded repetition of 'a' (a*).
sm = DFA()
n0 = sm.init_state_index
n1 = sm.add_transition(n0, ord('a'), AcceptanceF=True)
sm = repeat.do(sm)
dfa = nfa_to_dfa.do(sm)
print dfa
print "_______________________________________________________________________________"
print "Example C:"
# (*) create a simple state machine:
#                                  ,--<------------ eps ------------------.
#                                 /                                        \
#                                 |   ,- eps -->(4)-- 'b' -->(5)-- eps -.  |
#                                 \  /                                   \ /
#     (0)-- 'a' -->(1)-- eps -->(2)-- eps -->(3)                        (8)-- eps -->((9))
#                                 \  \                                   /
#                                  \  '- eps -->(6)-- 'c' -->(7)-- eps -'
#                                   \                                   /
#                                    '----------------------- eps ----------->---------------'
#
#    ((9)) is the acceptance state.
#
sm = DFA()
n0 = sm.init_state_index
n1 = sm.add_transition(n0, ord('a'))
n2 = sm.add_epsilon_transition(n1)
n3 = sm.add_epsilon_transition(n2)
#
n4 = sm.add_epsilon_transition(n3)
n5 = sm.add_transition(n4, ord('b'))
#
n6 = sm.add_epsilon_transition(n3)
n7 = sm.add_transition(n6, ord('c'))
n8 = sm.add_epsilon_transition(n7)
#
sm.add_epsilon_transition(n5, n8)
#
n9 = sm.add_epsilon_transition(n8, RaiseAcceptanceF=True)
#
sm.add_epsilon_transition(n2, n9)
sm.add_epsilon_transition(n8, n3)
# (*) create the DFA from the specified NFA
dfa = nfa_to_dfa.do(sm)
print dfa
print "_______________________________________________________________________________"
print "Example D:"
# Repetition of the shared test machine sm3 (imported from some_dfas).
tmp = repeat.do(sm3)
## print tmp.get_string(NormalizeF=False)
dfa = nfa_to_dfa.do(tmp)
print dfa
| 34.525 | 100 | 0.546705 |
79579631495458ddd6051d5e9e59bb5a7f87b5e1 | 193 | py | Python | Chapter10/named_tuple_with_list_values.py | kaushalkumarshah/Learn-Python-in-7-Days | 2663656767c8959ace836f0c0e272f3e501bbe6e | [
"MIT"
] | 12 | 2018-07-09T16:20:31.000Z | 2022-03-21T22:52:15.000Z | Chapter10/named_tuple_with_list_values.py | kaushalkumarshah/Learn-Python-in-7-Days | 2663656767c8959ace836f0c0e272f3e501bbe6e | [
"MIT"
] | null | null | null | Chapter10/named_tuple_with_list_values.py | kaushalkumarshah/Learn-Python-in-7-Days | 2663656767c8959ace836f0c0e272f3e501bbe6e | [
"MIT"
] | 19 | 2018-01-09T12:49:06.000Z | 2021-11-23T08:05:55.000Z | import collections
# Python 2 demo: build a namedtuple instance from a list via _make.
employee = collections.namedtuple('emp','name, age, empid')
list1 = ['BOB', 21, 34567]
# _make constructs an instance from any iterable of matching length.
record2 =employee._make(list1)
print record2
print "\n"
# _asdict returns an ordered mapping of field name -> value.
print (record2._asdict())
7957974ae2a4771447f73df0ca439b67869da3ba | 861 | py | Python | pqdm/threads.py | dangercrow/pqdm | d840131048ec609c0c4184d497d5f9f3187d855b | [
"MIT"
] | 129 | 2020-03-18T13:42:39.000Z | 2022-03-28T10:32:53.000Z | pqdm/threads.py | dangercrow/pqdm | d840131048ec609c0c4184d497d5f9f3187d855b | [
"MIT"
] | 139 | 2020-03-11T23:25:16.000Z | 2022-03-18T18:00:42.000Z | pqdm/threads.py | dangercrow/pqdm | d840131048ec609c0c4184d497d5f9f3187d855b | [
"MIT"
] | 11 | 2020-05-04T00:56:45.000Z | 2021-12-17T11:33:08.000Z | from concurrent.futures import ThreadPoolExecutor
from typing import Any, Callable, Iterable, Optional, Union
from bounded_pool_executor import BoundedThreadPoolExecutor
from typing_extensions import Literal
from pqdm._base import _parallel_process
def pqdm(
    array: Iterable[Any],
    function: Callable[[Any], Any],
    n_jobs: int,
    argument_type: Optional[Union[Literal['kwargs'], Literal['args']]] = None,
    bounded: bool = False,
    exception_behaviour: Union[Literal['ignore'], Literal['immediate'], Literal['deferred']] = 'ignore',
    **kwargs
):
    """Run ``function`` over ``array`` in parallel threads with a progress bar.

    Args:
        array: Items to process; how each item is passed to ``function``
            depends on ``argument_type``.
        function: Callable applied to every item.
        n_jobs: Number of worker threads.
        argument_type: 'args' / 'kwargs' to unpack items into the call, or
            None to pass each item as a single positional argument.
        bounded: If True use BoundedThreadPoolExecutor (applies backpressure
            on task submission) instead of the stdlib ThreadPoolExecutor.
        exception_behaviour: Error-handling policy forwarded to
            _parallel_process ('ignore' / 'immediate' / 'deferred' -- exact
            semantics implemented there; TODO confirm).
        **kwargs: Forwarded to _parallel_process (e.g. tqdm options).
    """
    return _parallel_process(
        iterable=array,
        function=function,
        argument_type=argument_type,
        n_jobs=n_jobs,
        executor=BoundedThreadPoolExecutor if bounded else ThreadPoolExecutor,
        exception_behaviour=exception_behaviour,
        **kwargs
    )
| 30.75 | 104 | 0.721254 |
795797a1ae86ce99b3d6bae81d3ea93777243ad2 | 9,550 | py | Python | train_demo.py | md-experiments/FewRel | a91c0a12ccb35c422d58b51231657806fcb14dea | [
"MIT"
] | null | null | null | train_demo.py | md-experiments/FewRel | a91c0a12ccb35c422d58b51231657806fcb14dea | [
"MIT"
] | null | null | null | train_demo.py | md-experiments/FewRel | a91c0a12ccb35c422d58b51231657806fcb14dea | [
"MIT"
] | null | null | null | from fewshot_re_kit.data_loader import get_loader, get_loader_pair, get_loader_unsupervised
from fewshot_re_kit.framework import FewShotREFramework
from fewshot_re_kit.sentence_encoder import CNNSentenceEncoder, BERTSentenceEncoder, BERTPAIRSentenceEncoder, RobertaSentenceEncoder, RobertaPAIRSentenceEncoder
import models
from models.proto import Proto
from models.gnn import GNN
from models.snail import SNAIL
from models.metanet import MetaNet
from models.siamese import Siamese
from models.pair import Pair
from models.d import Discriminator
import sys
import torch
from torch import optim, nn
import numpy as np
import json
import argparse
import os
def main():
    # Entry point: parse CLI options, build encoder/data loaders/model, then
    # train and/or evaluate a few-shot relation classification model.
    parser = argparse.ArgumentParser()
    # --- data files ---
    parser.add_argument('--train', default='train_wiki',
            help='train file')
    parser.add_argument('--val', default='val_wiki',
            help='val file')
    parser.add_argument('--test', default='test_wiki',
            help='test file')
    parser.add_argument('--adv', default=None,
            help='adv file')
    # --- episode configuration (N-way K-shot with Q queries) ---
    parser.add_argument('--trainN', default=10, type=int,
            help='N in train')
    parser.add_argument('--N', default=5, type=int,
            help='N way')
    parser.add_argument('--K', default=5, type=int,
            help='K shot')
    parser.add_argument('--Q', default=5, type=int,
            help='Num of query per class')
    parser.add_argument('--batch_size', default=4, type=int,
            help='batch size')
    parser.add_argument('--train_iter', default=30000, type=int,
            help='num of iters in training')
    parser.add_argument('--val_iter', default=1000, type=int,
            help='num of iters in validation')
    parser.add_argument('--test_iter', default=10000, type=int,
            help='num of iters in testing')
    parser.add_argument('--val_step', default=2000, type=int,
            help='val after training how many iters')
    # --- model / optimization ---
    parser.add_argument('--model', default='proto',
            help='model name')
    parser.add_argument('--encoder', default='cnn',
            help='encoder: cnn or bert or roberta')
    parser.add_argument('--max_length', default=128, type=int,
            help='max length')
    parser.add_argument('--lr', default=1e-1, type=float,
            help='learning rate')
    parser.add_argument('--weight_decay', default=1e-5, type=float,
            help='weight decay')
    parser.add_argument('--dropout', default=0.0, type=float,
            help='dropout rate')
    parser.add_argument('--na_rate', default=0, type=int,
            help='NA rate (NA = Q * na_rate)')
    parser.add_argument('--grad_iter', default=1, type=int,
            help='accumulate gradient every x iterations')
    parser.add_argument('--optim', default='sgd',
            help='sgd / adam / adamw')
    parser.add_argument('--hidden_size', default=230, type=int,
            help='hidden size')
    parser.add_argument('--load_ckpt', default=None,
            help='load ckpt')
    parser.add_argument('--save_ckpt', default=None,
            help='save ckpt')
    parser.add_argument('--fp16', action='store_true',
            help='use nvidia apex fp16')
    parser.add_argument('--only_test', action='store_true',
            help='only test')
    # only for bert / roberta
    parser.add_argument('--pair', action='store_true',
            help='use pair model')
    parser.add_argument('--pretrain_ckpt', default=None,
            help='bert / roberta pre-trained checkpoint')
    parser.add_argument('--cat_entity_rep', action='store_true',
            help='concatenate entity representation as sentence rep')
    # only for prototypical networks
    parser.add_argument('--dot', action='store_true',
            help='use dot instead of L2 distance for proto')
    # experiment
    parser.add_argument('--mask_entity', action='store_true',
            help='mask entity names')
    opt = parser.parse_args()
    trainN = opt.trainN
    N = opt.N
    K = opt.K
    Q = opt.Q
    batch_size = opt.batch_size
    model_name = opt.model
    encoder_name = opt.encoder
    max_length = opt.max_length
    print("{}-way-{}-shot Few-Shot Relation Classification".format(N, K))
    print("model: {}".format(model_name))
    print("encoder: {}".format(encoder_name))
    print("max_length: {}".format(max_length))
    # --- sentence encoder selection ---
    if encoder_name == 'cnn':
        try:
            glove_mat = np.load('./pretrain/glove/glove_mat.npy')
            glove_word2id = json.load(open('./pretrain/glove/glove_word2id.json'))
        except:
            raise Exception("Cannot find glove files. Run glove/download_glove.sh to download glove files.")
        sentence_encoder = CNNSentenceEncoder(
                glove_mat,
                glove_word2id,
                max_length)
    elif encoder_name == 'bert':
        pretrain_ckpt = opt.pretrain_ckpt or 'bert-base-uncased'
        if opt.pair:
            sentence_encoder = BERTPAIRSentenceEncoder(
                    pretrain_ckpt,
                    max_length)
        else:
            sentence_encoder = BERTSentenceEncoder(
                    pretrain_ckpt,
                    max_length,
                    cat_entity_rep=opt.cat_entity_rep,
                    mask_entity=opt.mask_entity)
    elif encoder_name == 'roberta':
        pretrain_ckpt = opt.pretrain_ckpt or 'roberta-base'
        if opt.pair:
            sentence_encoder = RobertaPAIRSentenceEncoder(
                    pretrain_ckpt,
                    max_length)
        else:
            sentence_encoder = RobertaSentenceEncoder(
                    pretrain_ckpt,
                    max_length,
                    cat_entity_rep=opt.cat_entity_rep)
    else:
        raise NotImplementedError
    # --- data loaders ---
    # NOTE(review): the test loader below is built from opt.val, not opt.test,
    # so the --test argument is otherwise unused -- confirm this is intentional
    # (e.g. because the real test set is not publicly available).
    if opt.pair:
        train_data_loader = get_loader_pair(opt.train, sentence_encoder,
                N=trainN, K=K, Q=Q, na_rate=opt.na_rate, batch_size=batch_size, encoder_name=encoder_name)
        val_data_loader = get_loader_pair(opt.val, sentence_encoder,
                N=N, K=K, Q=Q, na_rate=opt.na_rate, batch_size=batch_size, encoder_name=encoder_name)
        test_data_loader = get_loader_pair(opt.val, sentence_encoder,
                N=N, K=K, Q=Q, na_rate=opt.na_rate, batch_size=batch_size, encoder_name=encoder_name)
    else:
        train_data_loader = get_loader(opt.train, sentence_encoder,
                N=trainN, K=K, Q=Q, na_rate=opt.na_rate, batch_size=batch_size)
        val_data_loader = get_loader(opt.val, sentence_encoder,
                N=N, K=K, Q=Q, na_rate=opt.na_rate, batch_size=batch_size)
        test_data_loader = get_loader(opt.val, sentence_encoder,
                N=N, K=K, Q=Q, na_rate=opt.na_rate, batch_size=batch_size)
        if opt.adv:
            adv_data_loader = get_loader_unsupervised(opt.adv, sentence_encoder,
                    N=trainN, K=K, Q=Q, na_rate=opt.na_rate, batch_size=batch_size)
    # --- optimizer selection ---
    if opt.optim == 'sgd':
        pytorch_optim = optim.SGD
    elif opt.optim == 'adam':
        pytorch_optim = optim.Adam
    elif opt.optim == 'adamw':
        from transformers import AdamW
        pytorch_optim = AdamW
    else:
        raise NotImplementedError
    # --- training framework (optionally with adversarial discriminator) ---
    if opt.adv:
        d = Discriminator(opt.hidden_size)
        framework = FewShotREFramework(train_data_loader, val_data_loader, test_data_loader, adv_data_loader, adv=opt.adv, d=d)
    else:
        framework = FewShotREFramework(train_data_loader, val_data_loader, test_data_loader)
    # Checkpoint name encodes the full experiment configuration.
    prefix = '-'.join([model_name, encoder_name, opt.train, opt.val, str(N), str(K)])
    if opt.adv is not None:
        prefix += '-adv_' + opt.adv
    if opt.na_rate != 0:
        prefix += '-na{}'.format(opt.na_rate)
    if opt.dot:
        prefix += '-dot'
    if opt.cat_entity_rep:
        prefix += '-catentity'
    # --- few-shot model selection ---
    if model_name == 'proto':
        model = Proto(sentence_encoder, dot=opt.dot)
    elif model_name == 'gnn':
        model = GNN(sentence_encoder, N, hidden_size=opt.hidden_size)
    elif model_name == 'snail':
        model = SNAIL(sentence_encoder, N, K, hidden_size=opt.hidden_size)
    elif model_name == 'metanet':
        model = MetaNet(N, K, sentence_encoder.embedding, max_length)
    elif model_name == 'siamese':
        model = Siamese(sentence_encoder, hidden_size=opt.hidden_size, dropout=opt.dropout)
    elif model_name == 'pair':
        model = Pair(sentence_encoder, hidden_size=opt.hidden_size)
    else:
        raise NotImplementedError
    if not os.path.exists('checkpoint'):
        os.mkdir('checkpoint')
    ckpt = 'checkpoint/{}.pth.tar'.format(prefix)
    if opt.save_ckpt:
        ckpt = opt.save_ckpt
    if torch.cuda.is_available():
        model.cuda()
    # --- train (unless only testing), then evaluate ---
    if not opt.only_test:
        if encoder_name in ['bert', 'roberta']:
            bert_optim = True
        else:
            bert_optim = False
        framework.train(model, prefix, batch_size, trainN, N, K, Q,
                pytorch_optim=pytorch_optim, load_ckpt=opt.load_ckpt, save_ckpt=ckpt,
                na_rate=opt.na_rate, val_step=opt.val_step, fp16=opt.fp16, pair=opt.pair,
                train_iter=opt.train_iter, val_iter=opt.val_iter, bert_optim=bert_optim)
    else:
        ckpt = opt.load_ckpt
        if ckpt is None:
            print("Warning: --load_ckpt is not specified. Will load Hugginface pre-trained checkpoint.")
            ckpt = 'none'
    acc = framework.eval(model, batch_size, N, K, Q, opt.test_iter, na_rate=opt.na_rate, ckpt=ckpt, pair=opt.pair)
    print("RESULT: %.2f" % (acc * 100))

if __name__ == "__main__":
    main()
7957983330a7ce14d77c9ed95ea06bb98edbd3fc | 7,259 | py | Python | data/MuPoTS/MuPoTS.py | Arthur151/3DMPPE_POSENET_RELEASE | 49b71fec03fcb646f1c0e00515dfb2441c41e26b | [
"MIT"
] | 1 | 2021-03-18T09:40:01.000Z | 2021-03-18T09:40:01.000Z | data/MuPoTS/MuPoTS.py | Arthur151/3DMPPE_POSENET_RELEASE | 49b71fec03fcb646f1c0e00515dfb2441c41e26b | [
"MIT"
] | null | null | null | data/MuPoTS/MuPoTS.py | Arthur151/3DMPPE_POSENET_RELEASE | 49b71fec03fcb646f1c0e00515dfb2441c41e26b | [
"MIT"
] | null | null | null | import os
import os.path as osp
import scipy.io as sio
import numpy as np
from pycocotools.coco import COCO
from config import cfg
import json
import cv2
import random
import math
from utils.pose_utils import pixel2cam, process_bbox
from utils.vis import vis_keypoints, vis_3d_skeleton
class MuPoTS:
def __init__(self, data_split):
self.data_split = data_split
self.img_dir = osp.join('..', 'data', 'MuPoTS', 'data', 'MultiPersonTestSet')
self.test_annot_path = osp.join('..', 'data', 'MuPoTS', 'data', 'MuPoTS-3D.json')
self.human_bbox_root_dir = osp.join('..', 'data', 'MuPoTS', 'bbox_root', 'bbox_root_mupots_output.json')
self.joint_num = 21 # MuCo-3DHP
self.joints_name = ('Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', 'Head', 'R_Hand', 'L_Hand', 'R_Toe', 'L_Toe') # MuCo-3DHP
self.original_joint_num = 17 # MuPoTS
self.original_joints_name = ('Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', 'Head') # MuPoTS
self.flip_pairs = ( (2, 5), (3, 6), (4, 7), (8, 11), (9, 12), (10, 13) )
self.skeleton = ( (0, 16), (16, 1), (1, 15), (15, 14), (14, 8), (14, 11), (8, 9), (9, 10), (11, 12), (12, 13), (1, 2), (2, 3), (3, 4), (1, 5), (5, 6), (6, 7) )
self.eval_joint = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
self.joints_have_depth = True
self.root_idx = self.joints_name.index('Pelvis')
self.data = self.load_data()
def load_data(self):
if self.data_split != 'test':
print('Unknown data subset')
assert 0
data = []
db = COCO(self.test_annot_path)
# use gt bbox and root
if cfg.use_gt_info:
print("Get bounding box and root from groundtruth")
for aid in db.anns.keys():
ann = db.anns[aid]
if ann['is_valid'] == 0:
continue
image_id = ann['image_id']
img = db.loadImgs(image_id)[0]
img_path = osp.join(self.img_dir, img['file_name'])
fx, fy, cx, cy = img['intrinsic']
f = np.array([fx, fy]); c = np.array([cx, cy]);
joint_cam = np.array(ann['keypoints_cam'])
root_cam = joint_cam[self.root_idx]
joint_img = np.array(ann['keypoints_img'])
joint_img = np.concatenate([joint_img, joint_cam[:,2:]],1)
joint_img[:,2] = joint_img[:,2] - root_cam[2]
joint_vis = np.ones((self.original_joint_num,1))
img_width, img_height = img['width'], img['height']
bbox = process_bbox(bbox, img_width, img_height)
if bbox is None: continue
data.append({
'img_path': img_path,
'bbox': bbox,
'joint_img': joint_img, # [org_img_x, org_img_y, depth - root_depth]
'joint_cam': joint_cam, # [X, Y, Z] in camera coordinate
'joint_vis': joint_vis,
'root_cam': root_cam, # [X, Y, Z] in camera coordinate
'f': f,
'c': c,
})
else:
print("Get bounding box and root from " + self.human_bbox_root_dir)
with open(self.human_bbox_root_dir) as f:
annot = json.load(f)
for i in range(len(annot)):
image_id = annot[i]['image_id']
img = db.loadImgs(image_id)[0]
img_width, img_height = img['width'], img['height']
img_path = osp.join(self.img_dir, img['file_name'])
fx, fy, cx, cy = img['intrinsic']
f = np.array([fx, fy]); c = np.array([cx, cy]);
root_cam = np.array(annot[i]['root_cam']).reshape(3)
bbox = np.array(annot[i]['bbox']).reshape(4)
data.append({
'img_path': img_path,
'bbox': bbox,
'joint_img': np.zeros((self.original_joint_num, 3)), # dummy
'joint_cam': np.zeros((self.original_joint_num, 3)), # dummy
'joint_vis': np.zeros((self.original_joint_num, 1)), # dummy
'root_cam': root_cam, # [X, Y, Z] in camera coordinate
'f': f,
'c': c,
})
return data
def evaluate(self, preds, result_dir):
    """Restore predicted keypoints to original image/camera space and dump
    them as MATLAB .mat files for the MuPoTS evaluation script.

    :param preds: per-sample predicted keypoints in cropped/heatmap space
    :param result_dir: directory the .mat result files are written to
    """
    print('Evaluation start...')
    gts = self.data
    sample_num = len(preds)
    joint_num = self.original_joint_num

    # Results are grouped per image name (sequence + frame).
    pred_2d_save = {}
    pred_3d_save = {}
    for n in range(sample_num):
        gt = gts[n]
        f = gt['f']
        c = gt['c']
        bbox = gt['bbox']
        gt_3d_root = gt['root_cam']
        img_name = gt['img_path'].split('/')
        img_name = img_name[-2] + '_' + img_name[-1].split('.')[0] # e.g., TS1_img_0001

        # restore coordinates to original space
        pred_2d_kpt = preds[n].copy()
        # only consider eval_joint
        pred_2d_kpt = np.take(pred_2d_kpt, self.eval_joint, axis=0)
        # x/y: heatmap space -> bbox space -> original image pixels.
        pred_2d_kpt[:,0] = pred_2d_kpt[:,0] / cfg.output_shape[1] * bbox[2] + bbox[0]
        pred_2d_kpt[:,1] = pred_2d_kpt[:,1] / cfg.output_shape[0] * bbox[3] + bbox[1]
        # z: discretized depth bin -> metric depth offset added to GT root depth.
        pred_2d_kpt[:,2] = (pred_2d_kpt[:,2] / cfg.depth_dim * 2 - 1) * (cfg.bbox_3d_shape[0]/2) + gt_3d_root[2]

        # 2d kpt save
        if img_name in pred_2d_save:
            pred_2d_save[img_name].append(pred_2d_kpt[:,:2])
        else:
            pred_2d_save[img_name] = [pred_2d_kpt[:,:2]]

        # Optional debug visualization; disabled by default.
        vis = False
        if vis:
            cvimg = cv2.imread(gt['img_path'], cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
            filename = str(random.randrange(1,500))
            tmpimg = cvimg.copy().astype(np.uint8)
            tmpkps = np.zeros((3,joint_num))
            tmpkps[0,:], tmpkps[1,:] = pred_2d_kpt[:,0], pred_2d_kpt[:,1]
            tmpkps[2,:] = 1
            tmpimg = vis_keypoints(tmpimg, tmpkps, self.skeleton)
            cv2.imwrite(filename + '_output.jpg', tmpimg)

        # back project to camera coordinate system
        pred_3d_kpt = pixel2cam(pred_2d_kpt, f, c)

        # 3d kpt save
        if img_name in pred_3d_save:
            pred_3d_save[img_name].append(pred_3d_kpt)
        else:
            pred_3d_save[img_name] = [pred_3d_kpt]

    output_path = osp.join(result_dir,'preds_2d_kpt_mupots.mat')
    sio.savemat(output_path, pred_2d_save)
    print("Testing result is saved at " + output_path)
    output_path = osp.join(result_dir,'preds_3d_kpt_mupots.mat')
    sio.savemat(output_path, pred_3d_save)
    print("Testing result is saved at " + output_path)
| 43.728916 | 258 | 0.522937 |
7957983d15e5560c2c8deed46f5a787a4682691f | 288 | py | Python | snippet/permissions.py | JackieQu/WowFunServer | 072e037ea62009c1cee4d3ce954cb6cf8205b506 | [
"MIT"
] | null | null | null | snippet/permissions.py | JackieQu/WowFunServer | 072e037ea62009c1cee4d3ce954cb6cf8205b506 | [
"MIT"
] | null | null | null | snippet/permissions.py | JackieQu/WowFunServer | 072e037ea62009c1cee4d3ce954cb6cf8205b506 | [
"MIT"
] | null | null | null | from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
    """Allow read-only access to anyone; writes only to the object's owner."""

    def has_object_permission(self, request, view, obj):
        # Safe methods (GET/HEAD/OPTIONS) are always permitted; anything
        # else requires the requesting user to own the object.
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.owner == request.user
79579944213c7a2de87579deb94121b4ba0dfe15 | 1,283 | py | Python | src/interface.py | kimvanwyk/ndlc_finances | 7c19a2a9778875fcb08dd7b98685d3aa36f5c7b1 | [
"BSD-3-Clause"
] | null | null | null | src/interface.py | kimvanwyk/ndlc_finances | 7c19a2a9778875fcb08dd7b98685d3aa36f5c7b1 | [
"BSD-3-Clause"
] | null | null | null | src/interface.py | kimvanwyk/ndlc_finances | 7c19a2a9778875fcb08dd7b98685d3aa36f5c7b1 | [
"BSD-3-Clause"
] | null | null | null | from datetime import date
import os, os.path
import shutil
import socket
import build_report
import kppe
# Address/port the control socket listens on; 0.0.0.0 binds all interfaces.
UDP_IP = "0.0.0.0"
UDP_PORT = 5001
def build(verbose=True, month=None):
    """Build the finance report PDF and move it into /io.

    :param verbose: print pandoc's output when True
    :param month: unused; kept for interface compatibility
    :return: tuple of (pandoc output, pandoc return code, output file name)
    """
    text = build_report.build_markup_file()
    fn = f'{date.today():%y%m%d}_ndlc_finance_report'
    text = kppe.markup(text)
    (ret, retcode) = kppe.build_document(text, os.path.abspath('templates/no_frills_latex.txt'), fn, toc=False)
    if verbose:
        print('Pandoc output:')
        print()
        print(ret)
    if retcode == 0:
        # Replace any previous report.  Only a missing/inaccessible target is
        # expected here, so catch OSError instead of swallowing every
        # exception with an unused binding.
        try:
            os.remove(f'/io/{fn}.pdf')
        except OSError:
            pass
        shutil.move(f'/app/{fn}.pdf', '/io')
    return (ret, retcode, f'{fn}.pdf')
if __name__ == '__main__':
    # Simple UDP control loop: any datagram containing 'build' triggers a
    # report build; 'quit' stops the listener.
    sock = socket.socket(socket.AF_INET,
                         socket.SOCK_DGRAM)
    sock.setblocking(1)
    sock.bind((UDP_IP, UDP_PORT))
    rec = []
    while True:
        (data, addr) = sock.recvfrom(1024)
        rec.append(data)
        s = ''.join([str(r) for r in rec])
        if 'build' in s:
            (ret, retcode, fn) = build(verbose=True)
            sock.sendto(bytes(fn, 'utf8') if retcode == 0 else b'error', addr)
            # BUG FIX: sendto() on an unconnected UDP socket requires a
            # destination address; the original call omitted it and raised
            # a TypeError/OSError at runtime.
            sock.sendto(bytes(ret, 'utf8'), addr)
            rec = []
        if 'quit' in s:
            break
| 27.297872 | 111 | 0.5682 |
795799d2301d521a859322225ab813ba4fdd1bf7 | 8,109 | py | Python | subliminal/providers/opensubtitles.py | jtwill/subliminal | 6aaece1c44173d3fbef00637f824d11f3f3dbd32 | [
"MIT"
] | 152 | 2015-01-06T00:56:19.000Z | 2022-03-11T21:08:32.000Z | subliminal/providers/opensubtitles.py | jtwill/subliminal | 6aaece1c44173d3fbef00637f824d11f3f3dbd32 | [
"MIT"
] | 25 | 2015-01-19T15:57:43.000Z | 2020-06-29T08:51:22.000Z | subliminal/providers/opensubtitles.py | jtwill/subliminal | 6aaece1c44173d3fbef00637f824d11f3f3dbd32 | [
"MIT"
] | 34 | 2015-02-10T01:45:14.000Z | 2022-01-03T13:52:35.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base64
import logging
import os
import re
import zlib
import babelfish
import guessit
from . import Provider
from .. import __version__
from ..compat import ServerProxy, TimeoutTransport
from ..exceptions import ProviderError, AuthenticationError, DownloadLimitExceeded
from ..subtitle import Subtitle, fix_line_endings, compute_guess_matches
from ..video import Episode, Movie
logger = logging.getLogger(__name__)
class OpenSubtitlesSubtitle(Subtitle):
    provider_name = 'opensubtitles'
    # Series results come back as '"Series Name" Episode Title'.
    series_re = re.compile('^"(?P<series_name>.*)" (?P<series_title>.*)$')

    def __init__(self, language, hearing_impaired, id, matched_by, movie_kind, hash, movie_name, movie_release_name,  # @ReservedAssignment
                 movie_year, movie_imdb_id, series_season, series_episode, page_link):
        super(OpenSubtitlesSubtitle, self).__init__(language, hearing_impaired, page_link)
        self.id = id
        self.matched_by = matched_by
        self.movie_kind = movie_kind
        self.hash = hash
        self.movie_name = movie_name
        self.movie_release_name = movie_release_name
        self.movie_year = movie_year
        self.movie_imdb_id = movie_imdb_id
        self.series_season = series_season
        self.series_episode = series_episode

    @property
    def series_name(self):
        # Series-name half of 'movie_name'; assumes movie_name matches
        # series_re (episode results only) — TODO confirm for all responses.
        return self.series_re.match(self.movie_name).group('series_name')

    @property
    def series_title(self):
        # Episode-title half of 'movie_name' (see series_re).
        return self.series_re.match(self.movie_name).group('series_title')

    def compute_matches(self, video):
        """Return the set of attribute names of *video* this subtitle matches."""
        matches = set()
        # episode
        if isinstance(video, Episode) and self.movie_kind == 'episode':
            # series
            if video.series and self.series_name.lower() == video.series.lower():
                matches.add('series')
            # season
            if video.season and self.series_season == video.season:
                matches.add('season')
            # episode
            if video.episode and self.series_episode == video.episode:
                matches.add('episode')
            # guess
            matches |= compute_guess_matches(video, guessit.guess_episode_info(self.movie_release_name + '.mkv'))
        # movie
        elif isinstance(video, Movie) and self.movie_kind == 'movie':
            # year
            if video.year and self.movie_year == video.year:
                matches.add('year')
            # guess
            matches |= compute_guess_matches(video, guessit.guess_movie_info(self.movie_release_name + '.mkv'))
        else:
            # Kind mismatch: skip the generic checks below as well.
            logger.info('%r is not a valid movie_kind for %r', self.movie_kind, video)
            return matches
        # hash
        if 'opensubtitles' in video.hashes and self.hash == video.hashes['opensubtitles']:
            matches.add('hash')
        # imdb_id
        if video.imdb_id and self.movie_imdb_id == video.imdb_id:
            matches.add('imdb_id')
        # title
        if video.title and self.movie_name.lower() == video.title.lower():
            matches.add('title')
        return matches
class OpenSubtitlesProvider(Provider):
    languages = {babelfish.Language.fromopensubtitles(l) for l in babelfish.language_converters['opensubtitles'].codes}

    def __init__(self):
        # XML-RPC endpoint; the transport enforces a 10 s timeout.
        self.server = ServerProxy('http://api.opensubtitles.org/xml-rpc', transport=TimeoutTransport(10))
        self.token = None

    def initialize(self):
        # Anonymous login; the returned token authenticates later calls.
        response = checked(self.server.LogIn('', '', 'eng', 'subliminal v%s' % __version__.split('-')[0]))
        self.token = response['token']

    def terminate(self):
        checked(self.server.LogOut(self.token))
        self.server.close()

    def no_operation(self):
        # Keep-alive ping so the session token does not expire.
        checked(self.server.NoOperation(self.token))

    def query(self, languages, hash=None, size=None, imdb_id=None, query=None, season=None, episode=None):  # @ReservedAssignment
        """Search subtitles by hash+size, imdb id and/or a text query.

        At least one of the criteria must be supplied; otherwise ValueError.
        """
        searches = []
        if hash and size:
            searches.append({'moviehash': hash, 'moviebytesize': str(size)})
        if imdb_id:
            searches.append({'imdbid': imdb_id})
        if query and season and episode:
            searches.append({'query': query, 'season': season, 'episode': episode})
        elif query:
            searches.append({'query': query})
        if not searches:
            raise ValueError('One or more parameter missing')
        # Restrict every sub-search to the requested languages.
        for search in searches:
            search['sublanguageid'] = ','.join(l.opensubtitles for l in languages)
        logger.debug('Searching subtitles %r', searches)
        response = checked(self.server.SearchSubtitles(self.token, searches))
        if not response['data']:
            logger.debug('No subtitle found')
            return []
        return [OpenSubtitlesSubtitle(babelfish.Language.fromopensubtitles(r['SubLanguageID']),
                                      bool(int(r['SubHearingImpaired'])), r['IDSubtitleFile'], r['MatchedBy'],
                                      r['MovieKind'], r['MovieHash'], r['MovieName'], r['MovieReleaseName'],
                                      int(r['MovieYear']) if r['MovieYear'] else None, int(r['IDMovieImdb']),
                                      int(r['SeriesSeason']) if r['SeriesSeason'] else None,
                                      int(r['SeriesEpisode']) if r['SeriesEpisode'] else None, r['SubtitlesLink'])
                for r in response['data']]

    def list_subtitles(self, video, languages):
        query = None
        season = None
        episode = None
        # Fall back to a text query only when neither a usable hash+size nor
        # an imdb id is available for the video.
        if ('opensubtitles' not in video.hashes or not video.size) and not video.imdb_id:
            query = video.name.split(os.sep)[-1]
            if isinstance(video, Episode):
                query = video.series
                season = video.season
                episode = video.episode
        return self.query(languages, hash=video.hashes.get('opensubtitles'), size=video.size, imdb_id=video.imdb_id,
                          query=query, season=season, episode=episode)

    def download_subtitle(self, subtitle):
        response = checked(self.server.DownloadSubtitles(self.token, [subtitle.id]))
        if not response['data']:
            raise ProviderError('Nothing to download')
        # Payload is base64-encoded compressed data; wbits=47 lets zlib
        # auto-detect a zlib or gzip header.
        subtitle.content = fix_line_endings(zlib.decompress(base64.b64decode(response['data'][0]['data']), 47))
# Exception hierarchy mapping OpenSubtitles XML-RPC status codes (see
# checked() below) onto the generic provider/authentication errors.
class OpenSubtitlesError(ProviderError):
    """Base class for non-generic :class:`OpenSubtitlesProvider` exceptions"""


class Unauthorized(OpenSubtitlesError, AuthenticationError):
    """Exception raised when status is '401 Unauthorized'"""


class NoSession(OpenSubtitlesError, AuthenticationError):
    """Exception raised when status is '406 No session'"""


class DownloadLimitReached(OpenSubtitlesError, DownloadLimitExceeded):
    """Exception raised when status is '407 Download limit reached'"""


class InvalidImdbid(OpenSubtitlesError):
    """Exception raised when status is '413 Invalid ImdbID'"""


class UnknownUserAgent(OpenSubtitlesError, AuthenticationError):
    """Exception raised when status is '414 Unknown User Agent'"""


class DisabledUserAgent(OpenSubtitlesError, AuthenticationError):
    """Exception raised when status is '415 Disabled user agent'"""


class ServiceUnavailable(OpenSubtitlesError):
    """Exception raised when status is '503 Service Unavailable'"""
def checked(response):
    """Check a response status before returning it

    :param response: a response from a XMLRPC call to OpenSubtitles
    :return: the response
    :raise: :class:`OpenSubtitlesError`
    """
    # Map known non-200 status codes to their dedicated exception classes.
    error_by_status = {
        401: Unauthorized,
        406: NoSession,
        407: DownloadLimitReached,
        413: InvalidImdbid,
        414: UnknownUserAgent,
        415: DisabledUserAgent,
        503: ServiceUnavailable,
    }
    status_code = int(response['status'][:3])
    exception = error_by_status.get(status_code)
    if exception is not None:
        raise exception
    if status_code != 200:
        # Unknown non-success status: surface the raw status string.
        raise OpenSubtitlesError(response['status'])
    return response
| 39.173913 | 139 | 0.648045 |
79579a1c3df0c22796322266d73577cefe681043 | 13,630 | py | Python | homeassistant/components/nest/legacy/__init__.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/nest/legacy/__init__.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/nest/legacy/__init__.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Support for Nest devices."""
# mypy: ignore-errors
from datetime import datetime, timedelta
import logging
import threading
from nest import Nest
from nest.nest import APIError, AuthorizationError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_FILENAME,
CONF_STRUCTURE,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send
from homeassistant.helpers.entity import DeviceInfo, Entity
from . import local_auth
from .const import DATA_NEST, DATA_NEST_CONFIG, DOMAIN, SIGNAL_NEST_UPDATE
# Config flows currently awaiting user input (legacy auth flow bookkeeping).
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)

# Platforms provided through the legacy Works-with-Nest API.
PLATFORMS = [
    Platform.BINARY_SENSOR,
    Platform.CAMERA,
    Platform.CLIMATE,
    Platform.SENSOR,
]

# Configuration for the legacy nest API
SERVICE_CANCEL_ETA = "cancel_eta"
SERVICE_SET_ETA = "set_eta"

# Default access-token cache file name.
NEST_CONFIG_FILE = "nest.conf"

# Service-call attribute names.
ATTR_ETA = "eta"
ATTR_ETA_WINDOW = "eta_window"
ATTR_STRUCTURE = "structure"
ATTR_TRIP_ID = "trip_id"

AWAY_MODE_AWAY = "away"
AWAY_MODE_HOME = "home"

ATTR_AWAY_MODE = "away_mode"
SERVICE_SET_AWAY_MODE = "set_away_mode"

# Services for the legacy API
SET_AWAY_MODE_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_AWAY_MODE): vol.In([AWAY_MODE_AWAY, AWAY_MODE_HOME]),
        vol.Optional(ATTR_STRUCTURE): vol.All(cv.ensure_list, [cv.string]),
    }
)

SET_ETA_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ETA): cv.time_period,
        vol.Optional(ATTR_TRIP_ID): cv.string,
        vol.Optional(ATTR_ETA_WINDOW): cv.time_period,
        vol.Optional(ATTR_STRUCTURE): vol.All(cv.ensure_list, [cv.string]),
    }
)

CANCEL_ETA_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_TRIP_ID): cv.string,
        vol.Optional(ATTR_STRUCTURE): vol.All(cv.ensure_list, [cv.string]),
    }
)
def nest_update_event_broker(hass, nest):
    """
    Dispatch SIGNAL_NEST_UPDATE to devices when nest stream API received data.

    Used for the legacy nest API.
    Runs in its own thread.
    """
    _LOGGER.debug("Listening for nest.update_event")
    while hass.is_running:
        # Block until the nest stream signals fresh data (or shutdown sets
        # the event to unblock this thread).
        nest.update_event.wait()
        if not hass.is_running:
            break
        nest.update_event.clear()
        _LOGGER.debug("Dispatching nest data update")
        dispatcher_send(hass, SIGNAL_NEST_UPDATE)
    _LOGGER.debug("Stop listening for nest.update_event")
async def async_setup_legacy(hass: HomeAssistant, config: dict) -> bool:
    """Set up Nest components using the legacy nest API."""
    if DOMAIN not in config:
        return True

    conf = config[DOMAIN]

    local_auth.initialize(hass, conf[CONF_CLIENT_ID], conf[CONF_CLIENT_SECRET])

    filename = config.get(CONF_FILENAME, NEST_CONFIG_FILE)
    access_token_cache_file = hass.config.path(filename)

    # Start the import config flow, passing the token cache location so the
    # flow can pick up previously stored credentials.
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={"nest_conf_path": access_token_cache_file},
        )
    )

    # Store config to be used during entry setup
    hass.data[DATA_NEST_CONFIG] = conf
    return True
async def async_setup_legacy_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Nest from legacy config entry."""
    nest = Nest(access_token=entry.data["tokens"]["access_token"])

    _LOGGER.debug("proceeding with setup")
    conf = hass.data.get(DATA_NEST_CONFIG, {})
    hass.data[DATA_NEST] = NestLegacyDevice(hass, conf, nest)
    if not await hass.async_add_executor_job(hass.data[DATA_NEST].initialize):
        return False

    hass.config_entries.async_setup_platforms(entry, PLATFORMS)

    def validate_structures(target_structures):
        # Log structure names that do not exist on the Nest account.
        all_structures = [structure.name for structure in nest.structures]
        for target in target_structures:
            if target not in all_structures:
                _LOGGER.info("Invalid structure: %s", target)

    def set_away_mode(service):
        """Set the away mode for a Nest structure."""
        if ATTR_STRUCTURE in service.data:
            target_structures = service.data[ATTR_STRUCTURE]
            validate_structures(target_structures)
        else:
            # Default to the structures configured for this integration.
            target_structures = hass.data[DATA_NEST].local_structure

        for structure in nest.structures:
            if structure.name in target_structures:
                _LOGGER.info(
                    "Setting away mode for: %s to: %s",
                    structure.name,
                    service.data[ATTR_AWAY_MODE],
                )
                structure.away = service.data[ATTR_AWAY_MODE]

    def set_eta(service):
        """Set away mode to away and include ETA for a Nest structure."""
        if ATTR_STRUCTURE in service.data:
            target_structures = service.data[ATTR_STRUCTURE]
            validate_structures(target_structures)
        else:
            target_structures = hass.data[DATA_NEST].local_structure

        for structure in nest.structures:
            if structure.name in target_structures:
                # ETA is only meaningful for structures with thermostats.
                if structure.thermostats:
                    _LOGGER.info(
                        "Setting away mode for: %s to: %s",
                        structure.name,
                        AWAY_MODE_AWAY,
                    )
                    structure.away = AWAY_MODE_AWAY

                    now = datetime.utcnow()
                    trip_id = service.data.get(
                        ATTR_TRIP_ID, f"trip_{int(now.timestamp())}"
                    )
                    eta_begin = now + service.data[ATTR_ETA]
                    eta_window = service.data.get(ATTR_ETA_WINDOW, timedelta(minutes=1))
                    eta_end = eta_begin + eta_window
                    _LOGGER.info(
                        "Setting ETA for trip: %s, "
                        "ETA window starts at: %s and ends at: %s",
                        trip_id,
                        eta_begin,
                        eta_end,
                    )
                    structure.set_eta(trip_id, eta_begin, eta_end)
                else:
                    _LOGGER.info(
                        "No thermostats found in structure: %s, unable to set ETA",
                        structure.name,
                    )

    def cancel_eta(service):
        """Cancel ETA for a Nest structure."""
        if ATTR_STRUCTURE in service.data:
            target_structures = service.data[ATTR_STRUCTURE]
            validate_structures(target_structures)
        else:
            target_structures = hass.data[DATA_NEST].local_structure

        for structure in nest.structures:
            if structure.name in target_structures:
                if structure.thermostats:
                    trip_id = service.data[ATTR_TRIP_ID]
                    _LOGGER.info("Cancelling ETA for trip: %s", trip_id)
                    structure.cancel_eta(trip_id)
                else:
                    _LOGGER.info(
                        "No thermostats found in structure: %s, "
                        "unable to cancel ETA",
                        structure.name,
                    )

    hass.services.async_register(
        DOMAIN, SERVICE_SET_AWAY_MODE, set_away_mode, schema=SET_AWAY_MODE_SCHEMA
    )

    hass.services.async_register(
        DOMAIN, SERVICE_SET_ETA, set_eta, schema=SET_ETA_SCHEMA
    )

    hass.services.async_register(
        DOMAIN, SERVICE_CANCEL_ETA, cancel_eta, schema=CANCEL_ETA_SCHEMA
    )

    @callback
    def start_up(event):
        """Start Nest update event listener."""
        threading.Thread(
            name="Nest update listener",
            target=nest_update_event_broker,
            args=(hass, nest),
        ).start()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_up)

    @callback
    def shut_down(event):
        """Stop Nest update event listener."""
        # Setting the event unblocks the broker thread so it can exit.
        nest.update_event.set()

    entry.async_on_unload(
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shut_down)
    )

    _LOGGER.debug("async_setup_nest is done")

    return True
class NestLegacyDevice:
    """Structure Nest functions for hass for legacy API."""

    def __init__(self, hass, conf, nest):
        """Init Nest Devices."""
        self.hass = hass
        self.nest = nest
        # Optional whitelist of structure names from the YAML config.
        self.local_structure = conf.get(CONF_STRUCTURE)

    def initialize(self):
        """Initialize Nest.

        Returns False when the web service cannot be reached.
        """
        try:
            # Do not optimize next statement, it is here for initialize
            # persistence Nest API connection.
            structure_names = [s.name for s in self.nest.structures]
            if self.local_structure is None:
                # No whitelist configured: use every structure on the account.
                self.local_structure = structure_names
        except (AuthorizationError, APIError, OSError) as err:
            _LOGGER.error("Connection error while access Nest web service: %s", err)
            return False
        return True

    def structures(self):
        """Generate a list of structures."""
        try:
            for structure in self.nest.structures:
                if structure.name not in self.local_structure:
                    _LOGGER.debug(
                        "Ignoring structure %s, not in %s",
                        structure.name,
                        self.local_structure,
                    )
                    continue
                yield structure
        except (AuthorizationError, APIError, OSError) as err:
            _LOGGER.error("Connection error while access Nest web service: %s", err)

    def thermostats(self):
        """Generate a list of thermostats."""
        return self._devices("thermostats")

    def smoke_co_alarms(self):
        """Generate a list of smoke co alarms."""
        return self._devices("smoke_co_alarms")

    def cameras(self):
        """Generate a list of cameras."""
        return self._devices("cameras")

    def _devices(self, device_type):
        """Generate a list of (structure, device) pairs for device_type."""
        try:
            for structure in self.nest.structures:
                if structure.name not in self.local_structure:
                    _LOGGER.debug(
                        "Ignoring structure %s, not in %s",
                        structure.name,
                        self.local_structure,
                    )
                    continue

                for device in getattr(structure, device_type, []):
                    try:
                        # Do not optimize next statement,
                        # it is here for verify Nest API permission.
                        device.name_long
                    except KeyError:
                        _LOGGER.warning(
                            "Cannot retrieve device name for [%s]"
                            ", please check your Nest developer "
                            "account permission settings",
                            device.serial,
                        )
                        continue
                    yield (structure, device)
        except (AuthorizationError, APIError, OSError) as err:
            _LOGGER.error("Connection error while access Nest web service: %s", err)
class NestSensorDevice(Entity):
    """Representation of a Nest sensor."""

    def __init__(self, structure, device, variable):
        """Initialize the sensor."""
        self.structure = structure
        self.variable = variable

        if device is not None:
            # device specific
            self.device = device
            self._name = f"{self.device.name_long} {self.variable.replace('_', ' ')}"
        else:
            # structure only
            self.device = structure
            self._name = f"{self.structure.name} {self.variable.replace('_', ' ')}"

        self._state = None
        self._unit = None

    @property
    def name(self):
        """Return the name of the nest, if any."""
        return self._name

    @property
    def should_poll(self):
        """Do not need poll thanks using Nest streaming API."""
        return False

    @property
    def unique_id(self):
        """Return unique id based on device serial and variable."""
        return f"{self.device.serial}-{self.variable}"

    @property
    def device_info(self) -> DeviceInfo:
        """Return information about the device."""
        if not hasattr(self.device, "name_long"):
            # Structure-level sensor (no device attached).
            name = self.structure.name
            model = "Structure"
        else:
            name = self.device.name_long
            if self.device.is_thermostat:
                model = "Thermostat"
            elif self.device.is_camera:
                model = "Camera"
            elif self.device.is_smoke_co_alarm:
                model = "Nest Protect"
            else:
                model = None

        return DeviceInfo(
            identifiers={(DOMAIN, self.device.serial)},
            manufacturer="Nest Labs",
            model=model,
            name=name,
        )

    def update(self):
        """Do not use NestSensorDevice directly."""
        raise NotImplementedError

    async def async_added_to_hass(self):
        """Register update signal handler."""

        async def async_update_state():
            """Update sensor state."""
            await self.async_update_ha_state(True)

        self.async_on_remove(
            async_dispatcher_connect(self.hass, SIGNAL_NEST_UPDATE, async_update_state)
        )
| 32.375297 | 88 | 0.596919 |
79579b78dbffc1c77448e20ca96c31a18aff936d | 2,794 | py | Python | django_server/feedback_map/rest/serializers/map_data_point.py | ForumViriumHelsinki/FVHFeedbackMap | cfbf3dd8715e34351fb1ba628ebb0c5eb82e78e0 | [
"MIT"
] | null | null | null | django_server/feedback_map/rest/serializers/map_data_point.py | ForumViriumHelsinki/FVHFeedbackMap | cfbf3dd8715e34351fb1ba628ebb0c5eb82e78e0 | [
"MIT"
] | null | null | null | django_server/feedback_map/rest/serializers/map_data_point.py | ForumViriumHelsinki/FVHFeedbackMap | cfbf3dd8715e34351fb1ba628ebb0c5eb82e78e0 | [
"MIT"
] | null | null | null | from django.conf import settings
from rest_framework import serializers
from rest_framework.generics import get_object_or_404
from feedback_map import models
from .base import BaseMapDataPointSerializer
class TagSerializer(serializers.ModelSerializer):
    """Serialize every field of a Tag."""

    class Meta:
        model = models.Tag
        fields = '__all__'
class MapDataPointCommentSerializer(serializers.ModelSerializer):
    """Serialize comments; the author is exposed as a read-only username."""

    user = serializers.SlugRelatedField(slug_field='username', read_only=True)

    class Meta:
        model = models.MapDataPointComment
        fields = '__all__'
class MapDataPointCommentNotificationSerializer(serializers.ModelSerializer):
    """Serialize a notification with its comment inlined read-only."""

    comment = MapDataPointCommentSerializer(read_only=True)

    class Meta:
        model = models.MapDataPointCommentNotification
        fields = ['comment', 'id']
class DictMapDataPointSerializer(BaseMapDataPointSerializer):
    """Serializer variant operating on dict rows rather than model instances
    (e.g. results of a .values() queryset) — presumably; confirm with callers.
    """

    is_processed = serializers.BooleanField(read_only=True, source='processed_by_id')
    created_by = serializers.IntegerField(read_only=True, source='created_by_id')
    image = serializers.SerializerMethodField()

    # Boolean fields that must appear in the output even when the source
    # dict lacks them; they default to False.
    false_default_fields = ['is_processed']

    class Meta:
        model = models.MapDataPoint
        fields = BaseMapDataPointSerializer.Meta.fields

    def to_representation(self, instance):
        result = super().to_representation(instance)
        for field in self.false_default_fields:
            result.setdefault(field, False)
        return result

    def get_image(self, note):
        # 'note' is a dict row; build a media URL only when an image is set.
        return settings.MEDIA_URL + note['image'] if note.get('image', None) else None
class ButtonPositionField(serializers.Field):
    """Two-way mapping between a data point's tags and the button position
    of a published Tag."""

    def to_representation(self, data_point):
        # Expose the button position of the first published tag (with a
        # position) matching the data point's tags, if any.
        if len(data_point.tags):
            tag = models.Tag.objects.filter(
                tag__in=data_point.tags, published__isnull=False, button_position__isnull=False).first()
            if tag:
                return {'button_position': tag.button_position}
        return {}

    def to_internal_value(self, position):
        # Translate an incoming position into the matching tag; a missing
        # published tag for that position yields a 404.
        if position:
            return {'tags': [get_object_or_404(models.Tag, button_position=position, published__isnull=False).tag]}
        return {}
class MapDataPointSerializer(BaseMapDataPointSerializer):
    """Full serializer for MapDataPoint instances, with nested comments."""

    # upvotes = serializers.SlugRelatedField(many=True, read_only=True, slug_field='user_id')
    # downvotes = serializers.SlugRelatedField(many=True, read_only=True, slug_field='user_id')
    comments = MapDataPointCommentSerializer(many=True, read_only=True)
    # source='*' lets the field derive the position from the whole instance.
    button_position = ButtonPositionField(source='*', required=False)

    class Meta:
        model = models.MapDataPoint
        fields = ['id', 'comment', 'image', 'lat', 'lon', 'created_at', 'button_position',
                  'is_processed', 'tags', 'created_by', 'comments', 'device_id'] #, 'upvotes', 'downvotes']
| 35.820513 | 115 | 0.716178 |
79579c70b8892064cf487cd5d0b995b6d318fb5b | 424 | py | Python | 594.py | vitkarpenko/leetcode | 3d20c329987dd37fb9764f7f5d624cdaa0a5e2d7 | [
"MIT"
] | null | null | null | 594.py | vitkarpenko/leetcode | 3d20c329987dd37fb9764f7f5d624cdaa0a5e2d7 | [
"MIT"
] | null | null | null | 594.py | vitkarpenko/leetcode | 3d20c329987dd37fb9764f7f5d624cdaa0a5e2d7 | [
"MIT"
] | null | null | null | import collections
class Solution:
    def findLHS(self, nums):
        """Length of the longest harmonious subsequence of nums.

        A harmonious subsequence is one whose max and min differ by
        exactly 1; counting occurrences of each value suffices since
        order within the subsequence is irrelevant.

        :type nums: List[int]
        :rtype: int
        """
        freq = collections.Counter(nums)
        best = 0
        for value, occurrences in freq.items():
            # Pair each value with value+1 when both are present.
            if value + 1 in freq:
                best = max(best, occurrences + freq[value + 1])
        return best
if __name__ == '__main__':
    # Sample from the problem statement; expected output: 5.
    print(Solution().findLHS([1,3,2,2,5,2,3,7]))
| 20.190476 | 63 | 0.523585 |
79579c8d632d06b35c34f6ddacf6374642932fb4 | 26,171 | py | Python | discord/player.py | Daggy1234/enhanced-discord.py | 3ffe1348956ebc1e2512439b532fdb6516c267c6 | [
"MIT"
] | null | null | null | discord/player.py | Daggy1234/enhanced-discord.py | 3ffe1348956ebc1e2512439b532fdb6516c267c6 | [
"MIT"
] | 1 | 2022-01-21T08:20:30.000Z | 2022-01-21T08:20:30.000Z | discord/player.py | Daggy1234/enhanced-discord.py | 3ffe1348956ebc1e2512439b532fdb6516c267c6 | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import threading
import traceback
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
import json
import sys
import re
import io
from typing import Any, Callable, Generic, IO, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union
from .errors import ClientException
from .opus import Encoder as OpusEncoder
from .oggparse import OggStream
from .utils import MISSING
if TYPE_CHECKING:
from .voice_client import VoiceClient
# Type variables for methods returning instances of their own class.
AT = TypeVar('AT', bound='AudioSource')
FT = TypeVar('FT', bound='FFmpegOpusAudio')

_log = logging.getLogger(__name__)

__all__ = (
    'AudioSource',
    'PCMAudio',
    'FFmpegAudio',
    'FFmpegPCMAudio',
    'FFmpegOpusAudio',
    'PCMVolumeTransformer',
)

# Windows-only Popen creation flag suppressing a console window for the
# spawned ffmpeg process; 0 (no flag) elsewhere.  Merged the previously
# split bare annotation + assignment into a single annotated assignment.
CREATE_NO_WINDOW: int = 0 if sys.platform != 'win32' else 0x08000000
class AudioSource:
    """Represents an audio stream.

    The audio stream can be Opus encoded or not, however if the audio stream
    is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.

    .. warning::

        The audio source reads are done in a separate thread.
    """

    def read(self) -> bytes:
        """Reads 20ms worth of audio.

        Subclasses must implement this.

        If the audio is complete, then returning an empty
        :term:`py:bytes-like object` to signal this is the way to do so.

        If :meth:`~AudioSource.is_opus` method returns ``True``, then it must return
        20ms worth of Opus encoded audio. Otherwise, it must be 20ms
        worth of 16-bit 48KHz stereo PCM, which is about 3,840 bytes
        per frame (20ms worth of audio).

        Returns
        --------
        :class:`bytes`
            A bytes like object that represents the PCM or Opus data.
        """
        raise NotImplementedError

    def is_opus(self) -> bool:
        """Checks if the audio source is already encoded in Opus."""
        # Base sources are assumed to produce raw PCM.
        return False

    def cleanup(self) -> None:
        """Called when clean-up is needed to be done.

        Useful for clearing buffer data or processes after
        it is done playing audio.
        """
        # No resources held by the base class.
        pass

    def __del__(self) -> None:
        # Best-effort cleanup when the source is garbage collected.
        self.cleanup()
class PCMAudio(AudioSource):
    """Represents raw 16-bit 48KHz stereo PCM audio source.

    Attributes
    -----------
    stream: :term:`py:file object`
        A file-like object that reads byte data representing raw PCM.
    """

    def __init__(self, stream: io.BufferedIOBase) -> None:
        self.stream: io.BufferedIOBase = stream

    def read(self) -> bytes:
        frame = self.stream.read(OpusEncoder.FRAME_SIZE)
        # A short read means the stream is exhausted; return b'' to signal
        # end-of-source rather than emit a partial frame.
        if len(frame) != OpusEncoder.FRAME_SIZE:
            return b''
        return frame
class FFmpegAudio(AudioSource):
    """Represents an FFmpeg (or AVConv) based AudioSource.

    User created AudioSources using FFmpeg differently from how :class:`FFmpegPCMAudio` and
    :class:`FFmpegOpusAudio` work should subclass this.

    .. versionadded:: 1.3
    """

    def __init__(self, source: Union[str, io.BufferedIOBase], *, executable: str = 'ffmpeg', args: Any, **subprocess_kwargs: Any):
        piping = subprocess_kwargs.get('stdin') == subprocess.PIPE
        if piping and isinstance(source, str):
            raise TypeError("parameter conflict: 'source' parameter cannot be a string when piping to stdin")

        args = [executable, *args]
        kwargs = {'stdout': subprocess.PIPE}
        kwargs.update(subprocess_kwargs)

        self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
        self._stdout: IO[bytes] = self._process.stdout  # type: ignore
        # BUG FIX: annotation previously referenced the undefined name
        # ``Bytes``; only harmless because ``from __future__ import
        # annotations`` keeps annotations unevaluated at runtime.
        self._stdin: Optional[IO[bytes]] = None
        self._pipe_thread: Optional[threading.Thread] = None

        if piping:
            n = f'popen-stdin-writer:{id(self):#x}'
            self._stdin = self._process.stdin
            # Daemon thread that feeds the file-like source into ffmpeg's stdin.
            self._pipe_thread = threading.Thread(target=self._pipe_writer, args=(source,), daemon=True, name=n)
            self._pipe_thread.start()

    def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
        """Spawn the ffmpeg subprocess, translating failures into ClientException."""
        process = None
        try:
            process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
        except FileNotFoundError:
            executable = args.partition(' ')[0] if isinstance(args, str) else args[0]
            raise ClientException(executable + ' was not found.') from None
        except subprocess.SubprocessError as exc:
            raise ClientException(f'Popen failed: {exc.__class__.__name__}: {exc}') from exc
        else:
            return process

    def _kill_process(self) -> None:
        proc = self._process
        if proc is MISSING:
            return

        _log.info('Preparing to terminate ffmpeg process %s.', proc.pid)

        try:
            proc.kill()
        except Exception:
            _log.exception('Ignoring error attempting to kill ffmpeg process %s', proc.pid)

        if proc.poll() is None:
            # kill() delivery is asynchronous; communicate() waits for exit
            # and drains the pipes so the process does not become a zombie.
            _log.info('ffmpeg process %s has not terminated. Waiting to terminate...', proc.pid)
            proc.communicate()
            _log.info('ffmpeg process %s should have terminated with a return code of %s.', proc.pid, proc.returncode)
        else:
            _log.info('ffmpeg process %s successfully terminated with return code of %s.', proc.pid, proc.returncode)

    def _pipe_writer(self, source: io.BufferedIOBase) -> None:
        """Thread target: stream *source* into the subprocess's stdin."""
        while self._process:
            # arbitrarily large read size
            data = source.read(8192)
            if not data:
                self._process.terminate()
                return
            try:
                self._stdin.write(data)
            except Exception:
                _log.debug('Write error for %s, this is probably not a problem', self, exc_info=True)
                # at this point the source data is either exhausted or the process is fubar
                self._process.terminate()
                return

    def cleanup(self) -> None:
        self._kill_process()
        # MISSING sentinels make repeated cleanup() calls no-ops.
        self._process = self._stdout = self._stdin = MISSING
class FFmpegPCMAudio(FFmpegAudio):
    """An FFmpeg/AVConv source producing raw 16-bit 48KHz stereo PCM.
    A sub-process is spawned for the given input.
    .. warning::
        The ffmpeg or avconv executable must be on your path environment
        variable for this to work.
    Parameters
    ------------
    source: Union[:class:`str`, :class:`io.BufferedIOBase`]
        The input ffmpeg converts to PCM bytes. When ``pipe`` is ``True``
        this is a file-like object fed to ffmpeg's stdin.
    executable: :class:`str`
        The executable name (and path) to use. Defaults to ``ffmpeg``.
    pipe: :class:`bool`
        Whether ``source`` is piped to ffmpeg's stdin. Defaults to ``False``.
    stderr: Optional[:term:`py:file object`]
        A file-like object (or ``subprocess.PIPE``) handed to the Popen
        constructor.
    before_options: Optional[:class:`str`]
        Extra command line arguments inserted before the ``-i`` flag.
    options: Optional[:class:`str`]
        Extra command line arguments appended after the ``-i`` flag.
    Raises
    --------
    ClientException
        The subprocess failed to be created.
    """
    def __init__(
        self,
        source: Union[str, io.BufferedIOBase],
        *,
        executable: str = 'ffmpeg',
        pipe: bool = False,
        stderr: Optional[IO[str]] = None,
        before_options: Optional[str] = None,
        options: Optional[str] = None
    ) -> None:
        subprocess_kwargs = {
            'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL,
            'stderr': stderr,
        }
        # Assemble the ffmpeg argv: user prefix options, input, the fixed
        # s16le/48KHz/stereo output format, user suffix options, stdout sink.
        cmdline = []
        if isinstance(before_options, str):
            cmdline += shlex.split(before_options)
        cmdline += ['-i', '-' if pipe else source]
        cmdline += ['-f', 's16le', '-ar', '48000', '-ac', '2', '-loglevel', 'warning']
        if isinstance(options, str):
            cmdline += shlex.split(options)
        cmdline.append('pipe:1')
        super().__init__(source, executable=executable, args=cmdline, **subprocess_kwargs)
    def read(self) -> bytes:
        """Return one full PCM frame from ffmpeg's stdout, or ``b''`` at EOF."""
        frame = self._stdout.read(OpusEncoder.FRAME_SIZE)
        return frame if len(frame) == OpusEncoder.FRAME_SIZE else b''
    def is_opus(self) -> bool:
        # PCM output still needs Opus encoding by the library.
        return False
class FFmpegOpusAudio(FFmpegAudio):
    """An audio source from FFmpeg (or AVConv).
    This launches a sub-process to a specific input file given. However, rather than
    producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
    Opus, this class produces Opus packets, skipping the encoding step done by the library.
    Alternatively, instead of instantiating this class directly, you can use
    :meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
    can be used to opportunistically skip pointless re-encoding of existing Opus audio data
    for a boost in performance at the cost of a short initial delay to gather the information.
    The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
    know that the input source is Opus encoded beforehand.
    .. versionadded:: 1.3
    .. warning::
        You must have the ffmpeg or avconv executable in your path environment
        variable in order for this to work.
    Parameters
    ------------
    source: Union[:class:`str`, :class:`io.BufferedIOBase`]
        The input that ffmpeg will take and convert to Opus bytes.
        If ``pipe`` is ``True`` then this is a file-like object that is
        passed to the stdin of ffmpeg.
    bitrate: :class:`int`
        The bitrate in kbps to encode the output to. Defaults to ``128``.
    codec: Optional[:class:`str`]
        The codec to use to encode the audio data. Normally this would be
        just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
        opportunistically skip pointlessly re-encoding Opus audio data by passing
        ``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
        ``libopus`` will be considered ``libopus``. Defaults to ``libopus``.
        .. warning::
            Do not provide this parameter unless you are certain that the audio input is
            already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
            should be used to determine the proper value for this parameter.
    executable: :class:`str`
        The executable name (and path) to use. Defaults to ``ffmpeg``.
    pipe: :class:`bool`
        If ``True``, denotes that ``source`` parameter will be passed
        to the stdin of ffmpeg. Defaults to ``False``.
    stderr: Optional[:term:`py:file object`]
        A file-like object to pass to the Popen constructor.
        Could also be an instance of ``subprocess.PIPE``.
    before_options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
    options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
    Raises
    --------
    ClientException
        The subprocess failed to be created.
    """
    def __init__(
        self,
        source: Union[str, io.BufferedIOBase],
        *,
        bitrate: int = 128,
        codec: Optional[str] = None,
        executable: str = 'ffmpeg',
        pipe=False,
        stderr=None,
        before_options=None,
        options=None,
    ) -> None:
        args = []
        subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}
        if isinstance(before_options, str):
            args.extend(shlex.split(before_options))
        args.append('-i')
        args.append('-' if pipe else source)
        # Bug fix: 'copy' was previously missing from this tuple, so explicitly
        # passing codec='copy' (the documented way to skip re-encoding) silently
        # fell through to 'libopus' and re-encoded anyway. Any other
        # unrecognized value still defaults to 'libopus' as documented.
        codec = 'copy' if codec in ('opus', 'libopus', 'copy') else 'libopus'
        args.extend(('-map_metadata', '-1',
                     '-f', 'opus',
                     '-c:a', codec,
                     '-ar', '48000',
                     '-ac', '2',
                     '-b:a', f'{bitrate}k',
                     '-loglevel', 'warning'))
        if isinstance(options, str):
            args.extend(shlex.split(options))
        args.append('pipe:1')
        super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
        # ffmpeg emits an Ogg container; iterate its packets to obtain the
        # raw Opus frames that read() hands to the voice client.
        self._packet_iter = OggStream(self._stdout).iter_packets()
    @classmethod
    async def from_probe(
        cls: Type[FT],
        source: str,
        *,
        method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
        **kwargs: Any,
    ) -> FT:
        """|coro|
        A factory method that creates a :class:`FFmpegOpusAudio` after probing
        the input source for audio codec and bitrate information.
        Examples
        ----------
        Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::
            source = await discord.FFmpegOpusAudio.from_probe("song.webm")
            voice_client.play(source)
        If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
        to probe using ffmpeg instead: ::
            source = await discord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
            voice_client.play(source)
        Using a custom method of determining codec and bitrate: ::
            def custom_probe(source, executable):
                # some analysis code here
                return codec, bitrate
            source = await discord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
            voice_client.play(source)
        Parameters
        ------------
        source
            Identical to the ``source`` parameter for the constructor.
        method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
            The probing method used to determine bitrate and codec information. As a string, valid
            values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
            (or avconv). As a callable, it must take two string arguments, ``source`` and
            ``executable``. Both parameters are the same values passed to this factory function.
            ``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
        kwargs
            The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
            excluding ``bitrate`` and ``codec``.
        Raises
        --------
        AttributeError
            Invalid probe method, must be ``'native'`` or ``'fallback'``.
        TypeError
            Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
        Returns
        --------
        :class:`FFmpegOpusAudio`
            An instance of this class.
        """
        # executable may be None here; probe() substitutes the default.
        executable = kwargs.get('executable')
        codec, bitrate = await cls.probe(source, method=method, executable=executable)
        return cls(source, bitrate=bitrate, codec=codec, **kwargs)  # type: ignore
    @classmethod
    async def probe(
        cls,
        source: str,
        *,
        method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
        executable: Optional[str] = None,
    ) -> Tuple[Optional[str], Optional[int]]:
        """|coro|
        Probes the input source for bitrate and codec information.
        Parameters
        ------------
        source
            Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
        method
            Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
        executable: :class:`str`
            Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.
        Raises
        --------
        AttributeError
            Invalid probe method, must be ``'native'`` or ``'fallback'``.
        TypeError
            Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
        Returns
        ---------
        Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
            A 2-tuple with the codec and bitrate of the input source.
        """
        method = method or 'native'
        executable = executable or 'ffmpeg'
        probefunc = fallback = None
        if isinstance(method, str):
            # Resolve 'native'/'fallback' to the matching static probe method.
            probefunc = getattr(cls, '_probe_codec_' + method, None)
            if probefunc is None:
                raise AttributeError(f"Invalid probe method {method!r}")
            if probefunc is cls._probe_codec_native:
                fallback = cls._probe_codec_fallback
        elif callable(method):
            probefunc = method
            fallback = cls._probe_codec_fallback
        else:
            raise TypeError("Expected str or callable for parameter 'probe', " \
                            f"not '{method.__class__.__name__}'")
        codec = bitrate = None
        loop = asyncio.get_event_loop()
        try:
            # Probes run blocking subprocess calls; push them off the loop.
            codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable))  # type: ignore
        except Exception:
            if not fallback:
                _log.exception("Probe '%s' using '%s' failed", method, executable)
                return  # type: ignore
            _log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
            try:
                codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable))  # type: ignore
            except Exception:
                _log.exception("Fallback probe using '%s' failed", executable)
            else:
                _log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
        else:
            _log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
        finally:
            # NOTE: this return in the finally block deliberately overrides the
            # bare ``return`` above so callers always receive a 2-tuple.
            return codec, bitrate
    @staticmethod
    def _probe_codec_native(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
        """Probe *source* with ffprobe/avprobe, parsing its JSON stream info."""
        exe = (
            executable[:2] + 'probe'
            if executable in {'ffmpeg', 'avconv'}
            else executable
        )
        args = [exe, '-v', 'quiet', '-print_format', 'json', '-show_streams', '-select_streams', 'a:0', source]
        output = subprocess.check_output(args, timeout=20)
        codec = bitrate = None
        if output:
            data = json.loads(output)
            streamdata = data['streams'][0]
            codec = streamdata.get('codec_name')
            bitrate = int(streamdata.get('bit_rate', 0))
            # NOTE(review): max() makes 512 kbps a *floor* here; if the intent
            # was to cap at Opus's 512 kbps maximum this should be min() --
            # confirm against the encoder's expectations before changing.
            bitrate = max(round(bitrate/1000), 512)
        return codec, bitrate
    @staticmethod
    def _probe_codec_fallback(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
        """Probe *source* by scraping ffmpeg's banner/stream output with regexes."""
        args = [executable, '-hide_banner', '-i', source]
        proc = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, _ = proc.communicate(timeout=20)
        output = out.decode('utf8')
        codec = bitrate = None
        codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
        if codec_match:
            codec = codec_match.group(1)
        br_match = re.search(r"(\d+) [kK]b/s", output)
        if br_match:
            # NOTE(review): same max()-as-floor pattern as _probe_codec_native.
            bitrate = max(int(br_match.group(1)), 512)
        return codec, bitrate
    def read(self) -> bytes:
        """Return the next Opus packet, or ``b''`` when the stream is exhausted."""
        return next(self._packet_iter, b'')
    def is_opus(self) -> bool:
        # Packets are already Opus; the library must not re-encode them.
        return True
class PCMVolumeTransformer(AudioSource, Generic[AT]):
    """Wraps another :class:`AudioSource` and applies a volume control.
    This does not work on audio sources that have :meth:`AudioSource.is_opus`
    set to ``True``, since their packets are already encoded.
    Parameters
    ------------
    original: :class:`AudioSource`
        The source whose PCM output is scaled.
    volume: :class:`float`
        The starting volume; see :attr:`volume` for details.
    Raises
    -------
    TypeError
        Not an audio source.
    ClientException
        The audio source is opus encoded.
    """
    def __init__(self, original: AT, volume: float = 1.0):
        if not isinstance(original, AudioSource):
            raise TypeError(f'expected AudioSource not {original.__class__.__name__}.')
        if original.is_opus():
            raise ClientException('AudioSource must not be Opus encoded.')
        self.original: AT = original
        self.volume = volume
    @property
    def volume(self) -> float:
        """Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
        return self._volume
    @volume.setter
    def volume(self, value: float) -> None:
        # Negative values are clamped to silence; read() caps gain at 200%.
        self._volume = max(value, 0.0)
    def cleanup(self) -> None:
        self.original.cleanup()
    def read(self) -> bytes:
        # Scale every 16-bit sample of the wrapped source's frame.
        return audioop.mul(self.original.read(), 2, min(self._volume, 2.0))
class AudioPlayer(threading.Thread):
    """Daemon thread that reads 20ms frames from an AudioSource and sends
    them to a VoiceClient at a steady cadence, with pause/resume support."""
    # Seconds per Opus frame; the send loop paces itself on this interval.
    DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0
    def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
        threading.Thread.__init__(self)
        self.daemon: bool = True
        self.source: AudioSource = source
        self.client: VoiceClient = client
        # Optional callback invoked when playback finishes or errors out.
        self.after: Optional[Callable[[Optional[Exception]], Any]] = after
        self._end: threading.Event = threading.Event()
        self._resumed: threading.Event = threading.Event()
        self._resumed.set()  # we are not paused
        self._current_error: Optional[Exception] = None
        self._connected: threading.Event = client._connected
        # Guards _set_source so pause/swap/resume happen atomically.
        self._lock: threading.Lock = threading.Lock()
        if after is not None and not callable(after):
            raise TypeError('Expected a callable for the "after" parameter.')
    def _do_run(self) -> None:
        """Main send loop: one frame per DELAY seconds until stopped or EOF."""
        self.loops = 0
        self._start = time.perf_counter()
        # getattr lookup speed ups
        play_audio = self.client.send_audio_packet
        self._speak(True)
        while not self._end.is_set():
            # are we paused?
            if not self._resumed.is_set():
                # wait until we aren't
                self._resumed.wait()
                continue
            # are we disconnected from voice?
            if not self._connected.is_set():
                # wait until we are connected
                self._connected.wait()
                # reset our internal data
                self.loops = 0
                self._start = time.perf_counter()
            self.loops += 1
            data = self.source.read()
            if not data:
                self.stop()
                break
            play_audio(data, encode=not self.source.is_opus())
            # Sleep until the next frame's ideal send time; never negative.
            next_time = self._start + self.DELAY * self.loops
            delay = max(0, self.DELAY + (next_time - time.perf_counter()))
            time.sleep(delay)
    def run(self) -> None:
        try:
            self._do_run()
        except Exception as exc:
            # Remember the error so _call_after can report it.
            self._current_error = exc
            self.stop()
        finally:
            self.source.cleanup()
            self._call_after()
    def _call_after(self) -> None:
        """Invoke the user's after-callback (or log the error if there is none)."""
        error = self._current_error
        if self.after is not None:
            try:
                self.after(error)
            except Exception as exc:
                _log.exception('Calling the after function failed.')
                exc.__context__ = error
                traceback.print_exception(type(exc), exc, exc.__traceback__)
        elif error:
            msg = f'Exception in voice thread {self.name}'
            _log.exception(msg, exc_info=error)
            print(msg, file=sys.stderr)
            traceback.print_exception(type(error), error, error.__traceback__)
    def stop(self) -> None:
        # Setting _resumed unblocks a paused loop so it can observe _end.
        self._end.set()
        self._resumed.set()
        self._speak(False)
    def pause(self, *, update_speaking: bool = True) -> None:
        self._resumed.clear()
        if update_speaking:
            self._speak(False)
    def resume(self, *, update_speaking: bool = True) -> None:
        # Reset pacing so resumed playback doesn't try to "catch up".
        self.loops = 0
        self._start = time.perf_counter()
        self._resumed.set()
        if update_speaking:
            self._speak(True)
    def is_playing(self) -> bool:
        return self._resumed.is_set() and not self._end.is_set()
    def is_paused(self) -> bool:
        return not self._end.is_set() and not self._resumed.is_set()
    def _set_source(self, source: AudioSource) -> None:
        # Swap sources under the lock, briefly pausing without toggling the
        # speaking indicator.
        with self._lock:
            self.pause(update_speaking=False)
            self.source = source
            self.resume(update_speaking=False)
    def _speak(self, speaking: bool) -> None:
        try:
            asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
        except Exception as e:
            _log.info("Speaking call in player failed: %s", e)
| 35.655313 | 130 | 0.615681 |
79579d765c19f2b3783df30d9608b57f71d3a6fe | 15,091 | py | Python | awaitress/parser.py | melthaw/awaitress | 4effce73479c57a0447a75e40ae3244aeace4d38 | [
"ZPL-2.1"
] | null | null | null | awaitress/parser.py | melthaw/awaitress | 4effce73479c57a0447a75e40ae3244aeace4d38 | [
"ZPL-2.1"
] | null | null | null | awaitress/parser.py | melthaw/awaitress | 4effce73479c57a0447a75e40ae3244aeace4d38 | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""HTTP Request Parser
This server uses asyncore to accept connections and do initial
processing but threads to do work.
"""
import re
from io import BytesIO
from awaitress.buffers import OverflowableBuffer
from awaitress.compat import tostr, unquote_bytes_to_wsgi, urlparse
from awaitress.receiver import ChunkedReceiver, FixedStreamReceiver
from awaitress.utilities import (
BadRequest,
RequestEntityTooLarge,
RequestHeaderFieldsTooLarge,
ServerNotImplemented,
find_double_newline,
)
from .rfc7230 import HEADER_FIELD
class ParsingError(Exception):
    """Raised when an HTTP request message cannot be parsed."""
class TransferEncodingNotImplemented(Exception):
    """Raised when a request asks for a Transfer-Encoding the server lacks."""
class HTTPRequestParser(object):
    """A structure that collects the HTTP request.
    Once the stream is completed, the instance is passed to
    a server task constructor.
    Bytes are fed in incrementally via received(); the parser first
    accumulates the request line + headers, then (if a body is expected)
    hands subsequent bytes to a fixed-length or chunked body receiver.
    """
    completed = False  # Set once request is completed.
    empty = False  # Set if no request was made.
    expect_continue = False  # client sent "Expect: 100-continue" header
    headers_finished = False  # True when headers have been read
    header_plus = b""  # buffered header bytes awaiting a blank-line terminator
    chunked = False  # True when Transfer-Encoding: chunked was requested
    content_length = 0
    header_bytes_received = 0
    body_bytes_received = 0
    body_rcv = None  # body receiver; None while still reading headers
    version = "1.0"
    error = None  # set to a utilities error object on any parse failure
    connection_close = False
    # Other attributes: first_line, header, headers, command, uri, version,
    # path, query, fragment
    def __init__(self, adj):
        """
        adj is an Adjustments object.
        """
        # headers is a mapping containing keys translated to uppercase
        # with dashes turned into underscores.
        self.headers = {}
        self.adj = adj
    def received(self, data):
        """
        Receives the HTTP stream for one request. Returns the number of
        bytes consumed. Sets the completed flag once both the header and the
        body have been received.
        """
        if self.completed:
            return 0  # Can't consume any more.
        datalen = len(data)
        br = self.body_rcv
        if br is None:
            # In header.
            max_header = self.adj.max_request_header_size
            s = self.header_plus + data
            index = find_double_newline(s)
            consumed = 0
            if index >= 0:
                # If the headers have ended, and we also have part of the body
                # message in data we still want to validate we aren't going
                # over our limit for received headers.
                self.header_bytes_received += index
                consumed = datalen - (len(s) - index)
            else:
                self.header_bytes_received += datalen
                consumed = datalen
            # If the first line + headers is over the max length, we return a
            # RequestHeaderFieldsTooLarge error rather than continuing to
            # attempt to parse the headers.
            if self.header_bytes_received >= max_header:
                # Parse a synthetic minimal request so downstream error
                # rendering has a usable command/path/version.
                self.parse_header(b"GET / HTTP/1.0\r\n")
                self.error = RequestHeaderFieldsTooLarge(
                    "exceeds max_header of %s" % max_header
                )
                self.completed = True
                return consumed
            if index >= 0:
                # Header finished.
                header_plus = s[:index]
                # Remove preceding blank lines. This is suggested by
                # https://tools.ietf.org/html/rfc7230#section-3.5 to support
                # clients sending an extra CR LF after another request when
                # using HTTP pipelining
                header_plus = header_plus.lstrip()
                if not header_plus:
                    self.empty = True
                    self.completed = True
                else:
                    try:
                        self.parse_header(header_plus)
                    except ParsingError as e:
                        self.error = BadRequest(e.args[0])
                        self.completed = True
                    except TransferEncodingNotImplemented as e:
                        self.error = ServerNotImplemented(e.args[0])
                        self.completed = True
                    else:
                        if self.body_rcv is None:
                            # no content-length header and not a t-e: chunked
                            # request
                            self.completed = True
                        if self.content_length > 0:
                            max_body = self.adj.max_request_body_size
                            # we won't accept this request if the content-length
                            # is too large
                            if self.content_length >= max_body:
                                self.error = RequestEntityTooLarge(
                                    "exceeds max_body of %s" % max_body
                                )
                                self.completed = True
                self.headers_finished = True
                return consumed
            # Header not finished yet.
            self.header_plus = s
            return datalen
        else:
            # In body.
            consumed = br.received(data)
            self.body_bytes_received += consumed
            max_body = self.adj.max_request_body_size
            if self.body_bytes_received >= max_body:
                # this will only be raised during t-e: chunked requests
                self.error = RequestEntityTooLarge("exceeds max_body of %s" % max_body)
                self.completed = True
            elif br.error:
                # garbage in chunked encoding input probably
                self.error = br.error
                self.completed = True
            elif br.completed:
                # The request (with the body) is ready to use.
                self.completed = True
                if self.chunked:
                    # We've converted the chunked transfer encoding request
                    # body into a normal request body, so we know its content
                    # length; set the header here. We already popped the
                    # TRANSFER_ENCODING header in parse_header, so this will
                    # appear to the client to be an entirely non-chunked HTTP
                    # request with a valid content-length.
                    self.headers["CONTENT_LENGTH"] = str(br.__len__())
            return consumed
    def parse_header(self, header_plus):
        """
        Parses the header_plus block of text (the headers plus the
        first line of the request).
        Raises ParsingError for malformed input and
        TransferEncodingNotImplemented for unsupported transfer-codings.
        """
        index = header_plus.find(b"\r\n")
        if index >= 0:
            first_line = header_plus[:index].rstrip()
            header = header_plus[index + 2 :]
        else:
            raise ParsingError("HTTP message header invalid")
        if b"\r" in first_line or b"\n" in first_line:
            raise ParsingError("Bare CR or LF found in HTTP message")
        self.first_line = first_line  # for testing
        lines = get_header_lines(header)
        headers = self.headers
        for line in lines:
            header = HEADER_FIELD.match(line)
            if not header:
                raise ParsingError("Invalid header")
            key, value = header.group("name", "value")
            if b"_" in key:
                # TODO(xistence): Should we drop this request instead?
                continue
            # Only strip off whitespace that is considered valid whitespace by
            # RFC7230, don't strip the rest
            value = value.strip(b" \t")
            key1 = tostr(key.upper().replace(b"-", b"_"))
            # If a header already exists, we append subsequent values
            # separated by a comma. Applications already need to handle
            # the comma separated values, as HTTP front ends might do
            # the concatenation for you (behavior specified in RFC2616).
            try:
                headers[key1] += tostr(b", " + value)
            except KeyError:
                headers[key1] = tostr(value)
        # command, uri, version will be bytes
        command, uri, version = crack_first_line(first_line)
        version = tostr(version)
        command = tostr(command)
        self.command = command
        self.version = version
        (
            self.proxy_scheme,
            self.proxy_netloc,
            self.path,
            self.query,
            self.fragment,
        ) = split_uri(uri)
        self.url_scheme = self.adj.url_scheme
        connection = headers.get("CONNECTION", "")
        if version == "1.0":
            # HTTP/1.0 defaults to closing unless keep-alive is requested.
            if connection.lower() != "keep-alive":
                self.connection_close = True
        if version == "1.1":
            # since the server buffers data from chunked transfers and clients
            # never need to deal with chunked requests, downstream clients
            # should not see the HTTP_TRANSFER_ENCODING header; we pop it
            # here
            te = headers.pop("TRANSFER_ENCODING", "")
            # NB: We can not just call bare strip() here because it will also
            # remove other non-printable characters that we explicitly do not
            # want removed so that if someone attempts to smuggle a request
            # with these characters we don't fall prey to it.
            #
            # For example \x85 is stripped by default, but it is not considered
            # valid whitespace to be stripped by RFC7230.
            encodings = [
                encoding.strip(" \t").lower() for encoding in te.split(",") if encoding
            ]
            for encoding in encodings:
                # Out of the transfer-codings listed in
                # https://tools.ietf.org/html/rfc7230#section-4 we only support
                # chunked at this time.
                # Note: the identity transfer-coding was removed in RFC7230:
                # https://tools.ietf.org/html/rfc7230#appendix-A.2 and is thus
                # not supported
                if encoding not in {"chunked"}:
                    raise TransferEncodingNotImplemented(
                        "Transfer-Encoding requested is not supported."
                    )
            if encodings and encodings[-1] == "chunked":
                self.chunked = True
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = ChunkedReceiver(buf)
            elif encodings:  # pragma: nocover
                raise TransferEncodingNotImplemented(
                    "Transfer-Encoding requested is not supported."
                )
        expect = headers.get("EXPECT", "").lower()
        self.expect_continue = expect == "100-continue"
        if connection.lower() == "close":
            self.connection_close = True
        if not self.chunked:
            try:
                cl = int(headers.get("CONTENT_LENGTH", 0))
            except ValueError:
                raise ParsingError("Content-Length is invalid")
            self.content_length = cl
            if cl > 0:
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = FixedStreamReceiver(cl, buf)
    def get_body_stream(self):
        """Return a file-like object for the request body (empty if none)."""
        body_rcv = self.body_rcv
        if body_rcv is not None:
            return body_rcv.getfile()
        else:
            return BytesIO()
    def close(self):
        """Release the body receiver's buffer, if a body was collected."""
        body_rcv = self.body_rcv
        if body_rcv is not None:
            body_rcv.getbuf().close()
def split_uri(uri):
    """Split a raw request-target (bytes) into a 5-tuple of str:
    (scheme, netloc, wsgi-decoded path, query, fragment)."""
    # urlsplit would treat a target starting with "//" as a scheme-less
    # netloc, losing the original intent of the request. Split off the
    # fragment and query by hand instead and keep the rest as the path
    # (mirrors CPython's urllib.parse; see Pylons/waitress#260).
    if uri.startswith(b"//"):
        scheme = netloc = b""
        remainder, _, fragment = uri.partition(b"#")
        path, _, query = remainder.partition(b"?")
    else:
        try:
            scheme, netloc, path, query, fragment = urlparse.urlsplit(uri)
        except UnicodeError:
            raise ParsingError("Bad URI")
    return (
        tostr(scheme),
        tostr(netloc),
        unquote_bytes_to_wsgi(path),
        tostr(query),
        tostr(fragment),
    )
def get_header_lines(header):
    """Split *header* into lines, joining folded (continuation) lines
    onto the preceding header line."""
    folded = []
    for raw in header.split(b"\r\n"):
        if not raw:
            continue
        if b"\r" in raw or b"\n" in raw:
            raise ParsingError('Bare CR or LF found in header line "%s"' % tostr(raw))
        if raw[:1] in (b" ", b"\t"):
            if not folded:
                # A continuation line with nothing to continue; see
                # https://corte.si/posts/code/pathod/pythonservers/index.html
                raise ParsingError('Malformed header line "%s"' % tostr(raw))
            folded[-1] += raw
        else:
            folded.append(raw)
    return folded
# Matches the HTTP request line. group(1) is the method, group(2) the
# request-target (optionally an absolute-form URI), and group(5) the HTTP
# version number when an " HTTP/x.y" suffix is present.
first_line_re = re.compile(
    b"([^ ]+) "
    b"((?:[^ :?#]+://[^ ?#/]*(?:[0-9]{1,5})?)?[^ ]+)"
    b"(( HTTP/([0-9.]+))$|$)"
)
def crack_first_line(line):
    """Split an HTTP request line into (method, uri, version) bytes;
    returns three empty byte strings when the line does not parse."""
    m = first_line_re.match(line)
    if m is None or m.end() != len(line):
        return b"", b"", b""
    version = m.group(5) if m.group(3) else b""
    method = m.group(1)
    # the request methods that are currently defined are all uppercase:
    # https://www.iana.org/assignments/http-methods/http-methods.xhtml and
    # the request method is case sensitive according to
    # https://tools.ietf.org/html/rfc7231#section-4.1
    # By disallowing anything but uppercase methods we save poor
    # unsuspecting souls from sending lowercase HTTP methods to awaitress
    # and having the request complete, while servers like nginx drop the
    # request onto the floor.
    if method != method.upper():
        raise ParsingError('Malformed HTTP method "%s"' % tostr(method))
    return method, m.group(2), version
| 36.363855 | 115 | 0.561129 |
79579d9b8e697999fbdab436c031b76700a72cd8 | 1,273 | py | Python | data/p4VQE/R4/benchmark/startPyquil810.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startPyquil810.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startPyquil810.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=15
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Build the auto-generated benchmark circuit (4 qubits, 15 numbered
    gates) as a pyquil Program. Gate order follows the generator's numbering."""
    prog = Program() # circuit begin
    prog += H(1) # number=2
    prog += H(2) # number=3
    prog += H(3) # number=4
    prog += Y(3) # number=5
    prog += SWAP(1,0) # number=6
    prog += SWAP(1,0) # number=7
    prog += Y(3) # number=8
    prog += CNOT(2,0) # number=12
    prog += Z(2) # number=13
    prog += CNOT(2,0) # number=14
    prog += Y(3) # number=9
    prog += RX(0.5466371217246238,1) # number=10
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Tally how many times each bitstring appears.

    (Name kept as-is -- 'summarise' was evidently intended -- because
    callers reference it.)

    Parameters
    ----------
    bitstrings : iterable of str
        Measurement outcomes, one string per shot.

    Returns
    -------
    dict
        Mapping of bitstring -> occurrence count; empty for empty input.
    """
    counts = {}
    for bits in bitstrings:
        # dict.get with a default replaces the old `is None` two-step.
        counts[bits] = counts.get(bits, 0) + 1
    return counts
if __name__ == '__main__':
    # Run the circuit 1024 times on a 4-qubit QVM, flatten each shot's qubit
    # readouts into a bitstring, and write the outcome counts to CSV.
    prog = make_circuit()
    qvm = get_qc('4q-qvm')
    results = qvm.run_and_measure(prog,1024)
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    writefile = open("../data/startPyquil810.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
7957a0915014afad0df012923cbbc3d50bedf99e | 1,112 | py | Python | alchemy_provider/utils/aliased_manager.py | SherkhanSyzdykov/alchemy_provider | d964f8dcfb59f803e9d5f69316eef5199bf71529 | [
"MIT"
] | 1 | 2022-03-30T22:12:50.000Z | 2022-03-30T22:12:50.000Z | alchemy_provider/utils/aliased_manager.py | SherkhanSyzdykov/alchemy_provider | d964f8dcfb59f803e9d5f69316eef5199bf71529 | [
"MIT"
] | null | null | null | alchemy_provider/utils/aliased_manager.py | SherkhanSyzdykov/alchemy_provider | d964f8dcfb59f803e9d5f69316eef5199bf71529 | [
"MIT"
] | null | null | null | from uuid import UUID
from typing import Dict
from sqlalchemy.orm import DeclarativeMeta
from sqlalchemy.orm.util import AliasedClass
from .alchemy_orm import make_aliased_mapper
class AliasedManager:
__aliased_map: Dict[UUID, Dict[str, AliasedClass]] = dict()
@classmethod
def get_or_create(
cls,
uuid: UUID,
mapper: DeclarativeMeta,
field_name: str
) -> AliasedClass:
if cls.is_exist(uuid=uuid, field_name=field_name):
return cls.__aliased_map[uuid][field_name]
aliased_mapper = make_aliased_mapper(
mapper=mapper,
field_name=field_name
)
cls.__aliased_map[uuid] = cls.__aliased_map.get(uuid, {})
cls.__aliased_map[uuid][field_name] = aliased_mapper
return aliased_mapper
@classmethod
def delete(
cls,
uuid: UUID,
):
cls.__aliased_map.pop(uuid, None)
@classmethod
def is_exist(
cls,
uuid: UUID,
field_name: str,
) -> bool:
return bool(cls.__aliased_map.get(uuid, {}).get(field_name, False))
| 25.272727 | 75 | 0.638489 |
7957a1b74ccbc6188d648537c8c4be33e809b019 | 7,549 | py | Python | tests/test_config.py | kasium/alembic | af7963889abffe2ab8dc640d4fdcb8cea6d53942 | [
"MIT"
] | 1,324 | 2018-11-27T05:44:41.000Z | 2022-03-30T19:49:20.000Z | tests/test_config.py | kasium/alembic | af7963889abffe2ab8dc640d4fdcb8cea6d53942 | [
"MIT"
] | 452 | 2018-11-27T22:43:38.000Z | 2022-03-28T04:33:43.000Z | tests/test_config.py | kasium/alembic | af7963889abffe2ab8dc640d4fdcb8cea6d53942 | [
"MIT"
] | 159 | 2018-11-29T18:46:15.000Z | 2022-03-28T16:34:19.000Z | #!coding: utf-8
import os
import tempfile
from alembic import config
from alembic import testing
from alembic import util
from alembic.migration import MigrationContext
from alembic.operations import Operations
from alembic.script import ScriptDirectory
from alembic.testing import assert_raises_message
from alembic.testing import eq_
from alembic.testing import mock
from alembic.testing.assertions import expect_raises_message
from alembic.testing.env import _no_sql_testing_config
from alembic.testing.env import _write_config_file
from alembic.testing.env import clear_staging_env
from alembic.testing.env import staging_env
from alembic.testing.fixtures import capture_db
from alembic.testing.fixtures import TestBase
class FileConfigTest(TestBase):
    """Checks for Config objects backed by an ini file on disk."""

    def tearDown(self):
        clear_staging_env()

    def test_config_args(self):
        """config_args values are interpolated into %(...)s placeholders."""
        ini_source = """
[alembic]
migrations = %(base_path)s/db/migrations
"""
        written = _write_config_file(ini_source)
        loaded = config.Config(
            written.config_file_name,
            config_args=dict(base_path="/home/alembic"),
        )
        eq_(
            loaded.get_section_option("alembic", "migrations"),
            "/home/alembic/db/migrations",
        )
class ConfigTest(TestBase):
    """File-less Config behavior: option get/set, escaping, interpolation,
    attributes, and version_locations parsing."""
    def test_config_no_file_main_option(self):
        """Main-section options round-trip without a backing ini file."""
        cfg = config.Config()
        cfg.set_main_option("url", "postgresql://foo/bar")
        eq_(cfg.get_main_option("url"), "postgresql://foo/bar")
    def test_config_no_file_section_option(self):
        """Arbitrary-section options round-trip without a backing ini file."""
        cfg = config.Config()
        cfg.set_section_option("foo", "url", "postgresql://foo/bar")
        eq_(cfg.get_section_option("foo", "url"), "postgresql://foo/bar")
        cfg.set_section_option("foo", "echo", "True")
        eq_(cfg.get_section_option("foo", "echo"), "True")
    def test_config_set_main_option_percent(self):
        """An escaped %% is stored and read back as a literal %."""
        cfg = config.Config()
        cfg.set_main_option("foob", "a %% percent")
        eq_(cfg.get_main_option("foob"), "a % percent")
    def test_config_set_section_option_percent(self):
        """The %% escaping also applies to non-main sections."""
        cfg = config.Config()
        cfg.set_section_option("some_section", "foob", "a %% percent")
        eq_(cfg.get_section_option("some_section", "foob"), "a % percent")
    def test_config_set_section_option_interpolation(self):
        """%(name)s references resolve against options in the same section."""
        cfg = config.Config()
        cfg.set_section_option("some_section", "foob", "foob_value")
        cfg.set_section_option("some_section", "bar", "bar with %(foob)s")
        eq_(
            cfg.get_section_option("some_section", "bar"),
            "bar with foob_value",
        )
    def test_standalone_op(self):
        """Operations can be driven directly from a MigrationContext."""
        eng, buf = capture_db()
        env = MigrationContext.configure(eng)
        op = Operations(env)
        op.alter_column("t", "c", nullable=True)
        eq_(buf, ["ALTER TABLE t ALTER COLUMN c DROP NOT NULL"])
    def test_no_script_error(self):
        """A Config lacking script_location raises a descriptive CommandError."""
        cfg = config.Config()
        assert_raises_message(
            util.CommandError,
            "No 'script_location' key found in configuration.",
            ScriptDirectory.from_config,
            cfg,
        )
    def test_attributes_attr(self):
        """The .attributes dict accepts arbitrary runtime objects."""
        m1 = mock.Mock()
        cfg = config.Config()
        cfg.attributes["connection"] = m1
        eq_(cfg.attributes["connection"], m1)
    def test_attributes_construtor(self):
        """Attributes passed to the constructor merge with later additions."""
        m1 = mock.Mock()
        m2 = mock.Mock()
        cfg = config.Config(attributes={"m1": m1})
        cfg.attributes["connection"] = m2
        eq_(cfg.attributes, {"m1": m1, "connection": m2})
    # Each tuple below is (test id, version_path_separator setting,
    # raw version_locations string, expected parsed list OR the
    # ValueError expected from an invalid separator).
    @testing.combinations(
        (
            "legacy raw string 1",
            None,
            "/foo",
            ["/foo"],
        ),
        (
            "legacy raw string 2",
            None,
            "/foo /bar",
            ["/foo", "/bar"],
        ),
        (
            "legacy raw string 3",
            "space",
            "/foo",
            ["/foo"],
        ),
        (
            "legacy raw string 4",
            "space",
            "/foo /bar",
            ["/foo", "/bar"],
        ),
        (
            "Linux pathsep 1",
            ":",
            "/Project A",
            ["/Project A"],
        ),
        (
            "Linux pathsep 2",
            ":",
            "/Project A:/Project B",
            ["/Project A", "/Project B"],
        ),
        (
            "Windows pathsep 1",
            ";",
            r"C:\Project A",
            [r"C:\Project A"],
        ),
        (
            "Windows pathsep 2",
            ";",
            r"C:\Project A;C:\Project B",
            [r"C:\Project A", r"C:\Project B"],
        ),
        (
            "os pathsep",
            "os",
            r"path_number_one%(sep)spath_number_two%(sep)s"
            % {"sep": os.pathsep},
            [r"path_number_one", r"path_number_two"],
        ),
        (
            "invalid pathsep 2",
            "|",
            "/foo|/bar",
            ValueError(
                "'|' is not a valid value for version_path_separator; "
                "expected 'space', 'os', ':', ';'"
            ),
        ),
        id_="iaaa",
        argnames="separator, string_value, expected_result",
    )
    def test_version_locations(self, separator, string_value, expected_result):
        """version_locations splits per the configured separator, or raises."""
        cfg = config.Config()
        if separator is not None:
            cfg.set_main_option(
                "version_path_separator",
                separator,
            )
        cfg.set_main_option("script_location", tempfile.gettempdir())
        cfg.set_main_option("version_locations", string_value)
        if isinstance(expected_result, ValueError):
            with expect_raises_message(ValueError, expected_result.args[0]):
                ScriptDirectory.from_config(cfg)
        else:
            s = ScriptDirectory.from_config(cfg)
            eq_(s.version_locations, expected_result)
class StdoutOutputEncodingTest(TestBase):
    """print_stdout should encode per the target stream and append a newline."""

    def _print_to_mock_stdout(self, encoding, fmt, *args):
        """Run Config.print_stdout against a mock stream; return its call list."""
        stream = mock.Mock(encoding=encoding)
        config.Config(stdout=stream).print_stdout(fmt, *args)
        return stream.mock_calls

    def test_plain(self):
        calls = self._print_to_mock_stdout("latin-1", "test %s %s", "x", "y")
        eq_(calls, [mock.call.write("test x y"), mock.call.write("\n")])

    def test_utf8_unicode(self):
        calls = self._print_to_mock_stdout("latin-1", "méil %s %s", "x", "y")
        eq_(calls, [mock.call.write("méil x y"), mock.call.write("\n")])

    def test_ascii_unicode(self):
        # With no declared encoding, unencodable characters are replaced.
        calls = self._print_to_mock_stdout(None, "méil %s %s", "x", "y")
        eq_(calls, [mock.call.write("m?il x y"), mock.call.write("\n")])

    def test_only_formats_output_with_args(self):
        # A literal "%" must pass through untouched when no args are given.
        calls = self._print_to_mock_stdout(None, "test 3%")
        eq_(calls, [mock.call.write("test 3%"), mock.call.write("\n")])
class TemplateOutputEncodingTest(TestBase):
    """ScriptDirectory honors the configured migration-file output encoding."""

    def setUp(self):
        staging_env()
        self.cfg = _no_sql_testing_config()

    def tearDown(self):
        clear_staging_env()

    def test_default(self):
        # With no explicit setting, utf-8 is assumed.
        eq_(ScriptDirectory.from_config(self.cfg).output_encoding, "utf-8")

    def test_setting(self):
        self.cfg.set_main_option("output_encoding", "latin-1")
        eq_(ScriptDirectory.from_config(self.cfg).output_encoding, "latin-1")
| 29.837945 | 79 | 0.567227 |
7957a21c6d5fa346adfbf797ac8814b172632ccf | 8,991 | py | Python | tests/admin/controller/test_admin_auth_services.py | pic-ed/LibrarySimplifies | 16dd6459b794fb805b7a5e10758b3c151356908a | [
"Apache-2.0"
] | null | null | null | tests/admin/controller/test_admin_auth_services.py | pic-ed/LibrarySimplifies | 16dd6459b794fb805b7a5e10758b3c151356908a | [
"Apache-2.0"
] | null | null | null | tests/admin/controller/test_admin_auth_services.py | pic-ed/LibrarySimplifies | 16dd6459b794fb805b7a5e10758b3c151356908a | [
"Apache-2.0"
] | null | null | null | from nose.tools import (
set_trace,
eq_,
assert_raises
)
import flask
import json
from werkzeug import MultiDict
from api.admin.exceptions import *
from core.model import (
AdminRole,
ConfigurationSetting,
create,
ExternalIntegration,
get_one,
)
from test_controller import SettingsControllerTest
class TestAdminAuthServices(SettingsControllerTest):
    """Controller tests for admin authentication service CRUD endpoints."""
    def test_admin_auth_services_get_with_no_services(self):
        """With no services configured, GET returns an empty list plus all
        supported protocols; non-system admins are rejected."""
        with self.request_context_with_admin("/"):
            response = self.manager.admin_auth_services_controller.process_admin_auth_services()
            eq_(response.get("admin_auth_services"), [])
            # All the protocols in ExternalIntegration.ADMIN_AUTH_PROTOCOLS
            # are supported by the admin interface.
            eq_(sorted([p.get("name") for p in response.get("protocols")]),
                sorted(ExternalIntegration.ADMIN_AUTH_PROTOCOLS))
            self.admin.remove_role(AdminRole.SYSTEM_ADMIN)
            self._db.flush()
            assert_raises(AdminNotAuthorized,
                          self.manager.admin_auth_services_controller.process_admin_auth_services)
    def test_admin_auth_services_get_with_google_oauth_service(self):
        """GET serializes an existing Google OAuth service, including its
        per-library domain restrictions."""
        auth_service, ignore = create(
            self._db, ExternalIntegration,
            protocol=ExternalIntegration.GOOGLE_OAUTH,
            goal=ExternalIntegration.ADMIN_AUTH_GOAL
        )
        auth_service.url = "http://oauth.test"
        auth_service.username = "user"
        auth_service.password = "pass"
        auth_service.libraries += [self._default_library]
        ConfigurationSetting.for_library_and_externalintegration(
            self._db, "domains", self._default_library, auth_service
        ).value = json.dumps(["nypl.org"])
        with self.request_context_with_admin("/"):
            response = self.manager.admin_auth_services_controller.process_admin_auth_services()
            [service] = response.get("admin_auth_services")
            eq_(auth_service.id, service.get("id"))
            eq_(auth_service.name, service.get("name"))
            eq_(auth_service.protocol, service.get("protocol"))
            eq_(auth_service.url, service.get("settings").get("url"))
            eq_(auth_service.username, service.get("settings").get("username"))
            eq_(auth_service.password, service.get("settings").get("password"))
            [library_info] = service.get("libraries")
            eq_(self._default_library.short_name, library_info.get("short_name"))
            eq_(["nypl.org"], library_info.get("domains"))
    def test_admin_auth_services_post_errors(self):
        """POST validation: unknown/missing protocol, missing service id,
        protocol changes, incomplete settings, and authorization."""
        with self.request_context_with_admin("/", method="POST"):
            flask.request.form = MultiDict([
                ("protocol", "Unknown"),
            ])
            response = self.manager.admin_auth_services_controller.process_admin_auth_services()
            eq_(response, UNKNOWN_PROTOCOL)
        with self.request_context_with_admin("/", method="POST"):
            flask.request.form = MultiDict([])
            response = self.manager.admin_auth_services_controller.process_admin_auth_services()
            eq_(response, NO_PROTOCOL_FOR_NEW_SERVICE)
        with self.request_context_with_admin("/", method="POST"):
            flask.request.form = MultiDict([
                ("id", "1234"),
            ])
            response = self.manager.admin_auth_services_controller.process_admin_auth_services()
            eq_(response, MISSING_SERVICE)
        auth_service, ignore = create(
            self._db, ExternalIntegration,
            protocol=ExternalIntegration.GOOGLE_OAUTH,
            goal=ExternalIntegration.ADMIN_AUTH_GOAL
        )
        # An id without a protocol implies an attempt to change protocol.
        with self.request_context_with_admin("/", method="POST"):
            flask.request.form = MultiDict([
                ("id", str(auth_service.id)),
            ])
            response = self.manager.admin_auth_services_controller.process_admin_auth_services()
            eq_(response, CANNOT_CHANGE_PROTOCOL)
        with self.request_context_with_admin("/", method="POST"):
            flask.request.form = MultiDict([
                ("protocol", "Google OAuth"),
            ])
            response = self.manager.admin_auth_services_controller.process_admin_auth_services()
            eq_(response.uri, INCOMPLETE_CONFIGURATION.uri)
        self.admin.remove_role(AdminRole.SYSTEM_ADMIN)
        self._db.flush()
        with self.request_context_with_admin("/", method="POST"):
            flask.request.form = MultiDict([
                ("name", "oauth"),
                ("protocol", "Google OAuth"),
                ("url", "url"),
                ("username", "username"),
                ("password", "password"),
                ("domains", "nypl.org"),
            ])
            assert_raises(AdminNotAuthorized,
                          self.manager.admin_auth_services_controller.process_admin_auth_services)
    def test_admin_auth_services_post_create(self):
        """A valid POST creates and fully configures a new auth service."""
        with self.request_context_with_admin("/", method="POST"):
            flask.request.form = MultiDict([
                ("name", "oauth"),
                ("protocol", "Google OAuth"),
                ("url", "http://url2"),
                ("username", "username"),
                ("password", "password"),
                ("libraries", json.dumps([{ "short_name": self._default_library.short_name,
                                            "domains": ["nypl.org", "gmail.com"] }])),
            ])
            response = self.manager.admin_auth_services_controller.process_admin_auth_services()
            eq_(response.status_code, 201)
        # The auth service was created and configured properly.
        auth_service = ExternalIntegration.admin_authentication(self._db)
        eq_(auth_service.protocol, response.response[0])
        eq_("oauth", auth_service.name)
        eq_("http://url2", auth_service.url)
        eq_("username", auth_service.username)
        eq_("password", auth_service.password)
        eq_([self._default_library], auth_service.libraries)
        setting = ConfigurationSetting.for_library_and_externalintegration(
            self._db, "domains", self._default_library, auth_service
        )
        eq_("domains", setting.key)
        eq_(["nypl.org", "gmail.com"], json.loads(setting.value))
    def test_admin_auth_services_post_google_oauth_edit(self):
        """POST with an existing protocol updates the service in place,
        including per-library domains."""
        # The auth service exists.
        auth_service, ignore = create(
            self._db, ExternalIntegration,
            protocol=ExternalIntegration.GOOGLE_OAUTH,
            goal=ExternalIntegration.ADMIN_AUTH_GOAL
        )
        auth_service.url = "url"
        auth_service.username = "user"
        auth_service.password = "pass"
        auth_service.libraries += [self._default_library]
        setting = ConfigurationSetting.for_library_and_externalintegration(
            self._db, "domains", self._default_library, auth_service)
        setting.value = json.dumps(["library1.org"])
        with self.request_context_with_admin("/", method="POST"):
            flask.request.form = MultiDict([
                ("name", "oauth"),
                ("protocol", "Google OAuth"),
                ("url", "http://url2"),
                ("username", "user2"),
                ("password", "pass2"),
                ("libraries", json.dumps([{ "short_name": self._default_library.short_name,
                                            "domains": ["library2.org"] }])),
            ])
            response = self.manager.admin_auth_services_controller.process_admin_auth_services()
            eq_(response.status_code, 200)
        eq_(auth_service.protocol, response.response[0])
        eq_("oauth", auth_service.name)
        eq_("http://url2", auth_service.url)
        eq_("user2", auth_service.username)
        eq_("domains", setting.key)
        eq_(["library2.org"], json.loads(setting.value))
    def test_admin_auth_service_delete(self):
        """DELETE removes the service; non-system admins are rejected first."""
        auth_service, ignore = create(
            self._db, ExternalIntegration,
            protocol=ExternalIntegration.GOOGLE_OAUTH,
            goal=ExternalIntegration.ADMIN_AUTH_GOAL
        )
        auth_service.url = "url"
        auth_service.username = "user"
        auth_service.password = "pass"
        auth_service.set_setting("domains", json.dumps(["library1.org"]))
        with self.request_context_with_admin("/", method="DELETE"):
            self.admin.remove_role(AdminRole.SYSTEM_ADMIN)
            assert_raises(AdminNotAuthorized,
                          self.manager.admin_auth_services_controller.process_delete,
                          auth_service.protocol)
            self.admin.add_role(AdminRole.SYSTEM_ADMIN)
            response = self.manager.admin_auth_services_controller.process_delete(auth_service.protocol)
            eq_(response.status_code, 200)
        service = get_one(self._db, ExternalIntegration, id=auth_service.id)
        eq_(None, service)
| 44.073529 | 104 | 0.626404 |
7957a27315478af9ee087bae6758652cabeb578e | 3,149 | py | Python | apps/portalbase/macros/page/actorsdocs/3_main.py | threefoldtech/jumpscale_portal_classic | d14fe4a17c0486df7a87d149e900746654091fda | [
"Apache-2.0"
] | 1 | 2017-06-07T08:12:09.000Z | 2017-06-07T08:12:09.000Z | apps/portalbase/macros/page/actorsdocs/3_main.py | threefoldtech/jumpscale_portal_classic | d14fe4a17c0486df7a87d149e900746654091fda | [
"Apache-2.0"
] | 36 | 2017-05-18T10:54:44.000Z | 2019-03-27T11:24:20.000Z | apps/portalbase/macros/page/actorsdocs/3_main.py | threefoldtech/jumpscale_portal_classic | d14fe4a17c0486df7a87d149e900746654091fda | [
"Apache-2.0"
] | 1 | 2018-06-12T05:18:01.000Z | 2018-06-12T05:18:01.000Z | def main(j, args, params, tags, tasklet):
page = args.page
actors = args.tags.tagGet('actors', '')
group = args.tags.tagGet('group', '')
page.addCSS('/jslib/swagger/css/typography.css', media='screen')
page.addCSS('/jslib/swagger/css/reset.css', media='screen')
page.addCSS('/jslib/swagger/css/screen.css', media='screen')
page.addCSS('/jslib/swagger/css/typography.css', media='print')
page.addCSS('/jslib/swagger/css/reset.css', media='print')
page.addCSS('/jslib/swagger/css/screen.css', media='print')
page.addJS('/jslib/swagger/lib/jsoneditor.min.js')
page.addJS('/jslib/swagger/lib/swagger-oauth.js')
page.addJS('/jslib/swagger/lib/jquery-1.8.0.min.js')
page.addJS('/jslib/swagger/lib/jquery.slideto.min.js')
page.addJS('/jslib/swagger/lib/jquery.wiggle.min.js')
page.addJS('/jslib/swagger/lib/jquery.ba-bbq.min.js')
page.addJS('/jslib/swagger/lib/handlebars-2.0.0.js')
page.addJS('/jslib/swagger/lib//lodash.min.js')
page.addJS('/jslib/swagger/lib/backbone-min.js')
page.addJS('/jslib/swagger/swagger-ui.min.js')
page.addJS('/jslib/swagger/lib/highlight.9.1.0.pack.js')
page.addJS('/jslib/swagger/lib/highlight.9.1.0.pack_extended.js')
page.addJS('/jslib/swagger/lib/marked.js')
page.addJS('/jslib/swagger/lib/object-assign-pollyfill.js')
page.addJS('/jslib/swagger/lib/js-yaml.min.js')
head = """
<title>Swagger UI</title>
<script type="text/javascript">
$(function () {
window.swaggerUi = new SwaggerUi({
url:"/restmachine/system/docgenerator/prepareCatalog?actors=%s&group=%s&format=jsonraw",
validatorUrl: null,
dom_id:"swagger-ui-container",
supportHeaderParams: false,
supportedSubmitMethods: ['get', 'post', 'put'],
onComplete: function(swaggerApi, swaggerUi){
if(console) {
console.log("Loaded SwaggerUI")
console.log(swaggerApi);
console.log(swaggerUi);
}
$('pre code').each(function(i, e) {hljs.highlightBlock(e)});
},
onFailure: function(data) {
if(console) {
console.log("Unable to Load SwaggerUI");
console.log(data);
}
},
docExpansion: "none",
sorter : "alpha",
apisSorter : "alpha",
operationsSorter: "alpha"
});
window.swaggerUi.load();
});
</script>
""" % (j.portal.tools.html.htmlfactory.escape(actors), j.portal.tools.html.htmlfactory.escape(group))
body = """
<div class="swagger-section">
<div id="message-bar" class="swagger-ui-wrap">
</div>
<div id="swagger-ui-container" class="swagger-ui-wrap">
</div>
</div>
"""
page.addHTMLHeader(head)
page.addHTMLBody(body)
params.result = page
return params
def match(j, args, params, tags, tasklet):
    """Always applicable: this macro should run for every page."""
    return True
| 36.195402 | 105 | 0.576691 |
7957a28b610b22d970101b4c572fba0f94bc3f5d | 311 | py | Python | data/multilingual/Deva.MAI/Sun-ExtA_16/pdf_to_json_test_Deva.MAI_Sun-ExtA_16.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Deva.MAI/Sun-ExtA_16/pdf_to_json_test_Deva.MAI_Sun-ExtA_16.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Deva.MAI/Sun-ExtA_16/pdf_to_json_test_Deva.MAI_Sun-ExtA_16.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json
# Input fixture: a UDHR sample for this script/font combination.
url = "file:data/multilingual/Deva.MAI/Sun-ExtA_16/udhr_Deva.MAI_Sun-ExtA_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
# Store only hashes of embedded images — presumably to keep the JSON
# output small and comparable; confirm against pdf_to_json docs.
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
# ensure_ascii=False keeps the Devanagari text human-readable in the dump.
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 31.1 | 81 | 0.810289 |
7957a2d36cfa0ab2f49a9b6329bd1b349f95c595 | 8,134 | py | Python | hmtl/models/hmtl.py | alasdairtran/hmtl | 80daa2a1e1ba21c775f0352d11b7e625384c0ad1 | [
"MIT"
] | null | null | null | hmtl/models/hmtl.py | alasdairtran/hmtl | 80daa2a1e1ba21c775f0352d11b7e625384c0ad1 | [
"MIT"
] | null | null | null | hmtl/models/hmtl.py | alasdairtran/hmtl | 80daa2a1e1ba21c775f0352d11b7e625384c0ad1 | [
"MIT"
] | null | null | null | # coding: utf-8
import os
import sys
import logging
from typing import Dict
from overrides import overrides
import torch
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.nn import RegularizerApplicator, InitializerApplicator
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules import FeedForward
from allennlp.models.crf_tagger import CrfTagger
from hmtl.modules.text_field_embedders import ShortcutConnectTextFieldEmbedder
from hmtl.models.relation_extraction import RelationExtractor
from hmtl.models import CoreferenceCustom
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("hmtl")
class HMTL(Model):
    """
    A class that implement the full HMTL model.
    Parameters
    ----------
    vocab: ``allennlp.data.Vocabulary``, required.
        The vocabulary fitted on the data.
    params: ``allennlp.common.Params``, required
        Configuration parameters for the multi-task model.
    regularizer: ``allennlp.nn.RegularizerApplicator``, optional (default = None)
        A reguralizer to apply to the model's layers.
    """
    def __init__(self, vocab: Vocabulary, params: Params, regularizer: RegularizerApplicator = None):
        super(HMTL, self).__init__(vocab=vocab, regularizer=regularizer)
        # Base text Field Embedder
        text_field_embedder_params = params.pop("text_field_embedder")
        text_field_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab, params=text_field_embedder_params)
        self._text_field_embedder = text_field_embedder
        ############
        # NER Stuffs
        ############
        ner_params = params.pop("ner")
        # Encoder
        encoder_ner_params = ner_params.pop("encoder")
        encoder_ner = Seq2SeqEncoder.from_params(encoder_ner_params)
        self._encoder_ner = encoder_ner
        # Tagger NER - CRF Tagger
        tagger_ner_params = ner_params.pop("tagger")
        tagger_ner = CrfTagger(
            vocab=vocab,
            text_field_embedder=self._text_field_embedder,
            encoder=self._encoder_ner,
            label_namespace=tagger_ner_params.pop("label_namespace", "labels"),
            label_encoding=tagger_ner_params.pop("label_encoding", None),
            dropout=tagger_ner_params.pop("dropout", None),
            regularizer=regularizer,
        )
        self._tagger_ner = tagger_ner
        ############
        # EMD Stuffs
        ############
        emd_params = params.pop("emd")
        # Encoder
        encoder_emd_params = emd_params.pop("encoder")
        encoder_emd = Seq2SeqEncoder.from_params(encoder_emd_params)
        self._encoder_emd = encoder_emd
        # The EMD embedder shortcut-connects the base embeddings with the
        # NER encoder output (hierarchical task stacking).
        shortcut_text_field_embedder = ShortcutConnectTextFieldEmbedder(
            base_text_field_embedder=self._text_field_embedder, previous_encoders=[self._encoder_ner]
        )
        self._shortcut_text_field_embedder = shortcut_text_field_embedder
        # Tagger: EMD - CRF Tagger
        tagger_emd_params = emd_params.pop("tagger")
        tagger_emd = CrfTagger(
            vocab=vocab,
            text_field_embedder=self._shortcut_text_field_embedder,
            encoder=self._encoder_emd,
            label_namespace=tagger_emd_params.pop("label_namespace", "labels"),
            label_encoding=tagger_emd_params.pop("label_encoding", None),
            # NOTE(review): pops from tagger_ner_params, which was already
            # popped above (so this is always None) — likely meant
            # tagger_emd_params; confirm before relying on EMD dropout.
            dropout=tagger_ner_params.pop("dropout", None),
            regularizer=regularizer,
        )
        self._tagger_emd = tagger_emd
        ############################
        # Relation Extraction Stuffs
        ############################
        relation_params = params.pop("relation")
        # Encoder
        encoder_relation_params = relation_params.pop("encoder")
        encoder_relation = Seq2SeqEncoder.from_params(encoder_relation_params)
        self._encoder_relation = encoder_relation
        # Shortcut over both the NER and EMD encoder outputs.
        shortcut_text_field_embedder_relation = ShortcutConnectTextFieldEmbedder(
            base_text_field_embedder=self._text_field_embedder, previous_encoders=[self._encoder_ner, self._encoder_emd]
        )
        self._shortcut_text_field_embedder_relation = shortcut_text_field_embedder_relation
        # Tagger: Relation
        tagger_relation_params = relation_params.pop("tagger")
        tagger_relation = RelationExtractor(
            vocab=vocab,
            text_field_embedder=self._shortcut_text_field_embedder_relation,
            context_layer=self._encoder_relation,
            d=tagger_relation_params.pop_int("d"),
            l=tagger_relation_params.pop_int("l"),
            n_classes=tagger_relation_params.pop("n_classes"),
            activation=tagger_relation_params.pop("activation"),
        )
        self._tagger_relation = tagger_relation
        ##############
        # Coref Stuffs
        ##############
        coref_params = params.pop("coref")
        # Encoder
        encoder_coref_params = coref_params.pop("encoder")
        encoder_coref = Seq2SeqEncoder.from_params(encoder_coref_params)
        self._encoder_coref = encoder_coref
        shortcut_text_field_embedder_coref = ShortcutConnectTextFieldEmbedder(
            base_text_field_embedder=self._text_field_embedder, previous_encoders=[self._encoder_ner, self._encoder_emd]
        )
        self._shortcut_text_field_embedder_coref = shortcut_text_field_embedder_coref
        # Tagger: Coreference
        tagger_coref_params = coref_params.pop("tagger")
        eval_on_gold_mentions = tagger_coref_params.pop_bool("eval_on_gold_mentions", False)
        init_params = tagger_coref_params.pop("initializer", None)
        initializer = (
            InitializerApplicator.from_params(init_params) if init_params is not None else InitializerApplicator()
        )
        tagger_coref = CoreferenceCustom(
            vocab=vocab,
            text_field_embedder=self._shortcut_text_field_embedder_coref,
            context_layer=self._encoder_coref,
            mention_feedforward=FeedForward.from_params(tagger_coref_params.pop("mention_feedforward")),
            antecedent_feedforward=FeedForward.from_params(tagger_coref_params.pop("antecedent_feedforward")),
            feature_size=tagger_coref_params.pop_int("feature_size"),
            max_span_width=tagger_coref_params.pop_int("max_span_width"),
            spans_per_word=tagger_coref_params.pop_float("spans_per_word"),
            max_antecedents=tagger_coref_params.pop_int("max_antecedents"),
            lexical_dropout=tagger_coref_params.pop_float("lexical_dropout", 0.2),
            initializer=initializer,
            regularizer=regularizer,
            eval_on_gold_mentions=eval_on_gold_mentions,
        )
        self._tagger_coref = tagger_coref
        if eval_on_gold_mentions:
            self._tagger_coref._eval_on_gold_mentions = True
        logger.info("Multi-Task Learning Model has been instantiated.")
    @overrides
    def forward(self, tensor_batch, for_training: bool = False, task_name: str = "ner") -> Dict[str, torch.Tensor]:
        # pylint: disable=arguments-differ
        """Dispatch the batch to the tagger for `task_name`. For coref with
        gold-mention evaluation enabled, gold mentions are used only at
        evaluation time (not during training)."""
        tagger = getattr(self, "_tagger_%s" % task_name)
        if task_name == "coref" and tagger._eval_on_gold_mentions:
            if for_training:
                tagger._use_gold_mentions = False
            else:
                tagger._use_gold_mentions = True
        return tagger.forward(**tensor_batch)
    @overrides
    def get_metrics(self, task_name: str, reset: bool = False, full: bool = False) -> Dict[str, float]:
        """Return the metrics of the tagger for `task_name`; coref supports
        an extended (`full`) metric set."""
        task_tagger = getattr(self, "_tagger_" + task_name)
        if full and task_name == "coref":
            return task_tagger.get_metrics(reset=reset, full=full)
        else:
            return task_tagger.get_metrics(reset)
    @classmethod
    def from_params(cls, vocab: Vocabulary, params: Params, regularizer: RegularizerApplicator) -> "HMTL":
        """AllenNLP factory hook: build the model from a Params config."""
        return cls(vocab=vocab, params=params, regularizer=regularizer)
| 40.467662 | 120 | 0.688468 |
7957a395c374b787b43740a439b8bf496b29c0b4 | 3,224 | py | Python | flare/modules/crystals.py | YuuuuXie/Stanene_FLARE | b6678927dd7fe3b6e6dc405a5f27d1a3339782eb | [
"MIT"
] | null | null | null | flare/modules/crystals.py | YuuuuXie/Stanene_FLARE | b6678927dd7fe3b6e6dc405a5f27d1a3339782eb | [
"MIT"
] | null | null | null | flare/modules/crystals.py | YuuuuXie/Stanene_FLARE | b6678927dd7fe3b6e6dc405a5f27d1a3339782eb | [
"MIT"
] | null | null | null | import numpy as np
from ase.build import fcc111, add_adsorbate
from ase.visualize import view
from ase.io import write
def get_supercell_positions(sc_size, cell, positions):
    """Replicate unit-cell atom positions over an sc_size**3 block of cells.

    Ordering matches nested iteration over the (m, n, p) cell indices with
    the basis positions innermost; each output is pos + m*a1 + n*a2 + p*a3.
    """
    return [
        pos + m * cell[0] + n * cell[1] + p * cell[2]
        for m in range(sc_size)
        for n in range(sc_size)
        for p in range(sc_size)
        for pos in positions
    ]
# -----------------------------------------------------------------------------
# fcc helper functions
# -----------------------------------------------------------------------------
def fcc_positions(cube_lat):
    """Four-atom basis of the conventional fcc cell with edge `cube_lat`."""
    half = cube_lat / 2
    return [
        np.array([0, 0, 0]),
        np.array([half, half, 0]),
        np.array([0, half, half]),
        np.array([half, 0, half]),
    ]
# -----------------------------------------------------------------------------
# diamond helper functions
# -----------------------------------------------------------------------------
def cubic_diamond_positions(cube_lat):
    """Eight-atom basis of the conventional cubic diamond cell: an fcc
    sublattice plus the same sublattice shifted by a quarter diagonal."""
    q1, q2, q3 = cube_lat / 4, cube_lat / 2, 3 * cube_lat / 4
    coords = [
        (0, 0, 0), (q2, q2, 0), (0, q2, q2), (q2, 0, q2),
        (q1, q1, q1), (q3, q3, q1), (q1, q3, q3), (q3, q1, q3),
    ]
    return [np.array(triple) for triple in coords]
def primitive_diamond_positions(prim_lat):
    """Two-atom basis of the primitive diamond cell: the origin plus an
    atom offset by half the lattice parameter along each axis."""
    offset = prim_lat / 2
    return [np.array([0, 0, 0]), np.array([offset, offset, offset])]
# -----------------------------------------------------------------------------
# slab helper functions
# -----------------------------------------------------------------------------
def get_fcc111_slab(layers, size, element, vacuum):
    """Build a size x size fcc(111) slab of `element`, `layers` cells deep,
    padded by `vacuum` along the surface normal."""
    return fcc111(element, size=(size, size, layers), vacuum=vacuum)
def fcc111_and_adsorbate(layers, size, element, vacuum, height, position):
    """fcc(111) slab with one same-element adsorbate at the named site,
    then re-centered along z with the requested vacuum padding."""
    surface = fcc111(element, size=(size, size, layers))
    add_adsorbate(surface, element, height, position)
    surface.center(vacuum=vacuum, axis=2)
    return surface
# -----------------------------------------------------------------------------
# water helper functions
# -----------------------------------------------------------------------------
def water_coordinates(ox_pos: np.ndarray,
                      theta: float, phi: float) -> list:
    """Unimplemented stub.

    Intended (per the section header) to place the hydrogen atoms of a
    water molecule around the oxygen at `ox_pos` with orientation
    (theta, phi). Currently it only computes constants and falls through
    to `pass`, returning None despite the declared list return type —
    TODO: implement or remove.
    """
    H_angle = 104.45 * (2*np.pi / 360)  # H-O-H bond angle, degrees -> radians
    OH_len = 95.84e-12  # O-H bond length in meters — unit scale differs from the rest of this file; verify
    pass
if __name__ == '__main__':
    # Smoke test: build a small Pd(111) slab with a single hcp-site
    # adsorbate and print its geometry.
    layers = 2
    size = 2
    element = 'Pd'
    vacuum = 10  # vacuum padding applied along z when centering
    height = 1  # adsorbate height above the surface
    position = 'hcp'
    slab_test = fcc111_and_adsorbate(layers, size, element, vacuum, height,
                                     position)
    print(slab_test.positions)
    print(slab_test.cell)
| 33.237113 | 79 | 0.457816 |
7957a3d9a4ff5c7faecd851aeab7aa6b76f5aac5 | 3,783 | py | Python | start_here.py | giansegato/n-Tuple-patterns-to-forecast-prices | b83ca8b48ddd55f7d93b9daf8a91dad140a4f9a3 | [
"MIT"
] | null | null | null | start_here.py | giansegato/n-Tuple-patterns-to-forecast-prices | b83ca8b48ddd55f7d93b9daf8a91dad140a4f9a3 | [
"MIT"
] | null | null | null | start_here.py | giansegato/n-Tuple-patterns-to-forecast-prices | b83ca8b48ddd55f7d93b9daf8a91dad140a4f9a3 | [
"MIT"
] | null | null | null | from utils import *
import ml
from sklearn import tree, metrics
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
def data_analysis():
    """Interactively explore k-day movement patterns in the price data.

    Loads the dataset, tabulates up/down (U/D) and up-only (U) movement
    counts, then optionally saves each table to CSV and/or plots it,
    prompting the user at each step. (Python 2 module: uses raw_input,
    iteritems and the `long` type.)
    """
    dataframe = data_import()
    results_up_down = count_up_down(dataframe)
    results_only_up = count_only_up(dataframe)
    # Currently unused result — presumably kept for future analysis; confirm.
    results_dominant_event = count_dominant_event(dataframe)
    # BUG FIX: removed a no-op loop over `training_set.columns.values` here.
    # `training_set` is not defined in this scope (NameError at runtime) and
    # the loop body consisted solely of `continue`.
    yn_save = raw_input("Wanna save the tables?\n")
    for key, df in results_up_down.iteritems():
        if (yn_save == "y"):
            rename_columns(rename_indexes(df)).to_csv("{}-day-movements.csv".format(key), encoding='utf-8')
        # Only integer keys correspond to k-day sequences that can be plotted.
        if not isinstance(key, (int, long)): continue
        yn = raw_input("Want to proceed with the k = " + str(key) + " U/D plot? [y/n]\t")
        if (yn == "y"):
            plot(df, key, 'U/D movements in a ' + str(key) + '-day sequence')
    for key, df in results_only_up.iteritems():
        if (yn_save == "y"):
            rename_columns(df).to_csv("{}-day-ups.csv".format(key), encoding='utf-8')
        yn = raw_input("Want to proceed with the k = " + str(key) + " U plot? [y/n]\t")
        if (yn == "y"):
            plot(df, key, 'Count of U movements in a ' + str(key) + '-day sequence')
def experiment():
dataframe = data_import('sp.csv')
a = {}
a['LogisticRegression'] = LogisticRegression(verbose=False)
a['LogisticRegression - 1K'] = LogisticRegression(C=1000.0, verbose=False)
a['DecisionTree'] = tree.DecisionTreeClassifier()
a['NN (5, 2) - 1e-3'] = MLPClassifier(alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1, verbose=False)
a['NN (5, 2) - 1.0'] = MLPClassifier(alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1, verbose=False)
a['NN (25, 2) - 1e-3'] = MLPClassifier(alpha=1e-1, hidden_layer_sizes=(25, 2), verbose=False)
a['NN (25, 2) - 1.0'] = MLPClassifier(alpha=1.0, hidden_layer_sizes=(25, 2), verbose=False)
d = {}
for k in range(1, 8):
d['all_t_{}'.format(k)] = ml.feature_extraction(dataframe, past_days = k)
d['no_ups_{}'.format(k)] = ml.feature_extraction(dataframe, use_ups = False, past_days = k)
d['no_updowns_{}'.format(k)] = ml.feature_extraction(dataframe, use_updowns = False, past_days = k)
d['no_value_{}'.format(k)] = ml.feature_extraction(dataframe, use_value = False, past_days = k)
d['only_updowns_{}'.format(k)] = ml.feature_extraction(dataframe, use_ups = False, use_value = False, past_days = k)
d['only_ups_{}'.format(k)] = ml.feature_extraction(dataframe, use_value = False, use_updowns = False, past_days = k)
d['only_value'] = ml.feature_extraction(dataframe, use_ups = False, use_updowns = False)
results = ml.experiment_dataset(MLPClassifier(alpha=0.1, hidden_layer_sizes=(25, 2), verbose=False), d)
results = results.sort(['accuracy'])
results = ml.experiment_dataset(MLPClassifier(alpha=0.1, hidden_layer_sizes=(25, 2), verbose=False), d)
results = results.sort(['accuracy'])
X, y = ml.feature_extraction(dataframe, past_days = 3, use_value = False)
algorithms = {'Final': MLPClassifier(alpha=0.1, hidden_layer_sizes=(25, 2), verbose=False)}
results = ml.experiment_algorithms(X, y, algorithms)
print results
def start():
    """Entry menu: route to exploratory data analysis or the ML experiments."""
    choice = raw_input("Data analysis [1] or Machine Learning [2]?\t")
    if choice == "1":
        data_analysis()
    else:
        experiment()
# Entry point: only prompt when executed as a script, not on import.
if __name__ == "__main__":
    #setup()
    start()
7957a433a713130cfd85c95b69f5ab33049bdfed | 3,299 | py | Python | Northwind_populate_data.py | marvelje/northwind-SQLite3 | c15c9e7b016ea4ec24738fb784f75cff2e5a805d | [
"MIT"
] | 171 | 2017-01-26T09:32:24.000Z | 2022-03-30T21:27:01.000Z | Northwind_populate_data.py | marvelje/northwind-SQLite3 | c15c9e7b016ea4ec24738fb784f75cff2e5a805d | [
"MIT"
] | 4 | 2019-01-15T11:15:00.000Z | 2021-04-25T20:29:53.000Z | Northwind_populate_data.py | marvelje/northwind-SQLite3 | c15c9e7b016ea4ec24738fb784f75cff2e5a805d | [
"MIT"
] | 168 | 2017-03-02T17:55:08.000Z | 2022-03-25T05:14:23.000Z | from datetime import timedelta, datetime
from random import randint
from random import choice as rc
import sqlite3
# Pick a random datetime lying between two datetime objects.
def random_date(start, end):
    """Return a uniformly random datetime in [start, end], at one-second granularity."""
    span_seconds = int((end - start).total_seconds())
    return start + timedelta(seconds=randint(0, span_seconds))
# Connect to the DB
conn = sqlite3.connect('Northwind.sqlite')
c = conn.cursor()
# Distinct shipping destinations already present in [Order]; reused for the synthetic orders.
# ShipName, ShipAddress, ShipCity, ShipRegion, ShipPostalCode
c.execute("select distinct ShipName, ShipAddress, ShipCity, ShipRegion, ShipPostalCode, ShipCountry from [Order]")
locations = [(row[0], row[1], row[2], row[3], row[4], row[5]) for row in c.fetchall()]
# Employee.Id
c.execute("select distinct id from [Employee]")
employees = [row[0] for row in c.fetchall()]
# Shipper.Id
c.execute("select distinct id from [Shipper]")
shippers = [row[0] for row in c.fetchall()]
# Customer.Id
c.execute("select distinct id from [Customer]")
customers = [row[0] for row in c.fetchall()]
# Create a bunch of new orders (random count between 15000 and 16000).
# Freight is set to 0.00 here as a marker; it is recomputed from order items at the end.
for i in range(randint(15000,16000)):
    sql = 'INSERT INTO [Order] (CustomerId, EmployeeId, OrderDate, RequiredDate, ShippedDate, ShipVia, Freight, ShipName, ShipAddress, ShipCity, ShipRegion, ShipPostalCode, ShipCountry) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'
    location = rc(locations)
    # order placed sometime between 2012-07-10 and now; required/shipped dates follow it
    order_date = random_date(datetime.strptime('2012-07-10', '%Y-%m-%d'), datetime.today())
    required_date = random_date(order_date, order_date+timedelta(days=randint(14,60)))
    shipped_date = random_date(order_date, order_date+timedelta(days=randint(1,30)))
    params = (
        rc(customers), # CustomerId
        rc(employees), # EmployeeId
        order_date, # OrderDate
        required_date, # RequiredDate
        shipped_date, # ShippedDate
        rc(shippers), # ShipVia
        0.00, # Freight
        location[0], # ShipName
        location[1], # ShipAddress
        location[2], # ShipCity
        location[3], # ShipRegion
        location[4], # ShipPostalCode
        location[5], # ShipCountry
    )
    c.execute(sql,params)
# Product.Id
c.execute("select distinct id, UnitPrice from [Product]")
products = [(row[0], row[1]) for row in c.fetchall()]
# Order.Id -- Freight = 0.00 identifies the orders inserted above
c.execute("select distinct id from [Order] where Freight = 0.00")
orders = [row[0] for row in c.fetchall()]
# Fill the order with items: each order gets a random number of distinct products.
for order in orders:
    used = []
    for x in range(randint(1,len(products))):
        sql = 'INSERT INTO [OrderDetail] (Id, OrderId, ProductId, UnitPrice, Quantity, Discount) VALUES (?, ?, ?, ?, ?, ?)'
        # rejection-sample until we draw a product not yet on this order
        control = 1
        while control:
            product = rc(products)
            if product not in used:
                used.append(product)
                control = 0
        params = (
            "%s/%s" % (order, product[0]), # composite detail Id "orderId/productId"
            order, # OrderId
            product[0], # ProductId
            product[1], # UnitPrice
            randint(1,50), # Quantity
            0, # Discount
        )
        c.execute(sql,params)
# Cleanup
# c.execute('update [Order] set OrderDate = date(OrderDate), RequiredDate = date(RequiredDate), ShippedDate = date(ShippedDate)')
# Recompute freight as a function of total quantity per order: 0.25 per unit plus a 10 base fee.
c.execute("select sum(Quantity)*0.25+10, OrderId from [OrderDetail] group by OrderId")
orders = [(row[0],row[1]) for row in c.fetchall()]
for order in orders:
    c.execute("update [Order] set Freight=? where Id=?", (order[0], order[1]))
# single commit at the end keeps the whole population atomic
conn.commit()
conn.close()
7957a54d308ec6144c3b622625fa3972bc338d60 | 806 | py | Python | zooniverse_web/tests/test_email.py | ADACS-Australia/SS18A-JBanfield | 26196091cc83f88f28abdf979276ee439349efa8 | [
"MIT"
] | null | null | null | zooniverse_web/tests/test_email.py | ADACS-Australia/SS18A-JBanfield | 26196091cc83f88f28abdf979276ee439349efa8 | [
"MIT"
] | 15 | 2018-06-05T10:28:03.000Z | 2018-07-03T09:05:11.000Z | zooniverse_web/tests/test_email.py | ADACS-Australia/SS18A-JBanfield | 26196091cc83f88f28abdf979276ee439349efa8 | [
"MIT"
] | null | null | null | """
Distributed under the MIT License. See LICENSE.txt for more info.
"""
from __future__ import unicode_literals
from django.test import TestCase
from django.core import mail
from zooniverse_web.mailer.email import Email
class TestEmail(TestCase):
    """Exercises the mailer wrapper: one templated message must land in Django's test outbox."""

    def test_email(self):
        """Send a single email and verify the recipient list and rendered body."""
        recipients = ['testto@localhost.com']
        sender = 'testfrom@localhost.com'
        body_template = '<p>hi,</p><p>This is {{message}}</p>'
        email = Email(
            'Test Subject',
            recipients,
            body_template,
            {'message': 'message'},
            from_address=sender,
        )
        email.send_email()
        # the rendered body has its HTML tags stripped down to plain text
        delivered = [(x.to, x.body) for x in mail.outbox]
        self.assertEqual(delivered, [(['testto@localhost.com', ], 'hi,This is message')])
7957a5eea858c60bc14fc8063a6b6241b9add1b8 | 5,441 | py | Python | main.py | eddielyc/Augmented-Geometric-Distillation | 029973b7ce3c08fa1f0fa4dab27981d2148986a3 | [
"Apache-2.0"
] | 3 | 2022-03-10T05:56:04.000Z | 2022-03-12T07:32:59.000Z | main.py | eddielyc/Augmented-Geometric-Distillation | 029973b7ce3c08fa1f0fa4dab27981d2148986a3 | [
"Apache-2.0"
] | 1 | 2022-03-10T06:00:19.000Z | 2022-03-24T06:52:23.000Z | main.py | eddielyc/Augmented-Geometric-Distillation | 029973b7ce3c08fa1f0fa4dab27981d2148986a3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Time : 2020/5/3 19:22
# Author : Yichen Lu
import argparse
import os.path as osp
import torch
from reid.utils import Dataset
from reid.utils import build_test_loader, build_train_loader
from reid import trainers
from reid.evaluation.evaluators import Evaluator
from reid.utils import load_checkpoint, CheckpointManager
from reid.utils import WarmupLRScheduler
from reid.utils import before_run, build_optimizer
from reid.models import ResNet, Linear, Networks
def main(args):
    """Train a supervised person re-ID model (backbone + linear classifier) on one dataset.

    Builds the data loaders, optionally resumes from a checkpoint, then runs the
    train / evaluate / checkpoint loop for ``args.epochs`` epochs.
    """
    before_run(args)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    dataset = Dataset(args.data_root, args.dataset)
    # market = Dataset(args.data_root, 'market')
    # msmt17 = Dataset(args.data_root, 'msmt17')
    # duke = Dataset(args.data_root, 'duke')
    # cuhk03 = Dataset(args.data_root, 'cuhk03')
    # dataset = MixedTrainingSet(market, msmt17, duke, cuhk03)

    # contrastive sampling is only needed when training with multiple peers
    training_loader = build_train_loader(dataset, args, metric=True, contrast=args.peers != 1)
    query_loader, gallery_loader = build_test_loader(dataset, args)

    backbone = ResNet(depth=args.depth, last_stride=args.last_stride, last_pooling=args.last_pooling,
                      embedding=args.embedding)
    classifier = Linear(args.embedding, len(dataset.train_ids), device)

    # Load from checkpoint (restores weights and the epoch to resume from)
    epoch = 1
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        manager = CheckpointManager(backbone=backbone, classifier=classifier)
        # manager = CheckpointManager(model=model)
        epoch = manager.load(checkpoint)
        # manager.load(checkpoint)
        print("=> Start epoch {} ".format(epoch))

    # move to device only after potential checkpoint load
    backbone = backbone.to(device)
    classifier = classifier.to(device)
    networks = Networks(backbone, classifier)

    # Evaluator
    evaluator = Evaluator(backbone)

    # Checkpoint Manager (this one also knows where to write checkpoints)
    manager = CheckpointManager(logs_dir=args.logs_dir, backbone=backbone, classifier=classifier)

    # Optimizer
    optimizer_main = build_optimizer(backbone, classifier, args)

    # Lr Scheduler -- start_epoch keeps the schedule aligned when resuming
    lr_scheduler = WarmupLRScheduler(optimizer_main, warmup_epochs=args.warmup,
                                     base_lr=args.learning_rate, milestones=args.epochs_decay,
                                     start_epoch=epoch)

    # Trainer
    trainer = trainers.SupervisedTrainer(networks=networks,
                                         optimizer=optimizer_main,
                                         lr_scheduler=lr_scheduler,
                                         )

    # ------------------- Training -------------------
    for epoch in range(epoch, args.epochs + 1):
        trainer.train(epoch, training_loader)

        # periodic evaluation (args.evaluate == 0 disables it)
        if args.evaluate and epoch % args.evaluate == 0:
            evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, re_ranking=False,
                               output_feature="embedding", print_freq=1000)

        # always refresh the rolling checkpoint; keep numbered snapshots every save_freq epochs
        manager.save(epoch=epoch, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
        if epoch % args.save_freq == 0:
            manager.save(epoch=epoch)

        print(f"\n * Finished epoch {epoch} learning rate {lr_scheduler.get_lr()} \n")
    # ------------------- Training -------------------
if __name__ == '__main__':
    working_dir = osp.dirname(osp.abspath(__file__))

    parser = argparse.ArgumentParser(description="Incremental learning for person Re-ID")
    # basic configs
    parser.add_argument("-g", "--gpu", nargs='*', type=str, default=['0'])
    parser.add_argument("-s", "--seed", type=int, default=None)
    parser.add_argument('--data-root', type=str, metavar='PATH', default=osp.join(working_dir, 'data'))
    parser.add_argument('--dataset', type=str, default="msmt17", choices=['market', 'duke', 'msmt17', 'cuhk03'])
    parser.add_argument('-b', '--batch-size', type=int, default=128)
    parser.add_argument('--resume', type=str, default='', metavar='PATH')
    parser.add_argument('--epochs', type=int, default=90)
    parser.add_argument("--epochs-decay", nargs='*', type=int, default=[61, ])
    parser.add_argument('--logs-dir', type=str, metavar='PATH', default=osp.join(working_dir, 'logs'))
    # default 1000 effectively disables numbered snapshots for typical epoch counts
    parser.add_argument("--save-freq", type=int, default=1000)
    parser.add_argument("--optimizer", type=str, choices=['SGD', 'Adam'], default="SGD")
    parser.add_argument("--warmup", type=int, default=10)
    parser.add_argument('--learning-rate', type=float, default=0.01)
    parser.add_argument("--evaluate", type=int, default=10)

    # data configs
    parser.add_argument('--height', type=int, default=256)
    parser.add_argument('--width', type=int, default=128)
    # random-erasing augmentation probability and area fraction
    parser.add_argument('--re', type=float, default=0.5)
    parser.add_argument("--re-area", type=float, default=0.4)
    parser.add_argument("--peers", type=int, default=1)
    parser.add_argument("--preload", action="store_true", default=True)

    # model configs
    parser.add_argument("--last-pooling", type=str, default="avg", choices=["avg", "max"])
    parser.add_argument("--last-stride", type=int, default=2, choices=[1, 2])
    parser.add_argument("--depth", type=int, default=50, choices=[34, 50])
    parser.add_argument("--embedding", type=int, default=2048)

    args = parser.parse_args()
    # args.seed = 6677
    main(args)
| 42.178295 | 113 | 0.642713 |
7957a5f11e6e5af32fe980b9ca85a5458ed89b13 | 1,105 | py | Python | aiotdlib/api/functions/toggle_session_can_accept_calls.py | mostafa-arshadi/aiotdlib | 59f430a65dfb424fc69d471a0d7bcd77ad7acf08 | [
"MIT"
] | 37 | 2021-05-04T10:41:41.000Z | 2022-03-30T13:48:05.000Z | aiotdlib/api/functions/toggle_session_can_accept_calls.py | mostafa-arshadi/aiotdlib | 59f430a65dfb424fc69d471a0d7bcd77ad7acf08 | [
"MIT"
] | 13 | 2021-07-17T19:54:51.000Z | 2022-02-26T06:50:00.000Z | aiotdlib/api/functions/toggle_session_can_accept_calls.py | mostafa-arshadi/aiotdlib | 59f430a65dfb424fc69d471a0d7bcd77ad7acf08 | [
"MIT"
] | 7 | 2021-09-22T21:27:11.000Z | 2022-02-20T02:33:19.000Z | # =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class ToggleSessionCanAcceptCalls(BaseObject):
    """
    Toggles whether a session can accept incoming calls

    :param session_id: Session identifier
    :type session_id: :class:`int`

    :param can_accept_calls: True, if incoming calls can be accepted by the session
    :type can_accept_calls: :class:`bool`

    """

    # "@type" discriminator for the TDLib JSON protocol; serialized through the alias
    ID: str = Field("toggleSessionCanAcceptCalls", alias="@type")
    session_id: int
    can_accept_calls: bool

    @staticmethod
    def read(q: dict) -> ToggleSessionCanAcceptCalls:
        # pydantic construct() builds the model without running validation
        return ToggleSessionCanAcceptCalls.construct(**q)
| 34.53125 | 83 | 0.499548 |
7957a71d6d2755f3b8fcaf783d2178a12401cb0f | 13,067 | py | Python | debarcer/generate_vcf.py | FelixMoelder/debarcer | a401c1b0ad3f23b1f2c1c1cc03170c635186f78f | [
"MIT"
] | 11 | 2017-03-20T17:57:09.000Z | 2021-04-20T03:11:55.000Z | debarcer/generate_vcf.py | FelixMoelder/debarcer | a401c1b0ad3f23b1f2c1c1cc03170c635186f78f | [
"MIT"
] | 91 | 2016-06-28T20:59:30.000Z | 2021-04-20T21:41:42.000Z | debarcer/generate_vcf.py | FelixMoelder/debarcer | a401c1b0ad3f23b1f2c1c1cc03170c635186f78f | [
"MIT"
] | 13 | 2016-05-31T20:05:33.000Z | 2022-03-10T13:10:17.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 14:25:48 2019
@author: RJovelin
"""
import time
from debarcer.version import __version__
def GetConsData(consfile):
    '''
    (str) -> dict

    :param consfile: Path to the consensus file (merged or not)

    Returns a dictionary with consensus file info organized by chromo, and family
    size for each position: {contig: {famsize: {position: fields}}} where fields
    is the list of tab-separated column values for that data line
    '''

    # create a dict with consensus info per contig, region and umi family size
    # {contig :{fam: {pos: info}}}
    data = {}

    # context manager guarantees the file handle is closed even if a line fails to parse
    with open(consfile) as infile:
        header = infile.readline().rstrip().split('\t')
        for line in infile:
            line = line.rstrip()
            if line != '':
                line = line.split('\t')
                contig = line[header.index('CHROM')]
                if contig not in data:
                    data[contig] = {}
                # get the position
                pos = int(line[header.index('POS')])
                # get umi fam size
                famsize = int(line[header.index('FAM')])
                if famsize not in data[contig]:
                    data[contig][famsize] = {}
                # a given (contig, famsize) pair records each position at most once
                assert pos not in data[contig][famsize]
                data[contig][famsize][pos] = line

    return data
def WriteVCF(consfile, outputfile, reference, ref_threshold, alt_threshold, filter_threshold, famsize):
    '''
    (str, str, str, float, float, int, int) -> None

    :param consfile: Path to the consensus file (merged or not)
    :param outputfile: Path to the output VCF file
    :param reference: Path to the reference genome
    :param ref_threshold: Maximum reference frequency (in %) to consider alternative variants
                          (ie. position with ref freq <= ref_threshold is considered variable)
    :param alt_threshold: Minimum allele frequency (in %) to consider an alternative allele at a variable position
                          (ie. allele freq >= alt_threshold and ref freq <= ref_threshold --> record alternative allele)
    :param filter_threshold: minimum number of reads to pass alternative variants
                             (ie. filter = PASS if variant depth >= filter_threshold)
    :param famsize: Minimum umi family size

    Write a VCF from the consensus file for a given umi family size.
    Allow multiple records per position for SNVs and indels
    '''

    # parse consensus file -> consensus info for all recorded umi fam size
    consdata = GetConsData(consfile)

    # get the header of the consfile; context manager closes the handle even on error
    with open(consfile) as infile:
        header = infile.readline().rstrip().split('\t')

    # get debarcer version
    version = __version__

    # create a list with VCF records for SNVs and indels, sorted by chromosome and position
    Records = []

    # loop over sorted contigs and sorted positions in cons data for given famsize
    # make a sorted list of contigs
    Chromosomes = [i.replace('chr', '') for i in consdata.keys()]
    # make a list of non numerical contigs
    others = []
    for i in range(len(Chromosomes)):
        if Chromosomes[i].isnumeric() == False:
            others.append(Chromosomes[i])
    # sort other
    others.sort()
    # make list of numerical chromos
    if len(others) != 0:
        for i in others:
            Chromosomes.remove(i)
    Chromosomes = sorted(list(map(lambda x: int(x), Chromosomes)))
    # NOTE(review): the 'chr' prefix is unconditionally re-added below; contigs whose
    # input names lacked the prefix would no longer match consdata keys -- confirm inputs
    Chromosomes = list(map(lambda x: 'chr' + str(x), Chromosomes))
    others = list(map(lambda x: 'chr' + str(x), others))
    # add back non-numerical contigs
    Chromosomes.extend(others)

    # make a sorted list of positions
    positions = []
    for i in consdata:
        for j in consdata[i]:
            positions.extend(list(consdata[i][j].keys()))
    positions = sorted(list(map(lambda x: int(x), list(set(positions)))))

    for contig in Chromosomes:
        # check membership of famsize
        if famsize in consdata[contig]:
            for pos in positions:
                # check pos membership for merged consensus files
                if pos in consdata[contig][famsize]:
                    L = consdata[contig][famsize][pos]
                    # get reference frequency
                    ref_freq = float(L[header.index('REF_FREQ')])
                    # create VCF record if ref freq low enough to consider variant at position
                    if ref_freq <= ref_threshold:
                        # get consensus and raw depth
                        consdepth = int(L[header.index('CONSDP')])
                        rawdepth = int(L[header.index('RAWDP')])
                        # get minimum and mean family size
                        minfam = int(L[header.index('FAM')])
                        meanfam = float(L[header.index('MEAN_FAM')])
                        # set up info
                        info = 'RDP={0};CDP={1};MIF={2};MNF={3};AD={4};AL={5};AF={6}'
                        # get the reference allele
                        ref = L[header.index('REF')]
                        # get the list of single nucleotides
                        alleles = [header[header.index(i)] for i in 'ACGTN']
                        # make lists of deletions and counts
                        deletions = L[header.index('D_(ref,del)')].split(';')
                        delcounts = L[header.index('D_counts')].split(';')
                        # make lists of insertions and counts
                        insertions = L[header.index('I_(ref,ins)')].split(';')
                        inscounts = L[header.index('I_counts')].split(';')
                        # get the read depth for each allele and indels
                        depth = {i:int(L[header.index(i)]) for i in alleles}
                        if deletions != ['']:
                            for i in range(len(deletions)):
                                depth[deletions[i]] = int(delcounts[i])
                        if insertions != ['']:
                            for i in range(len(insertions)):
                                depth[insertions[i]] = int(inscounts[i])
                        # compute frequencies for each allele and indel
                        if sum(depth.values()) != 0:
                            freq = {i: (depth[i]/sum(depth.values())) * 100 for i in depth}

                            # record snvs and indels on different lines
                            # make a list of alternative alleles with frequency >= alt_threshold
                            alt_alleles = [i for i in freq if i in alleles and i != ref and freq[i] >= alt_threshold]
                            # make a list of read depth for alternative alleles passing alt_threshold
                            alt_depth = [str(depth[i]) for i in alt_alleles]
                            # make a list of frequencies for alternative alelles passing alt_threshold
                            alt_freq = [str(round(freq[i], 4)) for i in alt_alleles]
                            # record info
                            alt_info = info.format(rawdepth, consdepth, minfam, round(meanfam, 2), depth[ref], ','.join(alt_depth), ','.join(alt_freq))

                            # make list of deletions with frequency >= alt_threshold
                            del_alleles = [i for i in freq if i in deletions and freq[i] >= alt_threshold]
                            # make a list of read depth for deletions passing alt_threshold
                            del_depth = [str(depth[i]) for i in del_alleles]
                            # make a list of frequencies for deletions passing alt_threshold
                            del_freq = [str(round(freq[i], 4)) for i in del_alleles]

                            # make list of insertions with frequency >= alt_threshold
                            ins_alleles = [i for i in freq if i in insertions and freq[i] >= alt_threshold]
                            # make a list of read depth for insertions passing alt_threshold
                            ins_depth = [str(depth[i]) for i in ins_alleles]
                            # make a list of frequencies for insertions passing alt_threshold
                            ins_freq = [str(round(freq[i], 4)) for i in ins_alleles]

                            # check that alernative alleles are recorded
                            if len(alt_alleles) != 0:
                                # get the filter value based on min_read_depth
                                if True in [depth[i] >= filter_threshold for i in alt_alleles]:
                                    filt = 'PASS'
                                else:
                                    filt = 'a{0}'.format(filter_threshold)
                                Records.append('\t'.join([contig, str(pos), '.', ref, ','.join(alt_alleles), '0', filt, alt_info]) + '\n')

                            # check that deletions are recorded
                            if len(del_alleles) != 0:
                                # record deletions seperately on distinct lines
                                for i in range(len(del_alleles)):
                                    # get the filter value based on min_read_depth
                                    # bug fix: was `>= filter_threshold == True` (chained comparison,
                                    # true only when filter_threshold == 1) and the no-op comparison
                                    # `filt == 'PASS'` which never assigned filt
                                    if depth[del_alleles[i]] >= filter_threshold:
                                        filt = 'PASS'
                                    else:
                                        filt = 'a{0}'.format(filter_threshold)
                                    # record info
                                    del_info = info.format(rawdepth, consdepth, minfam, round(meanfam, 2), depth[ref], del_depth[i], del_freq[i])
                                    # extract ref allele and alt allele
                                    k = list(map(lambda x: x.strip(), del_alleles[i].replace("'", '').replace('(', '').replace(')', '').split(',')))
                                    Records.append('\t'.join([contig, str(pos), '.', k[0], k[1], '0', filt, del_info]) + '\n')

                            # check that insertions are recorded
                            if len(ins_alleles) != 0:
                                # record insertions seperately on distinct lines
                                for i in range(len(ins_alleles)):
                                    # get the filter value based on min_read_depth (same bug fix as deletions)
                                    if depth[ins_alleles[i]] >= filter_threshold:
                                        filt = 'PASS'
                                    else:
                                        filt = 'a{0}'.format(filter_threshold)
                                    # record info
                                    ins_info = info.format(rawdepth, consdepth, minfam, round(meanfam, 2), depth[ref], ins_depth[i], ins_freq[i])
                                    # extract ref allele and alt allele
                                    k = list(map(lambda x: x.strip(), ins_alleles[i].replace("'", '').replace('(', '').replace(')', '').split(',')))
                                    Records.append('\t'.join([contig, str(pos), '.', k[0], k[1], '0', filt, ins_info]) + '\n')

    # write VCF only if positions are recorded
    if len(Records) != 0:
        with open(outputfile, 'w') as newfile:
            # write VCF header
            newfile.write('##fileformat=VCFv4.1\n')
            newfile.write('##fileDate={0}\n'.format(time.strftime('%Y%m%d', time.localtime())))
            newfile.write('##reference={0}\n'.format(reference))
            newfile.write('##source=Debarcer v. {0}\n'.format(version))
            # write info/filter/format metadata
            newfile.write('##INFO=<ID=RDP,Number=1,Type=Integer,Description=\"Raw Depth\">\n')
            newfile.write('##INFO=<ID=CDP,Number=1,Type=Integer,Description=\"Consensus Depth\">\n')
            newfile.write('##INFO=<ID=MIF,Number=1,Type=Integer,Description=\"Minimum Family Size\">\n')
            newfile.write('##INFO=<ID=MNF,Number=1,Type=Float,Description=\"Mean Family Size\">\n')
            newfile.write('##INFO=<ID=AD,Number=1,Type=Integer,Description=\"Reference allele Depth\">\n')
            newfile.write('##INFO=<ID=AL,Number=A,Type=Integer,Description=\"Alternate Allele Depth\">\n')
            newfile.write('##INFO=<ID=AF,Number=A,Type=Float,Description=\"Alternate Allele Frequency\">\n')
            newfile.write('##FILTER=<ID=a{0},Description=\"Alternate allele depth below {0}\">\n'.format(filter_threshold))
            # write data header
            newfile.write('\t'.join(['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO']) + '\n')
            # write data -- each record already ends in '\n'; joining with '\n'
            # (as before) inserted blank lines between VCF data lines
            newfile.write(''.join(Records))
7957a83621e1bac1e2447ff0f5b26575783757aa | 9,349 | py | Python | ftests/parallel/test_builtin.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 16 | 2015-12-09T02:54:42.000Z | 2021-04-20T11:26:39.000Z | ftests/parallel/test_builtin.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 95 | 2015-12-09T00:49:40.000Z | 2022-02-14T13:34:55.000Z | ftests/parallel/test_builtin.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 13 | 2015-05-08T04:16:42.000Z | 2021-01-15T09:28:06.000Z | # -*- coding: UTF-8 -*-
#
# Copyright (C) 2012 Yung-Yu Chen <yyc@solvcon.net>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
from unittest import TestCase
from solvcon.anchor import VtkAnchor
from solvcon.solver import BlockSolver
from solvcon.hook import BlockHook
class CaseCollect(BlockHook):
    """Hook that copies the interior 'soln'/'dsoln' arrays into case variables."""

    def postmarch(self):
        # collect both solution arrays after every march step, soln first
        for arr_name in ('soln', 'dsoln'):
            self._collect_interior(arr_name, tovar=True)

    def preloop(self):
        # collect once before the time loop so initial values are available
        self.postmarch()
class TestingSolver(BlockSolver):
    """Minimal solver used by the tests: each half-step adds cell volume to soln
    and cell centroid to dsoln (via the fake algorithm), so results are exactly
    predictable from the mesh metrics."""

    # silence solver progress messages during tests
    MESG_FILENAME_DEFAULT = os.devnull

    _interface_init_ = ['cecnd', 'cevol']

    def __init__(self, blk, *args, **kw):
        """
        @keyword neq: number of equations (variables).
        @type neq: int
        """
        from numpy import empty
        super(TestingSolver, self).__init__(blk, *args, **kw)
        # data structure for C/FORTRAN.
        self.blk = blk
        # arrays.
        ndim = self.ndim
        ncell = self.ncell
        ngstcell = self.ngstcell
        ## solutions.
        # sol/dsol hold the previous step; soln/dsoln the next; arrays include ghost cells
        neq = self.neq
        self.sol = empty((ngstcell+ncell, neq), dtype=self.fpdtype)
        self.soln = empty((ngstcell+ncell, neq), dtype=self.fpdtype)
        self.dsol = empty((ngstcell+ncell, neq, ndim), dtype=self.fpdtype)
        self.dsoln = empty((ngstcell+ncell, neq, ndim), dtype=self.fpdtype)
        ## metrics.
        self.cecnd = empty(
            (ngstcell+ncell, self.CLMFC+1, ndim), dtype=self.fpdtype)
        self.cevol = empty((ngstcell+ncell, self.CLMFC+1), dtype=self.fpdtype)

    def create_alg(self):
        # wrap the mesh and this solver in the fake C-level algorithm object
        from solvcon.parcel.fake._algorithm import FakeAlgorithm
        alg = FakeAlgorithm()
        alg.setup_mesh(self.blk)
        alg.setup_algorithm(self)
        return alg

    ##################################################
    # marching algorithm.
    ##################################################
    # MMNAMES collects the marching-method names in declaration order;
    # presumably consumed by the base solver's marching loop -- confirm in BlockSolver
    MMNAMES = list()

    MMNAMES.append('update')
    def update(self, worker=None):
        # roll soln/dsoln into sol/dsol before computing the next step
        self.sol[:,:] = self.soln[:,:]
        self.dsol[:,:,:] = self.dsoln[:,:,:]

    MMNAMES.append('calcsoln')
    def calcsoln(self, worker=None):
        self.create_alg().calc_soln()

    MMNAMES.append('ibcsoln')
    def ibcsoln(self, worker=None):
        # exchange interface BC data only when running under a parallel worker
        if worker: self.exchangeibc('soln', worker=worker)

    MMNAMES.append('calccfl')
    def calccfl(self, worker=None):
        # dummy CFL marker for the tests
        self.marchret = -2.0

    MMNAMES.append('calcdsoln')
    def calcdsoln(self, worker=None):
        self.create_alg().calc_dsoln()

    MMNAMES.append('ibcdsoln')
    def ibcdsoln(self, worker=None):
        if worker: self.exchangeibc('dsoln', worker=worker)
class TestBlockCaseRun(TestCase):
    """Shared fixture: builds a BlockCase over the sample mesh with the fake
    TestingSolver and the collecting hooks; subclasses pick the domain type."""

    # simulation parameters shared by all subclasses
    time = 0.0
    time_increment = 1.0
    nsteps = 10

    def _get_case(self, **kw):
        import os
        from solvcon.conf import env
        from solvcon.case import BlockCase
        from solvcon.anchor import FillAnchor
        from solvcon.helper import Information
        meshfn = kw.get('meshfn', 'sample.neu')
        kw['meshfn'] = os.path.join(env.datadir, meshfn)

        case = BlockCase(basedir='.', basefn='blockcase', bcmap=None,
            solvertype=TestingSolver, neq=1,
            steps_run=self.nsteps, time_increment=self.time_increment,
            **kw
        )
        case.info = Information()
        # hook order matters: zero-fill the solution arrays before collecting them
        case.runhooks.append(FillAnchor,
            keys=('soln', 'dsoln'), value=0.0,
        )
        case.runhooks.append(CaseCollect)
        case.init()
        return case
class TestSequential(TestBlockCaseRun):
    """Runs the case on the plain (non-partitioned) Domain type."""

    def test_soln(self):
        from numpy import zeros
        from solvcon.domain import Domain
        case = self._get_case(domaintype=Domain)
        svr = case.solver.solverobj
        case.run()
        ngstcell = svr.ngstcell
        # get result.
        soln = svr.soln[ngstcell:,0]
        # calculate reference: 2*nsteps half-steps each adding clvol*dt/2
        # (mirrors the fake algorithm's accumulation -- confirm against FakeAlgorithm)
        clvol = zeros(soln.shape, dtype=soln.dtype)
        for iistep in range(self.nsteps*2):
            clvol += svr.clvol[ngstcell:]*self.time_increment/2
        # compare.
        self.assertTrue((soln==clvol).all())

    def test_dsoln(self):
        from numpy import zeros
        from solvcon.domain import Domain
        case = self._get_case(domaintype=Domain)
        svr = case.solver.solverobj
        case.run()
        ngstcell = svr.ngstcell
        # get result.
        dsoln = svr.dsoln[ngstcell:,0,:]
        # calculate reference: same accumulation with cell centroids instead of volumes
        clcnd = zeros(dsoln.shape, dtype=dsoln.dtype)
        for iistep in range(self.nsteps*2):
            clcnd += svr.clcnd[ngstcell:]*self.time_increment/2
        # compare.
        self.assertTrue((dsoln==clcnd).all())
class TestLocalParallel(TestBlockCaseRun):
    """Same checks as TestSequential but on a Collective domain split into npart parts."""

    npart = 3

    def test_soln(self):
        import sys
        from nose.plugins.skip import SkipTest
        # local parallel runs are not supported on Windows
        if sys.platform.startswith('win'): raise SkipTest
        from numpy import zeros
        from solvcon.domain import Collective
        case = self._get_case(npart=self.npart, domaintype=Collective)
        case.run()
        # get result (collected into case variables by the CaseCollect hook).
        soln = case.execution.var['soln'][:,0]
        # calculate reference
        blk = case.solver.domainobj.blk
        clvol = zeros(soln.shape, dtype=soln.dtype)
        for iistep in range(self.nsteps*2):
            clvol += blk.clvol*self.time_increment/2
        # compare.
        self.assertTrue((soln==clvol).all())

    def test_dsoln(self):
        import sys
        from nose.plugins.skip import SkipTest
        if sys.platform.startswith('win'): raise SkipTest
        from numpy import zeros
        from solvcon.domain import Collective
        case = self._get_case(npart=self.npart, domaintype=Collective)
        case.run()
        # get result.
        dsoln = case.execution.var['dsoln'][:,0,:]
        # calculate reference
        blk = case.solver.domainobj.blk
        clcnd = zeros(dsoln.shape, dtype=dsoln.dtype)
        for iistep in range(self.nsteps*2):
            clcnd += blk.clcnd*self.time_increment/2
        # compare.
        self.assertTrue((dsoln==clcnd).all())

    def test_ibcthread(self):
        import sys
        from nose.plugins.skip import SkipTest
        if sys.platform.startswith('win'): raise SkipTest
        from numpy import zeros
        from solvcon.domain import Collective
        # same as test_soln but with threaded interface-BC exchange enabled
        case = self._get_case(npart=self.npart, domaintype=Collective,
            ibcthread=True)
        case.run()
        # get result.
        soln = case.execution.var['soln'][:,0]
        # calculate reference
        blk = case.solver.domainobj.blk
        clvol = zeros(soln.shape, dtype=soln.dtype)
        for iistep in range(self.nsteps*2):
            clvol += blk.clvol*self.time_increment/2
        # compare.
        self.assertTrue((soln==clvol).all())
class TestPresplitLocalParallel(TestBlockCaseRun):
    """Parallel run loading an already-partitioned mesh (sample.dom) instead of splitting at init."""

    npart = 3

    def test_soln(self):
        import sys
        from nose.plugins.skip import SkipTest
        # local parallel runs are not supported on Windows
        if sys.platform.startswith('win'): raise SkipTest
        from numpy import zeros
        from solvcon.domain import Collective
        case = self._get_case(npart=self.npart, domaintype=Collective,
            meshfn='sample.dom')
        case.run()
        # get result.
        soln = case.execution.var['soln'][:,0]
        # calculate reference
        blk = case.solver.domainobj.blk
        clvol = zeros(soln.shape, dtype=soln.dtype)
        for iistep in range(self.nsteps*2):
            clvol += blk.clvol*self.time_increment/2
        # compare.
        self.assertTrue((soln==clvol).all())
class TestPresplitLocalParallelNoArrs(TestBlockCaseRun):
    """Pre-split parallel run without whole-domain arrays: only checks the case runs
    to completion (no CaseCollect hook, so there is nothing to compare numerically)."""

    npart = 3

    def _get_case_nocollect(self, **kw):
        # same fixture as TestBlockCaseRun._get_case but without the CaseCollect hook
        import os
        from solvcon.conf import env
        from solvcon.case import BlockCase
        from solvcon.anchor import FillAnchor
        from solvcon.helper import Information
        meshfn = kw.get('meshfn', 'sample.neu')
        kw['meshfn'] = os.path.join(env.datadir, meshfn)

        case = BlockCase(basedir='.', basefn='blockcase', bcmap=None,
            solvertype=TestingSolver, neq=1,
            steps_run=self.nsteps, time_increment=self.time_increment,
            **kw
        )
        case.info = Information()
        case.runhooks.append(FillAnchor,
            keys=('soln', 'dsoln'), value=0.0,
        )
        case.init()
        return case

    def test_run(self):
        import sys
        from nose.plugins.skip import SkipTest
        if sys.platform.startswith('win'): raise SkipTest
        # NOTE(review): `zeros` is imported but unused in this test
        from numpy import zeros
        from solvcon.domain import Collective
        case = self._get_case_nocollect(npart=self.npart,
            domaintype=Collective, meshfn='sample.dom',
            with_arrs=False, with_whole=False)
        case.run()
7957a86d70a141a3e2fbf538bb59d8b3fed332da | 177 | py | Python | quadpy/enr/__init__.py | gdmcbain/quadpy | c083d500027d7c1b2187ae06ff2b7fbdd360ccc7 | [
"MIT"
] | 1 | 2019-01-02T19:04:42.000Z | 2019-01-02T19:04:42.000Z | quadpy/enr/__init__.py | gdmcbain/quadpy | c083d500027d7c1b2187ae06ff2b7fbdd360ccc7 | [
"MIT"
] | null | null | null | quadpy/enr/__init__.py | gdmcbain/quadpy | c083d500027d7c1b2187ae06ff2b7fbdd360ccc7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
from .stroud import Stroud
from .stroud_secrest import StroudSecrest
from .tools import integrate
__all__ = ["Stroud", "StroudSecrest", "integrate"]
| 19.666667 | 50 | 0.723164 |
7957aa82bb9656d3954e7422903e754c9fab5b65 | 14,830 | py | Python | tmtoolkit/lda_utils/visualize.py | ddomhoff/tmtoolkit | 2e533d04af8fd3cbdd57af1a277f67148087b369 | [
"Apache-2.0"
] | null | null | null | tmtoolkit/lda_utils/visualize.py | ddomhoff/tmtoolkit | 2e533d04af8fd3cbdd57af1a277f67148087b369 | [
"Apache-2.0"
] | null | null | null | tmtoolkit/lda_utils/visualize.py | ddomhoff/tmtoolkit | 2e533d04af8fd3cbdd57af1a277f67148087b369 | [
"Apache-2.0"
] | null | null | null | import os
import logging
import numpy as np
import matplotlib.pyplot as plt
from tmtoolkit.utils import mat2d_window_from_indices
from tmtoolkit.lda_utils.common import top_n_from_distribution
logger = logging.getLogger('tmtoolkit')
#
# word clouds from topic models
#
def _wordcloud_color_func_black(word, font_size, position, orientation, random_state=None, **kwargs):
return 'rgb(0,0,0)'
DEFAULT_WORDCLOUD_KWARGS = {   # default wordcloud settings for transparent background and black font
    'width': 800,          # image width in pixels
    'height': 600,         # image height in pixels
    'mode': 'RGBA',        # RGBA so the background can be fully transparent
    'background_color': None,   # no fill behind the words
    'color_func': _wordcloud_color_func_black   # paint every word black
}
def write_wordclouds_to_folder(wordclouds, folder, file_name_fmt='{label}.png', **save_kwargs):
    """Save each word cloud image in `wordclouds` (a dict label -> image) to `folder`.

    File names come from `file_name_fmt` with `{label}` substituted; `save_kwargs`
    is forwarded to each image's ``save()``. Raises ValueError when `folder` does
    not exist.
    """
    if not os.path.exists(folder):
        raise ValueError('target folder `%s` does not exist' % folder)

    for label, wc in wordclouds.items():
        file_path = os.path.join(folder, file_name_fmt.format(label=label))
        logger.info('writing wordcloud to file `%s`' % file_path)
        wc.save(file_path, **save_kwargs)
def generate_wordclouds_for_topic_words(phi, vocab, top_n, topic_labels='topic_{i1}', which_topics=None,
                                        return_images=True, **wordcloud_kwargs):
    """Generate one wordcloud per topic from the topic-word distribution `phi`.

    Thin wrapper around `generate_wordclouds_from_distribution`, treating the
    topics as rows and the vocabulary as the value labels.
    """
    return generate_wordclouds_from_distribution(phi,
                                                 row_labels=topic_labels,
                                                 val_labels=vocab,
                                                 top_n=top_n,
                                                 which_rows=which_topics,
                                                 return_images=return_images,
                                                 **wordcloud_kwargs)
def generate_wordclouds_for_document_topics(theta, doc_labels, top_n, topic_labels='topic_{i1}', which_documents=None,
                                            return_images=True, **wordcloud_kwargs):
    """Generate one wordcloud per document from the document-topic distribution `theta`.

    Thin wrapper around `generate_wordclouds_from_distribution`, treating the
    documents as rows and the topic labels as the value labels.
    """
    return generate_wordclouds_from_distribution(theta,
                                                 row_labels=doc_labels,
                                                 val_labels=topic_labels,
                                                 top_n=top_n,
                                                 which_rows=which_documents,
                                                 return_images=return_images,
                                                 **wordcloud_kwargs)
def generate_wordclouds_from_distribution(distrib, row_labels, val_labels, top_n, which_rows=None, return_images=True,
                                          **wordcloud_kwargs):
    """Generate one wordcloud per row of the probability distribution `distrib`.

    Args:
        distrib: 2D probability distribution (e.g. topic-word or
            document-topic matrix).
        row_labels: labels (or a label format string) for the rows.
        val_labels: labels for the columns/values (e.g. the vocabulary).
        top_n: number of highest-probability values to include per wordcloud.
        which_rows: optional sequence of row labels to restrict generation to.
        return_images: if True, store PIL images, otherwise WordCloud objects.
        **wordcloud_kwargs: passed through to the WordCloud construction.

    Returns:
        dict mapping row label to the generated wordcloud image/instance.
    """
    # two parallel data frames: the top-n probabilities per row and the
    # words/labels those probabilities belong to
    prob = top_n_from_distribution(distrib, top_n=top_n, row_labels=row_labels, val_labels=None)
    words = top_n_from_distribution(distrib, top_n=top_n, row_labels=row_labels, val_labels=val_labels)
    if which_rows:
        prob = prob.loc[which_rows, :]
        words = words.loc[which_rows, :]
        assert prob.shape == words.shape
    wordclouds = {}
    for (p_row_name, p), (w_row_name, w) in zip(prob.iterrows(), words.iterrows()):
        # both frames were built from the same distribution, so rows must align
        assert p_row_name == w_row_name
        logger.info('generating wordcloud for `%s`' % p_row_name)
        wc = generate_wordcloud_from_probabilities_and_words(p, w,
                                                             return_image=return_images,
                                                             **wordcloud_kwargs)
        wordclouds[p_row_name] = wc
    return wordclouds
def generate_wordcloud_from_probabilities_and_words(prob, words, return_image=True, wordcloud_instance=None,
                                                    **wordcloud_kwargs):
    """Generate a single wordcloud from parallel sequences of probabilities and words.

    Args:
        prob: 1D sequence/array of word probabilities used as weights.
        words: 1D sequence/array of words; must have the same length as `prob`.
        return_image: if True return a PIL image, otherwise the WordCloud instance.
        wordcloud_instance: optional pre-configured `wordcloud.WordCloud` object.
        **wordcloud_kwargs: passed to the WordCloud construction if no
            instance is given.

    Raises:
        ValueError: if the inputs differ in length or are not 1D.
    """
    # Fixed error messages: they previously referred to `distrib`/`labels`,
    # which are not this function's parameter names (and "name length" was a
    # typo for "same length").
    if len(prob) != len(words):
        raise ValueError('`prob` and `words` must have the same length')
    if hasattr(prob, 'ndim') and prob.ndim != 1:
        raise ValueError('`prob` must be a 1D array or sequence')
    if hasattr(words, 'ndim') and words.ndim != 1:
        raise ValueError('`words` must be a 1D array or sequence')
    weights = dict(zip(words, prob))
    return generate_wordcloud_from_weights(weights, return_image=return_image,
                                           wordcloud_instance=wordcloud_instance, **wordcloud_kwargs)
def generate_wordcloud_from_weights(weights, return_image=True, wordcloud_instance=None, **wordcloud_kwargs):
    """Generate a wordcloud from a word -> weight mapping.

    If no `wordcloud_instance` is given, a `wordcloud.WordCloud` is created
    from `DEFAULT_WORDCLOUD_KWARGS` overridden by `wordcloud_kwargs`.

    Raises:
        ValueError: if `weights` is not a non-empty dict.
    """
    if not (isinstance(weights, dict) and weights):
        raise ValueError('`weights` must be a non-empty dictionary')
    if not wordcloud_instance:
        from wordcloud import WordCloud
        wc_kwargs = dict(DEFAULT_WORDCLOUD_KWARGS)
        wc_kwargs.update(wordcloud_kwargs)
        wordcloud_instance = WordCloud(**wc_kwargs)
    wordcloud_instance.generate_from_frequencies(weights)
    return wordcloud_instance.to_image() if return_image else wordcloud_instance
#
# plot heatmaps (especially for doc-topic distribution)
#
def plot_doc_topic_heatmap(fig, ax, doc_topic_distrib, doc_labels, topic_labels=None,
                           which_documents=None, which_document_indices=None,
                           which_topics=None, which_topic_indices=None,
                           xaxislabel=None, yaxislabel=None,
                           **kwargs):
    """
    Plot a heatmap for a document-topic distribution `doc_topic_distrib` to a matplotlib Figure `fig` and Axes `ax`
    using `doc_labels` as document labels on the y-axis and topics from 1 to `n_topics=doc_topic_distrib.shape[1]` on
    the x-axis.
    Custom topic labels can be passed as `topic_labels`.
    A subset of documents can be specified either with a sequence `which_documents` containing a subset of document
    labels from `doc_labels` or `which_document_indices` containing a sequence of document indices.
    A subset of topics can be specified either with a sequence `which_topics` containing sequence of numbers between
    [1, n_topics] or `which_topic_indices` which is a number between [0, n_topics-1]
    Additional arguments can be passed via `kwargs` to `plot_heatmap`.
    Please note that it is almost always necessary to select a subset of your document-topic distribution with the
    `which_documents` or `which_topics` parameters, as otherwise the amount of data to be plotted will be too high
    to give a reasonable picture.
    Returns the tuple `(fig, ax)` from `plot_heatmap`.
    """
    if which_documents is not None and which_document_indices is not None:
        raise ValueError('only `which_documents` or `which_document_indices` can be set, not both')
    if which_topics is not None and which_topic_indices is not None:
        raise ValueError('only `which_topics` or `which_topic_indices` can be set, not both')
    # translate label-based selections into index-based ones
    if which_documents is not None:
        which_document_indices = np.where(np.isin(doc_labels, which_documents))[0]
    # topic numbers are 1-based for the user; convert to 0-based indices
    if which_topics is not None:
        which_topic_indices = np.array(which_topics) - 1
    select_distrib_subset = False
    if topic_labels is None:
        topic_labels = np.array(range(1, doc_topic_distrib.shape[1]+1))
    elif not isinstance(topic_labels, np.ndarray):
        topic_labels = np.array(topic_labels)
    if which_document_indices is not None:
        select_distrib_subset = True
        doc_labels = np.array(doc_labels)[which_document_indices]
    if which_topic_indices is not None:
        select_distrib_subset = True
        topic_labels = topic_labels[which_topic_indices]
    # cut the distribution matrix down to the selected window of rows/columns
    if select_distrib_subset:
        doc_topic_distrib = mat2d_window_from_indices(doc_topic_distrib, which_document_indices, which_topic_indices)
    return plot_heatmap(fig, ax, doc_topic_distrib,
                        xaxislabel=xaxislabel or 'topic',
                        yaxislabel=yaxislabel or 'document',
                        xticklabels=topic_labels,
                        yticklabels=doc_labels,
                        **kwargs)
def plot_topic_word_heatmap(fig, ax, topic_word_distrib, vocab,
                            which_topics=None, which_topic_indices=None,
                            which_words=None, which_word_indices=None,
                            xaxislabel=None, yaxislabel=None,
                            **kwargs):
    """
    Plot a heatmap for a topic-word distribution `topic_word_distrib` to a matplotlib Figure `fig` and Axes `ax`
    using `vocab` as vocabulary on the x-axis and topics from 1 to `n_topics=topic_word_distrib.shape[0]` on
    the y-axis.
    A subset of words from `vocab` can be specified either directly with a sequence `which_words` or
    `which_word_indices` containing a sequence of word indices in `vocab`.
    A subset of topics can be specified either with a sequence `which_topics` containing sequence of numbers between
    [1, n_topics] or `which_topic_indices` which is a number between [0, n_topics-1]
    Additional arguments can be passed via `kwargs` to `plot_heatmap`.
    Please note that it is almost always necessary to select a subset of your topic-word distribution with the
    `which_words` or `which_topics` parameters, as otherwise the amount of data to be plotted will be too high
    to give a reasonable picture.
    Returns the tuple `(fig, ax)` from `plot_heatmap`.
    """
    if which_topics is not None and which_topic_indices is not None:
        raise ValueError('only `which_topics` or `which_topic_indices` can be set, not both')
    if which_words is not None and which_word_indices is not None:
        raise ValueError('only `which_words` or `which_word_indices` can be set, not both')
    # topic numbers are 1-based for the user; convert to 0-based indices
    if which_topics is not None:
        which_topic_indices = np.array(which_topics) - 1
    # translate word-based selections into index-based ones
    if which_words is not None:
        which_word_indices = np.where(np.isin(vocab, which_words))[0]
    select_distrib_subset = False
    topic_labels = np.array(range(1, topic_word_distrib.shape[0]+1))
    if which_topic_indices is not None:
        select_distrib_subset = True
        topic_labels = topic_labels[which_topic_indices]
    if which_word_indices is not None:
        select_distrib_subset = True
        vocab = np.array(vocab)[which_word_indices]
    # cut the distribution matrix down to the selected window of rows/columns
    if select_distrib_subset:
        topic_word_distrib = mat2d_window_from_indices(topic_word_distrib, which_topic_indices, which_word_indices)
    return plot_heatmap(fig, ax, topic_word_distrib,
                        xaxislabel=xaxislabel or 'vocab',
                        yaxislabel=yaxislabel or 'topic',
                        xticklabels=vocab,
                        yticklabels=topic_labels,
                        **kwargs)
def plot_heatmap(fig, ax, data,
                 xaxislabel=None, yaxislabel=None,
                 xticklabels=None, yticklabels=None,
                 title=None, grid=True,
                 values_in_cells=True, round_values_in_cells=2,
                 legend=False,
                 fontsize_axislabel=None,
                 fontsize_axisticks=None,
                 fontsize_cell_values=None):
    """Helper function to plot a heatmap for a 2D matrix `data` using matplotlib's "matshow" function.

    (The original docstring opened with four quote characters; fixed.)

    Args:
        fig: matplotlib Figure the heatmap belongs to (used for colorbar/legend).
        ax: matplotlib Axes to draw on.
        data: 2D matrix/array to visualize.
        xaxislabel, yaxislabel: optional axis labels.
        xticklabels, yticklabels: optional tick labels for columns/rows.
        title: optional plot title.
        grid: draw white gridlines between cells.
        values_in_cells: write each cell's value into the cell.
        round_values_in_cells: number of decimals for cell values (None = no rounding).
        legend: draw a colorbar legend.
        fontsize_*: optional font sizes for labels, ticks and cell values.

    Returns:
        The tuple `(fig, ax)`.

    Raises:
        ValueError: if `data` is not 2D.
    """
    if not isinstance(data, np.ndarray):
        data = np.array(data)
    if data.ndim != 2:
        raise ValueError('`data` must be a 2D matrix/array')
    # draw basic heatmap
    cax = ax.matshow(data)
    # draw legend
    if legend:
        fig.colorbar(cax)
    # set title
    if title:
        ax.set_title(title, y=1.25)
    n_rows, n_cols = data.shape
    # draw values in cells
    if values_in_cells:
        # midpoint of the data range: cells darker than this get white text
        textcol_thresh = data.min() + (data.max() - data.min()) / 2
        x_indices, y_indices = np.meshgrid(np.arange(n_cols), np.arange(n_rows))
        for x, y in zip(x_indices.flatten(), y_indices.flatten()):
            val = data[y, x]
            # lower values get white text color for better visibility
            textcol = 'white' if val < textcol_thresh else 'black'
            disp_val = round(val, round_values_in_cells) if round_values_in_cells is not None else val
            ax.text(x, y, disp_val, va='center', ha='center', color=textcol, fontsize=fontsize_cell_values)
    # customize axes
    if xaxislabel:
        ax.set_xlabel(xaxislabel)
    if yaxislabel:
        ax.set_ylabel(yaxislabel)
    if fontsize_axislabel:
        for item in (ax.xaxis.label, ax.yaxis.label):
            item.set_fontsize(fontsize_axislabel)
    ax.set_xticks(np.arange(0, n_cols))
    ax.set_yticks(np.arange(0, n_rows))
    if xticklabels is not None:
        ax.set_xticklabels(xticklabels, rotation=45, ha='left')
    if yticklabels is not None:
        ax.set_yticklabels(yticklabels)
    if fontsize_axisticks:
        for label in (ax.get_xticklabels() + ax.get_yticklabels()):
            label.set_fontsize(fontsize_axisticks)
    # gridlines based on minor ticks
    if grid:
        ax.set_xticks(np.arange(-.5, n_cols), minor=True)
        ax.set_yticks(np.arange(-.5, n_rows), minor=True)
        ax.grid(which='minor', color='w', linestyle='-', linewidth=1)
    return fig, ax
#
# plotting of evaluation results
#
def plot_eval_results(eval_results, metric=None, xaxislabel=None, yaxislabel=None, title=None,
                      title_fontsize='x-large', axes_title_fontsize='large', **fig_kwargs):
    """
    Plot the evaluation results from `eval_results`. `eval_results` must be a sequence containing `(param, values)`
    tuples, where `param` is the parameter value to appear on the x axis and `values` can be a dict structure
    containing the metric values. `eval_results` can be created using the `results_by_parameter` function from the
    `lda_utils.common` module.
    Set `metric` to plot only a specific metric (a single metric name or a list of names; by default all metrics
    except the special 'model' entry are plotted).
    Set `xaxislabel` for a label on the x-axis.
    Set `yaxislabel` for a label on the y-axis.
    Set `title` for a plot title.
    Returns the figure and axes object(s) created by `plt.subplots`.
    """
    if type(eval_results) not in (list, tuple) or not eval_results:
        raise ValueError('`eval_results` must be a list or tuple with at least one element')
    if type(eval_results[0]) not in (list, tuple) or len(eval_results[0]) != 2:
        raise ValueError('`eval_results` must be a list or tuple containing a (param, values) tuple. '
                         'Maybe `eval_results` must be converted with `results_by_parameter`.')
    if metric is not None and type(metric) not in (list, tuple):
        metric = [metric]
    elif metric is None:
        # remove special evaluation result 'model': the calculated model itself
        all_metrics = set(next(iter(eval_results))[1].keys()) - {'model'}
        metric = sorted(all_metrics)
    fig, axes = plt.subplots(len(metric), ncols=1, sharex=True, **fig_kwargs)
    # set title
    if title:
        figtitle = fig.suptitle(title, fontsize=title_fontsize)
        figtitle.set_y(0.95)
    x = list(zip(*eval_results))[0]
    # np.atleast_1d: for a single metric, plt.subplots returns a bare Axes
    # object (not an array), which has no `.flatten()` method -- iterating it
    # previously raised an AttributeError. With more than one metric the
    # behavior is unchanged.
    for i, (ax, m) in enumerate(zip(np.atleast_1d(axes).flatten(), metric)):
        y = [metric_res[m] for _, metric_res in eval_results]
        ax.plot(x, y, label=m)
        ax.set_title(m, fontsize=axes_title_fontsize)
        # set axis labels
        if xaxislabel and i == len(metric)-1:
            ax.set_xlabel(xaxislabel)
        if yaxislabel:
            ax.set_ylabel(yaxislabel)
    fig.subplots_adjust(hspace=0.35)
    # set title
    if title:
        fig.subplots_adjust(top=0.86)
    return fig, axes
| 40.966851 | 118 | 0.663655 |
7957aa94d5643dd7ff67c59ebef391b22de6d989 | 2,428 | py | Python | flask_server/mysite/flask_app.py | pal03377/pc_presence | 367cb2776cee5caa0e54e69bb5a121411851d3f8 | [
"MIT"
] | 2 | 2016-10-21T14:48:19.000Z | 2021-03-03T21:29:01.000Z | flask_server/mysite/flask_app.py | pal03377/pc_presence | 367cb2776cee5caa0e54e69bb5a121411851d3f8 | [
"MIT"
] | null | null | null | flask_server/mysite/flask_app.py | pal03377/pc_presence | 367cb2776cee5caa0e54e69bb5a121411851d3f8 | [
"MIT"
] | null | null | null | from flask import Flask
import os
import hashlib
import pickle
import time
import json
app = Flask(__name__)
# Base directory where per-team password hashes and presence files are stored.
mydir = "./mysite"
# When False, the /register endpoint refuses new team registrations.
registrationEnabled = True
# A member counts as offline once their last ping is older than this (seconds).
offlineAfterTime = 2 * 60 * 60
def checkIfTeamNameIsValid(teamName):
    """A valid team name is non-empty and contains only letters."""
    is_valid = teamName.isalpha()
    return is_valid
@app.route("/")
def home():
    """Landing page: returns the server's current working directory.

    NOTE(review): exposing the cwd looks like a leftover debug endpoint --
    confirm whether this is intentional.
    """
    return os.getcwd()
@app.route("/register")
@app.route("/register/<team>/<pwd>")
def registerTeam(team=None, pwd=None):
    """Register a new team.

    Without URL arguments, serves the registration HTML form. With
    `<team>/<pwd>`, stores the team's SHA-512 password hash and an empty
    presence dict, unless the name is invalid or already taken.
    """
    if not registrationEnabled:
        return "Sorry, your admin has disabled team registrations."
    # Serve the form first: `team` is None for the bare /register route. The
    # original code built a file path from `team` *before* this check, which
    # raised a TypeError (str + None) on every visit to /register.
    if team is None:
        with open(mydir + "/registration.html", "r") as f:
            return f.read()
    # Validate before touching the filesystem with the user-supplied name.
    if not checkIfTeamNameIsValid(team):
        return "Invalid team name: " + team + "<br>Your team name can only contain letters."
    if os.path.exists(mydir + "/" + team + ".onOffInfo"):
        return "This team already exists!"
    with open(mydir + "/" + team + ".pwdhash", "w") as f:
        f.write(hashlib.sha512(pwd.encode()).hexdigest())
    with open(mydir + "/" + team + ".onOffInfo", "wb") as f:
        pickle.dump({}, f)
    return "Team registration completed!"
def checkLogin(team, pwd):
    """Return True if `team` exists and `pwd` matches its stored password hash."""
    import hmac  # local import: keeps this fix self-contained
    if not checkIfTeamNameIsValid(team):
        return False
    if not os.path.exists(mydir + "/" + team + ".pwdhash"):
        return False
    # the hash file is written as a single line without newline by registerTeam
    with open(mydir + "/" + team + ".pwdhash", "r") as f:
        pwdhash = f.read()
    # compare_digest instead of == to avoid leaking information about the
    # stored hash through comparison timing.
    return hmac.compare_digest(hashlib.sha512(pwd.encode()).hexdigest(), pwdhash)
@app.route("/login/<team>/<pwd>")
def login(team, pwd):
    """Report whether the given team credentials are valid ("correct"/"wrong")."""
    return "correct" if checkLogin(team, pwd) else "wrong"
@app.route("/stillOnline/<myname>/<team>/<pwd>")
def stillOnline(myname, team, pwd):
    """Record a presence ping and return the team's online/offline status.

    (1) Marks `myname` as online now (stores the current timestamp).
    (2) Returns a JSON object mapping each known member to True (pinged
        within `offlineAfterTime` seconds) or False (offline).
    """
    if not checkLogin(team, pwd):
        return "wrong password"
    currentTime = time.time()
    # NOTE(review): this read-modify-write of the pickle file is not atomic;
    # concurrent requests for the same team can lose updates. Confirm the
    # server runs single-threaded before relying on this.
    with open(mydir + "/" + team + ".onOffInfo", "rb") as f:
        peopledict = pickle.load(f)
    peopledict[myname] = currentTime
    with open(mydir + "/" + team + ".onOffInfo", "wb") as f:
        pickle.dump(peopledict, f)
    onOffPeople = {}
    for person in peopledict:
        onOffPeople[person] = (
            (currentTime - peopledict[person]) < offlineAfterTime)
    return json.dumps(onOffPeople)
@app.route("/icon")
def getIcon():
    """Serve the application icon image."""
    # `send_from_directory` was referenced without ever being imported (only
    # `Flask` is imported at module level), so this endpoint raised a
    # NameError on every request; import it here explicitly.
    from flask import send_from_directory
    return send_from_directory(mydir, "icon.png")
| 27.908046 | 92 | 0.615321 |
7957aaf157f297279f712a67434a1024f97a2df8 | 3,444 | py | Python | tests/test_games4e.py | imRushabhShah/aima-python | 77066d1afd0bc432a7c8611ef78e6f351906a663 | [
"MIT"
] | 1 | 2020-04-24T17:12:48.000Z | 2020-04-24T17:12:48.000Z | tests/test_games4e.py | imRushabhShah/aima-python | 77066d1afd0bc432a7c8611ef78e6f351906a663 | [
"MIT"
] | null | null | null | tests/test_games4e.py | imRushabhShah/aima-python | 77066d1afd0bc432a7c8611ef78e6f351906a663 | [
"MIT"
] | 1 | 2019-12-09T20:50:14.000Z | 2019-12-09T20:50:14.000Z | import pytest
from games4e import *
# Creating the game instances
f52 = Fig52Game()  # the small example game tree used by the f52-based tests
ttt = TicTacToe()
con4 = ConnectFour()
# fixed seed so the tests involving random_player are reproducible
random.seed("aima-python")
def gen_state(to_move='X', x_positions=(), o_positions=(), h=3, v=3, k=3):
    """Given whose turn it is to move, the positions of X's on the board, the
    positions of O's on the board, and, (optionally) number of rows, columns
    and how many consecutive X's or O's required to win, return the corresponding
    game state.

    Note: the mutable default arguments ([]) were replaced with tuples; the
    old defaults were never mutated, so behavior is unchanged. `k` is
    accepted for API compatibility but not used here.
    """
    all_cells = {(x, y) for x in range(1, h + 1) for y in range(1, v + 1)}
    # free moves = every cell not already occupied by either player
    moves = list(all_cells - set(x_positions) - set(o_positions))
    board = {pos: 'X' for pos in x_positions}
    board.update({pos: 'O' for pos in o_positions})
    return GameState(to_move=to_move, utility=0, board=board, moves=moves)
def test_minimax_decision():
    """minimax_decision picks the known-optimal move in each Fig. 5.2 state."""
    expected_moves = {'A': 'a1', 'B': 'b1', 'C': 'c1', 'D': 'd3'}
    for state, best_move in expected_moves.items():
        assert minimax_decision(state, f52) == best_move
def test_alphabeta_search():
    """alphabeta_search finds the optimal move in Fig. 5.2 and TicTacToe states."""
    assert alphabeta_search('A', f52) == 'a1'
    assert alphabeta_search('B', f52) == 'b1'
    assert alphabeta_search('C', f52) == 'c1'
    assert alphabeta_search('D', f52) == 'd3'
    # TicTacToe positions with a single best move at (2, 2) or (1, 3)
    state = gen_state(to_move='X', x_positions=[(1, 1), (3, 3)],
                      o_positions=[(1, 2), (3, 2)])
    assert alphabeta_search(state, ttt) == (2, 2)
    state = gen_state(to_move='O', x_positions=[(1, 1), (3, 1), (3, 3)],
                      o_positions=[(1, 2), (3, 2)])
    assert alphabeta_search(state, ttt) == (2, 2)
    state = gen_state(to_move='O', x_positions=[(1, 1)],
                      o_positions=[])
    assert alphabeta_search(state, ttt) == (2, 2)
    state = gen_state(to_move='X', x_positions=[(1, 1), (3, 1)],
                      o_positions=[(2, 2), (3, 1)])
    assert alphabeta_search(state, ttt) == (1, 3)
def test_monte_carlo_tree_search():
    """MCTS finds good TicTacToe moves and never loses against weaker players."""
    state = gen_state(to_move='X', x_positions=[(1, 1), (3, 3)],
                      o_positions=[(1, 2), (3, 2)])
    assert monte_carlo_tree_search(state, ttt) == (2, 2)
    state = gen_state(to_move='O', x_positions=[(1, 1), (3, 1), (3, 3)],
                      o_positions=[(1, 2), (3, 2)])
    assert monte_carlo_tree_search(state, ttt) == (2, 2)
    # uncomment the following when removing the 3rd edition
    # state = gen_state(to_move='O', x_positions=[(1, 1)],
    #                   o_positions=[])
    # assert monte_carlo_tree_search(state, ttt) == (2, 2)
    state = gen_state(to_move='X', x_positions=[(1, 1), (3, 1)],
                      o_positions=[(2, 2), (3, 1)])
    assert monte_carlo_tree_search(state, ttt) == (1, 3)
    # should never lose to a random or alphabeta player in a ttt game
    assert ttt.play_game(mcts_player, random_player) >= 0
    assert ttt.play_game(mcts_player, alphabeta_player) >= 0
    # should never lose to a random player in a connect four game
    assert con4.play_game(mcts_player, random_player) >= 0
def test_random_tests():
    """Sanity checks against (partly) randomized opponents."""
    assert Fig52Game().play_game(alphabeta_player, alphabeta_player) == 3
    # The player 'X' (one who plays first) in TicTacToe never loses:
    assert ttt.play_game(alphabeta_player, alphabeta_player) >= 0
    # ... not even against a random opponent:
    assert ttt.play_game(alphabeta_player, random_player) >= 0
if __name__ == "__main__":
pytest.main()
| 35.142857 | 81 | 0.612079 |
7957ac51856c156a67d195c9614252a0cc40a90e | 6,044 | py | Python | docs/conf.py | tomerten/sparkdatachallenge | d20dbf5008a4dc5909b886486bb7f5658edd0e73 | [
"MIT"
] | null | null | null | docs/conf.py | tomerten/sparkdatachallenge | d20dbf5008a4dc5909b886486bb7f5658edd0e73 | [
"MIT"
] | null | null | null | docs/conf.py | tomerten/sparkdatachallenge | d20dbf5008a4dc5909b886486bb7f5658edd0e73 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sparkdatachallenge documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
import sparkdatachallenge
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"matplotlib.sphinxext.mathmpl",
"matplotlib.sphinxext.plot_directive",
"IPython.sphinxext.ipython_directive",
"IPython.sphinxext.ipython_console_highlighting",
"sphinx.ext.mathjax",
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.inheritance_diagram",
"sphinx.ext.napoleon",
"nbsphinx",
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_preprocess_types = False
napoleon_type_aliases = None
napoleon_attr_annotations = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
exclude_patterns = ["_build", "**.ipynb_checkpoints"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"sparkdatachallenge"
copyright = u"2021, Tom Mertens"
author = u"Tom Mertens"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = sparkdatachallenge.__version__
# The full version, including alpha/beta/rc tags.
release = sparkdatachallenge.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Prefer the Read the Docs theme when it is installed; otherwise fall back to
# the builtin 'classic' theme (with a console notice) so documentation builds
# never fail on this optional dependency.
try:
    import sphinx_rtd_theme
    html_theme = "sphinx_rtd_theme"
except ImportError:
    print("Sphinx html theme 'sphinx_rtd_theme' not found. Using 'classic' instead.")
    html_theme = "classic"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "sparkdatachallengedoc"
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"sparkdatachallenge.tex",
u"sparkdatachallenge Documentation",
u"Tom Mertens",
"manual",
),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "sparkdatachallenge", u"sparkdatachallenge Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"sparkdatachallenge",
u"sparkdatachallenge Documentation",
author,
"sparkdatachallenge",
"One line description of project.",
"Miscellaneous",
),
]
| 30.836735 | 98 | 0.703508 |
7957ac81e27b2ab96c80b5cfd8ddfcc923fafd42 | 393 | py | Python | openfl/component/__init__.py | sarthakpati/openfl | 8edebfd565d94f709a7d7f06d9ee38a7975c066e | [
"Apache-2.0"
] | null | null | null | openfl/component/__init__.py | sarthakpati/openfl | 8edebfd565d94f709a7d7f06d9ee38a7975c066e | [
"Apache-2.0"
] | null | null | null | openfl/component/__init__.py | sarthakpati/openfl | 8edebfd565d94f709a7d7f06d9ee38a7975c066e | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""openfl.component package."""
from .assigner import Assigner, RandomGroupedAssigner, StaticGroupedAssigner
from .aggregator import Aggregator
from .collaborator import Collaborator
__all__ = [
'Assigner',
'RandomGroupedAssigner',
'StaticGroupedAssigner',
'Aggregator',
'Collaborator'
]
| 23.117647 | 76 | 0.753181 |
7957acb6b77ec289644558ac78a0407d8ca2a9d6 | 4,767 | py | Python | keecenter/settings.py | GoRoSfan/keecenter | cb2e9a5d0467c817363893958f0efc8ed4622691 | [
"MIT"
] | null | null | null | keecenter/settings.py | GoRoSfan/keecenter | cb2e9a5d0467c817363893958f0efc8ed4622691 | [
"MIT"
] | 28 | 2021-03-10T05:39:07.000Z | 2021-09-23T16:13:16.000Z | keecenter/settings.py | GoRoSfan/keecenter | cb2e9a5d0467c817363893958f0efc8ed4622691 | [
"MIT"
] | null | null | null | """
Django settings for keecenter project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this fallback SECRET_KEY is committed to source control; any
# deployment that does not set the SECRET_KEY environment variable runs with
# a publicly known key. Rotate the key and remove the hard-coded default.
SECRET_KEY = os.environ.get('SECRET_KEY', 'x43)ex0#op5!d1iug^j*o(w5@xs#0_=)v@koa2&z3m&s1(v)e*')
ALLOWED_HOSTS = ['127.0.0.1', 'keecenter.herokuapp.com', 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'django_summernote',
'rest_framework',
'rest_framework.authtoken',
'djoser',
'public',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'keecenter.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'keecenter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Local development database settings.
# NOTE(review): real-looking credentials are hard-coded here and committed to
# source control -- move them to environment variables and rotate the password.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'keecenter_db',
        'USER': 'physic',
        'PASSWORD': '123RqweFasdVzxc$',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}
# When a DATABASE_URL env var is present (e.g. on Heroku), it overrides the
# connection settings above.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'uk'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE = 1
# Media connection settings
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/media')
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAdminUser',
'rest_framework.permissions.AllowAny',
),
'PAGE_SIZE': 10,
'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'EXCEPTION_HANDLER': 'rest_framework_json_api.exceptions.exception_handler',
'DEFAULT_PAGINATION_CLASS':
'rest_framework_json_api.pagination.PageNumberPagination',
'DEFAULT_PARSER_CLASSES': (
'rest_framework_json_api.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework_json_api.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_METADATA_CLASS': 'rest_framework_json_api.metadata.JSONAPIMetadata',
}
try:
from .local_settings import *
except ImportError:
from .prod_settings import *
| 27.554913 | 95 | 0.70086 |
7957ad3e71b22b4cb0d29e499d00f3268570bcba | 1,302 | py | Python | billy/bin/commands/__init__.py | backwardn/billy | 07ac788d25a6c79d03dd0e3d55459bbb55e22439 | [
"BSD-3-Clause"
] | 33 | 2016-11-05T07:25:48.000Z | 2022-01-31T03:40:43.000Z | billy/bin/commands/__init__.py | backwardn/billy | 07ac788d25a6c79d03dd0e3d55459bbb55e22439 | [
"BSD-3-Clause"
] | 16 | 2015-02-05T21:25:58.000Z | 2015-09-18T20:27:06.000Z | billy/bin/commands/__init__.py | backwardn/billy | 07ac788d25a6c79d03dd0e3d55459bbb55e22439 | [
"BSD-3-Clause"
] | 22 | 2015-03-23T07:13:20.000Z | 2016-06-10T04:41:06.000Z | """
defines a command extension system that is used by billy-util
new commands can be added by deriving from BaseCommand and overriding a few
attributes:
name: name of subcommand
help: help string displayed for subcommand
add_args(): method that calls `self.add_argument`
handle(args): method that does the command
"""
from six import add_metaclass
class CommandMeta(type):
    """Metaclass that collects every concrete subcommand class.

    The first class created with this metaclass (the abstract base) receives
    an empty shared `subcommands` registry; every later subclass whose `name`
    is not already registered appends itself to that list.
    """
    def __new__(mcs, classname, bases, classdict):
        new_cls = super(CommandMeta, mcs).__new__(mcs, classname, bases, classdict)
        if hasattr(new_cls, 'subcommands'):
            # a subclass: register it unless its name is already taken
            known_names = {c.name for c in new_cls.subcommands}
            if new_cls.name not in known_names:
                new_cls.subcommands.append(new_cls)
        else:
            # the base class itself: create the shared registry
            new_cls.subcommands = []
        return new_cls
@add_metaclass(CommandMeta)
class BaseCommand(object):
    """Base class for `billy-util` subcommands.

    Subclasses are registered automatically by `CommandMeta`. They should
    define a `name` attribute, optionally override `help`, declare CLI
    arguments in `add_args()` and implement `handle(args)`.
    """
    # one-line help text shown for the subcommand in the CLI help output
    help = ''
    def __init__(self, subparsers):
        """Create the argparse subparser for this command and add its args."""
        self.subparser = subparsers.add_parser(self.name, help=self.help)
        self.add_args()
    def add_argument(self, *args, **kwargs):
        """Proxy for `argparse.ArgumentParser.add_argument` on this subparser."""
        self.subparser.add_argument(*args, **kwargs)
    def add_args(self):
        """Hook for subclasses: declare CLI arguments via `self.add_argument`."""
        pass
    def handle(self, args):
        """Run the command with the parsed `args`; must be overridden."""
        raise NotImplementedError('commands must implement handle(args)')
| 27.702128 | 79 | 0.64977 |
7957ae0f3ce32ca4209386d35663d72ef290a7bd | 35,347 | py | Python | rllib/utils/test_utils.py | yuanchi2807/ray | cf512254bb4bcd71ff1818dff5c868ab10c5f620 | [
"Apache-2.0"
] | null | null | null | rllib/utils/test_utils.py | yuanchi2807/ray | cf512254bb4bcd71ff1818dff5c868ab10c5f620 | [
"Apache-2.0"
] | null | null | null | rllib/utils/test_utils.py | yuanchi2807/ray | cf512254bb4bcd71ff1818dff5c868ab10c5f620 | [
"Apache-2.0"
] | 1 | 2022-03-27T09:01:59.000Z | 2022-03-27T09:01:59.000Z | from collections import Counter
import copy
from gym.spaces import Box
import logging
import numpy as np
import random
import re
import time
import tree # pip install dm_tree
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import yaml
import ray
from ray.rllib.utils.framework import try_import_jax, try_import_tf, try_import_torch
from ray.rllib.utils.typing import PartialTrainerConfigDict
from ray.tune import CLIReporter, run_experiments
jax, _ = try_import_jax()
tf1, tf, tfv = try_import_tf()
if tf1:
eager_mode = None
try:
from tensorflow.python.eager.context import eager_mode
except (ImportError, ModuleNotFoundError):
pass
torch, _ = try_import_torch()
logger = logging.getLogger(__name__)
def framework_iterator(
    config: Optional[PartialTrainerConfigDict] = None,
    frameworks: Sequence[str] = ("tf2", "tf", "tfe", "torch"),
    session: bool = False,
    with_eager_tracing: bool = False,
    time_iterations: Optional[dict] = None,
) -> Union[str, Tuple[str, Optional["tf1.Session"]]]:
    """A generator that allows for looping through n frameworks for testing.

    Provides the correct config entries ("framework") as well
    as the correct eager/non-eager contexts for tfe/tf.

    Args:
        config: An optional config dict to alter in place depending on the
            iteration.
        frameworks: A list/tuple of the frameworks to be tested.
            Allowed are: "tf2", "tf", "tfe", "torch", and None.
        session: If True and only in the tf-case: Enter a tf.Session()
            and yield that as second return value (otherwise yield (fw, None)).
            Also sets a seed (42) on the session to make the test
            deterministic.
        with_eager_tracing: Include `eager_tracing=True` in the returned
            configs, when framework=[tfe|tf2].
        time_iterations: If provided, will write to the given dict (by
            framework key) the times in seconds that each (framework's)
            iteration takes.

    Yields:
        If `session` is False: The current framework [tf2|tf|tfe|torch] used.
        If `session` is True: A tuple consisting of the current framework
        string and the tf1.Session (if fw="tf", otherwise None).
    """
    config = config or {}
    frameworks = [frameworks] if isinstance(frameworks, str) else list(frameworks)
    # Both tf2 and tfe present -> remove "tfe" or "tf2" depending on version.
    if "tf2" in frameworks and "tfe" in frameworks:
        frameworks.remove("tfe" if tfv == 2 else "tf2")
    for fw in frameworks:
        # Skip non-installed frameworks.
        if fw == "torch" and not torch:
            logger.warning("framework_iterator skipping torch (not installed)!")
            continue
        if fw != "torch" and not tf:
            logger.warning(
                "framework_iterator skipping {} (tf not installed)!".format(fw)
            )
            continue
        elif fw == "tfe" and not eager_mode:
            logger.warning(
                "framework_iterator skipping tf-eager (could not "
                "import `eager_mode` from tensorflow.python)!"
            )
            continue
        elif fw == "tf2" and tfv != 2:
            logger.warning("framework_iterator skipping tf2.x (tf version is < 2.0)!")
            continue
        elif fw == "jax" and not jax:
            logger.warning("framework_iterator skipping JAX (not installed)!")
            continue
        assert fw in ["tf2", "tf", "tfe", "torch", "jax", None]

        # Do we need a test session?
        sess = None
        if fw == "tf" and session is True:
            sess = tf1.Session()
            sess.__enter__()
            tf1.set_random_seed(42)

        config["framework"] = fw

        eager_ctx = None
        # Enable eager mode for tf2 and tfe.
        if fw in ["tf2", "tfe"]:
            eager_ctx = eager_mode()
            eager_ctx.__enter__()
            assert tf1.executing_eagerly()
        # Make sure, eager mode is off.
        elif fw == "tf":
            assert not tf1.executing_eagerly()

        # Additionally loop through eager_tracing=True + False, if necessary.
        if fw in ["tf2", "tfe"] and with_eager_tracing:
            for tracing in [True, False]:
                config["eager_tracing"] = tracing
                print(f"framework={fw} (eager-tracing={tracing})")
                time_started = time.time()
                yield fw if session is False else (fw, sess)
                if time_iterations is not None:
                    time_total = time.time() - time_started
                    time_iterations[fw + ("+tracing" if tracing else "")] = time_total
                    print(f".. took {time_total}sec")
            config["eager_tracing"] = False
        # Yield current framework + tf-session (if necessary).
        else:
            print(f"framework={fw}")
            time_started = time.time()
            yield fw if session is False else (fw, sess)
            if time_iterations is not None:
                time_total = time.time() - time_started
                # Bugfix: `tracing` is only bound inside the eager-tracing
                # branch above; referencing it here raised a NameError (or
                # silently reused a stale value from a previous framework,
                # producing a wrong key such as "torch+tracing"). In this
                # branch the plain framework string is the correct key.
                time_iterations[fw] = time_total
                print(f".. took {time_total}sec")

        # Exit any context we may have entered.
        if eager_ctx:
            eager_ctx.__exit__(None, None, None)
        elif sess:
            sess.__exit__(None, None, None)
def check(x, y, decimals=5, atol=None, rtol=None, false=False):
    """
    Checks two structures (dict, tuple, list,
    np.array, float, int, etc..) for (almost) numeric identity.
    All numbers in the two structures have to match up to `decimal` digits
    after the floating point. Uses assertions.

    Recurses through nested dicts/tuples/lists, converts tf/torch tensors
    to numpy along the way, and raises AssertionError on mismatch (or on
    match, if `false=True`).

    Args:
        x (any): The value to be compared (to the expectation: `y`). This
            may be a Tensor.
        y (any): The expected value to be compared to `x`. This must not
            be a tf-Tensor, but may be a tfe/torch-Tensor.
        decimals (int): The number of digits after the floating point up to
            which all numeric values have to match.
        atol (float): Absolute tolerance of the difference between x and y
            (overrides `decimals` if given).
        rtol (float): Relative tolerance of the difference between x and y
            (overrides `decimals` if given).
        false (bool): Whether to check that x and y are NOT the same.

    Raises:
        AssertionError: If the check (identity, or non-identity when
            `false=True`) fails anywhere in the structure.
    """
    # A dict type.
    if isinstance(x, dict):
        assert isinstance(y, dict), "ERROR: If x is dict, y needs to be a dict as well!"
        # Track y's keys so we can verify y has no extra keys afterwards.
        y_keys = set(x.keys())
        for key, value in x.items():
            assert key in y, "ERROR: y does not have x's key='{}'! y={}".format(key, y)
            check(value, y[key], decimals=decimals, atol=atol, rtol=rtol, false=false)
            y_keys.remove(key)
        assert not y_keys, "ERROR: y contains keys ({}) that are not in x! y={}".format(
            list(y_keys), y
        )
    # A tuple type.
    elif isinstance(x, (tuple, list)):
        assert isinstance(
            y, (tuple, list)
        ), "ERROR: If x is tuple, y needs to be a tuple as well!"
        assert len(y) == len(
            x
        ), "ERROR: y does not have the same length as x ({} vs {})!".format(
            len(y), len(x)
        )
        for i, value in enumerate(x):
            check(value, y[i], decimals=decimals, atol=atol, rtol=rtol, false=false)
    # Boolean comparison.
    elif isinstance(x, (np.bool_, bool)):
        if false is True:
            assert bool(x) is not bool(y), "ERROR: x ({}) is y ({})!".format(x, y)
        else:
            assert bool(x) is bool(y), "ERROR: x ({}) is not y ({})!".format(x, y)
    # Nones or primitives.
    elif x is None or y is None or isinstance(x, (str, int)):
        if false is True:
            assert x != y, "ERROR: x ({}) is the same as y ({})!".format(x, y)
        else:
            assert x == y, "ERROR: x ({}) is not the same as y ({})!".format(x, y)
    # String/byte comparisons.
    elif hasattr(x, "dtype") and (x.dtype == object or str(x.dtype).startswith("<U")):
        try:
            np.testing.assert_array_equal(x, y)
            if false is True:
                assert False, "ERROR: x ({}) is the same as y ({})!".format(x, y)
        except AssertionError as e:
            if false is False:
                raise e
    # Everything else (assume numeric or tf/torch.Tensor).
    else:
        if tf1 is not None:
            # y should never be a Tensor (y=expected value).
            if isinstance(y, (tf1.Tensor, tf1.Variable)):
                # In eager mode, numpyize tensors.
                if tf.executing_eagerly():
                    y = y.numpy()
                else:
                    raise ValueError(
                        "`y` (expected value) must not be a Tensor. "
                        "Use numpy.ndarray instead"
                    )
            if isinstance(x, (tf1.Tensor, tf1.Variable)):
                # In eager mode, numpyize tensors.
                if tf1.executing_eagerly():
                    x = x.numpy()
                # Otherwise, use a new tf-session.
                else:
                    with tf1.Session() as sess:
                        x = sess.run(x)
                        # Re-enter with the evaluated numpy value; the
                        # recursive call does the actual comparison.
                        return check(
                            x, y, decimals=decimals, atol=atol, rtol=rtol, false=false
                        )
        if torch is not None:
            if isinstance(x, torch.Tensor):
                x = x.detach().cpu().numpy()
            if isinstance(y, torch.Tensor):
                y = y.detach().cpu().numpy()

        # Using decimals.
        if atol is None and rtol is None:
            # Assert equality of both values.
            try:
                np.testing.assert_almost_equal(x, y, decimal=decimals)
            # Both values are not equal.
            except AssertionError as e:
                # Raise error in normal case.
                if false is False:
                    raise e
            # Both values are equal.
            else:
                # If false is set -> raise error (not expected to be equal).
                if false is True:
                    assert False, "ERROR: x ({}) is the same as y ({})!".format(x, y)

        # Using atol/rtol.
        else:
            # Provide defaults for either one of atol/rtol.
            if atol is None:
                atol = 0
            if rtol is None:
                rtol = 1e-7
            try:
                np.testing.assert_allclose(x, y, atol=atol, rtol=rtol)
            except AssertionError as e:
                if false is False:
                    raise e
            else:
                if false is True:
                    assert False, "ERROR: x ({}) is the same as y ({})!".format(x, y)
def check_compute_single_action(
    trainer, include_state=False, include_prev_action_reward=False
):
    """Tests different combinations of args for trainer.compute_single_action.

    Exercises both the Trainer- and the Policy-level action computation API
    across all combinations of explore/full_fetch/unsquash/clip flags and
    validates returned actions against the action space.

    Args:
        trainer: The Trainer object to test.
        include_state: Whether to include the initial state of the Policy's
            Model in the `compute_single_action` call.
        include_prev_action_reward: Whether to include the prev-action and
            -reward in the `compute_single_action` call.

    Raises:
        ValueError: If anything unexpected happens.
    """
    # Have to import this here to avoid circular dependency.
    from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch

    # Some Trainers may not abide to the standard API.
    pid = DEFAULT_POLICY_ID
    try:
        # Multi-agent: Pick any learnable policy (or DEFAULT_POLICY if it's the only
        # one).
        pid = next(iter(trainer.workers.local_worker().get_policies_to_train()))
        pol = trainer.get_policy(pid)
    except AttributeError:
        pol = trainer.policy
    # Get the policy's model.
    model = pol.model

    action_space = pol.action_space

    # Inner helper: run a single combination of (API object, method, flags)
    # and validate the resulting action.
    def _test(
        what, method_to_test, obs_space, full_fetch, explore, timestep, unsquash, clip
    ):
        call_kwargs = {}
        if what is trainer:
            call_kwargs["full_fetch"] = full_fetch
            call_kwargs["policy_id"] = pid

        obs = obs_space.sample()
        if isinstance(obs_space, Box):
            obs = np.clip(obs, -1.0, 1.0)
        state_in = None
        if include_state:
            state_in = model.get_initial_state()
            if not state_in:
                # Fall back to sampling states from the model's view
                # requirements if the model reports no initial state.
                state_in = []
                i = 0
                while f"state_in_{i}" in model.view_requirements:
                    state_in.append(
                        model.view_requirements[f"state_in_{i}"].space.sample()
                    )
                    i += 1
        action_in = action_space.sample() if include_prev_action_reward else None
        reward_in = 1.0 if include_prev_action_reward else None

        if method_to_test == "input_dict":
            assert what is pol

            input_dict = {SampleBatch.OBS: obs}
            if include_prev_action_reward:
                input_dict[SampleBatch.PREV_ACTIONS] = action_in
                input_dict[SampleBatch.PREV_REWARDS] = reward_in
            if state_in:
                for i, s in enumerate(state_in):
                    input_dict[f"state_in_{i}"] = s
            input_dict_batched = SampleBatch(
                tree.map_structure(lambda s: np.expand_dims(s, 0), input_dict)
            )
            action = pol.compute_actions_from_input_dict(
                input_dict=input_dict_batched,
                explore=explore,
                timestep=timestep,
                **call_kwargs,
            )
            # Unbatch everything to be able to compare against single
            # action below.
            # ARS and ES return action batches as lists.
            if isinstance(action[0], list):
                action = (np.array(action[0]), action[1], action[2])
            action = tree.map_structure(lambda s: s[0], action)

            try:
                action2 = pol.compute_single_action(
                    input_dict=input_dict,
                    explore=explore,
                    timestep=timestep,
                    **call_kwargs,
                )
                # Make sure these are the same, unless we have exploration
                # switched on (or noisy layers).
                if not explore and not pol.config.get("noisy"):
                    check(action, action2)
            except TypeError:
                # Some policies do not support the input_dict API variant.
                pass
        else:
            action = what.compute_single_action(
                obs,
                state_in,
                prev_action=action_in,
                prev_reward=reward_in,
                explore=explore,
                timestep=timestep,
                unsquash_action=unsquash,
                clip_action=clip,
                **call_kwargs,
            )

        state_out = None
        if state_in or full_fetch or what is pol:
            action, state_out, _ = action
        if state_out:
            for si, so in zip(state_in, state_out):
                # State shapes must be preserved across the forward pass.
                check(list(si.shape), so.shape)

        if unsquash is None:
            unsquash = what.config["normalize_actions"]
        if clip is None:
            clip = what.config["clip_actions"]

        # Test whether unsquash/clipping works on the Trainer's
        # compute_single_action method: Both flags should force the action
        # to be within the space's bounds.
        if method_to_test == "single" and what == trainer:
            if not action_space.contains(action) and (
                clip or unsquash or not isinstance(action_space, Box)
            ):
                raise ValueError(
                    f"Returned action ({action}) of trainer/policy {what} "
                    f"not in Env's action_space {action_space}"
                )
            # We are operating in normalized space: Expect only smaller action
            # values.
            if (
                isinstance(action_space, Box)
                and not unsquash
                and what.config.get("normalize_actions")
                and np.any(np.abs(action) > 15.0)
            ):
                raise ValueError(
                    f"Returned action ({action}) of trainer/policy {what} "
                    "should be in normalized space, but seems too large/small "
                    "for that!"
                )

    # Loop through: Policy vs Trainer; Different API methods to calculate
    # actions; unsquash option; clip option; full fetch or not.
    for what in [pol, trainer]:
        if what is trainer:
            # Get the obs-space from Workers.env (not Policy) due to possible
            # pre-processor up front.
            worker_set = getattr(trainer, "workers", None)
            assert worker_set
            if isinstance(worker_set, list):
                obs_space = trainer.get_policy(pid).observation_space
            else:
                obs_space = worker_set.local_worker().for_policy(
                    lambda p: p.observation_space, policy_id=pid
                )
            obs_space = getattr(obs_space, "original_space", obs_space)
        else:
            obs_space = pol.observation_space

        for method_to_test in ["single"] + (["input_dict"] if what is pol else []):
            for explore in [True, False]:
                for full_fetch in [False, True] if what is trainer else [False]:
                    timestep = random.randint(0, 100000)
                    for unsquash in [True, False, None]:
                        # clip is irrelevant when unsquash is active.
                        for clip in [False] if unsquash else [True, False, None]:
                            _test(
                                what,
                                method_to_test,
                                obs_space,
                                full_fetch,
                                explore,
                                timestep,
                                unsquash,
                                clip,
                            )
def check_learning_achieved(tune_results, min_reward, evaluation=False):
    """Throws an error if `min_reward` is not reached within tune_results.

    Checks the last iteration found in tune_results for its
    "episode_reward_mean" value and compares it to `min_reward`.

    Args:
        tune_results: The tune.run returned results object.
        min_reward (float): The min reward that must be reached.
        evaluation (bool): If True, read each trial's reward from its
            evaluation results instead of its training results.

    Raises:
        ValueError: If `min_reward` not reached.
    """

    def _mean_reward(trial):
        # Pull the mean episode reward out of a trial's last result.
        last = trial.last_result
        if evaluation:
            return last["evaluation"]["episode_reward_mean"]
        return last["episode_reward_mean"]

    # It suffices that a single trial achieved some learning, so compare the
    # best trial's mean reward against the threshold.
    best_avg_reward = max(_mean_reward(trial) for trial in tune_results.trials)
    if best_avg_reward < min_reward:
        raise ValueError("`stop-reward` of {} not reached!".format(min_reward))
    print("ok")
def check_train_results(train_results):
    """Checks proper structure of a Trainer.train() returned dict.

    Args:
        train_results: The train results dict to check.

    Returns:
        The unmodified `train_results` dict (pass-through for chaining).

    Raises:
        AssertionError: If `train_results` doesn't have the proper structure or
            data in it.
    """
    # Import these here to avoid circular dependencies.
    from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
    from ray.rllib.utils.metrics.learner_info import LEARNER_INFO, LEARNER_STATS_KEY
    from ray.rllib.utils.pre_checks.multi_agent import check_multi_agent

    # Assert that some keys are where we would expect them.
    for key in [
        "agent_timesteps_total",
        "config",
        "custom_metrics",
        "episode_len_mean",
        "episode_reward_max",
        "episode_reward_mean",
        "episode_reward_min",
        "episodes_total",
        "hist_stats",
        "info",
        "iterations_since_restore",
        "num_healthy_workers",
        "perf",
        "policy_reward_max",
        "policy_reward_mean",
        "policy_reward_min",
        "sampler_perf",
        "time_since_restore",
        "time_this_iter_s",
        "timesteps_since_restore",
        "timesteps_total",
        "timers",
        "time_total_s",
        "training_iteration",
    ]:
        assert (
            key in train_results
        ), f"'{key}' not found in `train_results` ({train_results})!"

    _, is_multi_agent = check_multi_agent(train_results["config"])

    # Check in particular the "info" dict.
    info = train_results["info"]
    assert LEARNER_INFO in info, f"'learner' not in train_results['infos'] ({info})!"
    assert (
        "num_steps_trained" in info or "num_env_steps_trained" in info
    ), f"'num_(env_)?steps_trained' not in train_results['infos'] ({info})!"

    learner_info = info[LEARNER_INFO]

    # Make sure we have a default_policy key if we are not in a
    # multi-agent setup.
    if not is_multi_agent:
        # APEX algos sometimes have an empty learner info dict (no metrics
        # collected yet).
        assert len(learner_info) == 0 or DEFAULT_POLICY_ID in learner_info, (
            f"'{DEFAULT_POLICY_ID}' not found in "
            f"train_results['infos']['learner'] ({learner_info})!"
        )

    for pid, policy_stats in learner_info.items():
        if pid == "batch_count":
            continue
        # Expect td-errors to be per batch-item.
        if "td_error" in policy_stats:
            configured_b = train_results["config"]["train_batch_size"]
            actual_b = policy_stats["td_error"].shape[0]
            # R2D2 case: batch is made up of sequences, so the actual batch
            # dim is train_batch_size / (max_seq_len + burn_in).
            if (configured_b - actual_b) / actual_b > 0.1:
                assert (
                    configured_b
                    / (
                        train_results["config"]["model"]["max_seq_len"]
                        + train_results["config"]["burn_in"]
                    )
                    == actual_b
                )

        # Make sure each policy has the LEARNER_STATS_KEY under it.
        assert LEARNER_STATS_KEY in policy_stats
        learner_stats = policy_stats[LEARNER_STATS_KEY]
        for key, value in learner_stats.items():
            # Min- and max-stats should be single values.
            if key.startswith("min_") or key.startswith("max_"):
                assert np.isscalar(value), f"'key' value not a scalar ({value})!"

    return train_results
def run_learning_tests_from_yaml(
    yaml_files: List[str],
    *,
    max_num_repeats: int = 2,
    smoke_test: bool = False,
) -> Dict[str, Any]:
    """Runs the given experiments in yaml_files and returns results dict.

    Each yaml experiment is expanded per framework (tf/torch by default),
    run via tune, and re-run up to `max_num_repeats` times on failure.

    Args:
        yaml_files (List[str]): List of yaml file names.
        max_num_repeats (int): How many times should we repeat a failed
            experiment?
        smoke_test (bool): Whether this is just a smoke-test. If True,
            set time_total_s to 5min and don't early out due to rewards
            or timesteps reached.

    Returns:
        A dict with total time taken, trial states, per-experiment stats,
        and the lists of passed/failed experiment keys.
    """
    print("Will run the following yaml files:")
    for yaml_file in yaml_files:
        print("->", yaml_file)

    # All trials we'll ever run in this test script.
    all_trials = []
    # The experiments (by name) we'll run up to `max_num_repeats` times.
    experiments = {}
    # The results per experiment.
    checks = {}
    # Metrics per experiment.
    stats = {}

    start_time = time.monotonic()

    def should_check_eval(experiment):
        # If we have evaluation workers, use their rewards.
        # This is useful for offline learning tests, where
        # we evaluate against an actual environment.
        return experiment["config"].get("evaluation_interval", None) is not None

    # Loop through all collected files and gather experiments.
    # Augment all by `torch` framework.
    for yaml_file in yaml_files:
        tf_experiments = yaml.safe_load(open(yaml_file).read())

        # Add torch version of all experiments to the list.
        for k, e in tf_experiments.items():
            # If framework explicitly given, only test for that framework.
            # Some algos do not have both versions available.
            if "frameworks" in e:
                frameworks = e["frameworks"]
            else:
                # By default we don't run tf2, because tf2's multi-gpu support
                # isn't complete yet.
                frameworks = ["tf", "torch"]
            # Pop frameworks key to not confuse Tune.
            e.pop("frameworks", None)

            e["stop"] = e["stop"] if "stop" in e else {}
            e["pass_criteria"] = e["pass_criteria"] if "pass_criteria" in e else {}

            # For smoke-tests, we just run for n min.
            if smoke_test:
                # 0sec for each(!) experiment/trial.
                # This is such that if there are many experiments/trials
                # in a test (e.g. rllib_learning_test), each one can at least
                # create its trainer and run a first iteration.
                e["stop"]["time_total_s"] = 0
            else:
                check_eval = should_check_eval(e)
                episode_reward_key = (
                    "episode_reward_mean"
                    if not check_eval
                    else "evaluation/episode_reward_mean"
                )
                # We also stop early, once we reach the desired reward.
                min_reward = e.get("pass_criteria", {}).get(episode_reward_key)
                if min_reward is not None:
                    e["stop"][episode_reward_key] = min_reward

            # Generate `checks` dict for all experiments
            # (tf, tf2 and/or torch).
            for framework in frameworks:
                k_ = k + "-" + framework
                ec = copy.deepcopy(e)
                ec["config"]["framework"] = framework
                if framework == "tf2":
                    ec["config"]["eager_tracing"] = True

                checks[k_] = {
                    "min_reward": ec["pass_criteria"].get("episode_reward_mean", 0.0),
                    "min_throughput": ec["pass_criteria"].get("timesteps_total", 0.0)
                    / (ec["stop"].get("time_total_s", 1.0) or 1.0),
                    "time_total_s": ec["stop"].get("time_total_s"),
                    "failures": 0,
                    "passed": False,
                }
                # This key would break tune.
                ec.pop("pass_criteria", None)

                # One experiment to run.
                experiments[k_] = ec

    # Print out the actual config.
    print("== Test config ==")
    print(yaml.dump(experiments))

    # Keep track of those experiments we still have to run.
    # If an experiment passes, we'll remove it from this dict.
    experiments_to_run = experiments.copy()

    try:
        ray.init(address="auto")
    except ConnectionError:
        # No running cluster found -> start a local one.
        ray.init()

    for i in range(max_num_repeats):
        # We are done.
        if len(experiments_to_run) == 0:
            print("All experiments finished.")
            break

        print(f"Starting learning test iteration {i}...")

        # Run remaining experiments.
        trials = run_experiments(
            experiments_to_run,
            resume=False,
            verbose=2,
            progress_reporter=CLIReporter(
                metric_columns={
                    "training_iteration": "iter",
                    "time_total_s": "time_total_s",
                    "timesteps_total": "ts",
                    "episodes_this_iter": "train_episodes",
                    "episode_reward_mean": "reward_mean",
                    "evaluation/episode_reward_mean": "eval_reward_mean",
                },
                sort_by_metric=True,
                max_report_frequency=30,
            ),
        )

        all_trials.extend(trials)

        # Check each experiment for whether it passed.
        # Criteria is to a) reach reward AND b) to have reached the throughput
        # defined by `timesteps_total` / `time_total_s`.
        for experiment in experiments_to_run.copy():
            print(f"Analyzing experiment {experiment} ...")
            # Collect all trials within this experiment (some experiments may
            # have num_samples or grid_searches defined).
            trials_for_experiment = []
            for t in trials:
                trial_exp = re.sub(".+/([^/]+)$", "\\1", t.local_dir)
                if trial_exp == experiment:
                    trials_for_experiment.append(t)
            print(f" ... Trials: {trials_for_experiment}.")

            check_eval = should_check_eval(experiments[experiment])

            # Error: Increase failure count and repeat.
            if any(t.status == "ERROR" for t in trials_for_experiment):
                print(" ... ERROR.")
                checks[experiment]["failures"] += 1
            # Smoke-tests always succeed.
            elif smoke_test:
                print(" ... SMOKE TEST (mark ok).")
                checks[experiment]["passed"] = True
                del experiments_to_run[experiment]
            # Experiment finished: Check reward achieved and timesteps done
            # (throughput).
            else:
                if check_eval:
                    episode_reward_mean = np.mean(
                        [
                            t.last_result["evaluation"]["episode_reward_mean"]
                            for t in trials_for_experiment
                        ]
                    )
                else:
                    episode_reward_mean = np.mean(
                        [
                            t.last_result["episode_reward_mean"]
                            for t in trials_for_experiment
                        ]
                    )
                desired_reward = checks[experiment]["min_reward"]

                timesteps_total = np.mean(
                    [t.last_result["timesteps_total"] for t in trials_for_experiment]
                )
                total_time_s = np.mean(
                    [t.last_result["time_total_s"] for t in trials_for_experiment]
                )

                # TODO(jungong) : track trainer and env throughput separately.
                throughput = timesteps_total / (total_time_s or 1.0)
                # TODO(jungong) : enable throughput check again after
                #   TD3_HalfCheetahBulletEnv is fixed and verified.
                # desired_throughput = checks[experiment]["min_throughput"]
                desired_throughput = None

                # Record performance.
                stats[experiment] = {
                    "episode_reward_mean": float(episode_reward_mean),
                    "throughput": (
                        float(throughput) if throughput is not None else 0.0
                    ),
                }

                print(
                    f" ... Desired reward={desired_reward}; "
                    f"desired throughput={desired_throughput}"
                )

                # We failed to reach desired reward or the desired throughput.
                if (desired_reward and episode_reward_mean < desired_reward) or (
                    desired_throughput and throughput < desired_throughput
                ):
                    print(
                        " ... Not successful: Actual "
                        f"reward={episode_reward_mean}; "
                        f"actual throughput={throughput}"
                    )
                    checks[experiment]["failures"] += 1
                # We succeeded!
                else:
                    print(" ... Successful: (mark ok).")
                    checks[experiment]["passed"] = True
                    del experiments_to_run[experiment]

    ray.shutdown()

    time_taken = time.monotonic() - start_time

    # Create results dict and write it to disk.
    result = {
        "time_taken": float(time_taken),
        "trial_states": dict(Counter([trial.status for trial in all_trials])),
        "last_update": float(time.time()),
        "stats": stats,
        "passed": [k for k, exp in checks.items() if exp["passed"]],
        "failures": {
            k: exp["failures"] for k, exp in checks.items() if exp["failures"] > 0
        },
    }

    return result
def check_same_batch(batch1, batch2) -> None:
    """Check if both batches are (almost) identical.

    For MultiAgentBatches, the step count and individual policy's
    SampleBatches are checked for identity. For SampleBatches, identity is
    checked as the almost numerical key-value-pair identity between batches
    with ray.rllib.utils.test_utils.check(). unroll_id is compared only if
    both batches have an unroll_id.

    Args:
        batch1: Batch to compare against batch2
        batch2: Batch to compare against batch1

    Raises:
        AssertionError: If the two batches differ in type, step count,
            keys, or (almost-)numeric content.
        ValueError: If the batches are neither SampleBatch nor
            MultiAgentBatch.
    """
    # Avoids circular import
    from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch

    assert type(batch1) == type(
        batch2
    ), "Input batches are of different types {} and {}".format(
        str(type(batch1)), str(type(batch2))
    )

    def check_sample_batches(_batch1, _batch2, _policy_id=None):
        unroll_id_1 = _batch1.get("unroll_id", None)
        unroll_id_2 = _batch2.get("unroll_id", None)
        # unroll IDs only have to fit if both batches have them
        if unroll_id_1 is not None and unroll_id_2 is not None:
            assert unroll_id_1 == unroll_id_2

        batch1_keys = set()
        for k, v in _batch1.items():
            # unroll_id is compared above already
            if k == "unroll_id":
                continue
            check(v, _batch2[k])
            batch1_keys.add(k)

        batch2_keys = set(_batch2.keys())
        # unroll_id is compared above already
        batch2_keys.discard("unroll_id")
        # Symmetric difference: keys present in exactly one of the batches.
        _difference = batch1_keys.symmetric_difference(batch2_keys)

        # Cases where one batch has info and the other has not
        if _policy_id:
            assert not _difference, (
                "SampleBatches for policy with ID {} "
                "don't share information on the "
                "following information: \n{}"
                "".format(_policy_id, _difference)
            )
        else:
            assert not _difference, (
                "SampleBatches don't share information "
                "on the following information: \n{}"
                "".format(_difference)
            )

    if type(batch1) == SampleBatch:
        check_sample_batches(batch1, batch2)
    elif type(batch1) == MultiAgentBatch:
        assert batch1.count == batch2.count
        batch1_ids = set()
        for policy_id, policy_batch in batch1.policy_batches.items():
            check_sample_batches(
                policy_batch, batch2.policy_batches[policy_id], policy_id
            )
            batch1_ids.add(policy_id)

        # Case where one ma batch has info on a policy the other has not
        batch2_ids = set(batch2.policy_batches.keys())
        difference = batch1_ids.symmetric_difference(batch2_ids)
        assert (
            not difference
        ), f"MultiAgentBatches don't share the following information: \n{difference}."
    else:
        raise ValueError("Unsupported batch type " + str(type(batch1)))
| 39.230855 | 88 | 0.559595 |
7957aed551b60efd23626a81194086f01cc37555 | 1,186 | py | Python | Lib/site-packages/stripe/api_resources/subscription_schedule.py | 2anirban/LSTM-Stock-Predictor | bcd3709ff88c8d1286df93163b30164c1d225652 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | Lib/site-packages/stripe/api_resources/subscription_schedule.py | 2anirban/LSTM-Stock-Predictor | bcd3709ff88c8d1286df93163b30164c1d225652 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | Lib/site-packages/stripe/api_resources/subscription_schedule.py | 2anirban/LSTM-Stock-Predictor | bcd3709ff88c8d1286df93163b30164c1d225652 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
from stripe import util
from stripe.api_resources.abstract import CreateableAPIResource
from stripe.api_resources.abstract import UpdateableAPIResource
from stripe.api_resources.abstract import ListableAPIResource
from stripe.api_resources.abstract import nested_resource_class_methods
@nested_resource_class_methods("revision", operations=["retrieve", "list"])
class SubscriptionSchedule(
    CreateableAPIResource, UpdateableAPIResource, ListableAPIResource
):
    """Stripe Subscription Schedule resource with cancel/release helpers."""

    OBJECT_NAME = "subscription_schedule"

    def _post_action(self, action, idempotency_key, params):
        # POST to `<instance_url>/<action>` and refresh this object from
        # the API response, returning self for chaining.
        url = self.instance_url() + "/" + action
        headers = util.populate_headers(idempotency_key)
        self.refresh_from(self.request("post", url, params, headers))
        return self

    def cancel(self, idempotency_key=None, **params):
        return self._post_action("cancel", idempotency_key, params)

    def release(self, idempotency_key=None, **params):
        return self._post_action("release", idempotency_key, params)

    def revisions(self, **params):
        return self.request("get", self.instance_url() + "/revisions", params)
| 39.533333 | 78 | 0.751265 |
7957b00c66b08f7e29826174346e6980c1f73317 | 47,735 | py | Python | tests/test_elastica_numba/test_governing_equations_nb.py | engiecat/PyElastica | 0ea100e23d5908bf7ebdae4261276539e02a53a6 | [
"MIT"
] | null | null | null | tests/test_elastica_numba/test_governing_equations_nb.py | engiecat/PyElastica | 0ea100e23d5908bf7ebdae4261276539e02a53a6 | [
"MIT"
] | null | null | null | tests/test_elastica_numba/test_governing_equations_nb.py | engiecat/PyElastica | 0ea100e23d5908bf7ebdae4261276539e02a53a6 | [
"MIT"
] | null | null | null | __doc__ = """Test Cosserat rod governing equations for Numba implementation"""
# System imports
import numpy as np
from numpy.testing import assert_allclose
from elastica.utils import Tolerance, MaxDimension
from elastica._elastica_numba._linalg import _batch_matvec
from elastica._elastica_numba._rod._cosserat_rod import (
CosseratRod,
_compute_geometry_from_state,
_compute_all_dilatations,
_compute_dilatation_rate,
_compute_shear_stretch_strains,
_compute_internal_shear_stretch_stresses_from_model,
_compute_internal_bending_twist_stresses_from_model,
_compute_damping_forces,
_compute_internal_forces,
_compute_bending_twist_strains,
_compute_damping_torques,
_compute_internal_torques,
_update_accelerations,
_get_z_vector,
)
import pytest
class BaseClass:
    """Parameter container describing the reference straight rod used as
    ground truth throughout the governing-equation tests."""

    def __init__(self, n_elem, nu=0.0):
        super().__init__()
        # Discretization and geometry of the straight rod.
        self.n_elem = n_elem
        self.start = np.array([0.0, 0.0, 0.0])
        self.direction = np.array([0.0, 0.0, 1.0])
        self.normal = np.array([1.0, 0.0, 0.0])
        self.base_length = 1.0
        self.base_radius = 0.25
        # Material properties.
        self.density = 1
        self.nu = nu
        self.E = 1
        self.poisson_ratio = 0.5
        # NOTE(review): the physical shear modulus is E / (2 * (1 + nu));
        # this uses E / (1 + nu) — presumably intentional for these tests,
        # but verify against the rod constructor's expectations.
        self.shear_modulus = self.E / (self.poisson_ratio + 1.0)
def constructor(n_elem, nu=0.0):
    """Build the reference parameter set and a matching straight Cosserat
    rod; returns the (parameters, rod) pair used by the tests."""
    params = BaseClass(n_elem, nu)
    rod = CosseratRod.straight_rod(
        params.n_elem,
        params.start,
        params.direction,
        params.normal,
        params.base_length,
        params.base_radius,
        params.density,
        params.nu,
        params.E,
        shear_modulus=params.shear_modulus,
    )
    return params, rod
def compute_geometry_analytically(n_elem):
    """Analytical node positions, rest lengths, tangents and radii of the
    reference straight rod; serves as ground truth for the test cases."""
    ref = BaseClass(n_elem)
    endpoint = ref.start + ref.direction * ref.base_length

    # Nodes are uniformly spaced between start and end, per dimension.
    dim = MaxDimension.value()
    position = np.zeros((dim, n_elem + 1))
    for axis in range(dim):
        position[axis, ...] = np.linspace(ref.start[axis], endpoint[axis], num=n_elem + 1)

    # Every element of the straight rod shares the same rest length,
    # tangent direction and radius.
    rest_lengths = np.repeat(ref.base_length / n_elem, n_elem)
    tangents = np.repeat(ref.direction[:, np.newaxis], n_elem, axis=1)
    radius = np.repeat(ref.base_radius, n_elem)

    return position, rest_lengths, tangents, radius
def compute_all_dilatations_analytically(n_elem, dilatation):
    """Analytical element/voronoi dilatations and lengths of a rod that is
    uniformly stretched (or compressed) by `dilatation`."""
    ref = BaseClass(n_elem)
    position, rest_lengths, tangents, radius = compute_geometry_analytically(n_elem)

    # Only n-1 interior (voronoi) regions exist for n elements.
    rest_voronoi_lengths = np.repeat(ref.base_length / n_elem, n_elem - 1)
    dilatation_collection = np.repeat(dilatation, n_elem, axis=0)

    # A uniform stretch scales every element and voronoi length alike.
    lengths = rest_lengths * dilatation
    voronoi_lengths = rest_voronoi_lengths * dilatation
    voronoi_dilatation = voronoi_lengths / rest_voronoi_lengths

    return (dilatation_collection, voronoi_dilatation, lengths, rest_voronoi_lengths)
def compute_dilatation_rate_analytically(n_elem, dilatation):
    """Analytical dilatation rate obtained by moving each node from its rest
    position to the uniformly dilated position in unit time (dt = 1)."""
    position, rest_lengths, tangents, radius = compute_geometry_analytically(n_elem)

    # Node velocity is the displacement per unit time between the rest
    # configuration and the dilated one (v = (x_new - x_old) / dt, dt = 1.0).
    # TODO: Find a better way to set velocity, which we use for dilatation rate
    rest_position = position.copy()
    position *= dilatation
    velocity = position - rest_position

    # v[i+1] - v[i] along the rod; for this straight rod only the axial (d3)
    # component is nonzero, so it alone determines the dilatation rate.
    velocity_difference = velocity[..., 1:] - velocity[..., :-1]
    dilatation_rate = velocity_difference[-1] / rest_lengths

    return dilatation_rate, velocity
def compute_strain_analytically(n_elem, dilatation):
    """Analytically compute the axial strain vector of a uniformly
    dilated straight rod (strain scalar times the element tangents)."""
    _, rest_lengths, tangents, _ = compute_geometry_analytically(n_elem)
    _, _, lengths, _ = compute_all_dilatations_analytically(n_elem, dilatation)
    # Relative elongation per element; multiplying by the tangents turns the
    # scalar strain into a vector quantity.
    strain = (lengths - rest_lengths) / rest_lengths * tangents
    return strain
def compute_stress_analytically(n_elem, dilatation):
    """Analytically compute the internal "stress" of a uniformly dilated rod.

    Following the RSoS 2018 paper and cosserat_rod.py convention, this
    quantity actually carries units of force: shear_matrix * strain,
    which for pure stretch reduces to E * A * strain.
    """
    initial = BaseClass(n_elem)
    strain = compute_strain_analytically(n_elem, dilatation)
    # Cross-sectional area of the rod (pi * r^2), kept in the original
    # multiplication order to preserve floating-point results exactly.
    cross_section_area = initial.base_radius * initial.base_radius * np.pi
    stress = cross_section_area * initial.E * strain
    return stress
def compute_forces_analytically(n_elem, dilatation):
    """Analytically compute nodal internal forces of a uniformly dilated rod.

    Because every element is compressed by the same amount, internal forces
    on the interior nodes cancel; only the two end nodes carry a net force,
    equal in magnitude and opposite in direction.
    """
    internal_stress = compute_stress_analytically(n_elem, dilatation)
    internal_forces = np.zeros((MaxDimension.value(), n_elem + 1))
    end_force = internal_stress[..., 0] / dilatation
    internal_forces[..., 0] = end_force
    internal_forces[..., -1] = -1.0 * end_force
    return internal_forces
class TestingClass:
@pytest.mark.parametrize("n_elem", [2, 3, 5, 10, 20])
def test_case_compute_geomerty_from_state(self, n_elem, nu=0):
"""
This test case, tests compute_geometry_from_state
function by comparing with analytical solution.
:param n_elem:
:param nu:
:return:
"""
initial, test_rod = constructor(n_elem, nu)
position, rest_lengths, tangents, radius = compute_geometry_analytically(n_elem)
# Compute geometry from state
_compute_geometry_from_state(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
)
assert_allclose(test_rod.rest_lengths, rest_lengths, atol=Tolerance.atol())
assert_allclose(
test_rod.lengths, rest_lengths, atol=Tolerance.atol()
) # no dilatation
assert_allclose(test_rod.tangents, tangents, atol=Tolerance.atol())
assert_allclose(test_rod.radius, radius, atol=Tolerance.atol())
@pytest.mark.parametrize("n_elem", [2, 3, 5, 10, 20])
@pytest.mark.parametrize("dilatation", [0.1, 0.2, 0.3, 0.5, 1.0, 1.1])
def test_case_compute_all_dilatations(self, n_elem, dilatation):
"""
This test case, tests compute_all_dilatations
function by comparing with analytical solution.
:param n_elem:
:param dilatation:
:return:
"""
initial, test_rod = constructor(n_elem)
(
dilatation_collection,
voronoi_dilatation,
lengths,
rest_voronoi_lengths,
) = compute_all_dilatations_analytically(n_elem, dilatation)
test_rod.position_collection *= dilatation
# Compute dilatation using compute_all_dilatations
# Compute geometry again because node positions changed.
# But compute geometry will be done inside compute_all_dilatations.
_compute_all_dilatations(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.dilatation,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.voronoi_dilatation,
)
assert_allclose(test_rod.lengths, lengths, atol=Tolerance.atol())
assert_allclose(
test_rod.rest_voronoi_lengths, rest_voronoi_lengths, atol=Tolerance.atol()
)
assert_allclose(
test_rod.dilatation, dilatation_collection, atol=Tolerance.atol()
)
assert_allclose(
test_rod.voronoi_dilatation, voronoi_dilatation, atol=Tolerance.atol()
)
@pytest.mark.parametrize("n_elem", [2, 3, 5, 10, 20])
@pytest.mark.parametrize("dilatation", [0.1, 0.2, 0.3, 0.5, 1.0, 1.1])
def test_case_compute_dilatation_rate(self, n_elem, dilatation):
"""
This test case tests compute_dilatation_rate
function by comparing with analytical calculation.
This function depends on the compute_all_dilatations.
:param n_elem:
:param dilatation:
:return:
"""
initial, test_rod = constructor(n_elem)
dilatation_rate, velocity = compute_dilatation_rate_analytically(
n_elem, dilatation
)
# Set velocity vector in test_rod to the computed velocity vector above,
# since we need to initialize velocity for dilatation_rate
test_rod.velocity_collection = velocity
_compute_all_dilatations(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.dilatation,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.voronoi_dilatation,
)
_compute_dilatation_rate(
test_rod.position_collection,
test_rod.velocity_collection,
test_rod.lengths,
test_rod.rest_lengths,
test_rod.dilatation_rate,
)
assert_allclose(
test_rod.dilatation_rate, dilatation_rate, atol=Tolerance.atol()
)
@pytest.mark.parametrize("n_elem", [2, 3, 5, 10, 20])
@pytest.mark.parametrize("dilatation", [0.1, 0.2, 0.3, 0.5, 1.0, 1.1])
def test_case_compute_shear_stretch_strains(self, n_elem, dilatation):
"""
This test case initializes a straight rod. We modify node positions
and compress the rod numerically. By doing that we impose shear stress
in the rod and check, computation strains.
This test function tests
_compute_shear_stretch_strains
"""
initial, test_rod = constructor(n_elem)
test_rod.position_collection *= dilatation
strain = compute_strain_analytically(n_elem, dilatation)
_compute_shear_stretch_strains(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.dilatation,
test_rod.voronoi_dilatation,
test_rod.director_collection,
test_rod.sigma,
)
assert_allclose(test_rod.sigma, strain, atol=Tolerance.atol())
@pytest.mark.parametrize("n_elem", [2, 3, 5, 10, 20])
@pytest.mark.parametrize("dilatation", [0.1, 0.2, 0.3, 0.5, 1.0, 1.1])
def test_case_compute_internal_shear_stretch_stresses_from_model(
self, n_elem, dilatation
):
"""
This test case initializes a straight rod. We modify node positions
and compress the rod numerically. By doing that we impose shear stress
in the rod and check, computation stresses.
This test function tests
_compute_internal_shear_stretch_stresses_from_model
"""
initial, test_rod = constructor(n_elem)
test_rod.position_collection *= dilatation
internal_stress = compute_stress_analytically(n_elem, dilatation)
_compute_internal_shear_stretch_stresses_from_model(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.dilatation,
test_rod.voronoi_dilatation,
test_rod.director_collection,
test_rod.sigma,
test_rod.rest_sigma,
test_rod.shear_matrix,
test_rod.internal_stress,
)
assert_allclose(
test_rod.internal_stress, internal_stress, atol=Tolerance.atol()
)
@pytest.mark.parametrize("n_elem", [2, 3, 5, 10, 20])
@pytest.mark.parametrize("dilatation", [0.1, 0.2, 0.3, 0.5, 1.0, 1.1])
def test_case_compute_internal_forces(self, n_elem, dilatation):
"""
This test case initializes a straight rod. We modify node positions
and compress the rod numerically. By doing that we impose shear stress
in the rod and check, computation stresses.
This test function tests
_compute_internal_shear_stretch_stresses_from_model
"""
initial, test_rod = constructor(n_elem)
test_rod.position_collection *= dilatation
internal_forces = compute_forces_analytically(n_elem, dilatation)
_compute_internal_forces(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.dilatation,
test_rod.voronoi_dilatation,
test_rod.director_collection,
test_rod.sigma,
test_rod.rest_sigma,
test_rod.shear_matrix,
test_rod.internal_stress,
test_rod.velocity_collection,
test_rod.dissipation_constant_for_forces,
test_rod.damping_forces,
test_rod.internal_forces,
)
assert_allclose(
test_rod.internal_forces, internal_forces, atol=Tolerance.atol()
)
@pytest.mark.parametrize("n_elem", [2, 3, 5, 10, 20])
@pytest.mark.parametrize("nu", [0.1, 0.2, 0.5, 2])
def test_compute_damping_forces_torques(self, n_elem, nu):
"""
In this test case, we initialize a straight rod and modify
velocities of nodes and angular velocities of elements.
By doing that we can test damping forces on nodes and
damping torques on elements.
This test function tests
_compute_damping_forces
_compute_damping_torques
"""
# This is an artificial test, this part exists just to
# keep our coverage percentage.
initial, test_rod = constructor(n_elem, nu)
# Construct velocity and omega
test_rod.velocity_collection[:] = 1.0
test_rod.omega_collection[:] = 1.0
# Compute damping forces and torques
damping_forces = (
np.repeat(np.array([1.0, 1.0, 1.0])[:, np.newaxis], n_elem + 1, axis=1)
* nu
* (1.0 / n_elem)
)
damping_forces[..., 0] *= 0.5
damping_forces[..., -1] *= 0.5
damping_torques = (
np.repeat(np.array([1.0, 1.0, 1.0])[:, np.newaxis], n_elem, axis=1)
* nu
* (1.0 / n_elem)
)
# Compute geometry from state
_compute_geometry_from_state(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
)
# Compute damping forces and torques using in class functions
_compute_damping_forces(
test_rod.damping_forces,
test_rod.velocity_collection,
test_rod.dissipation_constant_for_forces,
test_rod.lengths,
)
_compute_damping_torques(
test_rod.damping_torques,
test_rod.omega_collection,
test_rod.dissipation_constant_for_torques,
test_rod.lengths,
)
test_damping_forces = test_rod.damping_forces
test_damping_torques = test_rod.damping_torques
# Compare damping forces and torques computed using in class functions and above
assert_allclose(test_damping_forces, damping_forces, atol=Tolerance.atol())
assert_allclose(test_damping_torques, damping_torques, atol=Tolerance.atol())
# alpha is base angle of isosceles triangle
@pytest.mark.parametrize("alpha", np.radians([22.5, 30, 45, 60, 70]))
def test_case_bend_straight_rod(self, alpha):
"""
In this test case we initialize a straight rod with 2 elements
and numerically bend the rod. We modify node positions and directors
to make a isosceles triangle. Then first we compute curvature
between two elements and compute the angle between them.
Finally, we compute bend twist couples and compare with
correct solution.
This test function tests
_compute_bending_twist_strains
_compute_internal_torques
only bend_twist_couple terms.
"""
n_elem = 2
initial, test_rod = constructor(n_elem, nu=0.0)
base_length = initial.base_length
# Change the coordinates of nodes, artificially bend the rod.
# /\
# ------ ==> / \
# / \
# Here I chose a isosceles triangle.
length = base_length / n_elem
position = np.zeros((MaxDimension.value(), n_elem + 1))
position[..., 0] = np.array([0.0, 0.0, 0.0])
position[..., 1] = length * np.array([0.0, np.sin(alpha), np.cos(alpha)])
position[..., 2] = length * np.array([0.0, 0.0, 2 * np.cos(alpha)])
test_rod.position_collection = position
# Set the directors manually. This is easy since we have two elements.
directors = np.zeros((MaxDimension.value(), MaxDimension.value(), n_elem))
directors[..., 0] = np.array(
(
[1.0, 0.0, 0.0],
[0.0, np.cos(alpha), -np.sin(alpha)],
[0.0, np.sin(alpha), np.cos(alpha)],
)
)
directors[..., -1] = np.array(
(
[1.0, 0.0, 0.0],
[0.0, np.cos(alpha), np.sin(alpha)],
[0, -np.sin(alpha), np.cos(alpha)],
)
)
test_rod.director_collection = directors
# Compute voronoi rest length. Since elements lengths are equal
# in this test case, rest voronoi length can be easily computed
# dividing base length to number of elements.
rest_voronoi_length = base_length / n_elem
# Now compute geometry and dilatation, which we need for curvature calculations.
_compute_all_dilatations(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.dilatation,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.voronoi_dilatation,
)
_compute_dilatation_rate(
test_rod.position_collection,
test_rod.velocity_collection,
test_rod.lengths,
test_rod.rest_lengths,
test_rod.dilatation_rate,
)
_compute_bending_twist_strains(
test_rod.director_collection, test_rod.rest_voronoi_lengths, test_rod.kappa
)
# Generalized rotation per unit length is given by rest_D_i * Kappa_i.
# Thus in order to get the angle between two elements, we need to multiply
# kappa with rest_D_i . But this will give the exterior vertex angle of the
# triangle. Think as, we rotate element 1 clockwise direction and align with
# the element 2.
#
# \
# /\ \ 1
# 1 / \ 2 ==> \
# / \ \
# \ 2
# \
#
# So for this transformation we use exterior vertex angle of isosceles triangle.
# Exterior vertex angle can be computed easily, it is the sum of base angles
# , since this is isosceles triangle it is 2*base_angle
correct_angle = np.degrees(np.array([2 * alpha, 0.0, 0.0]).reshape(3, 1))
test_angle = np.degrees(test_rod.kappa * test_rod.rest_voronoi_lengths)
assert_allclose(test_angle, correct_angle, atol=Tolerance.atol())
# Now lets test bending stress terms in internal torques equation.
# Here we will test bend twist couple 2D and bend twist couple 3D terms of the
# internal torques equation. Set the bending matrix to identity matrix for simplification.
test_rod.bend_matrix[:] = np.repeat(
np.identity(3)[:, :, np.newaxis], n_elem - 1, axis=2
)
# We need to compute shear stress, for internal torque equation.
# Shear stress is not used in this test case. In order to make sure shear
# stress do not contribute to the total torque we use assert check.
_compute_internal_bending_twist_stresses_from_model(
test_rod.director_collection,
test_rod.rest_voronoi_lengths,
test_rod.internal_couple,
test_rod.bend_matrix,
test_rod.kappa,
test_rod.rest_kappa,
)
assert_allclose(
test_rod.internal_stress,
np.zeros(3 * n_elem).reshape(3, n_elem),
atol=Tolerance.atol(),
)
# Make sure voronoi dilatation is 1
assert_allclose(
test_rod.voronoi_dilatation, np.array([1.0]), atol=Tolerance.atol()
)
# Compute correct torques, first compute correct kappa.
correct_kappa = np.radians(correct_angle / rest_voronoi_length)
# We only need to compute bend twist couple 2D term for comparison,
# because bend twist couple 3D term is already zero, due to cross product.
# TODO: Extended this test for multiple elements more than 2.
correct_torques = np.zeros((MaxDimension.value(), n_elem))
correct_torques[..., 0] = correct_kappa[..., 0]
correct_torques[..., -1] = -1.0 * correct_kappa[..., -1]
_compute_internal_torques(
test_rod.position_collection,
test_rod.velocity_collection,
test_rod.tangents,
test_rod.lengths,
test_rod.rest_lengths,
test_rod.director_collection,
test_rod.rest_voronoi_lengths,
test_rod.bend_matrix,
test_rod.rest_kappa,
test_rod.kappa,
test_rod.voronoi_dilatation,
test_rod.mass_second_moment_of_inertia,
test_rod.omega_collection,
test_rod.internal_stress,
test_rod.internal_couple,
test_rod.dilatation,
test_rod.dilatation_rate,
test_rod.dissipation_constant_for_torques,
test_rod.damping_torques,
test_rod.internal_torques,
)
assert_allclose(
test_rod.internal_torques, correct_torques, atol=Tolerance.atol()
)
def test_case_shear_torque(self):
"""
In this test case we initialize a straight rod with two elements
and set bending matrix to zero. This gives us opportunity decouple
shear torque from twist and bending torques in internal torques
equation. Then we modify node positions of second element and
introduce artificial bending. Finally, we compute shear torque
using internal torque function and compare with analytical value.
This test case is for testing shear torque term,
in internal torques equation.
Tested function
_compute_internal_torques
"""
n_elem = 2
initial, test_rod = constructor(n_elem, nu=0.0)
position = np.zeros((MaxDimension.value(), n_elem + 1))
position[..., 0] = np.array([0.0, 0.0, 0.0])
position[..., 1] = np.array([0.0, 0.0, 0.5])
position[..., 2] = np.array([0.0, -0.3, 0.9])
test_rod.position_collection = position
# Simplify the computations, and chose shear matrix as identity matrix.
test_rod.shear_matrix[:] = np.repeat(
np.identity(3)[:, :, np.newaxis], n_elem - 1, axis=2
)
# Internal shear stress function is tested previously
_compute_internal_shear_stretch_stresses_from_model(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.dilatation,
test_rod.voronoi_dilatation,
test_rod.director_collection,
test_rod.sigma,
test_rod.rest_sigma,
test_rod.shear_matrix,
test_rod.internal_stress,
)
correct_shear_torques = np.zeros((MaxDimension.value(), n_elem))
# Correct shear torques can be computed easily.
# Procedure:
# 1) Q = [1., 0., 0.; 0., 1., 0.; 0., 0., 1.]
# 2) t = [0., -0.6, 0.8]
# 3) sigma = (eQt-d3) = [0.0, -0.6, -0.2]
# 4) Qt = [0., -0.6, 0.8]
# 5) torque = Qt x sigma
# Note that this is not generic, but it does not to be, it is testing the functions.
correct_shear_torques[..., -1] = np.array([0.3, 0.0, 0.0])
# Set bending matrix to zero matrix, because we dont want
# any contribution from bending on total internal torques
test_rod.bend_matrix[:] = 0.0
_compute_internal_torques(
test_rod.position_collection,
test_rod.velocity_collection,
test_rod.tangents,
test_rod.lengths,
test_rod.rest_lengths,
test_rod.director_collection,
test_rod.rest_voronoi_lengths,
test_rod.bend_matrix,
test_rod.rest_kappa,
test_rod.kappa,
test_rod.voronoi_dilatation,
test_rod.mass_second_moment_of_inertia,
test_rod.omega_collection,
test_rod.internal_stress,
test_rod.internal_couple,
test_rod.dilatation,
test_rod.dilatation_rate,
test_rod.dissipation_constant_for_torques,
test_rod.damping_torques,
test_rod.internal_torques,
)
assert_allclose(
test_rod.internal_torques, correct_shear_torques, atol=Tolerance.atol()
)
def test_case_lagrange_transport_unsteady_dilatation(self):
"""
In this test case, we initialize a straight rod. Then we modify
angular velocity of elements and set mass moment of inertia
to identity matrix. By doing this we need to get zero torque
due lagrangian transport term, because of Jwxw, J=I, wxw=0.
Next we test unsteady dilatation contribution to internal
torques, by setting dilatation rate to 1 and recover initialized
angular velocity back, de/dt * Jw = w , de/dt=1 J=I.
This test function tests
_compute_internal_torques
only lagrange transport and
unsteady dilatation terms, tested numerically.
Note that, viscous dissipation set to 0,
since we don't want any contribution from
damping torque.
"""
n_elem = 2
initial, test_rod = constructor(n_elem, nu=0.0)
# TODO: find one more test in which you dont set J=I, may be some analytical test
# Set the mass moment of inertia matrix to identity matrix for simplification.
# When lagrangian transport tested, total torque computed by the function has
# to be zero, because (J.w/e)xw if J=I then wxw/e = 0.
test_rod.mass_second_moment_of_inertia[:] = np.repeat(
np.identity(3)[:, :, np.newaxis], n_elem, axis=2
)
_compute_shear_stretch_strains(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.dilatation,
test_rod.voronoi_dilatation,
test_rod.director_collection,
test_rod.sigma,
)
_compute_bending_twist_strains(
test_rod.director_collection, test_rod.rest_voronoi_lengths, test_rod.kappa
)
_compute_internal_forces(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.dilatation,
test_rod.voronoi_dilatation,
test_rod.director_collection,
test_rod.sigma,
test_rod.rest_sigma,
test_rod.shear_matrix,
test_rod.internal_stress,
test_rod.velocity_collection,
test_rod.dissipation_constant_for_forces,
test_rod.damping_forces,
test_rod.internal_forces,
)
# Lets set angular velocity omega to arbitray numbers
# Make sure shape of the random vector correct
omega = np.zeros(3 * n_elem).reshape(3, n_elem)
for i in range(0, n_elem):
omega[..., i] = np.random.rand(3)
test_rod.omega_collection = omega
_compute_internal_torques(
test_rod.position_collection,
test_rod.velocity_collection,
test_rod.tangents,
test_rod.lengths,
test_rod.rest_lengths,
test_rod.director_collection,
test_rod.rest_voronoi_lengths,
test_rod.bend_matrix,
test_rod.rest_kappa,
test_rod.kappa,
test_rod.voronoi_dilatation,
test_rod.mass_second_moment_of_inertia,
test_rod.omega_collection,
test_rod.internal_stress,
test_rod.internal_couple,
test_rod.dilatation,
test_rod.dilatation_rate,
test_rod.dissipation_constant_for_torques,
test_rod.damping_torques,
test_rod.internal_torques,
)
# computed internal torques has to be zero. Internal torques created by Lagrangian
# transport term is zero because mass moment of inertia is identity matrix and wxw=0.
# Torques due to unsteady dilatation has to be zero because dilatation rate is zero.
assert_allclose(
test_rod.internal_torques,
np.zeros(3 * n_elem).reshape(3, n_elem),
atol=Tolerance.atol(),
)
# Now lets test torques due to unsteady dilatation. For that, lets set dilatation
# rate to 1, it is zero before. It has to be zero before, because rod is not elongating or shortening
assert_allclose(
test_rod.dilatation_rate, np.zeros(n_elem), atol=Tolerance.atol()
) # check if dilatation rate is 0
# Now set velocity such that to set dilatation rate to 1.
test_rod.velocity_collection[..., 0] = np.ones(3) * -0.5
test_rod.velocity_collection[..., -1] = np.ones(3) * 0.5
_compute_dilatation_rate(
test_rod.position_collection,
test_rod.velocity_collection,
test_rod.lengths,
test_rod.rest_lengths,
test_rod.dilatation_rate,
)
assert_allclose(test_rod.dilatation_rate, np.array([1.0, 1.0]))
_compute_internal_torques(
test_rod.position_collection,
test_rod.velocity_collection,
test_rod.tangents,
test_rod.lengths,
test_rod.rest_lengths,
test_rod.director_collection,
test_rod.rest_voronoi_lengths,
test_rod.bend_matrix,
test_rod.rest_kappa,
test_rod.kappa,
test_rod.voronoi_dilatation,
test_rod.mass_second_moment_of_inertia,
test_rod.omega_collection,
test_rod.internal_stress,
test_rod.internal_couple,
test_rod.dilatation,
test_rod.dilatation_rate,
test_rod.dissipation_constant_for_torques,
test_rod.damping_torques,
test_rod.internal_torques,
)
# Total internal torque has to be equal to angular velocity omega.
# All the other terms contributing total internal torque is zero,
# other than unsteady dilatation.
correct_torques = omega
assert_allclose(
test_rod.internal_torques, correct_torques, atol=Tolerance.atol()
)
@pytest.mark.parametrize("n_elem", [2, 3, 5, 10, 20])
def test_compute_internal_forces_and_torques(self, n_elem):
"""
This function is only used to test the wrapper method in Cosserat Rod to call internal forces and torques.
Parameters
----------
n_elem
Returns
-------
"""
initial, test_rod = constructor(n_elem, nu=0.0)
test_rod._compute_internal_forces_and_torques(time=0)
@pytest.mark.parametrize("n_elem", [2, 3, 5, 10, 20])
def test_update_acceleration(self, n_elem):
"""
In this test case, we initialize a straight rod.
We set correct parameters for rod mass, dilatation, mass moment of inertia
and call the function update_accelerations and compare the angular and
translational acceleration with the correct values.
This test case tests,
update_accelerations
_update_accelerations
"""
initial, test_rod = constructor(n_elem, nu=0.0)
mass = test_rod.mass
external_forces = np.zeros(3 * (n_elem + 1)).reshape(3, n_elem + 1)
external_torques = np.zeros(3 * n_elem).reshape(3, n_elem)
for i in range(0, n_elem):
external_torques[..., i] = np.random.rand(3)
for i in range(0, n_elem + 1):
external_forces[..., i] = np.random.rand(3)
test_rod.external_forces[:] = external_forces
test_rod.external_torques[:] = external_torques
# No dilatation in the rods
dilatations = np.ones(n_elem)
# Set mass moment of inertia matrix to identity matrix for convenience.
# Inverse of identity = identity
inv_mass_moment_of_inertia = np.repeat(
np.identity(3)[:, :, np.newaxis], n_elem, axis=2
)
test_rod.inv_mass_second_moment_of_inertia[:] = inv_mass_moment_of_inertia
# Compute acceleration
test_rod.update_accelerations(time=0)
correct_acceleration = external_forces / mass
assert_allclose(
test_rod.acceleration_collection,
correct_acceleration,
atol=Tolerance.atol(),
)
correct_angular_acceleration = (
_batch_matvec(inv_mass_moment_of_inertia, external_torques) * dilatations
)
assert_allclose(
test_rod.alpha_collection,
correct_angular_acceleration,
atol=Tolerance.atol(),
)
@pytest.mark.parametrize("n_elem", [2, 3, 5, 10, 20])
def test_case_compute_translational_energy(self, n_elem, nu=0):
"""
This function tests compute translational energy function. We
take an initial input energy for the rod and compute the velocity and
set the velocity of rod elements. We call compute_translational_energy
function and compare the result with output energy.
Note here we are only setting the y velocity of the rod, x and z velocity
are zero.
Parameters
----------
n_elem
nu
Returns
-------
"""
initial, test_rod = constructor(n_elem, nu)
base_length = 1.0
base_radius = 0.25
density = 1.0
mass = base_length * np.pi * base_radius * base_radius * density
input_energy = 10
velocity = np.sqrt(2 * input_energy / mass)
test_rod.velocity_collection[1, :] = velocity
output_energy = test_rod.compute_translational_energy()
assert_allclose(output_energy, input_energy, atol=Tolerance.atol())
@pytest.mark.parametrize("n_elem", [2, 3, 5, 10, 20])
def test_case_compute_rotational_energy(self, n_elem, nu=0):
"""
This function tests compute rotational energy function. We
take an initial input energy for the rod and compute the angular velocity and
set the angular velocity of rod elements. We call compute_rotational_energy
function and compare the result with output energy. Here we are using mass
moment of inertia corresponding to z velocity.
Note here we are only setting the z velocity of the rod, y and x velocity
are zero.
Parameters
----------
n_elem
nu
Returns
-------
"""
initial, test_rod = constructor(n_elem, nu)
input_energy = 10
omega = np.sqrt(
2
* input_energy
/ (test_rod.mass_second_moment_of_inertia[2, 2, 0] * n_elem)
)
test_rod.omega_collection[..., :] = np.array([0.0, 0.0, omega]).reshape(3, 1)
_compute_all_dilatations(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.dilatation,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.voronoi_dilatation,
)
output_energy = test_rod.compute_rotational_energy()
assert_allclose(output_energy, input_energy, atol=Tolerance.atol())
@pytest.mark.parametrize("n_elem", [2, 3, 5, 10, 20])
def test_case_compute_velocity_center_of_mass(self, n_elem, nu=0.0):
"""
This function tests compute velocity center of mass function. We initialize a
random velocity vector and copy this vector to velocity_collection array. We call
the compute_velocity_center_of_mass function and compare the output vector with
our correct velocity vector which we initialize at the beginning.
randomly the a vector for velocity
Parameters
----------
n_elem
nu
Returns
-------
"""
correct_velocity = np.random.rand(3) / (n_elem + 1)
initial, test_rod = constructor(n_elem, nu)
test_rod.velocity_collection[..., :] = np.array(correct_velocity).reshape(3, 1)
output_velocity = test_rod.compute_velocity_center_of_mass()
assert_allclose(output_velocity, correct_velocity, atol=Tolerance.atol())
@pytest.mark.parametrize("n_elem", [2, 3, 5, 10, 20])
def test_case_compute_position_center_of_mass(self, n_elem, nu=0.0):
"""
This function tests compute position center of mass function. We initialize a
random position vector and copy this vector to position_collection array. We call
the compute_position_center_of_mass function and compare the output vector with
our correct position vector which we initialize at the beginning.
randomly the a vector for position
Parameters
----------
n_elem
nu
Returns
-------
"""
correct_position = np.random.rand(3) / (n_elem + 1)
initial, test_rod = constructor(n_elem, nu)
test_rod.position_collection[..., :] = np.array(correct_position).reshape(3, 1)
output_position = test_rod.compute_position_center_of_mass()
assert_allclose(output_position, correct_position, atol=Tolerance.atol())
# alpha is base angle of isosceles triangle
@pytest.mark.parametrize("alpha", np.radians([22.5, 30, 45, 60, 70]))
def test_case_compute_bending_energy(self, alpha, nu=0.0):
"""
Similar to the previous test case test_case_bend_straight_rod.
In this test case we initialize a straight rod with 2 elements
and numerically bend the rod. We modify node positions and directors
to make a isosceles triangle. Then first we compute curvature
between two elements and compute the angle between them.
Finally, we compute the bending energy of rod and compare with
correct solution.
This test function tests
compute_bending_energy
Parameters
----------
alpha
nu
Returns
-------
"""
n_elem = 2
initial, test_rod = constructor(n_elem, nu=0.0)
base_length = initial.base_length
# Change the coordinates of nodes, artificially bend the rod.
# /\
# ------ ==> / \
# / \
# Here I chose a isosceles triangle.
length = base_length / n_elem
position = np.zeros((MaxDimension.value(), n_elem + 1))
position[..., 0] = np.array([0.0, 0.0, 0.0])
position[..., 1] = length * np.array([0.0, np.sin(alpha), np.cos(alpha)])
position[..., 2] = length * np.array([0.0, 0.0, 2 * np.cos(alpha)])
test_rod.position_collection = position
# Set the directors manually. This is easy since we have two elements.
directors = np.zeros((MaxDimension.value(), MaxDimension.value(), n_elem))
directors[..., 0] = np.array(
(
[1.0, 0.0, 0.0],
[0.0, np.cos(alpha), -np.sin(alpha)],
[0.0, np.sin(alpha), np.cos(alpha)],
)
)
directors[..., -1] = np.array(
(
[1.0, 0.0, 0.0],
[0.0, np.cos(alpha), np.sin(alpha)],
[0, -np.sin(alpha), np.cos(alpha)],
)
)
test_rod.director_collection = directors
# Compute voronoi rest length. Since elements lengths are equal
# in this test case, rest voronoi length can be easily computed
# dividing base length to number of elements.
rest_voronoi_length = base_length / n_elem
# Now compute geometry and dilatation, which we need for curvature calculations.
_compute_all_dilatations(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.dilatation,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.voronoi_dilatation,
)
_compute_dilatation_rate(
test_rod.position_collection,
test_rod.velocity_collection,
test_rod.lengths,
test_rod.rest_lengths,
test_rod.dilatation_rate,
)
_compute_bending_twist_strains(
test_rod.director_collection, test_rod.rest_voronoi_lengths, test_rod.kappa
)
# Generalized rotation per unit length is given by rest_D_i * Kappa_i.
# Thus in order to get the angle between two elements, we need to multiply
# kappa with rest_D_i . But this will give the exterior vertex angle of the
# triangle. Think as, we rotate element 1 clockwise direction and align with
# the element 2.
#
# \
# /\ \ 1
# 1 / \ 2 ==> \
# / \ \
# \ 2
# \
#
# So for this transformation we use exterior vertex angle of isosceles triangle.
# Exterior vertex angle can be computed easily, it is the sum of base angles
# , since this is isosceles triangle it is 2*base_angle
correct_angle = np.degrees(np.array([2 * alpha, 0.0, 0.0]).reshape(3, 1))
test_angle = np.degrees(test_rod.kappa * test_rod.rest_voronoi_lengths)
assert_allclose(test_angle, correct_angle, atol=Tolerance.atol())
# Now lets test bending stress terms in internal torques equation.
# Here we will test bend twist couple 2D and bend twist couple 3D terms of the
# internal torques equation. Set the bending matrix to identity matrix for simplification.
test_rod.bend_matrix[:] = np.repeat(
np.identity(3)[:, :, np.newaxis], n_elem - 1, axis=2
)
# Compute bending energy
correct_kappa = 2 * alpha / rest_voronoi_length
correct_bending_energy = (
0.5 * correct_kappa * correct_kappa * rest_voronoi_length
)
test_bending_energy = test_rod.compute_bending_energy()
assert_allclose(
test_bending_energy, correct_bending_energy, atol=Tolerance.atol()
)
    @pytest.mark.parametrize("n_elem", [2, 3, 5, 10, 20])
    @pytest.mark.parametrize("dilatation", [0.1, 0.2, 0.3, 0.5, 1.0, 1.1])
    def test_compute_shear_energy(self, n_elem, dilatation):
        """
        Initialize a straight rod and compress/stretch it numerically by
        uniformly scaling the node positions. This imposes shear/stretch
        stress in the rod; the value returned by ``compute_shear_energy``
        is then checked against the analytical energy
        0.5 * sum_i (strain_i . stress_i) * rest_length.

        Parameters
        ----------
        n_elem : int
            Number of rod elements.
        dilatation : float
            Uniform scale factor applied to all node positions.
        Returns
        -------
        """
        initial, test_rod = constructor(n_elem)
        base_length = initial.base_length
        # Compute rest length. Since elements lengths are equal
        # in this test case, rest length can be easily computed
        # dividing base length to number of elements.
        rest_length = base_length / n_elem
        # Uniformly scale the rod; this is the "numerical compression".
        test_rod.position_collection *= dilatation
        internal_strain = compute_strain_analytically(n_elem, dilatation)
        internal_stress = compute_stress_analytically(n_elem, dilatation)
        # Compute shear energy: per-element dot product of strain and stress
        # columns (the einsum), weighted by the element rest length.
        correct_shear_energy = (
            0.5
            * (
                np.einsum("ij, ij->j", internal_strain, internal_stress) * rest_length
            ).sum()
        )
        # Populate test_rod.internal_stress in place before querying the energy.
        _compute_internal_forces(
            test_rod.position_collection,
            test_rod.volume,
            test_rod.lengths,
            test_rod.tangents,
            test_rod.radius,
            test_rod.rest_lengths,
            test_rod.rest_voronoi_lengths,
            test_rod.dilatation,
            test_rod.voronoi_dilatation,
            test_rod.director_collection,
            test_rod.sigma,
            test_rod.rest_sigma,
            test_rod.shear_matrix,
            test_rod.internal_stress,
            test_rod.velocity_collection,
            test_rod.dissipation_constant_for_forces,
            test_rod.damping_forces,
            test_rod.internal_forces,
        )
        test_shear_energy = test_rod.compute_shear_energy()
        assert_allclose(test_shear_energy, correct_shear_energy, atol=Tolerance.atol())
def test_get_z_vector_function():
    """Check that ``_get_z_vector`` returns the unit z-axis as a (3, 1) column."""
    expected_z_vector = np.array([[0.0], [0.0], [1.0]])
    assert_allclose(expected_z_vector, _get_z_vector(), atol=Tolerance.atol())
# Allow running this test module directly (``python <file>.py``) by invoking
# pytest on it.
if __name__ == "__main__":
    from pytest import main
    main([__file__])
| 37.498036 | 114 | 0.625306 |
7957b02308098ab5f539b738b262a76228b7b93d | 4,858 | py | Python | fhvqe/retrieve_data.py | PhaseCraft/fhvqe-paper | 535878896cc978f3ab7cc4d5d790d50d2a3c2359 | [
"Apache-2.0"
] | null | null | null | fhvqe/retrieve_data.py | PhaseCraft/fhvqe-paper | 535878896cc978f3ab7cc4d5d790d50d2a3c2359 | [
"Apache-2.0"
] | null | null | null | fhvqe/retrieve_data.py | PhaseCraft/fhvqe-paper | 535878896cc978f3ab7cc4d5d790d50d2a3c2359 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Phasecraft Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cirq
import os
import json
import logging
import numpy as np
import pandas as pd
logger = logging.getLogger("fhvqe.retrieve_data")
# Constants
CIRCUIT_JSON = "circuit.json"
RESULTS_JSON = "results.json"
PROJECT_ID = "fermi-hubbard-vqe"
RESULTS_FOLDER = "results"
project_id = PROJECT_ID
# NOTE: module-level side effects — importing this module creates a cloud
# Engine handle and reads the program-id list from disk.
engine = cirq.google.Engine(project_id=project_id)
program_id_filename = "data/program_ids.csv"
full_program_ids = []
# Strip the trailing character of each line with pid[:-1]; this assumes every
# line (including the last) ends with a newline — TODO confirm.
with open(program_id_filename, "r") as pids:
    full_program_ids = [pid[:-1] for pid in pids]
def retrieve_historical_data(program_id, prefix=""):
    """Retrieve historical data for a given program_id

    Args:
        program_id -- The program_id as saved in jobs_xxx.json
        prefix -- The job prefix, e.g. "tflo-", "noise-", "givens" or ...

    Returns:
        (jobs, results) A list `jobs` containing all jobs with the
        given program_id and a list containing all the `result`s of
        these jobs.
    """
    # Match any stored program id containing the (prefixed) requested id as a
    # substring; list comprehension replaces the former filter(lambda ...).
    full_pids = [pid for pid in full_program_ids if prefix + program_id in pid]
    jobs = []
    # Use the module logger instead of print() so verbosity stays configurable
    # (consistent with the rest of this module).
    logger.debug(
        "Program id: %s. Prefix: %s. Full pids: %s", program_id, prefix, full_pids
    )
    for full_pid in full_pids:
        jobs += engine.get_program(program_id=full_pid).list_jobs()
    results = [job.results() for job in jobs]
    return jobs, results
def retrieve_historical_samples(program_id, prefix=""):
    """Retrieve historical samples for a given program_id

    Args:
        program_id -- The program_id as saved in jobs_xxx.json
        prefix -- The job prefix, e.g. "tflo-", "noise-", "givens" or ...

    Returns:
        `samples` A list of int16-matrices containing the samples ordered s.t.
        they are compatible with the observables in `scripts/analyse_results.py`
    """
    jobs, results = retrieve_historical_data(program_id, prefix=prefix)
    # Use the module logger instead of print() for configurable verbosity.
    logger.debug(
        "Retrieved %d jobs for program id %s prefix %s", len(jobs), program_id, prefix
    )
    # NOTE(review): only the first job's result batch is used; confirm whether
    # the remaining jobs' results should also be included.
    samples = results[0]
    # Transpose so rows index measured qubits and columns index repetitions.
    samples = [s.measurements["x"].astype(np.int16).T for s in samples]
    return samples
def load_samples_txt(filename):
    """Load a samples.txt file to a list of numpy arrays

    Blocks of sample lines are separated by a blank-line gap ("\n\n\n").
    Each line in a block is one shot (a string of digits); the returned
    matrix is transposed so that rows index character positions (qubits)
    and columns index shots.

    Args:
        filename -- The `samples.txt` (or similar) filename containing the samples

    Returns:
        `samples` A list of int16-matrices containing the samples ordered s.t.
        they are compatible with the observables in `scripts/analyse_results.py`.
    """
    with open(filename) as file:
        samples_blocks = file.read().split("\n\n\n")
    samples = []
    for raw_block in samples_blocks:
        lines = raw_block.splitlines()
        # Skip empty blocks (e.g. from a trailing separator). The previous
        # `len(samples_blocks[-1]) == 1` check missed a truly empty ("")
        # trailing block and only caught a single-character one.
        if not lines or not lines[0]:
            continue
        arr = np.empty((len(lines[0]), len(lines)), dtype=np.int16)
        for j, line in enumerate(lines):
            for i, ch in enumerate(line):
                # Explicit int() — assigning a str into an int array is
                # deprecated/removed in newer numpy versions.
                arr[i, j] = int(ch)
        samples.append(arr)
    return samples
# Allow this file to be run directly as a separate script.
if __name__ == "__main__":
    # Change here:
    folder = "heatmaps/4x1"
    filename = "jobs_4x1_2.json"
    full_filename = os.path.join(folder, filename)
    results_folder = os.path.join(folder, RESULTS_FOLDER)
    if not os.path.exists(results_folder):
        os.makedirs(results_folder)
    # jobs_xxx.json lists {"job_id": ..., "prog_id": ...} records to fetch.
    with open(full_filename) as json_file:
        jobs_data = json.load(json_file)
    for j, job_data in enumerate(jobs_data):
        # Simple in-place progress indicator.
        print(f"[{j}/{len(jobs_data)-1}]\r", end="")
        job_id = job_data["job_id"]
        prog_id = job_data["prog_id"]
        job = engine.get_program(program_id=prog_id).get_job(job_id=job_id)
        # Only completed jobs have results worth downloading.
        if job.status() == "SUCCESS":
            job_results = job.results()
            prog_folder = os.path.join(results_folder, prog_id)
            if not os.path.exists(prog_folder):
                os.makedirs(prog_folder)
            # Serialize both the circuit and the results as JSON per program.
            prog_data = cirq.to_json(job.program().get_circuit())
            with open(os.path.join(prog_folder, CIRCUIT_JSON), 'w') as outfile:
                json.dump(prog_data, outfile)
            with open(os.path.join(prog_folder, RESULTS_JSON), 'w') as outfile:
                json.dump(cirq.to_json(job_results), outfile)
| 35.985185 | 86 | 0.666735 |
7957b024f727bae66dba0ad129ba0b19840a0b32 | 4,051 | py | Python | backend/interdisciplinaire/settings.py | jacquant/interdisciplinaire | c2155517117c00d4eee9a0b34a59cda5983d0995 | [
"MIT"
] | null | null | null | backend/interdisciplinaire/settings.py | jacquant/interdisciplinaire | c2155517117c00d4eee9a0b34a59cda5983d0995 | [
"MIT"
] | null | null | null | backend/interdisciplinaire/settings.py | jacquant/interdisciplinaire | c2155517117c00d4eee9a0b34a59cda5983d0995 | [
"MIT"
] | null | null | null | """
Django settings for interdisciplinaire project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
SETTINGS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(SETTINGS_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the hard-coded fallback below means a missing SECRET_KEY env
# var silently uses a publicly known key — acceptable for dev only.
SECRET_KEY = os.environ.get(
    "SECRET_KEY", "cmbj^cdp$%#ub*4exq8r2%ns=8r*o)3!-7p9tocfla8%*vk39k"
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Accepts any Host header; restrict before deploying publicly.
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
    # Django's app
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    # External Django's app
    "import_export",
    "rest_framework",
    "drf_yasg",
    # Internal Django's app
    "backend.climate_actions.apps.ClimateActionsConfig",
]
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    # WhiteNoise serves static files directly from the app process.
    "whitenoise.middleware.WhiteNoiseMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "backend.interdisciplinaire.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        # "dist" holds the built frontend; its index template is served here.
        "DIRS": ["dist"],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "backend.interdisciplinaire.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Uses djongo (MongoDB backend); DB_NAME/DB_IP are read from the environment.
DATABASES = {
    "default": {
        "ENGINE": "djongo",
        "NAME": os.environ.get("DB_NAME", "interdisciplinaire"),
        "HOST": os.environ.get("DB_IP", "127.0.0.1:27017"),
    }
}
# Redis-backed cache; also used as the session store below.
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://redis:6379/0",
        "OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient", },
    }
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", },
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", },
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "fr"
TIME_ZONE = "Europe/Brussels"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name and is
# ignored by Django 2.2; WhiteNoise is already listed in MIDDLEWARE above.
MIDDLEWARE_CLASSES = ("whitenoise.middleware.WhiteNoiseMiddleware",)
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "dist", "static")
STATICFILES_DIRS = []
IMPORT_EXPORT_USE_TRANSACTIONS = False
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
| 29.569343 | 91 | 0.709948 |
7957b0618d9620dad9fca86106ae183f0807cc3d | 12,913 | py | Python | custom_components/bhyve/__init__.py | allistermaguire/bhyve-home-assistant | 446b39c703166ab9ceba6c97220478b930d98139 | [
"MIT"
] | null | null | null | custom_components/bhyve/__init__.py | allistermaguire/bhyve-home-assistant | 446b39c703166ab9ceba6c97220478b930d98139 | [
"MIT"
] | null | null | null | custom_components/bhyve/__init__.py | allistermaguire/bhyve-home-assistant | 446b39c703166ab9ceba6c97220478b930d98139 | [
"MIT"
] | null | null | null | """Support for Orbit BHyve irrigation devices."""
import json
import logging
import os
import pprint
import voluptuous as vol
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady, HomeAssistantError
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_send,
async_dispatcher_connect,
)
from homeassistant.helpers.entity import Entity
from .const import (
CONF_ATTRIBUTION,
CONF_CONF_DIR,
CONF_FAKE_MODE,
CONF_PACKET_DUMP,
DATA_BHYVE,
DOMAIN,
EVENT_PROGRAM_CHANGED,
EVENT_RAIN_DELAY,
EVENT_SET_MANUAL_PRESET_TIME,
MANUFACTURER,
SIGNAL_UPDATE_DEVICE,
SIGNAL_UPDATE_PROGRAM,
)
from .util import anonymize, pesudo_id_if_smart_program
from .pybhyve import Client
from .pybhyve.errors import BHyveError, WebsocketError
_LOGGER = logging.getLogger(__name__)
DATA_CONFIG = "config"
# Defaults for the optional configuration.yaml keys below.
DEFAULT_PACKET_DUMP = False
DEFAULT_FAKE_MODE = False
DEFAULT_CONF_DIR = ""
# Schema for the `bhyve:` section of configuration.yaml.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_USERNAME): cv.string,
                vol.Required(CONF_PASSWORD): cv.string,
                vol.Optional(CONF_PACKET_DUMP, default=DEFAULT_PACKET_DUMP): cv.boolean,
                vol.Optional(CONF_CONF_DIR, default=DEFAULT_CONF_DIR): cv.string,
                # vol.Optional(CONF_FAKE_MODE, default=DEFAULT_FAKE_MODE): cv.boolean,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
class MockClient(Client):
    """Offline stand-in for the BHyve ``Client``.

    Serves canned device/program payloads (loaded from local JSON files by
    ``async_setup`` in fake mode) instead of querying the Orbit API; the
    refresh hooks are no-ops so the canned data is never overwritten.
    """
    def __init__(
        self,
        username: str,
        password: str,
        mock_devices,
        mock_programs,
        loop,
        session,
        async_callback,
    ) -> None:
        super().__init__(
            username,
            password,
            loop=loop,
            session=session,
            async_callback=async_callback,
        )
        self._devices = mock_devices
        self._timer_programs = mock_programs
    async def _refresh_devices(self, force_update=False):
        # No-op: keep the canned device list as-is.
        pass
    async def _refresh_timer_programs(self, force_update=False):
        # No-op: keep the canned program list as-is.
        pass
    @property
    async def devices(self):
        """Get all devices."""
        return self._devices
    @property
    async def timer_programs(self):
        """Get timer programs."""
        return self._timer_programs
async def async_setup(hass, config):
    """Set up the BHyve component.

    Logs in to the BHyve service (or loads canned data in fake mode),
    stores the client in ``hass.data`` and registers a websocket callback
    that fans events out to entities via the dispatcher.
    """
    conf = config[DOMAIN]
    packet_dump = conf.get(CONF_PACKET_DUMP)
    fake_mode = conf.get(CONF_FAKE_MODE)
    conf_dir = conf.get(CONF_CONF_DIR)
    if conf_dir == "":
        conf_dir = hass.config.config_dir + "/.bhyve"
    # Create storage/scratch directory; failure is non-fatal (packet dumps
    # and fake mode simply won't work).
    try:
        if not os.path.exists(conf_dir):
            os.mkdir(conf_dir)
    except Exception as err:
        _LOGGER.info("Could not create storage dir: %s", err)

    async def async_update_callback(data):
        """Handle a message from the BHyve websocket."""
        # Guard the whole handler, not just the packet dump: the previous
        # code fell through to data.get(...) and crashed on a None payload.
        if data is None:
            return
        if packet_dump:
            dump_file = conf_dir + "/" + "packets.dump"
            with open(dump_file, "a") as dump:
                dump.write(pprint.pformat(data, indent=2) + "\n")
        event = data.get("event")
        device_id = None
        program_id = None
        if event == EVENT_PROGRAM_CHANGED:
            device_id = data.get("program", {}).get("device_id")
            program_id = pesudo_id_if_smart_program(
                device_id,
                data.get("program", {}).get("id"),
                bool(data.get("program", {}).get("is_smart_program", False))
            )
        else:
            device_id = data.get("device_id")
        # Fan the event out to the interested device/program entities.
        if device_id is not None:
            async_dispatcher_send(
                hass, SIGNAL_UPDATE_DEVICE.format(device_id), device_id, data
            )
        if program_id is not None:
            async_dispatcher_send(
                hass, SIGNAL_UPDATE_PROGRAM.format(program_id), program_id, data
            )

    session = aiohttp_client.async_get_clientsession(hass)
    try:
        if fake_mode:
            fake_devices_file = conf_dir + "/" + "devices.json"
            fake_programs_file = conf_dir + "/" + "programs.json"
            _LOGGER.info("Loading devices {}".format(fake_devices_file))
            with open(fake_devices_file) as fake_devices:
                mock_devices = json.load(fake_devices)
            _LOGGER.info("Loading programs {}".format(fake_programs_file))
            with open(fake_programs_file) as fake_programs:
                mock_programs = json.load(fake_programs)
            bhyve = MockClient(
                conf[CONF_USERNAME],
                conf[CONF_PASSWORD],
                mock_devices,
                mock_programs,
                loop=hass.loop,
                session=session,
                async_callback=async_update_callback,
            )
        else:
            bhyve = Client(
                conf[CONF_USERNAME],
                conf[CONF_PASSWORD],
                loop=hass.loop,
                session=session,
                async_callback=async_update_callback,
            )
        await bhyve.login()
        devices = [anonymize(device) for device in await bhyve.devices]
        programs = await bhyve.timer_programs
        _LOGGER.debug("Devices: {}".format(json.dumps(devices)))
        _LOGGER.debug("Programs: {}".format(json.dumps(programs)))
        hass.data[DATA_BHYVE] = bhyve
    except WebsocketError as err:
        _LOGGER.error("Config entry failed: %s", err)
        raise ConfigEntryNotReady from err

    async def _async_stop_bhyve(event):
        """Shut down the BHyve client when Home Assistant stops."""
        await bhyve.stop()

    # Pass a callable, not an already-created coroutine: the previous
    # `bhyve.stop()` argument created the coroutine immediately and handed
    # the coroutine object to the event bus as if it were a listener.
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_stop_bhyve)
    return True
def get_entity_from_domain(hass, domains, entity_id):
    """Return the entity registered under ``entity_id`` in the given domain(s).

    Raises HomeAssistantError if a requested domain component is not set up,
    or if the entity cannot be found in any of the given domains.
    """
    if not isinstance(domains, list):
        domains = [domains]
    for domain_name in domains:
        domain_component = hass.data.get(domain_name)
        if domain_component is None:
            raise HomeAssistantError("{} component not set up".format(domain_name))
        candidate = domain_component.get_entity(entity_id)
        if candidate is not None:
            return candidate
    raise HomeAssistantError("{} not found in {}".format(entity_id, ",".join(domains)))
class BHyveEntity(Entity):
    """Define a base BHyve entity.

    Holds the shared state (name, icon, availability, extra attributes) used
    by all BHyve entities; updates are push-based, so polling is disabled.
    """
    def __init__(
        self,
        hass,
        bhyve,
        name,
        icon,
        device_class=None,
    ):
        """Initialize the sensor."""
        self._hass = hass
        self._bhyve: Client = bhyve
        self._device_class = device_class
        self._name = name
        # Icons are Material Design Icons; callers pass the bare name.
        self._icon = "mdi:{}".format(icon)
        self._state = None
        # Entities start unavailable until real data arrives.
        self._available = False
        self._attrs = {}
    @property
    def available(self):
        """Return True if entity is available."""
        return self._available
    @property
    def device_class(self):
        """Return the device class."""
        return self._device_class
    @property
    def extra_state_attributes(self):
        """Return the device state attributes."""
        return self._attrs
    @property
    def name(self):
        """Return the name of the sensor."""
        return f"{self._name}"
    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self._icon
    @property
    def should_poll(self):
        """Disable polling."""
        return False
    @property
    def device_info(self):
        """Return device registry information for this entity."""
        return {
            "name": self._name,
            "manufacturer": MANUFACTURER,
            ATTR_ATTRIBUTION: CONF_ATTRIBUTION,
        }
class BHyveWebsocketEntity(BHyveEntity):
    """An entity which responds to websocket events.

    Incoming events are queued in ``_ws_unprocessed_events`` and drained by
    ``async_update``; subclasses implement ``_on_ws_data`` to apply them.
    """
    def __init__(
        self,
        hass,
        bhyve,
        name,
        icon,
        device_class=None,
    ):
        # Dispatcher unsubscribe handle; set by subclasses on add-to-hass.
        self._async_unsub_dispatcher_connect = None
        # FIFO of websocket payloads not yet applied to entity state.
        self._ws_unprocessed_events = []
        super().__init__(hass, bhyve, name, icon, device_class)
    def _on_ws_data(self, data):
        """Apply a single websocket payload to entity state (subclass hook)."""
        pass
    def _should_handle_event(self, event_name, data):
        """True if the websocket event should be handled"""
        return True
    async def async_update(self):
        """Retrieve latest state."""
        # Drain a snapshot of the queue; new events arriving during the loop
        # are kept for the next update cycle.
        ws_updates = list(self._ws_unprocessed_events)
        self._ws_unprocessed_events[:] = []
        for ws_event in ws_updates:
            self._on_ws_data(ws_event)
class BHyveDeviceEntity(BHyveWebsocketEntity):
    """Define a base BHyve entity with a device."""
    def __init__(
        self,
        hass,
        bhyve,
        device,
        name,
        icon,
        device_class=None,
    ):
        """Initialize the sensor."""
        self._mac_address = device.get("mac_address")
        self._device_id = device.get("id")
        self._device_type = device.get("type")
        self._device_name = device.get("name")
        super().__init__(hass, bhyve, name, icon, device_class)
        self._setup(device)
    def _setup(self, device):
        """Initialize entity state from a device payload (subclass hook)."""
        pass
    async def _refetch_device(self, force_update=False):
        """Re-fetch this device from the API and re-run ``_setup``."""
        try:
            device = await self._bhyve.get_device(self._device_id, force_update)
            if not device:
                _LOGGER.info("No device found with id %s", self._device_id)
                self._available = False
                return
            self._setup(device)
        except BHyveError as err:
            # Best-effort refresh: log and keep the last-known state.
            _LOGGER.warning(f"Unable to retreive data for {self.name}: {err}")
    async def _fetch_device_history(self, force_update=False):
        """Return the device's watering history from the API."""
        try:
            return await self._bhyve.get_device_history(self._device_id, force_update)
        except BHyveError as err:
            # NOTE(review): a bare `raise` would preserve the traceback;
            # `raise (err)` re-raises the same exception object.
            raise (err)
    @property
    def device_info(self):
        """Return device registry information for this entity."""
        return {
            "identifiers": {(DOMAIN, self._mac_address)},
            "name": self._device_name,
            "manufacturer": MANUFACTURER,
            ATTR_ATTRIBUTION: CONF_ATTRIBUTION,
        }
    @property
    def unique_id(self):
        """Return a unique, unchanging string that represents this sensor."""
        # Subclasses must override; failing loudly beats colliding entity ids.
        raise HomeAssistantError(
            "{} does not define a unique_id".format(self.__class__.__name__)
        )
    async def async_added_to_hass(self):
        """Register callbacks."""
        @callback
        def update(device_id, data):
            """Update the state."""
            event = data.get("event")
            # Track availability from connect/disconnect events before
            # queueing the payload for async_update.
            if event == "device_disconnected":
                _LOGGER.warning("Device {} disconnected and is no longer available".format(self.name))
                self._available = False
            elif event == "device_connected":
                _LOGGER.info("Device {} reconnected and is now available".format(self.name))
                self._available = True
            if self._should_handle_event(event, data):
                _LOGGER.info(
                    "Message received: {} - {} - {}".format(
                        self.name, self._device_id, str(data)
                    )
                )
                self._ws_unprocessed_events.append(data)
                self.async_schedule_update_ha_state(True)
        self._async_unsub_dispatcher_connect = async_dispatcher_connect(
            self.hass, SIGNAL_UPDATE_DEVICE.format(self._device_id), update
        )
    async def async_will_remove_from_hass(self):
        """Disconnect dispatcher listener when removed."""
        if self._async_unsub_dispatcher_connect:
            self._async_unsub_dispatcher_connect()
    async def set_manual_preset_runtime(self, minutes: int):
        """Set the device's manual watering preset time, in minutes."""
        # {event: "set_manual_preset_runtime", device_id: "abc", seconds: 900}
        payload = {
            "event": EVENT_SET_MANUAL_PRESET_TIME,
            "device_id": self._device_id,
            "seconds": minutes * 60,
        }
        _LOGGER.info("Setting manual preset runtime: {}".format(payload))
        await self._bhyve.send_message(payload)
    async def enable_rain_delay(self, hours: int = 24):
        """Enable rain delay"""
        await self._set_rain_delay(hours)
    async def disable_rain_delay(self):
        """Disable rain delay"""
        await self._set_rain_delay(0)
    async def _set_rain_delay(self, hours: int):
        """Send a rain-delay message; 0 hours clears the delay."""
        try:
            # {event: "rain_delay", device_id: "abc", delay: 48}
            payload = {
                "event": EVENT_RAIN_DELAY,
                "device_id": self._device_id,
                "delay": hours,
            }
            _LOGGER.info("Setting rain delay: {}".format(payload))
            await self._bhyve.send_message(payload)
        except BHyveError as err:
            _LOGGER.warning("Failed to send to BHyve websocket message %s", err)
            raise (err)
| 29.616972 | 102 | 0.603113 |
7957b13567ab9f948095290e58e107f21d66f4ab | 12,218 | py | Python | python/tvm/topi/nn/conv3d.py | aiblackmaner/tvm | 729155fb06e892af0654e93897e140d2d7ad16d6 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | python/tvm/topi/nn/conv3d.py | aiblackmaner/tvm | 729155fb06e892af0654e93897e140d2d7ad16d6 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | python/tvm/topi/nn/conv3d.py | aiblackmaner/tvm | 729155fb06e892af0654e93897e140d2d7ad16d6 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, too-many-locals
# pylint: disable=unused-argument, redefined-builtin, no-else-return
"""Conv3D operators"""
import tvm
from tvm import te
from .pad import pad
from .util import get_pad_tuple3d
from ..util import simplify, get_const_tuple
from .winograd_util import winograd_transform_matrices
def conv3d_ncdhw(Input, Filter, stride, padding, dilation, out_dtype=None):
    """Conv3D operator in NCDHW layout.
    Parameters
    ----------
    Input : tvm.te.Tensor
        5-D with shape [batch, in_channel, in_depth, in_height, in_width]
    Filter : tvm.te.Tensor
        5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width]
    stride : int or a list/tuple of three ints
        Stride size, or [strid_depth, stride_height, stride_width]
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    dilation: int or a list/tuple of three ints
        dilation size, or [dilation_depth, dilation_height, dilation_width]
    out_dtype : str, optional
        Accumulation/output dtype; defaults to the input dtype.
    Returns
    -------
    Output : tvm.te.Tensor
        5-D with shape [batch, out_channel, out_depth, out_height, out_width]
    """
    if out_dtype is None:
        out_dtype = Input.dtype
    assert isinstance(stride, int) or len(stride) == 3
    assert isinstance(dilation, int) or len(dilation) == 3
    if isinstance(stride, int):
        stride_d = stride_h = stride_w = stride
    else:
        stride_d, stride_h, stride_w = stride
    if isinstance(dilation, int):
        dilation_d = dilation_h = dilation_w = dilation
    else:
        dilation_d, dilation_h, dilation_w = dilation
    batch, in_channel, in_depth, in_height, in_width = Input.shape
    num_filter, channel, kernel_d, kernel_h, kernel_w = Filter.shape
    # compute the output shape
    # Effective (dilated) kernel extents along each spatial axis.
    dilated_kernel_d = (kernel_d - 1) * dilation_d + 1
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_front, pad_top, pad_left, pad_back, pad_down, pad_right = get_pad_tuple3d(
        padding, (dilated_kernel_d, dilated_kernel_h, dilated_kernel_w)
    )
    out_channel = num_filter
    out_depth = simplify((in_depth - dilated_kernel_d + pad_front + pad_back) // stride_d + 1)
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
    # compute graph
    pad_before = [0, 0, pad_front, pad_top, pad_left]
    pad_after = [0, 0, pad_back, pad_down, pad_right]
    temp = pad(Input, pad_before, pad_after, name="pad_temp")
    # Reduction axes: input channel and the three kernel dimensions.
    rc = te.reduce_axis((0, in_channel), name="rc")
    rz = te.reduce_axis((0, kernel_d), name="rz")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    return te.compute(
        (batch, out_channel, out_depth, out_height, out_width),
        lambda nn, ff, zz, yy, xx: te.sum(
            temp[
                nn,
                rc,
                zz * stride_d + rz * dilation_d,
                yy * stride_h + ry * dilation_h,
                xx * stride_w + rx * dilation_w,
            ].astype(out_dtype)
            * Filter[ff, rc, rz, ry, rx].astype(out_dtype),
            axis=[rc, rz, ry, rx],
        ),
        tag="conv3d_ncdhw",
    )
def conv3d_ndhwc(Input, Filter, stride, padding, dilation, out_dtype="float32"):
    """Convolution operator in NDHWC layout.
    Parameters
    ----------
    Input : tvm.te.Tensor
        5-D with shape [batch, in_depth, in_height, in_width, in_channel]
    Filter : tvm.te.Tensor
        5-D with shape [filter_depth, filter_height, filter_width, in_channel, num_filter]
    stride : int or a list/tuple of three ints
        Stride size, or [stride_depth, stride_height, stride_width]
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    dilation: int or a list/tuple of three ints
        dilation size, or [dilation_depth, dilation_height, dilation_width]
    out_dtype : str
        Accumulation/output dtype (default "float32").
    Returns
    -------
    Output : tvm.te.Tensor
        5-D with shape [batch, out_depth, out_height, out_width, out_channel]
    """
    assert isinstance(stride, int) or len(stride) == 3
    assert isinstance(dilation, int) or len(dilation) == 3
    if isinstance(stride, int):
        stride_d = stride_h = stride_w = stride
    else:
        stride_d, stride_h, stride_w = stride
    if isinstance(dilation, int):
        dilation_d = dilation_h = dilation_w = dilation
    else:
        dilation_d, dilation_h, dilation_w = dilation
    batch, in_depth, in_height, in_width, in_channel = Input.shape
    kernel_d, kernel_h, kernel_w, channel, num_filter = Filter.shape
    # compute the output shape
    # Effective (dilated) kernel extents along each spatial axis.
    dilated_kernel_d = (kernel_d - 1) * dilation_d + 1
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_front, pad_top, pad_left, pad_back, pad_down, pad_right = get_pad_tuple3d(
        padding, (dilated_kernel_d, dilated_kernel_h, dilated_kernel_w)
    )
    out_channel = num_filter
    out_depth = simplify((in_depth - dilated_kernel_d + pad_front + pad_back) // stride_d + 1)
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
    # Channel axis (last) is never padded in NDHWC.
    pad_before = [0, pad_front, pad_top, pad_left, 0]
    pad_after = [0, pad_back, pad_down, pad_right, 0]
    PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
    # Reduction axes: three kernel dimensions and the input channel.
    rd = te.reduce_axis((0, kernel_d), name="rd")
    rh = te.reduce_axis((0, kernel_h), name="rh")
    rw = te.reduce_axis((0, kernel_w), name="rw")
    rc = te.reduce_axis((0, in_channel), name="rc")
    Output = te.compute(
        (batch, out_depth, out_height, out_width, out_channel),
        lambda nn, dd, hh, ww, cc: te.sum(
            PaddedInput[
                nn,
                dd * stride_d + rd * dilation_d,
                hh * stride_h + rh * dilation_h,
                ww * stride_w + rw * dilation_w,
                rc,
            ].astype(out_dtype)
            * Filter[rd, rh, rw, rc, cc].astype(out_dtype),
            axis=[rd, rh, rw, rc],
        ),
        name="Conv3dOutput",
        tag="conv3d_ndhwc",
    )
    return Output
def conv3d_NCDHWc(data, kernel, strides, padding, dilation, layout, out_dtype):
    """Conv3D operator for channel-blocked input/kernel layouts.

    Parameters
    ----------
    data : tvm.te.Tensor
        6-D input. From the shape unpacking below the layout is
        [batch, in_channel_chunk, in_depth, in_height, in_channel_block, in_width]
        — note the channel block sits before the width axis (TODO confirm
        against the producing schedule).
    kernel : tvm.te.Tensor
        7-D kernel with shape [out_channel_chunk, in_channel_chunk,
        kernel_depth, kernel_height, kernel_width, in_channel_block,
        out_channel_block].
    strides : tuple of three ints
        [stride_depth, stride_height, stride_width]
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    dilation : int or a list/tuple of three ints
        dilation size, or [dilation_depth, dilation_height, dilation_width]
    layout : str
        Layout string (currently unused; kept for interface compatibility).
    out_dtype : str or None
        Accumulation/output dtype; defaults to the input dtype.

    Returns
    -------
    conv : tvm.te.Tensor
        6-D output with shape [batch, out_channel_chunk, out_depth,
        out_height, out_width, out_channel_block].
    """
    out_dtype = data.dtype if out_dtype is None else out_dtype
    assert isinstance(dilation, int) or len(dilation) == 3
    if isinstance(dilation, int):
        dilation_d, dilation_h, dilation_w = (dilation, dilation, dilation)
    else:
        dilation_d, dilation_h, dilation_w = dilation
    DSTR, HSTR, WSTR = strides
    batch_size, ic_chunk, in_depth, in_height, ic_bn, in_width = get_const_tuple(data.shape)
    oc_chunk, ic_chunk, kernel_depth, kernel_height, kernel_width, _, oc_bn = get_const_tuple(kernel.shape)
    in_channel = ic_chunk * ic_bn
    num_filter = oc_chunk * oc_bn
    # Effective (dilated) kernel extents along each spatial axis.
    dilated_kernel_d = (kernel_depth - 1) * dilation_d + 1
    dilated_kernel_h = (kernel_height - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_width - 1) * dilation_w + 1
    pad_front, pad_top, pad_left, pad_back, pad_down, pad_right = get_pad_tuple3d(
        padding, (dilated_kernel_d, dilated_kernel_h, dilated_kernel_w)
    )
    pad_d = pad_front + pad_back
    pad_h = pad_top + pad_down
    pad_w = pad_left + pad_right
    out_depth = simplify((in_depth + pad_d - dilated_kernel_d) // DSTR + 1)
    out_height = simplify((in_height + pad_h - dilated_kernel_h) // HSTR + 1)
    out_width = simplify((in_width + pad_w - dilated_kernel_w) // WSTR + 1)
    # Pad only when needed so the unpadded case stays a no-op.
    DOPAD = pad_d != 0 or pad_h != 0 or pad_w != 0
    if DOPAD:
        data_pad = pad(
            data,
            (0, 0, pad_front, pad_top, 0, pad_left),
            (0, 0, pad_back, pad_down, 0, pad_right),
            name="data_pad",
        )
    else:
        data_pad = data
    # convolution
    oshape = (batch_size, num_filter // oc_bn, out_depth, out_height, out_width, oc_bn)
    ic = te.reduce_axis((0, in_channel), name="ic")
    kh = te.reduce_axis((0, kernel_height), name="kh")
    kw = te.reduce_axis((0, kernel_width), name="kw")
    kd = te.reduce_axis((0, kernel_depth), name="kd")
    idxmod = tvm.tir.indexmod
    idxdiv = tvm.tir.indexdiv
    conv = te.compute(
        oshape,
        lambda n, oc_chunk, od, oh, ow, oc_block: te.sum(
            data_pad[
                n,
                idxdiv(ic, ic_bn),
                od * DSTR + kd * dilation_d,
                oh * HSTR + kh * dilation_h,
                idxmod(ic, ic_bn),
                ow * WSTR + kw * dilation_w,
            ].astype(out_dtype)
            * kernel[
                oc_chunk, idxdiv(ic, ic_bn), kd, kh, kw, idxmod(ic, ic_bn), oc_block
            ].astype(out_dtype),
            axis=[ic, kd, kh, kw],
        ),
        name="conv3d_NCDHWc",
        tag="conv3d_NCDHWc",
    )
    return conv
def conv3d_winograd_weight_transform(kernel, tile_size):
    """Weight transformation for 3D winograd
    Parameters
    ----------
    kernel: Tensor
        The raw kernel tensor with layout "NCDHW".
    tile_size: int
        Tile size of winograd transform. e.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)
    Returns
    -------
    output : tvm.te.Tensor
        5-D with shape [alpha, alpha, alpha, CO, CI]
        (or [alpha, alpha, KD, CO, CI] when depth is not transformed)
    """
    CO, CI, KD, KH, KW = get_const_tuple(kernel.shape)
    # Transform the depth axis only for cubic kernels of moderate size;
    # otherwise winograd is applied to height/width only.
    depth_transform = 2 < KD < 8 and KD == KH
    if depth_transform:
        assert KD == KH == KW, "Only support NxNxN kernel"
    else:
        assert KH == KW, "Only supports DxNxN kernel"
    # alpha = tile_size + kernel_size - 1 (the winograd transform size).
    r = tile_size + KH - 1
    r_kh = te.reduce_axis((0, KH), name="r_kh")
    r_kw = te.reduce_axis((0, KW), name="r_kw")
    _, _, G = winograd_transform_matrices(tile_size, KH, kernel.dtype)
    if depth_transform:
        # Full 3D transform: G applied along depth, height and width.
        shape = (r, r, r, CO, CI)
        r_kd = te.reduce_axis((0, KD), name="r_kd")
        return te.compute(
            shape,
            lambda omg, eps, nu, co, ci: te.sum(
                kernel[co][ci][r_kd][r_kh][r_kw] * G[omg][r_kd] * G[eps][r_kh] * G[nu][r_kw],
                axis=[r_kd, r_kh, r_kw],
            ),
            name="transform_weight",
        )
    else:
        # 2D transform per depth slice: depth axis is kept untransformed.
        shape = (r, r, KD, CO, CI)
        return te.compute(
            shape,
            lambda eps, nu, d, co, ci: te.sum(
                kernel[co][ci][d][r_kh][r_kw] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
            ),
            name="transform_weight",
        )
@tvm.target.generic_func
def conv3d_alter_layout(attrs, inputs, tinfos, out_type):
    """Change Conv3D layout.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : tvm.relay.Expr
        Grouped input symbols
    tinfos : list
        Input shape and dtype
    out_type: type
        The output type

    Returns
    -------
    None by default, meaning the layout is left unchanged; targets may
    override this generic function to return a transformed expression.

    Note
    ----
    Unlike other TOPI functions, this function operates on both graph level and operator level.
    """
    # not to change by default
    return None
@tvm.target.generic_func
def conv3d_infer_layout(workload, cfg):
    """Infer input/output layouts for a conv3d workload.

    Generic fallback: every target must register its own implementation,
    so calling the unregistered default always raises.
    """
    raise ValueError("missing register for topi.nn.conv3d_infer_layout")
| 36.041298 | 107 | 0.630381 |
7957b1f0125e538077f428000808be9f38f4bd96 | 12,238 | py | Python | pytorch/pytorchcv/models/squeezenext.py | yick2232/imgclsmob | fb220bff18b27d1fc6db1bac6cf69b70c2d07490 | [
"MIT"
] | 9 | 2019-08-12T13:54:48.000Z | 2020-10-12T22:59:40.000Z | pytorch/pytorchcv/models/squeezenext.py | fireoil/imgclsmob | fb220bff18b27d1fc6db1bac6cf69b70c2d07490 | [
"MIT"
] | null | null | null | pytorch/pytorchcv/models/squeezenext.py | fireoil/imgclsmob | fb220bff18b27d1fc6db1bac6cf69b70c2d07490 | [
"MIT"
] | 2 | 2019-08-29T07:06:28.000Z | 2020-04-20T05:50:51.000Z | """
SqueezeNext for ImageNet-1K, implemented in PyTorch.
Original paper: 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
"""
__all__ = ['SqueezeNext', 'sqnxt23_w1', 'sqnxt23_w3d2', 'sqnxt23_w2', 'sqnxt23v5_w1', 'sqnxt23v5_w3d2', 'sqnxt23v5_w2']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import ConvBlock, conv1x1_block, conv7x7_block
class SqnxtUnit(nn.Module):
    """
    SqueezeNext unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride):
        super(SqnxtUnit, self).__init__()
        # The bottleneck reduction factor depends on whether the unit
        # downsamples and whether it shrinks the channel count; in those
        # cases the identity branch also needs a 1x1 projection.
        if stride == 2:
            reduction_den = 1
            self.resize_identity = True
        elif in_channels > out_channels:
            reduction_den = 4
            self.resize_identity = True
        else:
            reduction_den = 2
            self.resize_identity = False

        mid1_channels = in_channels // reduction_den
        mid2_channels = in_channels // (2 * reduction_den)

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid1_channels,
            stride=stride,
            bias=True)
        self.conv2 = conv1x1_block(
            in_channels=mid1_channels,
            out_channels=mid2_channels,
            bias=True)
        # Separable 3x3 convolution expressed as 1x3 followed by 3x1.
        self.conv3 = ConvBlock(
            in_channels=mid2_channels,
            out_channels=mid1_channels,
            kernel_size=(1, 3),
            stride=1,
            padding=(0, 1),
            bias=True)
        self.conv4 = ConvBlock(
            in_channels=mid1_channels,
            out_channels=mid1_channels,
            kernel_size=(3, 1),
            stride=1,
            padding=(1, 0),
            bias=True)
        self.conv5 = conv1x1_block(
            in_channels=mid1_channels,
            out_channels=out_channels,
            bias=True)

        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                bias=True)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)
        out = self.conv4(out)
        out = self.conv5(out)
        return self.activ(out + identity)
class SqnxtInitBlock(nn.Module):
    """
    SqueezeNext specific initial block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(SqnxtInitBlock, self).__init__()
        # A strided 7x7 convolution followed by strided 3x3 max-pooling
        # gives an overall 4x spatial downsampling.
        self.conv = conv7x7_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2,
            padding=1,
            bias=True)
        self.pool = nn.MaxPool2d(
            kernel_size=3,
            stride=2,
            ceil_mode=True)

    def forward(self, x):
        return self.pool(self.conv(x))
class SqueezeNext(nn.Module):
    """
    SqueezeNext model from 'SqueezeNext: Hardware-Aware Neural Network Design,'
    https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(SqueezeNext, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        features = nn.Sequential()
        features.add_module("init_block", SqnxtInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, out_channels in enumerate(stage_channels):
                # The first unit of every stage except the first downsamples.
                stride = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                stage.add_module("unit{}".format(unit_idx + 1), SqnxtUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride))
                in_channels = out_channels
            features.add_module("stage{}".format(stage_idx + 1), stage)
        features.add_module("final_block", conv1x1_block(
            in_channels=in_channels,
            out_channels=final_block_channels,
            bias=True))
        in_channels = final_block_channels
        features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.features = features

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # He-uniform initialization for every convolution; zero biases.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def get_squeezenext(version,
                    width_scale,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".torch", "models"),
                    **kwargs):
    """
    Create SqueezeNext model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of SqueezeNext ('23' or '23v5').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 64
    final_block_channels = 128
    channels_per_layers = [32, 64, 128, 256]

    if version == '23':
        layers = [6, 6, 8, 1]
    elif version == '23v5':
        layers = [2, 4, 14, 1]
    else:
        # Fixed: the message previously referred to "SqueezeNet", but this
        # factory builds SqueezeNext models.
        raise ValueError("Unsupported SqueezeNext version {}".format(version))

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
        final_block_channels = int(final_block_channels * width_scale)

    net = SqueezeNext(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def sqnxt23_w1(**kwargs):
    """
    1.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,'
    https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_squeezenext(
        version="23",
        width_scale=1.0,
        model_name="sqnxt23_w1",
        **kwargs)
def sqnxt23_w3d2(**kwargs):
    """
    1.5-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,'
    https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_squeezenext(
        version="23",
        width_scale=1.5,
        model_name="sqnxt23_w3d2",
        **kwargs)
def sqnxt23_w2(**kwargs):
    """
    2.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,'
    https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_squeezenext(
        version="23",
        width_scale=2.0,
        model_name="sqnxt23_w2",
        **kwargs)
def sqnxt23v5_w1(**kwargs):
    """
    1.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,'
    https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_squeezenext(
        version="23v5",
        width_scale=1.0,
        model_name="sqnxt23v5_w1",
        **kwargs)
def sqnxt23v5_w3d2(**kwargs):
    """
    1.5-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,'
    https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_squeezenext(
        version="23v5",
        width_scale=1.5,
        model_name="sqnxt23v5_w3d2",
        **kwargs)
def sqnxt23v5_w2(**kwargs):
    """
    2.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,'
    https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_squeezenext(
        version="23v5",
        width_scale=2.0,
        model_name="sqnxt23v5_w2",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every model variant: parameter count, forward and backward."""
    import torch

    pretrained = False

    # Expected trainable-parameter counts per model constructor.
    expected_widths = {
        sqnxt23_w1: 724056,
        sqnxt23_w3d2: 1511824,
        sqnxt23_w2: 2583752,
        sqnxt23v5_w1: 921816,
        sqnxt23v5_w3d2: 1953616,
        sqnxt23v5_w2: 3366344,
    }

    for model, expected_weight_count in expected_widths.items():
        net = model(pretrained=pretrained)

        # net.eval()
        net.train()

        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_weight_count

        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)


if __name__ == "__main__":
    _test()
| 31.541237 | 119 | 0.600098 |
7957b1f65974fdcac6c4944f7a143ef19f96950a | 251 | py | Python | anchore_engine/vendored/docker_registry_client/docker_registry_client/__init__.py | Talanor/anchore-engine | 5e809db1eb681f89670655c5bf9933eba50cf403 | [
"Apache-2.0"
] | null | null | null | anchore_engine/vendored/docker_registry_client/docker_registry_client/__init__.py | Talanor/anchore-engine | 5e809db1eb681f89670655c5bf9933eba50cf403 | [
"Apache-2.0"
] | null | null | null | anchore_engine/vendored/docker_registry_client/docker_registry_client/__init__.py | Talanor/anchore-engine | 5e809db1eb681f89670655c5bf9933eba50cf403 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
from __future__ import absolute_import
from .DockerRegistryClient import (DockerRegistryClient,
BaseClient,
Repository)
| 31.375 | 68 | 0.458167 |
7957b25926a87ed196ea3becfeba50d163ec43d7 | 29,148 | py | Python | tests-trio/p2p-trio/test_discovery.py | Lilith1410/trinity | df92df488c6ade7a168483075c1d4f76843d3090 | [
"MIT"
] | null | null | null | tests-trio/p2p-trio/test_discovery.py | Lilith1410/trinity | df92df488c6ade7a168483075c1d4f76843d3090 | [
"MIT"
] | null | null | null | tests-trio/p2p-trio/test_discovery.py | Lilith1410/trinity | df92df488c6ade7a168483075c1d4f76843d3090 | [
"MIT"
] | null | null | null | import copy
import functools
import random
import re
import time
import trio
import pytest
import rlp
from eth_utils import decode_hex, int_to_big_endian
from eth_hash.auto import keccak
from eth_keys import keys
from eth.db.backends.memory import MemoryDB
from p2p import constants
from p2p.constants import IP_V4_ADDRESS_ENR_KEY, UDP_PORT_ENR_KEY, TCP_PORT_ENR_KEY
from p2p.enr import UnsignedENR, IDENTITY_SCHEME_ENR_KEY
from p2p.node_db import NodeDB
from p2p.identity_schemes import default_identity_scheme_registry, V4IdentityScheme
from p2p.discovery import (
CMD_FIND_NODE,
CMD_NEIGHBOURS,
CMD_PING,
CMD_PONG,
DiscoveryService,
PROTO_VERSION,
_get_msg_expiration,
_extract_nodes_from_payload,
_pack_v4,
_unpack_v4,
)
from p2p.kademlia import Node
from p2p.tools.factories import (
AddressFactory,
ENRFactory,
NodeFactory,
PrivateKeyFactory,
)
from trinity.components.builtin.upnp.events import NewUPnPMapping
# Force our tests to fail quickly if they accidentally get stuck waiting for a response that will
# never come. Notice that we can't use a too low value here because this constant is also used
# when waiting for things like waiting for a msg handler coroutine to forward the received msg
# over the appropriate channel (e.g. when request_enr() waits for the ENR to be sent over the
# enr_response_channels).
@pytest.fixture(autouse=True)
def short_timeout(monkeypatch):
    """Patch the kademlia request timeout to 0.2s so waiting tests fail fast."""
    monkeypatch.setattr(constants, 'KADEMLIA_REQUEST_TIMEOUT', 0.2)
@pytest.mark.trio
async def test_ping_pong(manually_driven_discovery_pair):
    """Alice pings Bob and receives exactly one pong carrying her ping's token."""
    alice, bob = manually_driven_discovery_pair
    # Collect all pongs received by alice in a list for later inspection.
    got_pong = trio.Event()
    received_pongs = []

    async def recv_pong(node, payload, hash_):
        received_pongs.append((node, payload))
        got_pong.set()

    alice.recv_pong_v4 = recv_pong

    token = await alice.send_ping_v4(bob.this_node)

    with trio.fail_after(1):
        await bob.consume_datagram()
        await alice.consume_datagram()
        await got_pong.wait()

    assert len(received_pongs) == 1
    node, payload = received_pongs[0]
    assert node.id == bob.this_node.id
    assert token == payload[1]
def validate_node_enr(node, enr, sequence_number, extra_fields=tuple()):
    """Assert that *enr* is a validly signed v4 ENR matching *node*'s details."""
    assert enr is not None
    enr.validate_signature()
    assert enr.sequence_number == sequence_number
    assert enr.identity_scheme == V4IdentityScheme
    assert enr.public_key == node.pubkey.to_compressed_bytes()
    # Endpoint information must round-trip through the ENR key/value pairs.
    for enr_key, expected_value in (
            (IP_V4_ADDRESS_ENR_KEY, node.address.ip_packed),
            (UDP_PORT_ENR_KEY, node.address.udp_port),
            (TCP_PORT_ENR_KEY, node.address.tcp_port)):
        assert enr[enr_key] == expected_value
    all_enr_items = enr.items()
    for extra_pair in extra_fields:
        assert extra_pair in all_enr_items
@pytest.mark.trio
async def test_get_local_enr(manually_driven_discovery):
    """The local ENR is cached until the refresh deadline, then re-generated."""
    discovery = manually_driven_discovery

    enr = await discovery.get_local_enr()

    validate_node_enr(discovery.this_node, enr, sequence_number=1)

    old_node = copy.copy(discovery.this_node)
    # If our node's details change but an ENR refresh is not due yet, we'll get the ENR for the
    # old node.
    discovery.this_node.address.udp_port += 1
    assert discovery._local_enr_next_refresh > time.monotonic()
    enr = await discovery.get_local_enr()
    validate_node_enr(old_node, enr, sequence_number=1)

    # If a local ENR refresh is due, get_local_enr() will create a fresh ENR with a new sequence
    # number.
    discovery._local_enr_next_refresh = time.monotonic() - 1
    enr = await discovery.get_local_enr()
    validate_node_enr(discovery.this_node, enr, sequence_number=2)

    # The new ENR will also be stored in our DB.
    our_node = Node(discovery.node_db.get_enr(discovery.this_node.id))
    assert enr == our_node.enr

    # And the next refresh time will be updated.
    assert discovery._local_enr_next_refresh > time.monotonic()
@pytest.mark.trio
async def test_local_enr_on_startup(manually_driven_discovery):
    """A valid local ENR (sequence number 1) exists and is persisted at startup."""
    discovery = manually_driven_discovery

    validate_node_enr(discovery.this_node, discovery.this_node.enr, sequence_number=1)

    # Our local ENR will also be stored in our DB.
    our_enr = discovery.node_db.get_enr(discovery.this_node.id)
    assert discovery.this_node.enr == our_enr
@pytest.mark.trio
async def test_local_enr_fields(manually_driven_discovery):
    """Extra enr_field_providers are included when the local ENR is refreshed."""
    discovery = manually_driven_discovery

    async def test_field_provider(key, value):
        return (key, value)

    expected_fields = [(b'key1', b'value1'), (b'key2', b'value2')]
    field_providers = tuple(
        functools.partial(test_field_provider, key, value)
        for key, value in expected_fields
    )
    discovery.enr_field_providers = field_providers
    # Force a refresh of our local ENR.
    discovery._local_enr_next_refresh = time.monotonic() - 1
    enr = await discovery.get_local_enr()

    validate_node_enr(discovery.this_node, enr, sequence_number=2, extra_fields=expected_fields)
@pytest.mark.trio
async def test_request_enr(nursery, manually_driven_discovery_pair):
    """Requesting an ENR updates the stub entry; an endpoint-less ENR evicts the peer."""
    alice, bob = manually_driven_discovery_pair
    # Pretend that bob and alice have already bonded, otherwise bob will ignore alice's ENR
    # request.
    bob.node_db.set_last_pong_time(alice.this_node.id, int(time.monotonic()))

    # Add a copy of Bob's node with a stub ENR to alice's RT as later we're going to check that it
    # gets updated with the received ENR.
    bobs_node_with_stub_enr = Node.from_pubkey_and_addr(
        bob.this_node.pubkey, bob.this_node.address)
    alice.node_db.set_last_pong_time(bob.this_node.id, int(time.monotonic()))
    alice.node_db.delete_enr(bobs_node_with_stub_enr.id)
    alice.node_db.set_enr(bobs_node_with_stub_enr.enr)
    assert alice.node_db.get_enr(bobs_node_with_stub_enr.id).sequence_number == 0

    received_enr = None
    got_enr = trio.Event()

    async def fetch_enr(event):
        nonlocal received_enr
        received_enr = await alice.request_enr(bobs_node_with_stub_enr)
        event.set()

    # Start a task in the background that requests an ENR to bob and then waits for it.
    nursery.start_soon(fetch_enr, got_enr)

    # Bob will now consume one datagram containing the ENR_REQUEST from alice, and as part of that
    # will send an ENR_RESPONSE, which will then be consumed by alice, and as part of that it will
    # be fed into the request_enr() task we're running the background.
    with trio.fail_after(0.1):
        await bob.consume_datagram()
        await alice.consume_datagram()

    with trio.fail_after(1):
        await got_enr.wait()

    validate_node_enr(bob.this_node, received_enr, sequence_number=1)
    assert alice.node_db.get_enr(bob.this_node.id) == received_enr

    # Now, if Bob later sends us a new ENR with no endpoint information, we'll evict him from both
    # our DB and RT.
    sequence_number = bob.this_node.enr.sequence_number + 1
    new_unsigned_enr = UnsignedENR(
        sequence_number,
        kv_pairs={
            IDENTITY_SCHEME_ENR_KEY: V4IdentityScheme.id,
            V4IdentityScheme.public_key_enr_key: bob.pubkey.to_compressed_bytes(),
        }
    )
    bob.this_node = Node(new_unsigned_enr.to_signed_enr(bob.privkey.to_bytes()))

    received_enr = None
    got_new_enr = trio.Event()
    nursery.start_soon(fetch_enr, got_new_enr)
    with trio.fail_after(0.1):
        await bob.consume_datagram()
        await alice.consume_datagram()

    with trio.fail_after(1):
        await got_new_enr.wait()

    assert Node(received_enr).address is None
    assert not alice.routing._contains(bob.this_node.id, include_replacement_cache=True)
    with pytest.raises(KeyError):
        alice.node_db.get_enr(bob.this_node.id)
@pytest.mark.trio
async def test_find_node_neighbours(manually_driven_discovery_pair):
    """A FIND_NODE to a full routing table yields a NEIGHBOURS reply split in two packets."""
    alice, bob = manually_driven_discovery_pair
    nodes_in_rt = 0
    # Ensure we have plenty of nodes in our RT's buckets so that the NEIGHBOURS response sent by
    # bob is split into multiple messages.
    while nodes_in_rt < (constants.KADEMLIA_BUCKET_SIZE * 2):
        node = NodeFactory()
        eviction_candidate = bob.routing.update(node.id)
        if eviction_candidate is not None:
            continue
        nodes_in_rt += 1
        bob.node_db.set_enr(node.enr)

    # Collect all neighbours packets received by alice in a list for later inspection.
    received_neighbours = []

    async def recv_neighbours(node, payload, hash_):
        received_neighbours.append((node, payload))

    alice.recv_neighbours_v4 = recv_neighbours
    # Pretend that bob and alice have already bonded, otherwise bob will ignore alice's find_node.
    bob.node_db.set_last_pong_time(alice.this_node.id, int(time.monotonic()))

    alice.send_find_node_v4(bob.this_node, alice.pubkey.to_bytes())

    with trio.fail_after(1):
        await bob.consume_datagram()
        # Alice needs to consume two datagrams here because we expect bob's response to be split
        # across two packets since a single one would be bigger than protocol's byte limit.
        await alice.consume_datagram()
        await alice.consume_datagram()

    # Bob should have sent two neighbours packets in order to keep the total packet size
    # under the 1280 bytes limit. However, the two consume_datagram() calls above will have
    # spawned background tasks so we take a few short naps here to wait for them to complete.
    while len(received_neighbours) != 2:
        await trio.sleep(0.01)
    packet1, packet2 = received_neighbours
    neighbours = []
    for packet in [packet1, packet2]:
        node, payload = packet
        assert node == bob.this_node
        neighbours.extend(_extract_nodes_from_payload(
            node.address, payload[0], bob.logger))
    assert len(neighbours) == constants.KADEMLIA_BUCKET_SIZE
@pytest.mark.trio
async def test_get_peer_candidates(manually_driven_discovery, monkeypatch):
    """Candidate selection honours the skip predicate and triggers random lookups when short."""
    total_nodes = 10
    nodes = NodeFactory.create_batch(total_nodes)
    discovery = manually_driven_discovery
    for node in nodes:
        discovery.node_db.set_enr(node.enr)
        assert discovery.routing.update(node.id) is None

    discovery._random_lookup_calls = 0

    async def mock_lookup_random():
        discovery._random_lookup_calls += 1

    monkeypatch.setattr(discovery, 'lookup_random', mock_lookup_random)

    def should_skip(skip_list, candidate):
        return candidate in skip_list

    candidates = discovery.get_peer_candidates(functools.partial(should_skip, tuple()), total_nodes)
    assert sorted(candidates) == sorted(nodes)

    candidates = discovery.get_peer_candidates(
        functools.partial(should_skip, tuple()), total_nodes + 10)
    assert sorted(candidates) == sorted(nodes)
    # When we don't have enough candidates, a random lookup should be triggered.
    with trio.fail_after(0.5):
        while discovery._random_lookup_calls != 1:
            await trio.sleep(0.01)

    candidates = discovery.get_peer_candidates(
        functools.partial(should_skip, tuple()), total_nodes - 1)
    assert len(candidates) == total_nodes - 1

    skip_list = (nodes[0], nodes[5], nodes[8])
    candidates = discovery.get_peer_candidates(
        functools.partial(should_skip, skip_list), total_nodes)
    assert sorted(candidates) == sorted(set(nodes).difference(skip_list))
    with trio.fail_after(0.5):
        while discovery._random_lookup_calls != 2:
            await trio.sleep(0.01)
@pytest.mark.trio
async def test_handle_new_upnp_mapping(manually_driven_discovery, endpoint_server):
    """A NewUPnPMapping event updates our external IP and bumps the ENR sequence."""
    manually_driven_discovery._event_bus = endpoint_server
    manually_driven_discovery.manager.run_daemon_task(
        manually_driven_discovery.handle_new_upnp_mapping)
    assert manually_driven_discovery.this_node.address.ip == '127.0.0.1'
    assert manually_driven_discovery.this_node.enr.sequence_number == 1

    await trio.hazmat.checkpoint()
    external_ip = '43.248.27.0'
    await endpoint_server.broadcast(NewUPnPMapping(external_ip))

    with trio.fail_after(0.5):
        while True:
            await trio.sleep(0.01)
            if manually_driven_discovery.this_node.address.ip == external_ip:
                break

    assert manually_driven_discovery.this_node.enr.sequence_number == 2
@pytest.mark.trio
async def test_protocol_bootstrap(monkeypatch):
    """Bootstrap invalidates old bonds and sends a find_node to each bootstrap node."""
    node1, node2 = NodeFactory.create_batch(2)
    discovery = MockDiscoveryService([node1, node2])
    invalidated_bonds = []

    def invalidate_bond(node_id):
        invalidated_bonds.append(node_id)

    async def bond(node_id):
        assert discovery.routing.update(node_id) is None
        return True

    monkeypatch.setattr(discovery, 'invalidate_bond', invalidate_bond)
    # Pretend we bonded successfully with our bootstrap nodes.
    monkeypatch.setattr(discovery, 'bond', bond)

    await discovery.bootstrap()

    assert sorted(invalidated_bonds) == sorted([node.id for node in [node1, node2]])
    assert len(discovery.messages) == 2
    # We don't care in which order the bootstrap nodes are contacted, nor which node_id was used
    # in the find_node request, so we just assert that we sent find_node msgs to both nodes.
    assert sorted([(node, cmd) for (node, cmd, _) in discovery.messages]) == sorted([
        (node1, 'find_node'),
        (node2, 'find_node')])
@pytest.mark.trio
async def test_wait_neighbours(nursery):
    """wait_neighbours() returns the received nodes, or an empty tuple on timeout."""
    service = MockDiscoveryService([])
    node = NodeFactory()

    # Schedule a call to service.recv_neighbours_v4() simulating a neighbours response from the
    # node we expect.
    neighbours = tuple(NodeFactory.create_batch(3))
    expiration = _get_msg_expiration()
    neighbours_msg_payload = [
        [n.address.to_endpoint() + [n.pubkey.to_bytes()] for n in neighbours],
        expiration]
    nursery.start_soon(service.recv_neighbours_v4, node, neighbours_msg_payload, b'')

    received_neighbours = await service.wait_neighbours(node)

    assert neighbours == received_neighbours
    # Ensure wait_neighbours() cleaned up after itself.
    assert not service.neighbours_channels.already_waiting_for(node)

    # If wait_neighbours() times out, we get an empty list of neighbours.
    received_neighbours = await service.wait_neighbours(node)

    assert received_neighbours == tuple()
    assert not service.neighbours_channels.already_waiting_for(node)
@pytest.mark.trio
async def test_bond(nursery, monkeypatch):
    """A matching pong completes the bond and schedules retrieval of the peer's ENR."""
    discovery = MockDiscoveryService([])
    us = discovery.this_node
    node = NodeFactory()
    discovery.node_db.set_enr(node.enr)

    token = b'token'

    async def send_ping(node):
        return token

    # Do not send pings, instead simply return the pingid we'd expect back together with the pong.
    monkeypatch.setattr(discovery, 'send_ping_v4', send_ping)

    # Schedule a call to service.recv_pong() simulating a pong from the node we expect.
    enr_seq = 1
    pong_msg_payload = [
        us.address.to_endpoint(), token, _get_msg_expiration(), int_to_big_endian(enr_seq)]
    nursery.start_soon(discovery.recv_pong_v4, node, pong_msg_payload, b'')

    bonded = await discovery.bond(node.id)

    assert bonded
    assert discovery.is_bond_valid_with(node.id)

    # Upon successfully bonding, retrieval of the remote's ENR will be scheduled.
    with trio.fail_after(1):
        scheduled_enr_node_id, scheduled_enr_seq = await discovery.pending_enrs_consumer.receive()
    assert scheduled_enr_node_id == node.id
    assert scheduled_enr_seq == enr_seq

    # If we try to bond with any other nodes we'll timeout and bond() will return False.
    node2 = NodeFactory()
    discovery.node_db.set_enr(node2.enr)
    bonded = await discovery.bond(node2.id)

    assert not bonded
@pytest.mark.trio
async def test_bond_short_circuits(monkeypatch):
    """bond() returns True immediately when a valid bond already exists (no new ping)."""
    discovery = MockDiscoveryService([])
    bob = NodeFactory()
    discovery.node_db.set_enr(bob.enr)
    # Pretend we have a valid bond with bob.
    discovery.node_db.set_last_pong_time(bob.id, int(time.monotonic()))

    class AttemptedNewBond(Exception):
        pass

    async def send_ping(node):
        raise AttemptedNewBond()

    monkeypatch.setattr(discovery, 'send_ping_v4', send_ping)

    # When we have a valid bond, we won't attempt a new one.
    assert discovery.is_bond_valid_with(bob.id)
    assert await discovery.bond(bob.id)
@pytest.mark.trio
async def test_fetch_enrs(nursery, manually_driven_discovery_pair):
    """fetch_enrs() consumes scheduled node IDs, requests their ENRs and stores them."""
    alice, bob = manually_driven_discovery_pair

    # Pretend that bob and alice have already bonded, otherwise bob will ignore alice's ENR
    # request.
    alice.node_db.set_last_pong_time(bob.this_node.id, int(time.monotonic()))
    bob.node_db.set_last_pong_time(alice.this_node.id, int(time.monotonic()))

    # Also add bob's node to alice's DB as when scheduling an ENR retrieval we only get the node ID
    # and need to look it up in the DB.
    alice.node_db.set_enr(bob.this_node.enr)

    # This task will run in a loop consuming from the pending_enrs_consumer channel and requesting
    # ENRs.
    alice.manager.run_task(alice.fetch_enrs)

    with trio.fail_after(1):
        # Generate a new ENR for bob, because the old one alice already got when we manually added
        # bob's node to her DB above.
        bobs_new_enr = await bob._generate_local_enr(bob.this_node.enr.sequence_number + 1)
        bob.this_node = Node(bobs_new_enr)
        # This feeds a request to retrieve Bob's ENR to fetch_enrs(), which spawns a background
        # task to do it.
        await alice.pending_enrs_producer.send((bob.this_node.id, bobs_new_enr.sequence_number))
        # bob consumes the ENR_REQUEST and replies with its own ENR
        await bob.consume_datagram()
        # alice consumes the ENR_RESPONSE, feeding the ENR to the background task started above.
        await alice.consume_datagram()
        # Now we need to wait a little bit here for that background task to pick it up and store
        # it in our DB.
        while True:
            await trio.sleep(0.1)
            try:
                bob_enr = alice.node_db.get_enr(bob.this_node.id)
            except KeyError:
                continue
            else:
                break

    assert bob_enr is not None
    assert bob_enr == await bob.get_local_enr()
@pytest.mark.trio
async def test_lookup_and_maybe_update_enr_new_node():
    """An unknown pubkey/address pair yields a stub ENR (seq 0) stored in the DB."""
    discovery = MockDiscoveryService([])
    privkey = PrivateKeyFactory()
    address = AddressFactory()

    # When looking up the ENR for a node we haven't heard about before, we'll create a stub ENR
    # and add that into our DB.
    enr = discovery.lookup_and_maybe_update_enr(privkey.public_key, address)
    assert enr.sequence_number == 0
    node = Node(enr)
    assert node.pubkey == privkey.public_key
    assert node.address == address
    db_enr = discovery.node_db.get_enr(node.id)
    assert db_enr == enr
@pytest.mark.trio
async def test_lookup_and_maybe_update_enr_existing_node():
    """A known pubkey with a matching address returns the stored ENR unchanged."""
    discovery = MockDiscoveryService([])
    privkey = PrivateKeyFactory()
    address = AddressFactory()
    # When we have an ENR for the given pubkey, and its address matches the given one, the
    # existing ENR is returned.
    enr = ENRFactory(private_key=privkey.to_bytes(), address=address)
    discovery.node_db.set_enr(enr)
    lookedup_enr = discovery.lookup_and_maybe_update_enr(privkey.public_key, address)
    assert lookedup_enr == enr
@pytest.mark.trio
async def test_lookup_and_maybe_update_enr_existing_node_different_address():
    """A known pubkey seen from a new address gets a fresh stub ENR replacing the old one."""
    discovery = MockDiscoveryService([])
    privkey = PrivateKeyFactory()
    address = AddressFactory()
    # If the address given is different than the one we have in our DB, though, a stub ENR would
    # be created and stored in our DB, replacing the existing one.
    enr = ENRFactory(private_key=privkey.to_bytes(), address=address)
    discovery.node_db.set_enr(enr)
    new_address = AddressFactory()
    lookedup_enr = discovery.lookup_and_maybe_update_enr(privkey.public_key, new_address)
    assert lookedup_enr != enr
    assert lookedup_enr.public_key == enr.public_key
    assert lookedup_enr.sequence_number == 0
    assert Node(lookedup_enr).address == new_address
    assert lookedup_enr == discovery.node_db.get_enr(enr.node_id)
@pytest.mark.trio
async def test_update_routing_table():
    """update_routing_table() inserts a fresh node into the routing table."""
    discovery = MockDiscoveryService([])
    node = NodeFactory()

    discovery.update_routing_table(node)

    assert discovery.routing._contains(node.id, include_replacement_cache=False)
@pytest.mark.trio
async def test_update_routing_table_triggers_bond_if_eviction_candidate(
        manually_driven_discovery, monkeypatch):
    """When insertion yields an eviction candidate, a bond with that node is scheduled."""
    discovery = manually_driven_discovery
    old_node, new_node = NodeFactory.create_batch(2)

    bond_called = False

    async def bond(node_id):
        nonlocal bond_called
        bond_called = True
        assert node_id == old_node.id

    monkeypatch.setattr(discovery, 'bond', bond)
    # Pretend our routing table failed to add the new node by returning the least recently seen
    # node for an eviction check.
    monkeypatch.setattr(discovery.routing, 'update', lambda n: old_node.id)

    discovery.update_routing_table(new_node)

    assert not discovery.routing._contains(new_node.id, include_replacement_cache=False)
    # The update_routing_table() call above will have scheduled a future call to discovery.bond() so
    # we need to wait a bit here to give it a chance to run.
    with trio.fail_after(0.5):
        while not bond_called:
            await trio.sleep(0.001)
@pytest.mark.trio
async def test_get_max_neighbours_per_packet(nursery):
    """Pin the computed neighbours-per-packet limit to its known value."""
    # This test is just a safeguard against changes that inadvertently modify the behaviour of
    # _get_max_neighbours_per_packet().
    assert DiscoveryService._get_max_neighbours_per_packet() == 12
def test_discover_v4_message_pack():
    """A packed v4 ping must unpack back to the signer's public key and command id."""
    sender, recipient = AddressFactory.create_batch(2)
    proto_version = rlp.sedes.big_endian_int.serialize(PROTO_VERSION)
    ping_payload = (proto_version, sender.to_endpoint(), recipient.to_endpoint())
    signing_key = PrivateKeyFactory()

    packed = _pack_v4(CMD_PING.id, ping_payload, signing_key)

    recovered_pubkey, recovered_cmd_id, _, _ = _unpack_v4(packed)
    assert recovered_pubkey == signing_key.public_key
    assert recovered_cmd_id == CMD_PING.id
def test_unpack_eip8_packets():
    """_unpack_v4() must handle the sample packets specified in EIP-8.

    Samples taken from
    https://github.com/ethereum/EIPs/blob/master/EIPS/eip-8.md
    """
    expected_pubkey = (
        '0xca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138'
        '7574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f')
    for cmd, samples in eip8_packets.items():
        for packet in samples.values():
            pubkey, cmd_id, _, _ = _unpack_v4(packet)
            assert pubkey.to_hex() == expected_pubkey
            assert cmd_id == cmd.id
@pytest.mark.trio
async def test_bootstrap_nodes():
    """bootstrap_nodes always reflects the freshest ENRs stored in the node DB."""
    key_bytes = PrivateKeyFactory().to_bytes()
    enr_a = ENRFactory(private_key=key_bytes)
    enr_b = ENRFactory()
    service = MockDiscoveryService([Node(enr_a), Node(enr_b)])

    assert service.node_db.get_enr(enr_a.node_id) == enr_a
    assert service.node_db.get_enr(enr_b.node_id) == enr_b
    assert [node.enr for node in service.bootstrap_nodes] == [enr_a, enr_b]

    # Storing a newer ENR for one of our bootnodes must be reflected by the
    # @bootstrap_nodes property.
    fresher_enr_a = ENRFactory(
        private_key=key_bytes, sequence_number=enr_a.sequence_number + 1)
    service.node_db.set_enr(fresher_enr_a)
    assert [node.enr for node in service.bootstrap_nodes] == [fresher_enr_a, enr_b]
class MockDiscoveryService(DiscoveryService):
    """A DiscoveryService that records outgoing messages instead of sending
    them over the wire.

    Every would-be outgoing message is appended to ``self.messages``.

    Do not attempt to run it as a service (e.g. with TrioManager.run_service()),
    because that won't work.
    """

    def __init__(self, bootnodes):
        private_key = keys.PrivateKey(keccak(b"seed"))
        self.messages = []
        db = NodeDB(default_identity_scheme_registry, MemoryDB())
        udp_socket = trio.socket.socket(
            family=trio.socket.AF_INET, type=trio.socket.SOCK_DGRAM)
        listen_address = AddressFactory()
        # event_bus is None: nothing in these tests consumes bus events.
        super().__init__(private_key, listen_address.udp_port,
                         listen_address.tcp_port, bootnodes, None, udp_socket,
                         db)

    def send(self, node, msg_type, payload):
        # Fail loudly: this mock must never hit the network.
        raise ValueError("MockDiscoveryService must not be used to send network messages")

    async def send_ping_v4(self, node):
        echo = hex(random.randint(0, 2**256))[-32:]
        self.messages.append((node, 'ping', echo))
        return echo

    async def send_pong_v4(self, node, echo):
        self.messages.append((node, 'pong', echo))

    def send_find_node_v4(self, node, nodeid):
        self.messages.append((node, 'find_node', nodeid))

    def send_neighbours_v4(self, node, neighbours):
        self.messages.append((node, 'neighbours', neighbours))
def remove_whitespace(s):
    """Return *s* with every whitespace character removed."""
    return re.sub(r"\s", "", s)
eip8_packets = {
CMD_PING: dict(
# ping packet with version 4, additional list elements
ping1=decode_hex(remove_whitespace("""
e9614ccfd9fc3e74360018522d30e1419a143407ffcce748de3e22116b7e8dc92ff74788c0b6663a
aa3d67d641936511c8f8d6ad8698b820a7cf9e1be7155e9a241f556658c55428ec0563514365799a
4be2be5a685a80971ddcfa80cb422cdd0101ec04cb847f000001820cfa8215a8d790000000000000
000000000000000000018208ae820d058443b9a3550102""")),
# ping packet with version 555, additional list elements and additional random data
ping2=decode_hex(remove_whitespace("""
577be4349c4dd26768081f58de4c6f375a7a22f3f7adda654d1428637412c3d7fe917cadc56d4e5e
7ffae1dbe3efffb9849feb71b262de37977e7c7a44e677295680e9e38ab26bee2fcbae207fba3ff3
d74069a50b902a82c9903ed37cc993c50001f83e82022bd79020010db83c4d001500000000abcdef
12820cfa8215a8d79020010db885a308d313198a2e037073488208ae82823a8443b9a355c5010203
040531b9019afde696e582a78fa8d95ea13ce3297d4afb8ba6433e4154caa5ac6431af1b80ba7602
3fa4090c408f6b4bc3701562c031041d4702971d102c9ab7fa5eed4cd6bab8f7af956f7d565ee191
7084a95398b6a21eac920fe3dd1345ec0a7ef39367ee69ddf092cbfe5b93e5e568ebc491983c09c7
6d922dc3""")),
),
CMD_PONG: dict(
# pong packet with additional list elements and additional random data
pong=decode_hex(remove_whitespace("""
09b2428d83348d27cdf7064ad9024f526cebc19e4958f0fdad87c15eb598dd61d08423e0bf66b206
9869e1724125f820d851c136684082774f870e614d95a2855d000f05d1648b2d5945470bc187c2d2
216fbe870f43ed0909009882e176a46b0102f846d79020010db885a308d313198a2e037073488208
ae82823aa0fbc914b16819237dcd8801d7e53f69e9719adecb3cc0e790c57e91ca4461c9548443b9
a355c6010203c2040506a0c969a58f6f9095004c0177a6b47f451530cab38966a25cca5cb58f0555
42124e""")),
),
CMD_FIND_NODE: dict(
# findnode packet with additional list elements and additional random data
findnode=decode_hex(remove_whitespace("""
c7c44041b9f7c7e41934417ebac9a8e1a4c6298f74553f2fcfdcae6ed6fe53163eb3d2b52e39fe91
831b8a927bf4fc222c3902202027e5e9eb812195f95d20061ef5cd31d502e47ecb61183f74a504fe
04c51e73df81f25c4d506b26db4517490103f84eb840ca634cae0d49acb401d8a4c6b6fe8c55b70d
115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be0081290476
7bf5ccd1fc7f8443b9a35582999983999999280dc62cc8255c73471e0a61da0c89acdc0e035e260a
dd7fc0c04ad9ebf3919644c91cb247affc82b69bd2ca235c71eab8e49737c937a2c396""")),
),
CMD_NEIGHBOURS: dict(
# neighbours packet with additional list elements and additional random data
neighbours=decode_hex(remove_whitespace("""
c679fc8fe0b8b12f06577f2e802d34f6fa257e6137a995f6f4cbfc9ee50ed3710faf6e66f932c4c8
d81d64343f429651328758b47d3dbc02c4042f0fff6946a50f4a49037a72bb550f3a7872363a83e1
b9ee6469856c24eb4ef80b7535bcf99c0004f9015bf90150f84d846321163782115c82115db84031
55e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa8291
15d224c523596b401065a97f74010610fce76382c0bf32f84984010203040101b840312c55512422
cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e82
9f04c2d314fc2d4e255e0d3bc08792b069dbf8599020010db83c4d001500000000abcdef12820d05
820d05b84038643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2
d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aacf8599020010db885a308d3
13198a2e037073488203e78203e8b8408dcab8618c3253b558d459da53bd8fa68935a719aff8b811
197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73
8443b9a355010203b525a138aa34383fec3d2719a0""")),
),
}
| 39.549525 | 184 | 0.741595 |
7957b27caf9fc7db4169eff2ea12e06b0fe27b02 | 32 | py | Python | teensy/memzip_files/src/test.py | lurch/micropython | 28dfbc2ba2ef41a7810e4e39290031eb2207a0a9 | [
"MIT"
] | 1 | 2015-06-15T11:52:01.000Z | 2015-06-15T11:52:01.000Z | teensy/memzip_files/src/test.py | lurch/micropython | 28dfbc2ba2ef41a7810e4e39290031eb2207a0a9 | [
"MIT"
] | null | null | null | teensy/memzip_files/src/test.py | lurch/micropython | 28dfbc2ba2ef41a7810e4e39290031eb2207a0a9 | [
"MIT"
] | null | null | null | print("Executing /src/test.py")
| 16 | 31 | 0.71875 |
7957b28720e87b51480cd6174189fba08eedec00 | 648 | py | Python | Methods/Machine/Machine/comp_width_airgap_mag.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
] | 2 | 2020-06-29T13:48:37.000Z | 2021-06-15T07:34:05.000Z | Methods/Machine/Machine/comp_width_airgap_mag.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
] | null | null | null | Methods/Machine/Machine/comp_width_airgap_mag.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""@package Methods.Machine.Machine.comp_width_airgap_mag
Compute the magnetic airgap of the machine method
@date Created on Thu Jan 22 16:25:34 2015
@copyright (C) 2015-2016 EOMYS ENGINEERING.
@author pierre_b
"""
def comp_width_airgap_mag(self):
    """Compute the magnetic airgap, i.e. the radial distance between
    the two laminations.

    Parameters
    ----------
    self: Machine
        A Machine object

    Returns
    -------
    mag_gap: float
        The magnetic airgap [m]
    """
    # The gap is the inner radius of the outer lamination minus the outer
    # radius of the inner lamination; which lamination is "inner" depends on
    # the machine topology.
    if self.rotor.is_internal:
        inner, outer = self.rotor, self.stator
    else:
        inner, outer = self.stator, self.rotor
    return outer.Rint - inner.Rext
| 23.142857 | 73 | 0.660494 |
7957b2bdb2d4ed90d944fce7c86e83c07fb9b5c3 | 2,099 | py | Python | src/WindowSorting/WindowFocuser.py | AlexandrePetrassi/awso-me | e174eb478d6f6359d3ad866af2008791b9c73d0c | [
"MIT"
] | null | null | null | src/WindowSorting/WindowFocuser.py | AlexandrePetrassi/awso-me | e174eb478d6f6359d3ad866af2008791b9c73d0c | [
"MIT"
] | null | null | null | src/WindowSorting/WindowFocuser.py | AlexandrePetrassi/awso-me | e174eb478d6f6359d3ad866af2008791b9c73d0c | [
"MIT"
] | null | null | null | """Gives focus to windows"""
import pywintypes
import win32api
import win32gui
import win32process
from win32con import HWND_TOPMOST, HWND_NOTOPMOST
from win32con import SWP_SHOWWINDOW, SWP_NOSIZE, SWP_NOMOVE
ORIGIN = (0, 0, 0, 0)
FLAG_NO_SHOW = SWP_NOSIZE | SWP_NOMOVE
FLAG_SHOW = SWP_SHOWWINDOW | SWP_NOSIZE | SWP_NOMOVE
def get_current_thread_id():
"""
Returns the id from the current thread this program is running on
:return: The programs current thread id
"""
return win32api.GetCurrentThreadId()
def get_top_window_thread_id():
"""
Returns the id from the top window's thread
:return: the id from the top window's thread
"""
current_window = win32gui.GetForegroundWindow()
window_id, _ = win32process.GetWindowThreadProcessId(current_window)
return window_id
def set_window_focus(handle):
"""
Focuses a window. If its not possible some other attempts will happen.
If all attempts fail nothing happens.
:param handle: The handle for a window which will be focused
"""
try_set_focus(handle, get_top_window_thread_id(), get_current_thread_id())
def try_set_focus(handle, top_window_id, current_id, retries=3):
"""
Try to focus a window, while retrying if it fails. If wall attempts are
unsuccessful simply does nothing.
:param handle: The handle for a window which will be focused
:param top_window_id: The thread id from the topmost window
:param current_id: The programs current thread id
:param retries: The maximum amount of tries in case of failure
"""
while retries > 0:
try:
win32process.AttachThreadInput(top_window_id, current_id, True)
win32gui.SetWindowPos(handle, HWND_TOPMOST, *ORIGIN, FLAG_NO_SHOW)
win32gui.SetWindowPos(handle, HWND_NOTOPMOST, *ORIGIN, FLAG_SHOW)
win32gui.SetForegroundWindow(handle)
win32gui.SetActiveWindow(handle)
win32process.AttachThreadInput(top_window_id, current_id, False)
return
except pywintypes.error:
retries -= 1
| 30.867647 | 78 | 0.716532 |
7957b2ea0874e85de1c7133ba0095840bafacc45 | 358 | py | Python | pygamer/Player.py | djpeach/pygamer | 77a0cdab58bc29d06cc88c8cc823850794fe0bf0 | [
"MIT"
] | null | null | null | pygamer/Player.py | djpeach/pygamer | 77a0cdab58bc29d06cc88c8cc823850794fe0bf0 | [
"MIT"
] | null | null | null | pygamer/Player.py | djpeach/pygamer | 77a0cdab58bc29d06cc88c8cc823850794fe0bf0 | [
"MIT"
] | null | null | null | class Player:
def __init__(self, score=0):
self.score = score
self.objects = []
def update(self):
for obj in self.objects:
obj.update()
def draw(self, surface_to_draw_on):
for obj in self.objects:
obj.draw(surface_to_draw_on)
def scored(self, points=1):
self.score += points
| 22.375 | 40 | 0.575419 |
7957b33bf1dcee18b80fbd9df8d05324688fba1c | 61,563 | py | Python | python/paddle/fluid/dygraph/jit.py | douch/Paddle | 81c40722869935d6e897f4b1aeb6e6f67606188a | [
"Apache-2.0"
] | 1 | 2021-12-27T02:40:41.000Z | 2021-12-27T02:40:41.000Z | python/paddle/fluid/dygraph/jit.py | LiYuRio/Paddle | dbd6e2df9d074973b7ee177e2d6b96ed2318008e | [
"Apache-2.0"
] | 1 | 2022-01-28T07:23:22.000Z | 2022-01-28T07:23:22.000Z | python/paddle/fluid/dygraph/jit.py | LiYuRio/Paddle | dbd6e2df9d074973b7ee177e2d6b96ed2318008e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import pickle
import warnings
import functools
from collections import OrderedDict
import inspect
import threading
import six
import paddle
from paddle.fluid import core, dygraph
from paddle.fluid.compiler import BuildStrategy, CompiledProgram, ExecutionStrategy
from paddle.fluid.data_feeder import check_type
from paddle.fluid.layers.utils import flatten, pack_sequence_as
from paddle.fluid.dygraph.base import program_desc_tracing_guard, switch_to_static_graph
from paddle.fluid.dygraph.dygraph_to_static import logging_utils
from paddle.fluid.dygraph.dygraph_to_static.convert_call_func import ConversionOptions, CONVERSION_OPTIONS
from paddle.fluid.dygraph.dygraph_to_static.logging_utils import set_code_level, set_verbosity
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticFunction, unwrap_decorators
from paddle.fluid.dygraph.io import TranslatedLayer, INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX, INFER_PARAMS_INFO_SUFFIX
from paddle.fluid.dygraph.layers import Layer
from paddle.fluid.executor import Executor, scope_guard
from paddle.fluid.framework import Block, ParamBase, Program, Variable, Parameter, EagerParamBase
from paddle.fluid.framework import _current_expected_place, _dygraph_guard, _dygraph_tracer
from paddle.fluid.framework import dygraph_only, _non_static_mode
from paddle.fluid.wrapped_decorator import wrap_decorator
# Public API re-exported from this module (consumed by `paddle.jit`).
__all__ = [
    'TracedLayer', 'declarative', 'dygraph_to_static_func', 'set_code_level',
    'set_verbosity', 'save', 'load', 'not_to_static'
]
def create_program_from_desc(program_desc):
    """Wrap a ProgramDesc into a new Program with a single global block.

    Args:
        program_desc: the C++-side program description to wrap.

    Returns:
        Program: a Python Program synced with *program_desc*.
    """
    new_program = Program()
    new_program.desc = program_desc
    new_program.blocks = [Block(new_program, 0)]
    # Pull the block/var/op info from the C++ desc into the Python objects.
    new_program._sync_with_cpp()
    return new_program
def _extract_vars(inputs, result_list, err_tag='inputs'):
    """Recursively collect Variables from *inputs* into *result_list*.

    Raises:
        TypeError: when a leaf element is neither a Variable nor a list/tuple.
    """
    if isinstance(inputs, Variable):
        result_list.append(inputs)
        return
    if not isinstance(inputs, (list, tuple)):
        raise TypeError(
            "The type of 'each element of {}' in fluid.dygraph.jit.TracedLayer.trace must be fluid.Variable, but received {}.".
            format(err_tag, type(inputs)))
    for item in inputs:
        _extract_vars(item, result_list, err_tag)
def extract_vars(inputs, err_tag='inputs'):
    """Flatten *inputs* into a list of Variables (see ``_extract_vars``)."""
    collected = []
    _extract_vars(inputs, collected, err_tag)
    return collected
def _dygraph_to_static_func_(dygraph_func):
    """
    Converts imperative dygraph APIs into declarative function APIs. Decorator
    @dygraph_to_static_func only converts imperative dygraph APIs into
    declarative net-building APIs, which means it doesn't return immediate
    digital result as imperative mode. Users should handle Program and Executor
    by themselves.

    Note:
    This decorator is NOT our recommended way to transform imperative function
    to declarative function. We will remove this decorator after we finalize
    cleaning up code.

    Args:
        dygraph_func (callable): callable imperative function.

    Returns:
        Callable: converting imperative dygraph APIs into declarative
        net-building APIs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          from paddle.fluid.dygraph.jit import dygraph_to_static_func

          @dygraph_to_static_func
          def func(x):
              if fluid.layers.mean(x) < 0:
                  x_v = x - 1
              else:
                  x_v = x + 1

               return x_v

          x = fluid.layers.fill_constant(shape=[3, 3], value=0, dtype='float64')

          x_v = func(x)
          exe = fluid.Executor(fluid.CPUPlace())
          out = exe.run(fetch_list=[x_v])
          print(out[0])
          # [[1. 1. 1.]
          #  [1. 1. 1.]
          #  [1. 1. 1.]]

    """

    # TODO: remove this decorator after we finalize training API
    def __impl__(*args, **kwargs):
        program_translator = ProgramTranslator()
        # Fall back to plain eager execution when running in dygraph mode or
        # when dynamic-to-static conversion has been globally disabled.
        if _non_static_mode() or not program_translator.enable_to_static:
            logging_utils.warn(
                "The decorator 'dygraph_to_static_func' doesn't work in "
                "dygraph mode or set ProgramTranslator.enable to False. "
                "We will just return dygraph output.")
            return dygraph_func(*args, **kwargs)
        # Otherwise run the AST-transformed, net-building version of the
        # function (results are fetched later via an Executor by the caller).
        static_func = program_translator.get_func(dygraph_func)
        return static_func(*args, **kwargs)

    return __impl__


# Preserve the wrapped function's metadata (wrap_decorator applies
# decorator.decorator-style signature preservation).
dygraph_to_static_func = wrap_decorator(_dygraph_to_static_func_)
def copy_decorator_attrs(original_func, decorated_obj):
    """
    Copies some necessary attributes from original function into decorated function.

    Args:
        original_func(callable): the original decorated function.
        decorated_obj(StaticFunction): the target decorated StaticFunction object.

    Returns:
        StaticFunction: *decorated_obj*, with name/doc/module metadata copied over.
    """
    decorated_obj.__name__ = original_func.__name__
    # Record which decorator produced this object (used in error messages).
    decorated_obj._decorator_name = "declarative"
    decorated_obj.__wrapped__ = original_func
    decorated_obj.__doc__ = original_func.__doc__
    if hasattr(original_func, "__module__"):
        decorated_obj.__module__ = original_func.__module__

    return decorated_obj
def declarative(function=None, input_spec=None, build_strategy=None):
    """
    Converts imperative dygraph APIs into declarative function APIs. Decorator
    @declarative handles the Program and Executor of static mode and returns
    the result as dygraph Tensor(s). Users could use the returned dygraph
    Tensor(s) to do imperative training, inference, or other operations. If the
    decorated function calls other imperative function, the called one will be
    converted into declarative function as well.

    Args:
        function (callable): callable imperative function.
        input_spec(list[InputSpec]|tuple[InputSpec]): list/tuple of InputSpec to specific the shape/dtype/name
            information of each input Tensor.
        build_strategy(BuildStrategy|None): This argument is used to compile the
            converted program with the specified options, such as operators' fusion
            in the computational graph and memory optimization during the execution
            of the computational graph. For more information about build_strategy,
            please refer to :code:`paddle.static.BuildStrategy`. The default is None.

    Returns:
        Tensor(s): containing the numerical result.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.jit import to_static

            @to_static
            def func(x):
                if paddle.mean(x) < 0:
                    x_v = x - 1
                else:
                    x_v = x + 1
                return x_v

            x = paddle.ones([1, 2], dtype='float32')
            x_v = func(x)
            print(x_v) # [[2. 2.]]

    """

    def decorated(python_func):
        """
        Decorates a python function into a StaticFunction object.
        """
        # Step 1. unwrap the function if it is already decorated.
        _, python_func = unwrap_decorators(python_func)

        # Step 2. copy some attributes from original python function.
        static_layer = copy_decorator_attrs(
            original_func=python_func,
            decorated_obj=StaticFunction(
                function=python_func,
                input_spec=input_spec,
                build_strategy=build_strategy))

        return static_layer

    # Validate/normalize build_strategy before any decoration happens so a bad
    # argument fails fast in both usage forms.
    build_strategy = build_strategy or BuildStrategy()
    if not isinstance(build_strategy, BuildStrategy):
        raise TypeError(
            "Required type(build_strategy) shall be `paddle.static.BuildStrategy`, but received {}".
            format(type(build_strategy).__name__))

    # for usage: `declarative(foo, ...)`
    if function is not None:
        if isinstance(function, Layer):
            # Decorating a Layer instance means decorating its forward method
            # in place; warn when forward was already converted elsewhere.
            if isinstance(function.forward, StaticFunction):
                class_name = function.__class__.__name__
                logging_utils.warn(
                    "`{}.forward` has already been decorated somewhere. It will be redecorated to replace previous one.".
                    format(class_name))
            function.forward = decorated(function.forward)
            return function
        else:
            return decorated(function)

    # for usage: `@declarative`
    return decorated
def not_to_static(func=None):
    """
    A decorator that suppresses the Dynamic-to-Static conversion of a function.

    Args:
        func(callable): The function to decorate.

    Returns:
        callable: A function which won't be converted in Dynamic-to-Static.

    Examples:
        .. code-block:: python

            import paddle

            @paddle.jit.not_to_static
            def func_not_to_static(x):
                res = x - 1
                return res

            @paddle.jit.to_static
            def func(x):
                if paddle.mean(x) < 0:
                    out = func_not_to_static(x)
                else:
                    out = x + 1
                return out

            x = paddle.ones([1, 2], dtype='float32')
            out = func(x)
            print(out) # [[2. 2.]]
    """
    # Support calling `not_to_static()` with no target yet: hand back the
    # decorator itself so it can be applied later.
    if func is None:
        return not_to_static

    # Tag the function so convert_call() leaves it untouched during
    # dynamic-to-static transformation.
    setattr(func, CONVERSION_OPTIONS, ConversionOptions(not_convert=True))
    return func
class _SaveLoadConfig(object):
def __init__(self):
self._output_spec = None
self._model_filename = None
self._params_filename = None
self._separate_params = False
# used for `paddle.load`
self._keep_name_table = False
# NOTE: Users rarely use following configs, so these configs are not open to users,
# reducing user learning costs, but we retain the configuration capabilities
# If True, programs are modified to only support direct inference deployment.
# Otherwise,more information will be stored for flexible optimization and re-training.
# Currently, only True is supported
self._export_for_deployment = True
# If True, It will save inference program only, and do not save params of Program
self._program_only = False
@property
def output_spec(self):
return self._output_spec
@output_spec.setter
def output_spec(self, spec):
if spec is None:
return
if not isinstance(spec, list):
raise TypeError(
"The config `output_spec` should be 'list', but received input type is %s."
% type(input))
for var in spec:
if not isinstance(var, core.VarBase):
raise TypeError(
"The element in config `output_spec` list should be 'Variable', but received element's type is %s."
% type(var))
self._output_spec = spec
@property
def model_filename(self):
return self._model_filename
@model_filename.setter
def model_filename(self, filename):
if filename is None:
return
if not isinstance(filename, six.string_types):
raise TypeError(
"The config `model_filename` should be str, but received input's type is %s."
% type(filename))
if len(filename) == 0:
raise ValueError("The config `model_filename` is empty string.")
self._model_filename = filename
@property
def params_filename(self):
return self._params_filename
@params_filename.setter
def params_filename(self, filename):
if filename is None:
return
if not isinstance(filename, six.string_types):
raise TypeError(
"The config `params_filename` should be str, but received input's type is %s."
% type(filename))
if len(filename) == 0:
raise ValueError("The config `params_filename` is empty string.")
self._params_filename = filename
@property
def keep_name_table(self):
return self._keep_name_table
@keep_name_table.setter
def keep_name_table(self, value):
if value is None:
return
if not isinstance(value, bool):
raise TypeError(
"The config `keep_name_table` should be bool value, but received input's type is %s."
% type(value))
self._keep_name_table = value
def _parse_save_configs(configs):
    """Validate the ``**configs`` of ``paddle.jit.save`` and build the
    corresponding _SaveLoadConfig.

    Raises:
        ValueError: when an unsupported config key is passed.
    """
    supported = ('output_spec', )

    for key in configs:
        if key not in supported:
            raise ValueError(
                "The additional config (%s) of `paddle.jit.save` is not supported."
                % (key))

    inner_config = _SaveLoadConfig()
    inner_config.output_spec = configs.get('output_spec', None)
    return inner_config
def _parse_load_config(configs):
    """Validate the ``**configs`` of ``paddle.jit.load`` and build the
    corresponding _SaveLoadConfig.

    Raises:
        ValueError: when an unsupported config key is passed.
    """
    supported = ('model_filename', 'params_filename')

    for key in configs:
        if key not in supported:
            raise ValueError(
                "The additional config (%s) of `paddle.jit.load` is not supported."
                % (key))

    inner_config = _SaveLoadConfig()
    inner_config.model_filename = configs.get('model_filename', None)
    inner_config.params_filename = configs.get('params_filename', None)
    return inner_config
def _get_input_var_names(inputs, input_spec):
    """Return the feed-variable names for the program being saved.

    ``inputs`` are the concrete input Variables of the traced forward method;
    ``input_spec`` optionally selects a subset of them by name. When the spec
    covers every input, no pruning happens and name mismatches only warn;
    when it is a proper subset, the program will be pruned and mismatched or
    unnamed specs become hard errors.
    """
    name_none_error = "The %s's name is None. " \
        "When using jit.save, please set InputSepc's name in " \
        "to_static(input_spec=[]) and jit.save(input_spec=[]) " \
        "and make sure they are consistent."
    name_no_exists_error = "The tensor `%s` does not exists. " \
        "Please make sure the name of InputSpec or example Tensor " \
        "in input_spec is the same as the name of InputSpec in " \
        "`to_static` decorated on the Layer.forward method."
    result_list = []
    input_var_names = [
        var.name for var in flatten(inputs) if isinstance(var, Variable)
    ]
    if input_spec is None:
        # no prune
        return input_var_names
    else:
        # fileter out non-tensor type spec infos.
        input_spec = [
            spec for spec in input_spec
            if isinstance(spec, paddle.static.InputSpec)
        ]

        if len(input_spec) == len(input_var_names):
            # no prune
            result_list = input_var_names
            # if input spec name not in input_var_names, only raise warning
            for spec in input_spec:
                if spec.name is None:
                    warnings.warn(name_none_error % spec)
                elif spec.name not in input_var_names:
                    warnings.warn(name_no_exists_error % spec.name)
                else:
                    # do nothing
                    pass
        else:
            # prune
            for spec in input_spec:
                if spec.name is None:
                    # name is None, the input_spec only can be InputSpec
                    raise ValueError(name_none_error % spec)
                elif spec.name not in input_var_names:
                    # the input_spec can be `InputSpec` or `VarBase`
                    raise ValueError(name_no_exists_error % spec.name)
                else:
                    result_list.append(spec.name)

    return result_list
def _get_output_vars(outputs, output_spec):
    """Return the fetch Variables for the program being saved.

    ``outputs`` are the concrete return values of the traced forward method;
    ``output_spec`` optionally selects a subset of them by name. When the
    spec covers every output, mismatched names only warn; when it is a
    proper subset (pruning), mismatched names raise.

    NOTE: in the non-pruning branches this returns the OrderedDict values
    view rather than a list; callers only iterate it, so both work.
    """
    name_no_exists_error = "The tensor `%s` does not exists. " \
        "Please make sure the name of example Tensor " \
        "in configs.output_spec is the output tensor of " \
        "Layer.forward method."
    result_list = []
    # De-duplicate by variable name while preserving order of appearance.
    output_vars_dict = OrderedDict()
    for var in flatten(outputs):
        if isinstance(var, Variable):
            output_vars_dict[var.name] = var
    if output_spec is None:
        result_list = output_vars_dict.values()
    elif output_spec is not None and len(output_spec) == len(output_vars_dict):
        result_list = output_vars_dict.values()
        for var in output_spec:
            if var.name not in output_vars_dict:
                warnings.warn(name_no_exists_error % var.name)
    else:
        for var in output_spec:
            if var.name not in output_vars_dict:
                raise ValueError(name_no_exists_error % var.name)
            else:
                result_list.append(output_vars_dict[var.name])
    return result_list
# NOTE(chenweihang): [ Handling of use cases of API paddle.jit.load ]
# `paddle.jit.load` may be used to load saved results of:
# 1. Expected cases:
# - paddle.jit.save
# - paddle.static.save_inference_model
# - paddle.fluid.io.save_inference_model
# 2. Error cases:
# - paddle.save: no .pdmodel for prefix
# - paddle.static.save: no .pdiparams but .pdparams exists
# - paddle.fluid.io.save_params/save_persistables: no __model__
# TODO(chenweihang): polish error message in above error cases
def _build_load_path_and_config(path, config):
    """Resolve *path* into (model_dir, config) for ``paddle.jit.load``.

    Supports both the prefix save format (``path.pdmodel`` exists) and the
    legacy directory save format (``path`` is a directory). Having both at
    once, or neither, is an error.
    """
    prefix_format_path = path + INFER_MODEL_SUFFIX
    has_prefix_format = os.path.exists(prefix_format_path)
    has_directory_format = os.path.isdir(path)

    if has_prefix_format and has_directory_format:
        raise ValueError(
            "The %s.pdmodel and %s directory exist at the same time, "
            "don't know which one to load, please make sure that the specified target "
            "of ``path`` is unique." % (path, path))
    if not has_prefix_format and not has_directory_format:
        raise ValueError("The ``path`` (%s) to load model not exists." % path)

    if not has_prefix_format:
        # Compatible with the old save_inference_model format
        return path, config

    # Prefix format: derive the filenames from the prefix; user-supplied
    # filename configs are ignored (with a warning).
    file_prefix = os.path.basename(path)
    model_path = os.path.dirname(path)
    if config.model_filename is not None:
        warnings.warn(
            "When loading the result saved with the "
            "specified file prefix, the ``model_filename`` config does "
            "not take effect.")
    config.model_filename = file_prefix + INFER_MODEL_SUFFIX
    if config.params_filename is not None:
        warnings.warn(
            "When loading the result saved with the "
            "specified file prefix, the ``params_filename`` config does "
            "not take effect.")
    config.params_filename = file_prefix + INFER_PARAMS_SUFFIX

    return model_path, config
# Registry of hooks executed right before `paddle.jit.save` runs (see
# `_register_save_pre_hook`), plus the lock guarding registry mutation.
_save_pre_hooks_lock = threading.Lock()
_save_pre_hooks = []
class HookRemoveHelper(object):
    """ A HookRemoveHelper that can be used to remove hook. """

    def __init__(self, hook):
        # Keep a reference so `remove()` can later unregister this exact hook.
        self._hook = hook

    def remove(self):
        """Unregister the hook this helper was created for."""
        _remove_save_pre_hook(self._hook)
def _register_save_pre_hook(hook):
    """
    Register a save pre-hook for `paddle.jit.save`.
    This hook will be executed before `save` function has been invoked.

    hook(layer, input_spec, configs) -> None
    - layer (Layer|function): This argument is corresponding to `layer` in `paddle.jit.save`.
    - input_spec (list or tuple[InputSpec|Tensor|Python built-in variable]): This argument is corresponding to `input_spec` in `paddle.jit.save`.
    - configs (dict): This argument is corresponding to `configs` in `paddle.jit.save`.

    Args:
        hook(function): a function registered as a save pre-hook

    Returns:
        HookRemoveHelper: a HookRemoveHelper object that can be used to remove the added hook by calling `hook_remove_helper.remove()`.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            IMAGE_SIZE = 256
            CLASS_NUM = 10

            class LinearNet(paddle.nn.Layer):
                def __init__(self):
                    super(LinearNet, self).__init__()
                    self._linear = paddle.nn.Linear(IMAGE_SIZE, CLASS_NUM)

                def forward(self, x):
                    return self._linear(x)

            saving_count = 0
            def save_pre_hook(layer, input_spec, configs):
                global saving_count
                saving_count += 1

            remove_handler = paddle.jit.register_save_pre_hook(save_pre_hook)

            layer = LinearNet()
            paddle.jit.save(layer, "/tmp", [paddle.static.InputSpec(shape=[-1, IMAGE_SIZE])])
            # saving_count == 1

            remove_handler.remove()
            paddle.jit.save(layer, "/tmp", [paddle.static.InputSpec(shape=[-1, IMAGE_SIZE])])
            # saving_count == 1
    """
    # Use `with` so the lock is released even if the list mutation raises;
    # the previous manual acquire()/release() pair was not exception-safe.
    with _save_pre_hooks_lock:
        if hook not in _save_pre_hooks:
            _save_pre_hooks.append(hook)
    return HookRemoveHelper(hook)
def _clear_save_pre_hooks():
    """Remove every registered save pre-hook."""
    # `with` guarantees the lock is released even on error, unlike the
    # previous manual acquire()/release() pair.
    with _save_pre_hooks_lock:
        _save_pre_hooks.clear()
def _remove_save_pre_hook(hook):
    """Unregister *hook* if it is currently registered; no-op otherwise."""
    # `with` guarantees the lock is released even on error, unlike the
    # previous manual acquire()/release() pair.
    with _save_pre_hooks_lock:
        if hook in _save_pre_hooks:
            _save_pre_hooks.remove(hook)
def _run_save_pre_hooks(func):
    """Decorator that runs every registered save pre-hook before *func*.

    Each hook is called as ``hook(layer, input_spec, configs)`` with the
    arguments the wrapped save function received.
    """

    @functools.wraps(func)  # preserve the wrapped function's metadata
    def wrapper(layer, path, input_spec=None, **configs):
        for hook in _save_pre_hooks:
            hook(layer, input_spec, configs)
        # Forward the wrapped function's return value (the original wrapper
        # silently discarded it).
        return func(layer, path, input_spec, **configs)

    return wrapper
@_run_save_pre_hooks
@switch_to_static_graph
def save(layer, path, input_spec=None, **configs):
"""
Saves input Layer or function as ``paddle.jit.TranslatedLayer``
format model, which can be used for inference or fine-tuning after loading.
It will save the translated program and all related persistable
variables of input Layer to given ``path`` .
``path`` is the prefix of saved objects, and the saved translated program file
suffix is ``.pdmodel`` , the saved persistable variables file suffix is ``.pdiparams`` ,
and here also saved some additional variable description information to a file,
its suffix is ``.pdiparams.info``, these additional information is used in fine-tuning.
The saved model can be loaded by follow APIs:
- ``paddle.jit.load``
- ``paddle.static.load_inference_model``
- Other C++ inference APIs
.. note::
When using ``paddle.jit.save`` to save a function, parameters will not be saved. If you have to
save the parameter, please pass the Layer containing function and parameter to ``paddle.jit.save``.
Args:
layer (Layer|function): The Layer or function to be saved.
path (str): The path prefix to save model. The format is ``dirname/file_prefix`` or ``file_prefix``.
input_spec (list or tuple[InputSpec|Tensor|Python built-in variable], optional): Describes the input of the saved model's forward
method, which can be described by InputSpec or example Tensor. Moreover, we support to specify non-tensor type argument,
such as int, float, string, or list/dict of them.If None, all input variables of
the original Layer's forward method would be the inputs of the saved model. Default None.
**configs (dict, optional): Other save configuration options for compatibility. We do not
recommend using these configurations, they may be removed in the future. If not necessary,
DO NOT use them. Default None.
The following options are currently supported:
(1) output_spec (list[Tensor]): Selects the output targets of the saved model.
By default, all return variables of original Layer's forward method are kept as the
output of the saved model. If the provided ``output_spec`` list is not all output variables,
the saved model will be pruned according to the given ``output_spec`` list.
Returns:
None
Examples:
.. code-block:: python
# example 1: save layer
import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(paddle.io.Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
@paddle.jit.to_static
def forward(self, x):
return self._linear(x)
def train(layer, loader, loss_fn, opt):
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = layer(image)
loss = loss_fn(out, label)
loss.backward()
opt.step()
opt.clear_grad()
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
# 1. train & save model.
# create network
layer = LinearNet()
loss_fn = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
# create data loader
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
loader = paddle.io.DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
# train
train(layer, loader, loss_fn, adam)
# save
path = "example_model/linear"
paddle.jit.save(layer, path)
# example 2: save function
import paddle
from paddle.static import InputSpec
def save_function():
@paddle.jit.to_static
def fun(inputs):
return paddle.tanh(inputs)
path = 'test_jit_save_load_function_1/func'
inps = paddle.rand([3, 6])
origin = fun(inps)
paddle.jit.save(fun, path)
load_func = paddle.jit.load(path)
load_result = load_func(inps)
print((load_result - origin).abs().max() < 1e-10)
save_function()
"""
# 1. input build & check
prog_translator = ProgramTranslator()
if not prog_translator.enable_to_static:
raise RuntimeError(
"The paddle.jit.save doesn't work when setting ProgramTranslator.enable to False."
)
if not (isinstance(layer, Layer) or inspect.isfunction(layer) or isinstance(
layer, StaticFunction)):
raise TypeError(
"The input of paddle.jit.save should be 'Layer' or 'Function', but received input type is %s."
% type(layer))
elif inspect.isfunction(layer) or isinstance(layer, StaticFunction):
warnings.warn(
'What you save is a function, and `jit.save` will generate the name of the model file according to `path` you specify. When loading these files with `jit.load`, you get a `TranslatedLayer` whose inference result is the same as the inference result of the function you saved.'
)
# NOTE(chenweihang): If the input layer be wrapped by DataParallel,
# the args and kwargs of forward method will can't be parsed by
# function_spec, so here we save DataParallel._layers instead
# DataParallel it self
# NOTE(chenweihang): using inner_layer, do not change input layer
if isinstance(layer, paddle.DataParallel):
inner_layer = layer._layers
else:
inner_layer = layer
# path check
file_prefix = os.path.basename(path)
if file_prefix == "":
raise ValueError(
"The input path MUST be format of dirname/file_prefix "
"[dirname\\file_prefix in Windows system], but received "
"file_prefix is empty string.")
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
# avoid change user given input_spec
inner_input_spec = None
if input_spec is not None:
if isinstance(layer, Layer):
for attr_func in dir(inner_layer):
static_func = getattr(inner_layer, attr_func, None)
if isinstance(static_func,
StaticFunction) and 'forward' != attr_func:
raise ValueError(
"If there are static functions other than 'forward' that need to be saved, the input 'input_spec' should be None, but received the type of 'input_spec' is %s."
% type(input_spec))
if not isinstance(input_spec, (list, tuple)):
raise TypeError(
"The input input_spec should be 'list', but received input_spec's type is %s."
% type(input_spec))
inner_input_spec = []
for var in flatten(input_spec):
if isinstance(var, paddle.static.InputSpec):
inner_input_spec.append(var)
elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)):
inner_input_spec.append(
paddle.static.InputSpec.from_tensor(var))
else:
# NOTE(Aurelius84): Support non-Tensor type in `input_spec`.
inner_input_spec.append(var)
# parse configs
configs = _parse_save_configs(configs)
scope = core.Scope()
extra_var_info = dict()
if isinstance(layer, Layer):
functions = dir(inner_layer)
else:
# layer is function
functions = [layer, ]
for attr_func in functions:
if isinstance(layer, Layer):
static_func = getattr(inner_layer, attr_func, None)
if isinstance(static_func, StaticFunction):
concrete_program = static_func.concrete_program_specify_input_spec(
inner_input_spec)
elif 'forward' == attr_func:
# transform in jit.save, if input_spec is incomplete, declarative will throw error
# inner_input_spec is list[InputSpec], it should be packed with same structure
# as original input_spec here.
if inner_input_spec:
inner_input_spec = pack_sequence_as(input_spec,
inner_input_spec)
static_forward = declarative(
inner_layer.forward, input_spec=inner_input_spec)
concrete_program = static_forward.concrete_program
# the input_spec has been used in declarative, which is equal to
# @declarative with input_spec and jit.save without input_spec,
# avoid needless warning
inner_input_spec = None
else:
continue
else:
# When layer is a function
if isinstance(attr_func, StaticFunction):
concrete_program = attr_func.concrete_program_specify_input_spec(
inner_input_spec)
else:
if inner_input_spec:
inner_input_spec = pack_sequence_as(input_spec,
inner_input_spec)
static_function = declarative(
attr_func, input_spec=inner_input_spec)
concrete_program = static_function.concrete_program
if static_function._class_instance is None:
warnings.warn(
'`jit.save` will only save the `Program`, not the parameters. If you have to save the parameters, please make sure that {} is a member function of `paddle.nn.Layer` and the saved parameters are in `state_dict`'.
format(layer))
dygraph_state_dict = None
if isinstance(inner_layer, Layer):
dygraph_state_dict = inner_layer.to_static_state_dict()
elif isinstance(attr_func, StaticFunction):
if attr_func._class_instance:
dygraph_state_dict = attr_func._class_instance.to_static_state_dict(
)
if dygraph_state_dict:
# NOTE(chenweihang): we maintain the mapping of variable name to
# structured name, the buffer variable (non-persistable)
# saved to inference program may not need by dygraph Layer,
# we only record the state_dict variable's structured name
state_names_dict = dict()
state_var_dict = dict()
for structured_name, var in six.iteritems(dygraph_state_dict):
state_names_dict[var.name] = structured_name
state_var_dict[var.name] = var
# 3. share parameters from Layer to scope & record var info
with dygraph.guard():
for param_or_buffer in concrete_program.parameters:
# share to scope
if param_or_buffer.type == core.VarDesc.VarType.VOCAB:
scr_tensor = param_or_buffer.value().get_map_tensor()
tgt_var = scope.var(param_or_buffer.name)
tgt_var.set_vocab(scr_tensor)
else:
param_or_buffer_tensor = scope.var(
param_or_buffer.name).get_tensor()
#src_tensor = param_or_buffer.value().get_tensor()
src_tensor = state_var_dict[param_or_buffer.name].value(
).get_tensor()
param_or_buffer_tensor._share_data_with(src_tensor)
# record var info
if param_or_buffer.name not in extra_var_info:
extra_info_dict = dict()
if param_or_buffer.name in state_names_dict:
extra_info_dict[
'structured_name'] = state_names_dict[
param_or_buffer.name]
extra_info_dict[
'stop_gradient'] = param_or_buffer.stop_gradient
if isinstance(param_or_buffer,
(ParamBase, EagerParamBase)):
extra_info_dict[
'trainable'] = param_or_buffer.trainable
extra_var_info[param_or_buffer.name] = extra_info_dict
# 4. build input & output of save_infernece_model
# NOTE(chenweihang): [ Get input variables name ]
# There are two cases, whether to prune the inputs or not
# - not prune inputs (recommend):
# - the len(input_spec) == len((concrete_program.inputs) - 1
# - here can use concrete_program.inputs directly
# - prune inputs:
# - the input_spec length < len((concrete_program.inputs) - 1
# - the input_spec's name should be in concrete_program.inputs
input_var_names = _get_input_var_names(concrete_program.inputs,
inner_input_spec)
# NOTE(chenweihang): [ Get output variables ]
# the rule is like [ Get input variables name ]. For output var,
# we only support VarBase spec, and actually, we only need the
# var name of output, and we don't recommended to use output_spec
output_vars = _get_output_vars(concrete_program.outputs,
configs.output_spec)
# 5. save inference model
from paddle.fluid.io import save_inference_model
# construct new save_inference_model arguments
model_path = dirname
# NOTE(chenweihang): because prefix contains model and params filename,
# so we don't support set model_filename & params_filename
if 'forward' == attr_func or not isinstance(layer, Layer):
model_filename = file_prefix + INFER_MODEL_SUFFIX
params_filename = file_prefix + INFER_PARAMS_SUFFIX
else:
model_filename = file_prefix + '.' + attr_func + INFER_MODEL_SUFFIX
params_filename = file_prefix + '.' + attr_func + INFER_PARAMS_SUFFIX
with scope_guard(scope):
save_inference_model(
dirname=model_path,
feeded_var_names=input_var_names,
target_vars=output_vars,
executor=Executor(_current_expected_place()),
main_program=concrete_program.main_program.clone(),
model_filename=model_filename,
params_filename=params_filename,
export_for_deployment=configs._export_for_deployment,
program_only=configs._program_only,
clip_extra=False)
# NOTE(chenweihang): [ Save extra variable info ]
# save_inference_model will lose some important variable information, including:
# - Variable name and correspondence (when saved variables as one file)
# - Variable.stop_gradient information
# - Which persistent variable are parameter and which are not
# - Parameter.trainable information
#
# The lost information cannot be recovered when it is loaded again,
# so if we want to perform fine-tune after loading, we may need to
# configure redundant information to proceed.
#
# Due to compatibility issues, we cannot change the original storage structure,
# but we can save these information in `jit.save` without changing the original
# storage to improve user experience. So we save extra information into
# file `***.pdiparams.info`
# "layer" can only be Layer or function or StaticFunction.
contain_parameter = False
for var in concrete_program.main_program.list_vars():
contain_parameter |= isinstance(var, Parameter)
if (isinstance(layer, Layer) or contain_parameter) and extra_var_info:
with scope_guard(scope):
extra_var_info_path = path + INFER_PARAMS_INFO_SUFFIX
with open(extra_var_info_path, 'wb') as f:
pickle.dump(extra_var_info, f, protocol=2)
@dygraph_only
def load(path, **configs):
    """
    :api_attr: imperative
    Rebuild a ``TranslatedLayer`` from a model stored on disk so it can be
    used for inference or resumed (fine-tune) training. Models produced by
    ``paddle.jit.save``, ``paddle.static.save_inference_model`` and the
    legacy paddle 1.x API ``paddle.fluid.io.save_inference_model`` are all
    supported.
    .. note::
        For models stored with ``paddle.static.save_inference_model`` the
        following restrictions apply when fine-tuning:
        1. Imperative mode does not support LoDTensor, so feed targets or
           parameters that depend on LoD are temporarily unavailable.
        2. Every feed target of the saved model must be passed into the
           TranslatedLayer's forward method.
        3. The ``stop_gradient`` information of variables is lost and can
           not be recovered.
        4. The ``trainable`` information of parameters is lost and can
           not be recovered.
    Args:
        path (str): The path prefix of the stored model, formatted as
            ``dirname/file_prefix`` or ``file_prefix`` .
        **configs (dict, optional): Extra load options kept for backward
            compatibility; they may be removed in the future, so avoid
            them unless strictly necessary. Supported keys:
            (1) model_filename (str): The inference model file name used by
                the paddle 1.x ``save_inference_model`` save format.
                Default file name is :code:`__model__` .
            (2) params_filename (str): The persistable variables file name
                used by the paddle 1.x ``save_inference_model`` save format.
                No default file name, variables are saved separately
                by default.
    Returns:
        TranslatedLayer: A Layer object that runs the stored translated model.
    Examples:
        .. code-block:: python
            import paddle
            # a model previously stored via paddle.jit.save(layer, "example_model/linear")
            loaded_layer = paddle.jit.load("example_model/linear")
            # inference
            loaded_layer.eval()
            x = paddle.randn([1, 784], 'float32')
            pred = loaded_layer(x)
            # fine-tune
            loaded_layer.train()
            adam = paddle.optimizer.Adam(
                learning_rate=0.001, parameters=loaded_layer.parameters())
    """
    # 1. normalize user-supplied configs and resolve the real model
    #    directory / file prefix from the given path
    load_config = _parse_load_config(configs)
    model_path, load_config = _build_load_path_and_config(path, load_config)
    # 2. rebuild the translated layer from the stored program & parameters
    return TranslatedLayer._construct(model_path, load_config)
@dygraph_only
def _trace(layer,
           inputs,
           feed_prefix='feed_',
           fetch_prefix='fetch_',
           tmp_prefix='t_'):
    """
    Run ``layer(*inputs)`` once under program-desc tracing and convert the
    recorded ops into a static ``Program``.

    Returns a tuple of
    ``(original_outputs, program, feed_names, fetch_names, parameters)``.
    The prefixes name the generated feed, fetch and temporary variables.
    """
    assert isinstance(layer, Layer)
    # Normalize a single input into a list so the layer call below can unpack.
    if not isinstance(inputs, (list, tuple)):
        inputs = [inputs]
    tracer = _dygraph_tracer()._get_program_desc_tracer()
    var_list = extract_vars(inputs)
    # All ops executed inside this guard are recorded by the tracer.
    with program_desc_tracing_guard(True):
        original_outputs = layer(*inputs)
        if not isinstance(original_outputs, (list, tuple)):
            outputs = [original_outputs]
        else:
            outputs = original_outputs
        out_vars = extract_vars(outputs, err_tag='outputs')
        # Build a ProgramDesc from the recorded ops, then clear the tracer
        # state so later traces start clean.
        program_desc, feed_names, fetch_names, parameters = tracer.create_program_desc(
            var_list, feed_prefix, out_vars, fetch_prefix, tmp_prefix)
        tracer.reset()
    # Leave dygraph mode to materialize a Program object from the traced desc.
    with _dygraph_guard(None):
        program = create_program_from_desc(program_desc)
    return original_outputs, program, feed_names, fetch_names, parameters
class TracedLayer(object):
    """
    :api_attr: imperative
    TracedLayer is used to convert a forward dygraph model to a static
    graph model. This is mainly used to save the dygraph model for online
    inference using C++. Besides, users can also do inference in Python
    using the converted static graph model, which usually has better
    performance than the original dygraph model.
    TracedLayer would run the static graph model using :code:`Executor`
    and :code:`CompiledProgram` . The static graph model would share
    parameters with the dygraph model.
    All TracedLayer objects should not be created by constructor and should
    be created by static method :code:`TracedLayer.trace(layer, inputs)` .
    The TracedLayer can only be used to convert the data-independent dygraph
    model into the static graph model, which means the dygraph model should
    be independent with the tensor data and shape.
    """
    def __init__(self, program, parameters, feed_names, fetch_names):
        # NOTE: not part of the public API -- instances are expected to be
        # created via TracedLayer.trace() only.
        self._program = program
        self._feed_names = feed_names
        self._fetch_names = fetch_names
        self._params = parameters
        self._place = _current_expected_place()
        # Private scope holding the parameter tensors. Tensors are shared
        # (not copied) with the dygraph parameters, so weights stay in sync
        # between the dygraph layer and this static model.
        self._scope = core.Scope()
        for p in parameters:
            src_tensor = p.value().get_tensor()
            dst_tensor = self._scope.var(p.name).get_tensor()
            dst_tensor._share_data_with(src_tensor)
        self._exe = Executor(self._place)
        # Compiled lazily on first __call__; strategies may be set until then.
        self._compiled_program = None
        self._build_strategy = None
        self._exec_strategy = None
    @property
    def program(self):
        # The traced static ``Program``.
        return self._program
    def _switch(self, is_test=True):
        # Flip the ``is_test`` attribute on every op that defines it, which
        # switches such ops between training and inference behavior.
        for block_id in range(self._program.num_blocks):
            block = self._program.block(block_id)
            for op in block.ops:
                if op.has_attr("is_test"):
                    op._set_attr("is_test", is_test)
    @staticmethod
    @dygraph_only
    def trace(layer, inputs):
        """
        This method is the only allowed method to create TracedLayer object.
        It would call the :code:`layer(*inputs)` method to run the dygraph
        model and convert it into a static graph model.
        Args:
            layer (paddle.nn.Layer): the layer object to be traced.
            inputs (list(Tensor)|tuple(Tensor)|Tensor): the input tensors of
                the layer object.
        Returns:
            tuple: A tuple of 2 items, whose the first item is the output of
                :code:`layer(*inputs)` , and the second item is the created
                TracedLayer object.
        Examples:
            .. code-block:: python
                import paddle
                class ExampleLayer(paddle.nn.Layer):
                    def __init__(self):
                        super(ExampleLayer, self).__init__()
                        self._fc = paddle.nn.Linear(3, 10)
                    def forward(self, input):
                        return self._fc(input)
                layer = ExampleLayer()
                in_var = paddle.uniform(shape=[2, 3], dtype='float32')
                out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])
                # run the static graph model using Executor inside
                out_static_graph = static_layer([in_var])
                print(len(out_static_graph)) # 1
                print(out_static_graph[0].shape) # (2, 10)
                # save the static graph model for inference
                static_layer.save_inference_model('./saved_infer_model')
        """
        assert isinstance(
            layer, Layer
        ), "The type of 'layer' in fluid.dygraph.jit.TracedLayer.trace must be fluid.dygraph.Layer, but received {}.".format(
            type(layer))
        outs, prog, feed, fetch, parameters = _trace(layer, inputs)
        traced = TracedLayer(prog, parameters, feed, fetch)
        return outs, traced
    def set_strategy(self, build_strategy=None, exec_strategy=None):
        """
        Set the strategies when running static graph model.
        Args:
            build_strategy (BuildStrategy, optional): build strategy of
                :code:`CompiledProgram` inside TracedLayer. Default None.
            exec_strategy (ExecutionStrategy, optional): execution strategy of
                :code:`CompiledProgram` inside TracedLayer. Default None.
        Returns:
            None
        Examples:
            .. code-block:: python
                import paddle
                class ExampleLayer(paddle.nn.Layer):
                    def __init__(self):
                        super(ExampleLayer, self).__init__()
                        self._fc = paddle.nn.Linear(3, 10)
                    def forward(self, input):
                        return self._fc(input)
                layer = ExampleLayer()
                in_var = paddle.uniform(shape=[2, 3], dtype='float32')
                out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])
                build_strategy = paddle.static.BuildStrategy()
                build_strategy.enable_inplace = True
                exec_strategy = paddle.static.ExecutionStrategy()
                exec_strategy.num_threads = 2
                static_layer.set_strategy(build_strategy=build_strategy, exec_strategy=exec_strategy)
                out_static_graph = static_layer([in_var])
        """
        # Strategies only take effect at compile time, so they may not be
        # changed after the program has been compiled (first __call__).
        assert self._compiled_program is None, "Cannot set strategy after run"
        assert isinstance(
            build_strategy, (type(None), BuildStrategy)
        ), "The type of 'build_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.BuildStrategy, but received {}.".format(
            type(build_strategy))
        assert isinstance(
            exec_strategy, (type(None), ExecutionStrategy)
        ), "The type of 'exec_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.ExecutionStrategy, but received {}.".format(
            type(exec_strategy))
        self._build_strategy = build_strategy
        self._exec_strategy = exec_strategy
    @switch_to_static_graph
    def _compile(self):
        # Build the CompiledProgram once, applying any strategies configured
        # through set_strategy().
        self._compiled_program = CompiledProgram(
            self._program).with_data_parallel(
                build_strategy=self._build_strategy,
                exec_strategy=self._exec_strategy,
                places=self._place)
    def _build_feed(self, inputs):
        # Map positional inputs onto the recorded feed names. In dygraph mode
        # the underlying dense tensors are fed; otherwise the objects are
        # passed through unchanged.
        assert isinstance(inputs, (list, tuple)), \
            "Inputs should be a list or tuple of variables"
        assert len(inputs) == len(self._feed_names)
        feed_dict = {}
        if _non_static_mode():
            for x, name in zip(inputs, self._feed_names):
                feed_dict[name] = x.value().get_tensor()
        else:
            for x, name in zip(inputs, self._feed_names):
                feed_dict[name] = x
        return feed_dict
    @switch_to_static_graph
    def _run(self, feed):
        # Execute the compiled program and fetch the recorded output names.
        return self._exe.run(self._compiled_program,
                             feed=feed,
                             fetch_list=self._fetch_names)
    def __call__(self, inputs):
        # Run the static graph model. Compiles lazily on the first call and
        # always executes inside the private parameter scope.
        with scope_guard(self._scope):
            if self._compiled_program is None:
                self._compile()
            return self._run(self._build_feed(inputs))
    @switch_to_static_graph
    def save_inference_model(self, path, feed=None, fetch=None, **kwargs):
        """
        Save the TracedLayer to a model for inference. The saved
        inference model can be loaded by C++ inference APIs.
        ``path`` is the prefix of saved objects, and the saved translated program file
        suffix is ``.pdmodel`` , the saved persistable variables file suffix is ``.pdiparams`` .
        Args:
            path(str): The path prefix to save model. The format is ``dirname/file_prefix`` or ``file_prefix``.
            feed (list[int], optional): the input variable indices of the saved
                inference model. If None, all input variables of the
                TracedLayer object would be the inputs of the saved inference
                model. Default None.
            fetch (list[int], optional): the output variable indices of the
                saved inference model. If None, all output variables of the
                TracedLayer object would be the outputs of the saved inference
                model. Default None.
            kwargs: Supported keys including 'clip_extra'. Set to True if you want to clip extra information for every operator.
        Returns:
            None
        Examples:
            .. code-block:: python
                import numpy as np
                import paddle
                class ExampleLayer(paddle.nn.Layer):
                    def __init__(self):
                        super(ExampleLayer, self).__init__()
                        self._fc = paddle.nn.Linear(3, 10)
                    def forward(self, input):
                        return self._fc(input)
                save_dirname = './saved_infer_model'
                in_np = np.random.random([2, 3]).astype('float32')
                in_var = paddle.to_tensor(in_np)
                layer = ExampleLayer()
                out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])
                static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0])
                paddle.enable_static()
                place = paddle.CPUPlace()
                exe = paddle.static.Executor(place)
                program, feed_vars, fetch_vars = paddle.static.load_inference_model(save_dirname,
                                                    exe)
                fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars)
                print(fetch.shape) # (2, 10)
        """
        check_type(path, "path", str,
                   "fluid.dygraph.jit.TracedLayer.save_inference_model")
        check_type(feed, "feed", (type(None), list),
                   "fluid.dygraph.jit.TracedLayer.save_inference_model")
        if isinstance(feed, list):
            for f in feed:
                check_type(f, "each element of feed", int,
                           "fluid.dygraph.jit.TracedLayer.save_inference_model")
        check_type(fetch, "fetch", (type(None), list),
                   "fluid.dygraph.jit.TracedLayer.save_inference_model")
        if isinstance(fetch, list):
            for f in fetch:
                check_type(f, "each element of fetch", int,
                           "fluid.dygraph.jit.TracedLayer.save_inference_model")
        clip_extra = kwargs.get('clip_extra', False)
        # path check
        file_prefix = os.path.basename(path)
        if file_prefix == "":
            raise ValueError(
                "The input path MUST be format of dirname/file_prefix "
                "[dirname\\file_prefix in Windows system], but received "
                "file_prefix is empty string.")
        dirname = os.path.dirname(path)
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        from paddle.fluid.io import save_inference_model
        def get_feed_fetch(all_vars, partial_vars):
            # Select a subset of vars by index; None means "keep all".
            if partial_vars is None:
                return all_vars
            return [all_vars[idx] for idx in partial_vars]
        with scope_guard(self._scope):
            feeded_var_names = get_feed_fetch(self._feed_names, feed)
            target_var_names = get_feed_fetch(self._fetch_names, fetch)
            target_vars = []
            for name in target_var_names:
                target_var = self._program.global_block().vars.get(name, None)
                assert target_var is not None, "{} cannot be found".format(name)
                target_vars.append(target_var)
            model_filename = file_prefix + INFER_MODEL_SUFFIX
            params_filename = file_prefix + INFER_PARAMS_SUFFIX
            save_inference_model(
                dirname=dirname,
                feeded_var_names=feeded_var_names,
                target_vars=target_vars,
                executor=self._exe,
                main_program=self._program.clone(),
                model_filename=model_filename,
                params_filename=params_filename,
                clip_extra=clip_extra)
| 39.162214 | 287 | 0.610562 |
7957b345a1812f4546f8f2a3f38ef8ba6a39fed3 | 636 | py | Python | read_excel_file.py | thiru15/Python-1 | f276f34a77579e552ca2adb3b5a3a1b0f3ebddee | [
"MIT"
] | 12 | 2019-12-27T07:32:35.000Z | 2022-02-20T20:15:08.000Z | read_excel_file.py | DiasNikita/Python | f276f34a77579e552ca2adb3b5a3a1b0f3ebddee | [
"MIT"
] | 1 | 2018-11-15T01:54:25.000Z | 2018-11-15T01:54:25.000Z | read_excel_file.py | DiasNikita/Python | f276f34a77579e552ca2adb3b5a3a1b0f3ebddee | [
"MIT"
] | 22 | 2019-10-06T20:30:25.000Z | 2022-01-11T16:31:14.000Z | # extract number of rows using Python
import xlrd
# Read basic information from an Excel workbook with xlrd:
# row/column counts, the header row, the first column, and one full row.
# Give the location of the file
loc = "sample.xlsx"
wb = xlrd.open_workbook(loc)
# The workbook's first worksheet; one handle is enough for every query below
# (the original re-fetched it repeatedly and evaluated a discarded
# sheet.cell_value(0, 0) expression -- both removed).
sheet = wb.sheet_by_index(0)
# Extracting number of rows
print(sheet.nrows)
# extract number of columns
print(sheet.ncols)
# extracting all column names (cells of the first row)
for i in range(sheet.ncols):
    print(sheet.cell_value(0, i))
# extracting the first column, one cell per row
for i in range(sheet.nrows):
    print(sheet.cell_value(i, 0))
# extract a particular row's values as a list
print(sheet.row_values(1))
7957b4ddf735d8b06f12abc6447e9ab2937b2e87 | 7,297 | py | Python | main.py | gcarq/keras-timeseries-prediction | 5d58d174bc1386c5fd60b01de0b9d6c4998a7242 | [
"MIT"
] | 116 | 2017-02-25T15:33:45.000Z | 2022-03-28T14:06:11.000Z | main.py | Kushal334/keras-timeseries-prediction | 8ab377b6ad6a60d469a4da8430951df7f7230e9e | [
"MIT"
] | 2 | 2017-10-11T21:07:53.000Z | 2020-06-27T17:41:25.000Z | main.py | Kushal334/keras-timeseries-prediction | 8ab377b6ad6a60d469a4da8430951df7f7230e9e | [
"MIT"
] | 61 | 2017-01-21T16:08:49.000Z | 2021-01-06T18:49:35.000Z | import numpy
import pandas
import matplotlib.pyplot as plt
from keras.layers import Dense, LSTM
from keras.models import Sequential
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from tqdm import trange
# fix random seed for reproducibility
numpy.random.seed(7)
def load_dataset(datasource: str) -> (numpy.ndarray, MinMaxScaler):
    """
    Read the time series from ``datasource``, plot the raw values, then
    scale them into the [0, 1] range with a ``MinMaxScaler``.
    :param datasource: CSV file name; the second column holds the series
    :return: tuple of the scaled dataset and the fitted MinMaxScaler
    """
    # second CSV column only; forward-fill any gaps in the series
    frame = pandas.read_csv(datasource, usecols=[1]).fillna(method='pad')
    values = frame.values.astype('float32')
    # preview the raw (unscaled) series
    plt.plot(values)
    plt.show()
    # squash everything into [0, 1]; keep the scaler for inverse transforms
    scaler = MinMaxScaler(feature_range=(0, 1))
    return scaler.fit_transform(values), scaler
def create_dataset(dataset: numpy.ndarray, look_back: int=1) -> (numpy.ndarray, numpy.ndarray):
    """
    Turn a series into supervised-learning pairs: each input row holds
    ``look_back`` consecutive values and the target is the value that
    immediately follows that window.
    :param dataset: numpy dataset, one value per row in column 0
    :param look_back: window length (number of previous time steps)
    :return: tuple of (inputs, targets) numpy arrays
    """
    sample_count = len(dataset) - look_back - 1
    windows = [dataset[i:i + look_back, 0] for i in range(sample_count)]
    targets = [dataset[i + look_back, 0] for i in range(sample_count)]
    return numpy.array(windows), numpy.array(targets)
def split_dataset(dataset: numpy.ndarray, train_size, look_back) -> (numpy.ndarray, numpy.ndarray):
    """
    Split a series into train and test parts. The last ``look_back`` rows
    of the train part are repeated at the head of the test part so the
    first test sample has a full input window.
    :param dataset: source dataset
    :param train_size: number of rows in the train part
    :param look_back: number of previous time steps as int
    :return: tuple of train and test datasets
    """
    if train_size <= look_back:
        raise ValueError('train_size must be lager than look_back')
    train = dataset[:train_size, :]
    test = dataset[train_size - look_back:len(dataset), :]
    print('train_dataset: {}, test_dataset: {}'.format(len(train), len(test)))
    return train, test
def build_model(look_back: int, batch_size: int=1) -> Sequential:
    """
    Assemble and compile the keras Sequential model: a single stateful
    64-unit LSTM followed by a linear output neuron, trained with Adam
    on mean squared error.
    :param look_back: number of previous time steps as int
    :param batch_size: batch_size as int, defaults to 1
    :return: compiled keras Sequential model
    """
    network = Sequential()
    network.add(
        LSTM(64,
             activation='relu',
             batch_input_shape=(batch_size, look_back, 1),
             stateful=True,
             return_sequences=False))
    network.add(Dense(1, activation='linear'))
    network.compile(loss='mean_squared_error', optimizer='adam')
    return network
def plot_data(dataset: numpy.ndarray,
              look_back: int,
              train_predict: numpy.ndarray,
              test_predict: numpy.ndarray,
              forecast_predict: numpy.ndarray):
    """
    Plot the baseline series and the three prediction series.

    blue: baseline
    green: prediction with training data
    red: prediction with test data
    cyan: prediction based on predictions

    :param dataset: dataset used for predictions
    :param look_back: number of previous time steps as int
    :param train_predict: predicted values based on training data
    :param test_predict: predicted values based on test data
    :param forecast_predict: predicted values based on previous predictions
    :return: None
    """
    plt.plot(dataset)
    # pad each series with None so it is drawn at its position in time
    lead_pad = [None] * look_back
    train_pad = [None] * len(train_predict)
    test_pad = [None] * len(test_predict)
    plt.plot(lead_pad + list(train_predict))
    plt.plot(lead_pad + train_pad + list(test_predict))
    plt.plot(lead_pad + train_pad + test_pad + list(forecast_predict))
    plt.show()
def make_forecast(model: Sequential, look_back_buffer: numpy.ndarray, timesteps: int=1, batch_size: int=1):
    """
    Forecast `timesteps` future values by repeatedly feeding each prediction
    back into the model as the newest element of the look-back window.

    :param model: fitted keras model
    :param look_back_buffer: seed window, shape (1, look_back, 1)
    :param timesteps: number of future steps to predict
    :param batch_size: batch size passed to model.predict
    :return: predictions as an array of shape (timesteps, 1)
    """
    predictions = numpy.empty((0, 1), dtype=numpy.float32)
    for _ in trange(timesteps, desc='predicting data\t', mininterval=1.0):
        # predict one step from the current window and record it
        step = model.predict(look_back_buffer, batch_size)
        predictions = numpy.concatenate([predictions, step], axis=0)
        # reshape the prediction so it can be appended as a new time step
        step = numpy.reshape(step, (step.shape[1], step.shape[0], 1))
        # slide the window: drop the oldest step, append the newest prediction
        look_back_buffer = numpy.delete(look_back_buffer, 0, axis=1)
        look_back_buffer = numpy.concatenate([look_back_buffer, step], axis=1)
    return predictions
def main():
    """Load the airline-passengers series, fit a stateful LSTM, and plot
    train/test/forecast predictions against the baseline."""
    datasource = 'international-airline-passengers.csv'
    dataset, scaler = load_dataset(datasource)
    # split into train and test sets
    # window = 20% of the series, training partition = 70% of the series
    look_back = int(len(dataset) * 0.20)
    train_size = int(len(dataset) * 0.70)
    train, test = split_dataset(dataset, train_size, look_back)
    # reshape into X=t and Y=t+1
    train_x, train_y = create_dataset(train, look_back)
    test_x, test_y = create_dataset(test, look_back)
    # reshape input to be [samples, time steps, features]
    train_x = numpy.reshape(train_x, (train_x.shape[0], train_x.shape[1], 1))
    test_x = numpy.reshape(test_x, (test_x.shape[0], test_x.shape[1], 1))
    # create and fit Multilayer Perceptron model
    batch_size = 1
    model = build_model(look_back, batch_size=batch_size)
    # stateful model: fit one epoch at a time and reset states between epochs
    # NOTE(review): `nb_epoch` is the keras 1.x spelling; keras 2.x renamed it
    # to `epochs` -- confirm against the installed keras version
    for _ in trange(100, desc='fitting model\t', mininterval=1.0):
        model.fit(train_x, train_y, nb_epoch=1, batch_size=batch_size, verbose=0, shuffle=False)
        model.reset_states()
    # generate predictions for training
    train_predict = model.predict(train_x, batch_size)
    test_predict = model.predict(test_x, batch_size)
    # generate forecast predictions, seeded with the last test window
    forecast_predict = make_forecast(model, test_x[-1::], timesteps=100, batch_size=batch_size)
    # invert dataset and predictions back to the original value scale
    dataset = scaler.inverse_transform(dataset)
    train_predict = scaler.inverse_transform(train_predict)
    train_y = scaler.inverse_transform([train_y])
    test_predict = scaler.inverse_transform(test_predict)
    test_y = scaler.inverse_transform([test_y])
    forecast_predict = scaler.inverse_transform(forecast_predict)
    # calculate root mean squared error
    train_score = numpy.sqrt(mean_squared_error(train_y[0], train_predict[:, 0]))
    print('Train Score: %.2f RMSE' % train_score)
    test_score = numpy.sqrt(mean_squared_error(test_y[0], test_predict[:, 0]))
    print('Test Score: %.2f RMSE' % test_score)
    plot_data(dataset, look_back, train_predict, test_predict, forecast_predict)
if __name__ == '__main__':
    main()
| 38.81383 | 115 | 0.695765 |
7957b8be9778cfbdb1cae784c737a9b6d8b8ed51 | 5,905 | py | Python | app.py | vibhor-voicebot/merastore-2fa-enabled | 9ca3a4310d6c1ecb44169bc66dced3cbfd2a15c6 | [
"MIT"
] | null | null | null | app.py | vibhor-voicebot/merastore-2fa-enabled | 9ca3a4310d6c1ecb44169bc66dced3cbfd2a15c6 | [
"MIT"
] | null | null | null | app.py | vibhor-voicebot/merastore-2fa-enabled | 9ca3a4310d6c1ecb44169bc66dced3cbfd2a15c6 | [
"MIT"
] | null | null | null | import os
import base64
from io import BytesIO
from flask import Flask, render_template, redirect, url_for, flash, session, \
abort
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, login_user, logout_user, \
current_user
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import Required, Length, EqualTo
import onetimepass
import pyqrcode
# create application instance; configuration is read from config.py
app = Flask(__name__)
app.config.from_object('config')
# initialize extensions: templating assets, ORM, and login/session management
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
lm = LoginManager(app)
class User(UserMixin, db.Model):
    """User model with password hashing and a per-user TOTP secret."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True)
    # only the hash is stored; the clear-text password is never persisted
    password_hash = db.Column(db.String(128))
    # base32-encoded TOTP shared secret (16 chars for 10 random bytes)
    otp_secret = db.Column(db.String(16))
    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)
        if self.otp_secret is None:
            # generate a random secret
            self.otp_secret = base64.b32encode(os.urandom(10)).decode('utf-8')
    @property
    def password(self):
        """Write-only attribute: reading the password is an error."""
        raise AttributeError('password is not a readable attribute')
    @password.setter
    def password(self, password):
        # hash on assignment so callers can simply write user.password = ...
        self.password_hash = generate_password_hash(password)
    def verify_password(self, password):
        """Return True if `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)
    def get_totp_uri(self):
        """Return the otpauth:// provisioning URI for authenticator apps."""
        return 'otpauth://totp/2FA-Demo:{0}?secret={1}&issuer=2FA-Demo' \
            .format(self.username, self.otp_secret)
    def verify_totp(self, token):
        """Return True if `token` is a currently valid TOTP code."""
        return onetimepass.valid_totp(token, self.otp_secret)
@lm.user_loader
def load_user(user_id):
    """User loader callback for Flask-Login.

    Flask-Login stores the user id in the session as a string, so it is
    converted back to int before the primary-key lookup.
    """
    return User.query.get(int(user_id))
class RegisterForm(FlaskForm):
    """Registration form: username plus password entered twice.

    NOTE(review): wtforms 2.x renamed ``Required`` to ``DataRequired``;
    ``Required`` appears to still work as an alias here -- confirm against
    the pinned wtforms version.
    """
    username = StringField('Username', validators=[Required(), Length(1, 64)])
    password = PasswordField('Password', validators=[Required()])
    password_again = PasswordField('Password again',
                                   validators=[Required(), EqualTo('password')])
    submit = SubmitField('Register')
class LoginForm(FlaskForm):
    """Login form: username, password, and a 6-digit TOTP token."""
    username = StringField('Username', validators=[Required(), Length(1, 64)])
    password = PasswordField('Password', validators=[Required()])
    # exactly six characters, matching the TOTP code length
    token = StringField('Token', validators=[Required(), Length(6, 6)])
    submit = SubmitField('Login')
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/index_redirect')
def index_redirect():
    """Render the post-login landing page."""
    return render_template('index_redirect.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
    """User registration route."""
    # a logged-in user has no business registering again
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    reg_form = RegisterForm()
    if not reg_form.validate_on_submit():
        # GET request or failed validation: (re)display the form
        return render_template('register.html', form=reg_form)
    existing = User.query.filter_by(username=reg_form.username.data).first()
    if existing is not None:
        flash('Username already exists.')
        return redirect(url_for('register'))
    # persist the new account
    new_user = User(username=reg_form.username.data, password=reg_form.password.data)
    db.session.add(new_user)
    db.session.commit()
    # stash the username in the session so the 2FA setup page can find the user
    session['username'] = new_user.username
    return redirect(url_for('two_factor_setup'))
@app.route('/twofactor')
def two_factor_setup():
    """Render the two-factor setup page for the freshly registered user."""
    if 'username' not in session:
        return redirect(url_for('index'))
    user = User.query.filter_by(username=session['username']).first()
    if user is None:
        return redirect(url_for('index'))
    # the rendered page embeds the sensitive QR code, so forbid any caching
    no_cache_headers = {
        'Cache-Control': 'no-cache, no-store, must-revalidate',
        'Pragma': 'no-cache',
        'Expires': '0'}
    return render_template('two-factor-setup.html'), 200, no_cache_headers
@app.route('/qrcode')
def qrcode():
    """Serve the TOTP provisioning QR code as an SVG, at most once."""
    if 'username' not in session:
        abort(404)
    user = User.query.filter_by(username=session['username']).first()
    if user is None:
        abort(404)
    # for added security, remove username from session so this URL
    # cannot be fetched a second time
    del session['username']
    # render qrcode for FreeTOTP
    qr = pyqrcode.create(user.get_totp_uri())
    buffer = BytesIO()
    qr.svg(buffer, scale=3)
    headers = {
        'Content-Type': 'image/svg+xml',
        'Cache-Control': 'no-cache, no-store, must-revalidate',
        'Pragma': 'no-cache',
        'Expires': '0'}
    return buffer.getvalue(), 200, headers
@app.route('/login', methods=['GET', 'POST'])
def login():
    """User login route."""
    # already-authenticated users are sent straight home
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    login_form = LoginForm()
    if not login_form.validate_on_submit():
        # GET request or failed validation: (re)display the form
        return render_template('login.html', form=login_form)
    user = User.query.filter_by(username=login_form.username.data).first()
    # all three checks must pass; the error message is deliberately vague
    # so it does not reveal which credential was wrong
    credentials_ok = (user is not None
                      and user.verify_password(login_form.password.data)
                      and user.verify_totp(login_form.token.data))
    if not credentials_ok:
        flash('Invalid username, password or token.')
        return redirect(url_for('login'))
    # log user in
    login_user(user)
    flash('You are now logged in!')
    return redirect(url_for('index_redirect'))
@app.route('/logout')
def logout():
    """User logout route: clear the login session and return home."""
    logout_user()
    return redirect(url_for('index'))
# create database tables if they don't exist yet
# (runs at import time, so the schema exists even under a WSGI server)
db.create_all()
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive debugger and
    # 0.0.0.0 binds all interfaces -- for local demo use only
    app.run(host='0.0.0.0', debug=True)
7957b90901283100f232d844e6417a16c3fd4d36 | 37,927 | py | Python | icepyx/core/query.py | whyjz/icepyx | 0a0984b5ef6013734d0851bc9fe545c3eb37fbf8 | [
"BSD-3-Clause"
] | null | null | null | icepyx/core/query.py | whyjz/icepyx | 0a0984b5ef6013734d0851bc9fe545c3eb37fbf8 | [
"BSD-3-Clause"
] | 2 | 2020-06-18T17:05:17.000Z | 2020-06-18T17:55:46.000Z | icepyx/core/query.py | yochannah/icepyx | fe57c41a2775b663c19a2ab59a6ebb644caf4a3b | [
"BSD-3-Clause"
] | null | null | null | import datetime as dt
import os
import requests
import json
import warnings
import pprint
import time
import geopandas as gpd
import matplotlib.pyplot as plt
from icepyx.core.Earthdata import Earthdata
import icepyx.core.APIformatting as apifmt
import icepyx.core.is2ref as is2ref
import icepyx.core.granules as granules
from icepyx.core.granules import Granules as Granules
# QUESTION: why doesn't from granules import Granules as Granules work, since granules=icepyx.core.granules?
# from icepyx.core.granules import Granules
from icepyx.core.variables import Variables as Variables
import icepyx.core.geospatial as geospatial
import icepyx.core.validate_inputs as val
from icepyx.core.visualization import Visualize
# DevGoal: update docs throughout to allow for polygon spatial extent
# Note: add files to docstring once implemented
# DevNote: currently this class is not tested
class Query:
    """
    ICESat-2 Data object to query, obtain, and perform basic operations on
    available ICESat-2 datasets using temporal and spatial input parameters.
    Allows the easy input and formatting of search parameters to match the
    NASA NSIDC DAAC and (development goal-not yet implemented) conversion to multiple data types.
    Parameters
    ----------
    dataset : string
        ICESat-2 dataset ID, also known as "short name" (e.g. ATL03).
        Available datasets can be found at: https://nsidc.org/data/icesat-2/data-sets
    spatial_extent : list or string
        Spatial extent of interest, provided as a bounding box, list of polygon coordinates, or
        geospatial polygon file.
        Bounding box coordinates should be provided in decimal degrees as
        [lower-left-longitude, lower-left-latitute, upper-right-longitude, upper-right-latitude].
        Polygon coordinates should be provided as coordinate pairs in decimal degrees as
        [(longitude1, latitude1), (longitude2, latitude2), ... (longitude_n,latitude_n), (longitude1,latitude1)]
        or
        [longitude1, latitude1, longitude2, latitude2, ... longitude_n,latitude_n, longitude1,latitude1].
        Your list must contain at least four points, where the first and last are identical.
        DevGoal: adapt code so the polygon is automatically closed if need be
        Geospatial polygon files are entered as strings with the full file path and
        must contain only one polygon with the area of interest.
        Currently supported formats are: kml, shp, and gpkg
    date_range : list of 'YYYY-MM-DD' strings
        Date range of interest, provided as start and end dates, inclusive.
        The required date format is 'YYYY-MM-DD' strings, where
        YYYY = 4 digit year, MM = 2 digit month, DD = 2 digit day.
        Currently, a list of specific dates (rather than a range) is not accepted.
        DevGoal: accept date-time objects, dicts (with 'start_date' and 'end_date' keys, and DOY inputs).
        DevGoal: allow searches with a list of dates, rather than a range.
    start_time : HH:mm:ss, default 00:00:00
        Start time in UTC/Zulu (24 hour clock). If None, use default.
        DevGoal: check for time in date-range date-time object, if that's used for input.
    end_time : HH:mm:ss, default 23:59:59
        End time in UTC/Zulu (24 hour clock). If None, use default.
        DevGoal: check for time in date-range date-time object, if that's used for input.
    version : string, default most recent version
        Dataset version, given as a 3 digit string. If no version is given, the current
        version is used.
    cycle : string, default all available orbital cycles
        Dataset cycle, given as a 2 digit string. If no cycle is given, all available
        cycles are used.
    track : string, default all available reference ground tracks (RGTs)
        Dataset track, given as a 4 digit string. If no track is given, all available
        reference ground tracks are used.
    files : string, default None
        (Development in progress) When provided, the object's data source is
        set to "files" rather than "order"; file-based workflows are not yet
        implemented.
    Returns
    -------
    query object
    Examples
    --------
    Initializing Query with a bounding box.
    >>> reg_a_bbox = [-55, 68, -48, 71]
    >>> reg_a_dates = ['2019-02-20','2019-02-28']
    >>> reg_a = icepyx.query.Query('ATL06', reg_a_bbox, reg_a_dates)
    >>> reg_a
    <icepyx.core.query.Query at [location]>
    Initializing Query with a list of polygon vertex coordinate pairs.
    >>> reg_a_poly = [(-55, 68), (-55, 71), (-48, 71), (-48, 68), (-55, 68)]
    >>> reg_a_dates = ['2019-02-20','2019-02-28']
    >>> reg_a = icepyx.query.Query('ATL06', reg_a_poly, reg_a_dates)
    >>> reg_a
    <icepyx.core.query.Query at [location]>
    Initializing Query with a geospatial polygon file.
    >>> aoi = '/User/name/location/aoi.shp'
    >>> reg_a_dates = ['2019-02-22','2019-02-28']
    >>> reg_a = icepyx.query.Query('ATL06', aoi, reg_a_dates)
    >>> reg_a
    <icepyx.core.query.Query at [location]>
    """

    # ----------------------------------------------------------------------
    # Constructors

    def __init__(
        self,
        dataset=None,
        spatial_extent=None,
        date_range=None,
        start_time=None,
        end_time=None,
        version=None,
        cycles=None,
        tracks=None,
        files=None,
    ):
        # warnings.filterwarnings("always")
        # warnings.warn("Please note: as of 2020-05-05, a major reorganization of the core icepyx.query code may result in errors produced by now deprecated functions. Please see our documentation pages or example notebooks for updates.")

        # require either (dataset + spatial extent), (dates/cycles/tracks), or files
        if (
            (dataset is None or spatial_extent is None)
            and (date_range is None or cycles is None or tracks is None)
            and files is None
        ):
            raise ValueError(
                "Please provide the required inputs. Use help([function]) to view the function's documentation"
            )
        if files is not None:
            self._source = "files"
            # self.file_vars = Variables(self._source)
        else:
            self._source = "order"
            # self.order_vars = Variables(self._source)
        # self.variables = Variables(self._source)
        # NOTE(review): the files-only path still runs dataset/spatial
        # validation below with possibly-None inputs -- confirm intended
        self._dset = is2ref._validate_dataset(dataset)
        self.extent_type, self._spat_extent, self._geom_filepath = val.spatial(
            spatial_extent
        )
        if date_range:
            self._start, self._end = val.temporal(date_range, start_time, end_time)
        # default to the most recent dataset version unless one was given
        self._version = val.dset_version(self.latest_version(), version)
        # build list of available CMR parameters if reducing by cycle or RGT
        # or a list of explicitly named files (full or partial names)
        # DevGoal: add file name search to optional queries
        if cycles or tracks:
            # get lists of available ICESat-2 cycles and tracks
            self._cycles = val.cycles(cycles)
            self._tracks = val.tracks(tracks)
            # create list of CMR parameters for granule name
            self._readable_granule_name = apifmt._fmt_readable_granules(
                self._dset, cycles=self.cycles, tracks=self.tracks
            )
    # ----------------------------------------------------------------------
    # Properties

    @property
    def dataset(self):
        """
        Return the short name dataset ID string associated with the query object.
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.dataset
        'ATL06'
        """
        return self._dset

    @property
    def dataset_version(self):
        """
        Return the dataset version of the data object.
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.dataset_version
        '003'
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'], version='1')
        >>> reg_a.dataset_version
        '001'
        """
        return self._version

    @property
    def spatial_extent(self):
        """
        Return an array showing the spatial extent of the query object.
        Spatial extent is returned as an input type (which depends on how
        you initially entered your spatial data) followed by the geometry data.
        Bounding box data is [lower-left-longitude, lower-left-latitute, upper-right-longitude, upper-right-latitude].
        Polygon data is [[array of longitudes],[array of corresponding latitudes]].
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.spatial_extent
        ['bounding box', [-55, 68, -48, 71]]
        >>> reg_a = icepyx.query.Query('ATL06',[(-55, 68), (-55, 71), (-48, 71), (-48, 68), (-55, 68)],['2019-02-20','2019-02-28'])
        >>> reg_a.spatial_extent
        ['polygon', [-55.0, 68.0, -55.0, 71.0, -48.0, 71.0, -48.0, 68.0, -55.0, 68.0]]
        """
        if self.extent_type == "bounding_box":
            return ["bounding box", self._spat_extent]
        elif self.extent_type == "polygon":
            # return ['polygon', self._spat_extent]
            # Note: self._spat_extent is a shapely geometry object
            return ["polygon", self._spat_extent.exterior.coords.xy]
        else:
            return ["unknown spatial type", None]
    @property
    def dates(self):
        """
        Return an array showing the date range of the query object.
        Dates are returned as an array containing the start and end datetime objects, inclusive, in that order.
        Returns a placeholder message if no temporal parameters were set.
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.dates
        ['2019-02-20', '2019-02-28']
        """
        if not hasattr(self, "_start"):
            return ["No temporal parameters set"]
        else:
            return [
                self._start.strftime("%Y-%m-%d"),
                self._end.strftime("%Y-%m-%d"),
            ]  # could also use self._start.date()

    @property
    def start_time(self):
        """
        Return the start time specified for the start date.
        Returns a placeholder message if no temporal parameters were set.
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.start_time
        '00:00:00'
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'], start_time='12:30:30')
        >>> reg_a.start_time
        '12:30:30'
        """
        if not hasattr(self, "_start"):
            return ["No temporal parameters set"]
        else:
            return self._start.strftime("%H:%M:%S")

    @property
    def end_time(self):
        """
        Return the end time specified for the end date.
        Returns a placeholder message if no temporal parameters were set.
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.end_time
        '23:59:59'
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'], end_time='10:20:20')
        >>> reg_a.end_time
        '10:20:20'
        """
        if not hasattr(self, "_end"):
            return ["No temporal parameters set"]
        else:
            return self._end.strftime("%H:%M:%S")

    @property
    def cycles(self):
        """
        Return the unique ICESat-2 orbital cycle(s) requested, sorted.
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.cycles
        ['02']
        """
        if not hasattr(self, "_cycles"):
            return ["No orbital parameters set"]
        else:
            return sorted(set(self._cycles))

    @property
    def tracks(self):
        """
        Return the unique ICESat-2 Reference Ground Track(s) requested, sorted.
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.tracks
        ['0841', '0849', '0902', '0910']
        """
        if not hasattr(self, "_tracks"):
            return ["No orbital parameters set"]
        else:
            return sorted(set(self._tracks))
    @property
    def CMRparams(self):
        """
        Display the CMR key:value pairs that will be submitted. It generates the dictionary if it does not already exist.
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.CMRparams
        {'short_name': 'ATL06',
        'version': '002',
        'temporal': '2019-02-20T00:00:00Z,2019-02-28T23:59:59Z',
        'bounding_box': '-55,68,-48,71'}
        """
        if not hasattr(self, "_CMRparams"):
            self._CMRparams = apifmt.Parameters("CMR")

        # dictionary of optional CMR parameters
        kwargs = {}
        # temporal CMR parameters
        if hasattr(self, "_start") and hasattr(self, "_end"):
            kwargs["start"] = self._start
            kwargs["end"] = self._end
        # granule name CMR parameters (orbital or file name)
        # DevGoal: add to file name search to optional queries
        if hasattr(self, "_readable_granule_name"):
            kwargs["options[readable_granule_name][pattern]"] = "true"
            kwargs["options[spatial][or]"] = "true"
            kwargs["readable_granule_name[]"] = self._readable_granule_name

        # only build the parameter dictionary once; reuse it afterwards
        if self._CMRparams.fmted_keys == {}:
            self._CMRparams.build_params(
                dataset=self.dataset,
                version=self._version,
                extent_type=self.extent_type,
                spatial_extent=self._spat_extent,
                **kwargs,
            )
        return self._CMRparams.fmted_keys
    @property
    def reqparams(self):
        """
        Display the required key:value pairs that will be submitted. It generates the dictionary if it does not already exist.
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.reqparams
        {'page_size': 10, 'page_num': 1}
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.earthdata_login(user_id,user_email)
        Earthdata Login password: ········
        >>> reg_a.order_granules()
        >>> reg_a.reqparams
        {'page_size': 10, 'page_num': 1, 'request_mode': 'async', 'include_meta': 'Y', 'client_string': 'icepyx'}
        """
        # built lazily; starts as a "search"-type parameter set
        if not hasattr(self, "_reqparams"):
            self._reqparams = apifmt.Parameters("required", reqtype="search")
            self._reqparams.build_params()
        return self._reqparams.fmted_keys

    # @property
    # DevQuestion: if I make this a property, I get a "dict" object is not callable when I try to give input kwargs... what approach should I be taking?
def subsetparams(self, **kwargs):
"""
Display the subsetting key:value pairs that will be submitted. It generates the dictionary if it does not already exist
and returns an empty dictionary if subsetting is set to False during ordering.
Parameters
----------
**kwargs : key-value pairs
Additional parameters to be passed to the subsetter.
By default temporal and spatial subset keys are passed.
Acceptable key values are ['format','projection','projection_parameters','Coverage'].
At this time (2020-05), only variable ('Coverage') parameters will be automatically formatted.
See Also
--------
order_granules
Examples
--------
>>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
>>> reg_a.subsetparams()
{'time': '2019-02-20T00:00:00,2019-02-28T23:59:59', 'bbox': '-55,68,-48,71'}
"""
if not hasattr(self, "_subsetparams"):
self._subsetparams = apifmt.Parameters("subset")
# temporal subsetting parameters
if hasattr(self, "_start") and hasattr(self, "_end"):
kwargs["start"] = self._start
kwargs["end"] = self._end
if self._subsetparams == None and not kwargs:
return {}
else:
if self._subsetparams == None:
self._subsetparams = apifmt.Parameters("subset")
if self._geom_filepath is not None:
self._subsetparams.build_params(
geom_filepath=self._geom_filepath,
extent_type=self.extent_type,
spatial_extent=self._spat_extent,
**kwargs,
)
else:
self._subsetparams.build_params(
extent_type=self.extent_type,
spatial_extent=self._spat_extent,
**kwargs,
)
return self._subsetparams.fmted_keys
    # DevGoal: add to tests
    # DevGoal: add statements to the following vars properties to let the user know if they've got a mismatched source and vars type
    @property
    def order_vars(self):
        """
        Return the order variables object.
        This instance is generated when data is ordered from the NSIDC.
        See Also
        --------
        variables.Variables
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.earthdata_login(user_id,user_email)
        Earthdata Login password: ········
        >>> reg_a.order_vars
        <icepyx.core.variables.Variables at [location]>
        """
        if not hasattr(self, "_order_vars"):
            if self._source == "order":
                # DevGoal: check for active session here
                # pass along the already-fetched variable list when available
                if hasattr(self, "_cust_options"):
                    self._order_vars = Variables(
                        self._source,
                        session=self._session,
                        dataset=self.dataset,
                        avail=self._cust_options["variables"],
                    )
                else:
                    self._order_vars = Variables(
                        self._source,
                        session=self._session,
                        dataset=self.dataset,
                        version=self._version,
                    )

        # I think this is where property setters come in, and one should be used here? Right now order_vars.avail is only filled in
        # if _cust_options exists when the class is initialized, but not if _cust_options is filled in prior to another call to order_vars
        # if self._order_vars.avail == None and hasattr(self, '_cust_options'):
        #     print('got into the loop')
        #     self._order_vars.avail = self._cust_options['variables']

        return self._order_vars
    @property
    def file_vars(self):
        """
        Return the file variables object.
        This instance is generated when files are used to create the data object (not yet implemented).
        See Also
        --------
        variables.Variables
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.earthdata_login(user_id,user_email)
        Earthdata Login password: ········
        >>> reg_a.file_vars
        <icepyx.core.variables.Variables at [location]>
        """
        if not hasattr(self, "_file_vars"):
            # NOTE(review): __init__ sets self._source = "files" (plural), so
            # this "file" check never matches and _file_vars is never created,
            # making the return below raise AttributeError -- confirm the
            # intended source string before fixing
            if self._source == "file":
                self._file_vars = Variables(self._source, dataset=self.dataset)

        return self._file_vars
@property
def granules(self):
"""
Return the granules object, which provides the underlying funtionality for searching, ordering,
and downloading granules for the specified dataset. Users are encouraged to use the built in wrappers
rather than trying to access the granules object themselves.
See Also
--------
avail_granules
order_granules
download_granules
granules.Granules
Examples
--------
>>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
>>> reg_a.granules
<icepyx.core.granules.Granules at [location]>
"""
if not hasattr(self, "_granules"):
self._granules = Granules()
elif self._granules == None:
self._granules = Granules()
return self._granules
    # ----------------------------------------------------------------------
    # Methods - Get and display neatly information at the dataset level

    def dataset_summary_info(self):
        """
        Display a summary of selected metadata for the specified version of the dataset
        of interest (the collection).
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.dataset_summary_info()
        dataset_id :  ATLAS/ICESat-2 L3A Land Ice Height V002
        short_name :  ATL06
        version_id :  002
        time_start :  2018-10-14T00:00:00.000Z
        coordinate_system :  CARTESIAN
        summary :  This data set (ATL06) provides geolocated, land-ice surface heights (above the WGS 84 ellipsoid, ITRF2014 reference frame), plus ancillary parameters that can be used to interpret and assess the quality of the height estimates. The data were acquired by the Advanced Topographic Laser Altimeter System (ATLAS) instrument on board the Ice, Cloud and land Elevation Satellite-2 (ICESat-2) observatory.
        orbit_parameters :  {'swath_width': '36.0', 'period': '94.29', 'inclination_angle': '92.0', 'number_of_orbits': '0.071428571', 'start_circular_latitude': '0.0'}
        """
        # fetch (and cache) the CMR collection metadata on first use
        if not hasattr(self, "_about_dataset"):
            self._about_dataset = is2ref.about_dataset(self._dset)
        summ_keys = [
            "dataset_id",
            "short_name",
            "version_id",
            "time_start",
            "coordinate_system",
            "summary",
            "orbit_parameters",
        ]
        # entry [-1] is the most recent dataset version in the CMR response
        for key in summ_keys:
            print(key, ": ", self._about_dataset["feed"]["entry"][-1][key])
    def dataset_all_info(self):
        """
        Display all metadata about the dataset of interest (the collection).
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.dataset_all_info()
        {very long prettily-formatted dictionary output}
        """
        # fetch (and cache) the CMR collection metadata on first use
        if not hasattr(self, "_about_dataset"):
            self._about_dataset = is2ref.about_dataset(self._dset)
        pprint.pprint(self._about_dataset)
    def latest_version(self):
        """
        Determine the most recent version available for the given dataset.
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.latest_version()
        '003'
        """
        if not hasattr(self, "_about_dataset"):
            self._about_dataset = is2ref.about_dataset(self._dset)
        # version_id values are zero-padded 3-digit strings, so the string
        # max() matches the numeric maximum
        return max(
            [entry["version_id"] for entry in self._about_dataset["feed"]["entry"]]
        )
def show_custom_options(self, dictview=False):
"""
Display customization/subsetting options available for this dataset.
Parameters
----------
dictview : boolean, default False
Show the variable portion of the custom options list as a dictionary with key:value
pairs representing variable:paths-to-variable rather than as a long list of full
variable paths.
Examples
--------
>>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
>>> reg_a.earthdata_login(user_id,user_email)
Earthdata Login password: ········
>>> reg_a.show_custom_options(dictview=True):
Subsetting options
[{'id': 'ICESAT2',
'maxGransAsyncRequest': '2000',
'maxGransSyncRequest': '100',
'spatialSubsetting': 'true',
'spatialSubsettingShapefile': 'true',
'temporalSubsetting': 'true',
'type': 'both'}]
Data File Formats (Reformatting Options)
['TABULAR_ASCII', 'NetCDF4-CF', 'Shapefile', 'NetCDF-3']
Reprojection Options
[]
Data File (Reformatting) Options Supporting Reprojection
['TABULAR_ASCII', 'NetCDF4-CF', 'Shapefile', 'NetCDF-3', 'No reformatting']
Data File (Reformatting) Options NOT Supporting Reprojection
[]
Data Variables (also Subsettable)
['ancillary_data/atlas_sdp_gps_epoch',
'ancillary_data/control',
'ancillary_data/data_end_utc',
.
.
.
'quality_assessment/gt3r/signal_selection_source_fraction_3']
"""
headers = [
"Subsetting options",
"Data File Formats (Reformatting Options)",
"Reprojection Options",
"Data File (Reformatting) Options Supporting Reprojection",
"Data File (Reformatting) Options NOT Supporting Reprojection",
"Data Variables (also Subsettable)",
]
keys = [
"options",
"fileformats",
"reprojectionONLY",
"formatreproj",
"noproj",
"variables",
]
try:
all(key in self._cust_options.keys() for key in keys)
except AttributeError or KeyError:
self._cust_options = is2ref._get_custom_options(
self._session, self.dataset, self._version
)
for h, k in zip(headers, keys):
print(h)
if k == "variables" and dictview:
vgrp, paths = Variables.parse_var_list(self._cust_options[k])
pprint.pprint(vgrp)
else:
pprint.pprint(self._cust_options[k])
    # ----------------------------------------------------------------------
    # Methods - Login and Granules (NSIDC-API)

    def earthdata_login(self, uid, email):
        """
        Log in to NSIDC EarthData to access data. Generates the needed session and token for most
        data searches and data ordering/download.
        Parameters
        ----------
        uid : string
            Earthdata login user ID
        email : string
            Email address. NSIDC will automatically send you emails about the status of your order.
        See Also
        --------
        Earthdata.Earthdata
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.earthdata_login(user_id,user_email)
        Earthdata Login password: ········
        """
        # the capability URL is dataset/version specific
        capability_url = f"https://n5eil02u.ecs.nsidc.org/egi/capabilities/{self.dataset}.{self._version}.xml"
        # store the authenticated session and email for later order/download calls
        self._session = Earthdata(uid, email, capability_url).login()
        self._email = email
    # DevGoal: check to make sure the see also bits of the docstrings work properly in RTD
    def avail_granules(self, ids=False, cycles=False, tracks=False):
        """
        Obtain information about the available granules for the query
        object's parameters. By default, a complete list of available granules is
        obtained and stored in the object, but only summary information is returned.
        Lists of granule IDs, cycles and RGTs can be obtained using the boolean triggers.
        Parameters
        ----------
        ids : boolean, default False
            Indicates whether the function should return a list of granule IDs.
        cycles : boolean, default False
            Indicates whether the function should return a list of orbital cycles.
        tracks : boolean, default False
            Indicates whether the function should return a list of RGTs.
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.avail_granules()
        {'Number of available granules': 4,
        'Average size of granules (MB)': 48.975419759750004,
        'Total size of all granules (MB)': 195.90167903900002}
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.avail_granules(ids=True)
        >>> reg_a.avail_granules(cycles=True)
        ['02']
        >>> reg_a.avail_granules(tracks=True)
        ['0841', '0849', '0902', '0910']
        """

        # REFACTOR: add test to make sure there's a session
        if not hasattr(self, "_granules"):
            self.granules
        # fetch the granule list from CMR only if it has not been cached yet
        try:
            self.granules.avail
        except AttributeError:
            self.granules.get_avail(self.CMRparams, self.reqparams)

        if ids or cycles or tracks:
            # list of outputs in order of ids, cycles, tracks
            return granules.gran_IDs(
                self.granules.avail, ids=ids, cycles=cycles, tracks=tracks
            )
        else:
            return granules.info(self.granules.avail)
# DevGoal: display output to indicate number of granules successfully ordered (and number of errors)
# DevGoal: deal with subset=True for variables now, and make sure that if a variable subset Coverage kwarg is input it's successfully passed through all other functions even if this is the only one run.
    def order_granules(self, verbose=False, subset=True, email=True, **kwargs):
        """
        Place an order for the available granules for the query object.
        Parameters
        ----------
        verbose : boolean, default False
            Print out all feedback available from the order process.
            Progress information is automatically printed regardless of the value of verbose.
        subset : boolean, default True
            Apply subsetting to the data order from the NSIDC, returning only data that meets the
            subset parameters. Spatial and temporal subsetting based on the input parameters happens
            by default when subset=True, but additional subsetting options are available.
            Spatial subsetting returns all data that are within the area of interest (but not complete
            granules. This eliminates false-positive granules returned by the metadata-level search)
        email: boolean, default True
            Have NSIDC auto-send order status email updates to indicate order status as pending/completed.
        **kwargs : key-value pairs
            Additional parameters to be passed to the subsetter.
            By default temporal and spatial subset keys are passed.
            Acceptable key values are ['format','projection','projection_parameters','Coverage'].
            The variable 'Coverage' list should be constructed using the `order_vars.wanted` attribute of the object.
            At this time (2020-05), only variable ('Coverage') parameters will be automatically formatted.
        See Also
        --------
        granules.place_order
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.earthdata_login(user_id,user_email)
        Earthdata Login password: ········
        >>> reg_a.order_granules()
        order ID: [###############]
        [order status output]
        error messages:
        [if any were returned from the NSIDC subsetter, e.g. No data found that matched subset constraints.]
        .
        .
        .
        Retry request status is: complete
        """
        # NOTE(review): hasattr() triggers the `reqparams` property; presumably
        # the getter builds self._reqparams on first access — TODO confirm.
        if not hasattr(self, "reqparams"):
            self.reqparams
        # Orders require a 'download'-type request, not a plain 'search'.
        if self._reqparams._reqtype == "search":
            self._reqparams._reqtype = "download"
        # Only attach the stored email address when the caller wants status
        # emails and none was already supplied in the formatted keys.
        if "email" in self._reqparams.fmted_keys.keys() or email == False:
            self._reqparams.build_params(**self._reqparams.fmted_keys)
        else:
            self._reqparams.build_params(
                **self._reqparams.fmted_keys, email=self._email
            )
        if subset is False:
            self._subsetparams = None
        elif (
            subset == True
            and hasattr(self, "_subsetparams")
            and self._subsetparams == None
        ):
            # Drop the stale placeholder so self.subsetparams(**kwargs) below
            # rebuilds a fresh subset-parameters object.
            del self._subsetparams
        # REFACTOR: add checks here to see if the granules object has been created, and also if it already has a list of avail granules (if not, need to create one and add session)
        if not hasattr(self, "_granules"):
            self.granules
        self._granules.place_order(
            self.CMRparams,
            self.reqparams,
            self.subsetparams(**kwargs),
            verbose,
            subset,
            session=self._session,
            geom_filepath=self._geom_filepath,
        )
# DevGoal: put back in the kwargs here so that people can just call download granules with subset=False!
    def download_granules(
        self, path, verbose=False, subset=True, restart=False, **kwargs
    ):  # , extract=False):
        """
        Downloads the data ordered using order_granules.
        Parameters
        ----------
        path : string
            String with complete path to desired download location.
        verbose : boolean, default False
            Print out all feedback available from the order process.
            Progress information is automatically printed regardless of the value of verbose.
        subset : boolean, default True
            Apply subsetting to the data order from the NSIDC, returning only data that meets the
            subset parameters. Spatial and temporal subsetting based on the input parameters happens
            by default when subset=True, but additional subsetting options are available.
            Spatial subsetting returns all data that are within the area of interest (but not complete
            granules. This eliminates false-positive granules returned by the metadata-level search)
        restart: boolean, default false
            If previous download was terminated unexpectedly. Run again with restart set to True to continue.
        **kwargs : key-value pairs
            Additional parameters to be passed to the subsetter.
            By default temporal and spatial subset keys are passed.
            Acceptable key values are ['format','projection','projection_parameters','Coverage'].
            The variable 'Coverage' list should be constructed using the `order_vars.wanted` attribute of the object.
            At this time (2020-05), only variable ('Coverage') parameters will be automatically formatted.
        See Also
        --------
        granules.download
        """
        """
        extract : boolean, default False
            Unzip the downloaded granules.
        Examples
        --------
        >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])
        >>> reg_a.earthdata_login(user_id,user_email)
        Earthdata Login password: ········
        >>> reg_a.download_granules('/path/to/download/folder')
        Beginning download of zipped output...
        Data request [##########] of x order(s) is complete.
        """
        # if not os.path.exists(path):
        # os.mkdir(path)
        # os.chdir(path)
        # NOTE(review): accessing the `granules` property presumably creates
        # self._granules on first use — TODO confirm against the property getter.
        if not hasattr(self, "_granules"):
            self.granules
        if restart == True:
            # Restarting: reuse the existing order IDs instead of placing a new order.
            pass
        else:
            # Place an order first if none exists yet (no order IDs recorded).
            if (
                not hasattr(self._granules, "orderIDs")
                or len(self._granules.orderIDs) == 0
            ):
                self.order_granules(verbose=verbose, subset=subset, **kwargs)
        self._granules.download(verbose, path, session=self._session, restart=restart)
# DevGoal: add testing? What do we test, and how, given this is a visualization.
# DevGoal(long term): modify this to accept additional inputs, etc.
# DevGoal: move this to it's own module for visualizing, etc.
# DevGoal: see Amy's data access notebook for a zoomed in map - implement here?
def visualize_spatial_extent(
self,
): # additional args, basemap, zoom level, cmap, export
"""
Creates a map displaying the input spatial extent
Examples
--------
>>> icepyx.query.Query('ATL06','path/spatialfile.shp',['2019-02-22','2019-02-28'])
>>> reg_a.visualize_spatial_extent
[visual map output]
"""
gdf = geospatial.geodataframe(self.extent_type, self._spat_extent)
try:
from shapely.geometry import Polygon
import geoviews as gv
gv.extension("bokeh")
line_geoms = Polygon(gdf["geometry"][0]).boundary
bbox_poly = gv.Path(line_geoms).opts(color="red", line_color="red")
tile = gv.tile_sources.EsriImagery.opts(width=500, height=500)
return tile * bbox_poly
except ImportError:
world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
f, ax = plt.subplots(1, figsize=(12, 6))
world.plot(ax=ax, facecolor="lightgray", edgecolor="gray")
gdf.plot(ax=ax, color="#FF8C00", alpha=0.7)
plt.show()
def visualize_elevation(self):
"""
Visualize elevation requested from OpenAltimetry API using datashader based on cycles
https://holoviz.org/tutorial/Large_Data.html
Returns
-------
map_cycle, map_rgt + lineplot_rgt : Holoviews objects
Holoviews data visualization elements
"""
viz = Visualize(self)
cycle_map, rgt_map = viz.viz_elevation()
return cycle_map, rgt_map
| 39.548488 | 418 | 0.594511 |
7957b98bc64298134ef8e28ebd9ecc7a12a972b7 | 3,182 | py | Python | backend/imagineer_django/week3/week3/settings.py | cmdlhz/study_for_ver2 | 2e5110d0ce80e70a163a132c9c9cbb0cd9f4f134 | [
"MIT"
] | null | null | null | backend/imagineer_django/week3/week3/settings.py | cmdlhz/study_for_ver2 | 2e5110d0ce80e70a163a132c9c9cbb0cd9f4f134 | [
"MIT"
] | null | null | null | backend/imagineer_django/week3/week3/settings.py | cmdlhz/study_for_ver2 | 2e5110d0ce80e70a163a132c9c9cbb0cd9f4f134 | [
"MIT"
] | null | null | null | """
Django settings for week3 project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a real secret key is committed here in plain text; load it
# from an environment variable or secrets store before deploying.
SECRET_KEY = 'stk=k9eb_s18mxokwrf7ke8=d-hggpy&%k%1!e-8e_qw6cjg%9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Per the Django settings docs, this must list the served hostnames once
# DEBUG is turned off; an empty list is only acceptable in development.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # My apps
    'blog'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'week3.urls'
# Template engine: searches the project-level 'templates' dir plus each
# installed app's templates directory (APP_DIRS=True).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [ 'templates' ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'week3.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Default development database: a local SQLite file in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
] | 25.055118 | 91 | 0.693589 |
7957bad8f5bad7cb70c8311811091d949d79def5 | 3,001 | py | Python | cartoframes/data/dataset_info.py | oss-spanish-geoserver/cartoframes | 2e1a1aa785180f8c031688012b3c2663f7c7edc7 | [
"BSD-3-Clause"
] | null | null | null | cartoframes/data/dataset_info.py | oss-spanish-geoserver/cartoframes | 2e1a1aa785180f8c031688012b3c2663f7c7edc7 | [
"BSD-3-Clause"
] | null | null | null | cartoframes/data/dataset_info.py | oss-spanish-geoserver/cartoframes | 2e1a1aa785180f8c031688012b3c2663f7c7edc7 | [
"BSD-3-Clause"
] | null | null | null | import time
from warnings import warn
from carto.datasets import DatasetManager
from carto.exceptions import CartoException
from .utils import setting_value_exception
from ..columns import normalize_name
class DatasetInfo(object):
    """Privacy and name metadata for a CARTO table.

    Wraps the table metadata fetched through the CARTO ``DatasetManager`` and
    exposes read-only ``privacy``/``name`` properties. Changes must go
    through :meth:`update`, which validates and persists them.
    """

    # Privacy levels accepted by the CARTO API.
    PRIVATE = 'PRIVATE'
    PUBLIC = 'PUBLIC'
    LINK = 'LINK'

    def __init__(self, carto_context, table_name):
        self._metadata = self._get_metadata(carto_context, table_name)
        self._privacy = self._metadata.privacy
        self._name = self._metadata.name

    @property
    def privacy(self):
        # Read-only; assignment raises via the setter below.
        return self._privacy

    @privacy.setter
    def privacy(self, privacy):
        raise setting_value_exception('privacy', privacy)

    @property
    def name(self):
        # Read-only; assignment raises via the setter below.
        return self._name

    @name.setter
    def name(self, name):
        raise setting_value_exception('name', name)

    def update(self, privacy=None, name=None):
        """Validate and persist a new privacy level and/or table name.

        The metadata is only saved when at least one value actually changed.
        The name is normalized first; a warning is issued when normalization
        altered the requested name.
        """
        modified = False
        if privacy and self._validate_privacy(privacy):
            self._privacy = privacy.upper()
            modified = True
        if name:
            normalized_name = normalize_name(name)
            if self._validate_name(normalized_name):
                self._name = normalized_name
                modified = True
                if self._name != name:
                    warn('Dataset name will be named `{}`'.format(self._name))
        if modified:
            self._save_metadata()

    def _get_metadata(self, carto_context, table_name, retries=6, retry_wait_time=1):
        """Fetch the table metadata, retrying 'not found' with exponential backoff.

        A 'NotFoundException' (matched by class name, since the concrete carto
        exception type is not imported here) is retried up to ``retries``
        times, doubling ``retry_wait_time`` on each attempt.
        """
        ds_manager = DatasetManager(carto_context.auth_client)
        try:
            return ds_manager.get(table_name)
        except Exception as e:
            if type(e).__name__ == 'NotFoundException' and retries > 0:
                # if retry_wait_time > 7:  # it should be after more than 15 seconds
                #     warn('We are still procesing the CARTO table. Sorry for the delay.')
                time.sleep(retry_wait_time)
                # Bug fix: the retry's result must be returned; previously the
                # recursive call's value was discarded, so every retry path
                # returned None.
                return self._get_metadata(carto_context=carto_context, table_name=table_name,
                                          retries=retries - 1, retry_wait_time=retry_wait_time * 2)
            # Bug fix: the two message fragments previously concatenated
            # without a separating space ("metadata.Please").
            raise CartoException('We could not get the table metadata. '
                                 'Please, try again in a few seconds or contact support for help')

    def _save_metadata(self):
        """Push the locally staged privacy/name values back to CARTO."""
        self._metadata.privacy = self._privacy
        self._metadata.name = self._name
        self._metadata.save()

    def _validate_privacy(self, privacy):
        """Return True when ``privacy`` is valid and differs from the current one.

        Raises:
            ValueError: if ``privacy`` is not one of PRIVATE, PUBLIC or LINK.
        """
        privacy = privacy.upper()
        if privacy not in [self.PRIVATE, self.PUBLIC, self.LINK]:
            raise ValueError('Wrong privacy. The privacy: {p} is not valid. You can use: {o1}, {o2}, {o3}'.format(
                p=privacy, o1=self.PRIVATE, o2=self.PUBLIC, o3=self.LINK))
        return privacy != self._privacy

    def _validate_name(self, name):
        """Return True when ``name`` differs from the current table name."""
        return name != self._name
| 32.619565 | 114 | 0.618794 |
7957bbce91fd9173a5c777b405209a92fef06679 | 504 | py | Python | core/tasks.py | Rwibutso/django_celery | 36d03339b8c435ef61089ba966668fea4011f949 | [
"MIT"
] | null | null | null | core/tasks.py | Rwibutso/django_celery | 36d03339b8c435ef61089ba966668fea4011f949 | [
"MIT"
] | null | null | null | core/tasks.py | Rwibutso/django_celery | 36d03339b8c435ef61089ba966668fea4011f949 | [
"MIT"
] | null | null | null | import time
from celery import shared_task
from django.core.mail import send_mail
@shared_task
def add(x, y):
    """Celery task: return the sum of ``x`` and ``y``."""
    return x + y
@shared_task
def send_email_task(email):
"background task to send an email asynchronously"
subject = 'Helo from Celery'
message = 'This is a test email sent asynchronously with Celery.'
time.sleep(5)
return send_mail(
subject,
message,
'alfabravo318@gmail.com', # from
[email], # to
fail_silently=False
) | 21 | 69 | 0.65873 |
7957bd8c0ba70e8ced584386676b935d3d3c7186 | 3,909 | py | Python | env/lib/python3.6/site-packages/telegram/inline/inlinequeryresultarticle.py | rogerscristo/BotFWD | 4f2ab1f4f4543c157ca0a79084536c065f74159f | [
"MIT"
] | null | null | null | env/lib/python3.6/site-packages/telegram/inline/inlinequeryresultarticle.py | rogerscristo/BotFWD | 4f2ab1f4f4543c157ca0a79084536c065f74159f | [
"MIT"
] | 3 | 2017-09-01T22:18:30.000Z | 2017-09-01T22:24:57.000Z | env/lib/python3.6/site-packages/telegram/inline/inlinequeryresultarticle.py | rogerscristo/BotFWD | 4f2ab1f4f4543c157ca0a79084536c065f74159f | [
"MIT"
] | 3 | 2018-02-22T22:20:27.000Z | 2018-04-22T10:58:24.000Z | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram InlineQueryResultArticle."""
from telegram import InlineQueryResult
class InlineQueryResultArticle(InlineQueryResult):
    """This object represents a Telegram InlineQueryResultArticle.

    Args:
        id (:obj:`str`): Unique identifier for this result, 1-64 Bytes.
        title (:obj:`str`): Title of the result.
        input_message_content (:class:`telegram.InputMessageContent`): Content of the message to
            be sent.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Inline keyboard attached to
            the message.
        url (:obj:`str`, optional): URL of the result.
        hide_url (:obj:`bool`, optional): Pass True, if you don't want the URL to be shown in the
            message.
        description (:obj:`str`, optional): Short description of the result.
        thumb_url (:obj:`str`, optional): Url of the thumbnail for the result.
        thumb_width (:obj:`int`, optional): Thumbnail width.
        thumb_height (:obj:`int`, optional): Thumbnail height.
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Attributes mirror the constructor arguments; ``type`` is always
    ``'article'``. Optional attributes are only present on the instance when a
    truthy value was supplied at construction time.
    """

    def __init__(self,
                 id,
                 title,
                 input_message_content,
                 reply_markup=None,
                 url=None,
                 hide_url=None,
                 description=None,
                 thumb_url=None,
                 thumb_width=None,
                 thumb_height=None,
                 **kwargs):
        # Required
        super(InlineQueryResultArticle, self).__init__('article', id)
        self.title = title
        self.input_message_content = input_message_content

        # Optional: set an attribute only when a truthy value was supplied,
        # so absent fields are omitted from the serialized payload.
        optional_fields = (('reply_markup', reply_markup),
                           ('url', url),
                           ('hide_url', hide_url),
                           ('description', description),
                           ('thumb_url', thumb_url),
                           ('thumb_width', thumb_width),
                           ('thumb_height', thumb_height))
        for field_name, field_value in optional_fields:
            if field_value:
                setattr(self, field_name, field_value)
| 41.585106 | 98 | 0.622154 |
7957be0b77d128b775bfe6614d8dc44eeb4775af | 5,730 | py | Python | addons/account/tests/test_bank_statement_reconciliation.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | [
"MIT"
] | null | null | null | addons/account/tests/test_bank_statement_reconciliation.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | [
"MIT"
] | null | null | null | addons/account/tests/test_bank_statement_reconciliation.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | [
"MIT"
] | null | null | null | from odoo.addons.account.tests.account_test_classes import AccountingTestCase
from odoo.tests import tagged
@tagged('post_install', '-at_install')
class TestBankStatementReconciliation(AccountingTestCase):
    """Integration tests for reconciling bank statement lines with invoices.

    Exercises the reconciliation widget's proposition matching, the full
    invoice/statement reconciliation flow, and multi-currency statement
    confirmation, using helper factories :meth:`create_invoice` and
    :meth:`create_statement_line`.
    """
    def setUp(self):
        # Cache the model proxies and shared fixtures used by every test.
        super(TestBankStatementReconciliation, self).setUp()
        self.i_model = self.env['account.invoice']
        self.il_model = self.env['account.invoice.line']
        self.bs_model = self.env['account.bank.statement']
        self.bsl_model = self.env['account.bank.statement.line']
        self.reconciliation_widget = self.env['account.reconciliation.widget']
        self.partner = self.env['res.partner'].create({'name': 'test'})
        self.currency_usd_id = self.env.ref("base.USD").id
        self.currency_euro_id = self.env.ref("base.EUR").id
    def test_reconciliation_proposition(self):
        """A statement line whose amount exactly matches an open invoice should
        get that invoice's receivable move line as its single proposition."""
        rcv_mv_line = self.create_invoice(100)
        st_line = self.create_statement_line(100)
        # exact amount match
        rec_prop = self.reconciliation_widget.get_bank_statement_line_data(st_line.ids)['lines']
        prop = rec_prop[0]['reconciliation_proposition']
        self.assertEqual(len(prop), 1)
        self.assertEqual(prop[0]['id'], rcv_mv_line.id)
    def test_full_reconcile(self):
        self._reconcile_invoice_with_statement(False)
    def test_post_at_bank_rec_full_reconcile(self):
        """ Test the full reconciliation of a bank statement directly with an invoice.
        """
        self._reconcile_invoice_with_statement(True)
    def _reconcile_invoice_with_statement(self, post_at_bank_rec):
        """ Tests the reconciliation of an invoice with a bank statement, using
        the provided 'post at bank reconciliation' value for the bank journal
        where to generate the statement.
        """
        self.bs_model.with_context(journal_type='bank')._default_journal().post_at_bank_reconciliation = post_at_bank_rec
        rcv_mv_line = self.create_invoice(100)
        st_line = self.create_statement_line(100)
        # reconcile
        st_line.process_reconciliation(counterpart_aml_dicts=[{
            'move_line': rcv_mv_line,
            'credit': 100,
            'debit': 0,
            'name': rcv_mv_line.name,
        }])
        # check everything went as expected
        self.assertTrue(st_line.journal_entry_ids)
        counterpart_mv_line = None
        # Locate the receivable-side move line generated by the reconciliation.
        for l in st_line.journal_entry_ids:
            if l.account_id.user_type_id.type == 'receivable':
                counterpart_mv_line = l
                break
        self.assertIsNotNone(counterpart_mv_line)
        self.assertTrue(rcv_mv_line.reconciled)
        self.assertTrue(counterpart_mv_line.reconciled)
        self.assertEqual(counterpart_mv_line.matched_credit_ids, rcv_mv_line.matched_debit_ids)
        self.assertEqual(rcv_mv_line.invoice_id.state, 'paid', "The related invoice's state should now be 'paid'")
    def test_reconcile_with_write_off(self):
        # TODO: not implemented yet.
        pass
    def create_invoice(self, amount):
        """ Return the move line that gets to be reconciled (the one in the receivable account) """
        vals = {'partner_id': self.partner.id,
                'type': 'out_invoice',
                'name': '-',
                'currency_id': self.env.user.company_id.currency_id.id,
                }
        # new creates a temporary record to apply the on_change afterwards
        invoice = self.i_model.new(vals)
        invoice._onchange_partner_id()
        vals.update({'account_id': invoice.account_id.id})
        invoice = self.i_model.create(vals)
        self.il_model.create({
            'quantity': 1,
            'price_unit': amount,
            'invoice_id': invoice.id,
            'name': '.',
            'account_id': self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_revenue').id)], limit=1).id,
        })
        invoice.action_invoice_open()
        mv_line = None
        # Pick the receivable move line created by validating the invoice.
        for l in invoice.move_id.line_ids:
            if l.account_id.id == vals['account_id']:
                mv_line = l
        self.assertIsNotNone(mv_line)
        return mv_line
    def create_statement_line(self, st_line_amount):
        """Create and return a bank statement line of ``st_line_amount`` for the test partner."""
        journal = self.bs_model.with_context(journal_type='bank')._default_journal()
        #journal = self.env.ref('l10n_be.bank_journal')
        bank_stmt = self.bs_model.create({'journal_id': journal.id})
        bank_stmt_line = self.bsl_model.create({
            'name': '_',
            'statement_id': bank_stmt.id,
            'partner_id': self.partner.id,
            'amount': st_line_amount,
        })
        return bank_stmt_line
    def test_confirm_statement_usd(self):
        """Confirming a statement on a USD journal while the company currency is
        EUR should complete without raising (no explicit assertions here)."""
        company = self.env.ref('base.main_company')
        # Switch the company currency to EUR directly in SQL, bypassing the ORM.
        self.cr.execute("UPDATE res_company SET currency_id = %s WHERE id = %s", [self.currency_euro_id, company.id])
        self.env['res.currency.rate'].search([]).unlink()
        self.env['res.currency.rate'].create({
            'currency_id': self.currency_usd_id,
            'rate': 2.0,
            'name': '2001-01-01',
        })
        bank_journal_usd = self.env['account.journal'].create({
            'name': 'Bank US',
            'type': 'bank',
            'code': 'BNK68',
            'currency_id': self.currency_usd_id,
        })
        statement = self.bs_model.create({
            'journal_id': bank_journal_usd.id,
            'balance_end_real': 100,
            'line_ids': [(0, 0, {
                'name': '_',
                'partner_id': self.partner.id,
                'amount': 100,
                'account_id': bank_journal_usd.default_debit_account_id.id,
            })],
        })
        statement.button_open()
        statement.button_confirm_bank()
| 40.638298 | 152 | 0.629494 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.