hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7957be7789dfa9e8117e2d80e97257197db65655 | 2,235 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/minimum-cost-to-make-at-least-one-valid-path-in-a-grid.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/minimum-cost-to-make-at-least-one-valid-path-in-a-grid.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/minimum-cost-to-make-at-least-one-valid-path-in-a-grid.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: O(m * n)
# Space: O(m * n)
import collections
# A* Search Algorithm without heap
# A* search without a heap: really a 0-1 BFS using two buckets, since every
# edge costs either 0 (follow the arrow already in the cell) or 1 (change it).
class Solution(object):
    def minCost(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int
        """
        # 1-based direction index matches the encoding stored in grid:
        # 1 = right, 2 = left, 3 = down, 4 = up.
        directions = [(0, 1), (0, -1), (1, 0), (-1, 0)]
        def a_star(grid, b, t):
            # b: start cell, t: target cell (both (row, col) tuples).
            R, C = len(grid), len(grid[0])
            # f: cost of every cell currently in `closer`; dh: cost increment.
            f, dh = 0, 1
            # `closer` holds cells reachable with cost f,
            # `detour` holds cells reachable with cost f + 1.
            closer, detour = [b], []
            lookup = set()  # cells already expanded
            while closer or detour:
                if not closer:
                    # Current cost level exhausted: advance to the next one.
                    f += dh
                    closer, detour = detour, closer
                b = closer.pop()
                if b == t:
                    return f
                if b in lookup:
                    continue
                lookup.add(b)
                for nd, (dr, dc) in enumerate(directions, 1):
                    nb = (b[0]+dr, b[1]+dc)
                    if not (0 <= nb[0] < R and 0 <= nb[1] < C and nb not in lookup):
                        continue
                    # Free move if the cell's arrow already points this way,
                    # otherwise the neighbour needs one more modification.
                    (closer if nd == grid[b[0]][b[1]] else detour).append(nb)
            return -1
        return a_star(grid, (0, 0), (len(grid)-1, len(grid[0])-1))
# Time: O(m * n)
# Space: O(m * n)
# 0-1 bfs solution
class Solution2(object):
    def minCost(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int
        """
        # 0-1 BFS: following a cell's pre-printed arrow costs 0, overriding
        # it costs 1.  Zero-cost neighbours go to the front of the deque,
        # one-cost neighbours to the back, so cells pop in cost order.
        moves = [(0, 1), (0, -1), (1, 0), (-1, 0)]
        rows, cols = len(grid), len(grid[0])
        start, goal = (0, 0), (rows - 1, cols - 1)
        best = {start: 0}  # cheapest known cost per cell
        dq = collections.deque([(start, 0)])
        while dq:
            cell, dist = dq.popleft()
            if cell == goal:
                return dist
            if dist > best[cell]:
                continue  # stale queue entry, a cheaper path was found
            r, c = cell
            for arrow, (dr, dc) in enumerate(moves, 1):
                nr, nc = r + dr, c + dc
                if not (0 <= nr < rows and 0 <= nc < cols):
                    continue
                step = 0 if arrow == grid[r][c] else 1
                nxt = (nr, nc)
                if nxt in best and best[nxt] <= dist + step:
                    continue
                best[nxt] = dist + step
                if step:
                    dq.append((nxt, dist + step))
                else:
                    dq.appendleft((nxt, dist))
        return -1  # never reach here
| 31.041667 | 84 | 0.391946 |
7957be7daea50274a7d064e78a2d83718f8b0438 | 7,401 | py | Python | cohesity_management_sdk/models/register_application_servers_parameters.py | cohesity/management-sdk-python | 867d8c0c40dd317cdb017902c895527da7ae31c0 | [
"Apache-2.0"
] | 18 | 2019-09-24T17:35:53.000Z | 2022-03-25T08:08:47.000Z | cohesity_management_sdk/models/register_application_servers_parameters.py | cohesity/management-sdk-python | 867d8c0c40dd317cdb017902c895527da7ae31c0 | [
"Apache-2.0"
] | 18 | 2019-03-29T19:32:29.000Z | 2022-01-03T23:16:45.000Z | cohesity_management_sdk/models/register_application_servers_parameters.py | cohesity/management-sdk-python | 867d8c0c40dd317cdb017902c895527da7ae31c0 | [
"Apache-2.0"
] | 16 | 2019-02-27T06:54:12.000Z | 2021-11-16T18:10:24.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class RegisterApplicationServersParameters(object):
    """Model for the 'RegisterApplicationServersParameters' API payload.

    Holds the parameters required to register Application Servers (such as
    SQL or Exchange instances) running in a Protection Source.

    Attributes:
        applications (list of ApplicationEnum): Types of applications (e.g.
            'kSQL', 'kExchange') running on the Protection Source.
        encryption_key (string): If set, the credential was encrypted with
            'user_ecryption_key' (first with the internal magento key, then
            with the user encryption key).
        has_persistent_agent (bool): True if a persistent agent runs on the
            host; in that case credentials are not used to log into the host
            environment.  If unset, credentials must be specified.
        is_internal_encrypted (bool): True if credentials are encrypted by
            the internal magneto key.
        password (string): Password of the username used to access the
            target source.
        protection_source_id (long|int): Id of the Protection Source that
            contains one or more Application Servers.
        username (string): Username used to access the target source.
    """
    # Maps model attribute names to the property names used by the API.
    _names = {
        "applications": 'applications',
        "encryption_key": 'encryptionKey',
        "has_persistent_agent": 'hasPersistentAgent',
        "is_internal_encrypted": 'isInternalEncrypted',
        "password": 'password',
        "protection_source_id": 'protectionSourceId',
        "username": 'username'
    }
    def __init__(self,
                 applications=None,
                 encryption_key=None,
                 has_persistent_agent=None,
                 is_internal_encrypted=None,
                 password=None,
                 protection_source_id=None,
                 username=None):
        """Constructor for the RegisterApplicationServersParameters class"""
        # Every field is optional and stored as-is; missing values stay None.
        self.applications = applications
        self.encryption_key = encryption_key
        self.has_persistent_agent = has_persistent_agent
        self.is_internal_encrypted = is_internal_encrypted
        self.password = password
        self.protection_source_id = protection_source_id
        self.username = username
    @classmethod
    def from_dictionary(cls, dictionary):
        """Create an instance of this model from a dictionary.

        Args:
            dictionary (dict): Deserialized server response; keys must match
                the API property names declared in ``_names``.

        Returns:
            RegisterApplicationServersParameters or None: a populated model,
            or None when *dictionary* is None.
        """
        if dictionary is None:
            return None
        # Pull each API property (absent keys become None) and build the
        # model via keyword arguments.
        kwargs = {attr: dictionary.get(api_key)
                  for attr, api_key in cls._names.items()}
        return cls(**kwargs)
| 50.006757 | 81 | 0.673558 |
7957bec8d86fcab916e8f424b809bcfe5e9b42b4 | 16,811 | py | Python | oslo_messaging/tests/drivers/pika/test_poller.py | ox12345/oslo.messaging | bdb21c0bcddfb2dac1e0f4d926e7df53d975bf0c | [
"Apache-1.1"
] | null | null | null | oslo_messaging/tests/drivers/pika/test_poller.py | ox12345/oslo.messaging | bdb21c0bcddfb2dac1e0f4d926e7df53d975bf0c | [
"Apache-1.1"
] | null | null | null | oslo_messaging/tests/drivers/pika/test_poller.py | ox12345/oslo.messaging | bdb21c0bcddfb2dac1e0f4d926e7df53d975bf0c | [
"Apache-1.1"
] | null | null | null | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import time
import unittest
from concurrent import futures
from six.moves import mock
from oslo_messaging._drivers.pika_driver import pika_exceptions as pika_drv_exc
from oslo_messaging._drivers.pika_driver import pika_poller
# Unit tests for the base PikaPoller: connection/channel setup, queue binding
# declaration, and batching of incoming messages delivered via the channel
# consumer callback.
class PikaPollerTestCase(unittest.TestCase):
    def setUp(self):
        # Fake engine whose create_connection() yields a mock connection,
        # which in turn hands out a mock channel.
        self._pika_engine = mock.Mock()
        self._poller_connection_mock = mock.Mock()
        self._poller_channel_mock = mock.Mock()
        self._poller_connection_mock.channel.return_value = (
            self._poller_channel_mock
        )
        self._pika_engine.create_connection.return_value = (
            self._poller_connection_mock
        )
        self._executor = futures.ThreadPoolExecutor(1)
        # Emulate pika's connection.add_timeout(): run the callback on a
        # background thread after the requested delay.
        def timer_task(timeout, callback):
            time.sleep(timeout)
            callback()
        self._poller_connection_mock.add_timeout.side_effect = (
            lambda *args: self._executor.submit(timer_task, *args)
        )
        self._prefetch_count = 123
    @mock.patch("oslo_messaging._drivers.pika_driver.pika_poller.PikaPoller."
                "_declare_queue_binding")
    def test_start(self, declare_queue_binding_mock):
        # start() must open a connection, obtain a channel and declare the
        # queue bindings.
        poller = pika_poller.PikaPoller(
            self._pika_engine, 1, None, self._prefetch_count, None
        )
        poller.start(None)
        self.assertTrue(self._pika_engine.create_connection.called)
        self.assertTrue(self._poller_connection_mock.channel.called)
        self.assertTrue(declare_queue_binding_mock.called)
    def test_start_when_connection_unavailable(self):
        poller = pika_poller.PikaPoller(
            self._pika_engine, 1, None, self._prefetch_count, None
        )
        self._pika_engine.create_connection.side_effect = (
            pika_drv_exc.EstablishConnectionException
        )
        # start() should not raise socket.timeout exception
        poller.start(None)
        # stop is needed to stop reconnection background job
        poller.stop()
    @mock.patch("oslo_messaging._drivers.pika_driver.pika_poller.PikaPoller."
                "_declare_queue_binding")
    def test_message_processing(self, declare_queue_binding_mock):
        # A single delivered message must be wrapped in the incoming-message
        # class and handed to the callback as a one-element batch.
        res = []
        def on_incoming_callback(incoming):
            res.append(incoming)
        incoming_message_class_mock = mock.Mock()
        poller = pika_poller.PikaPoller(
            self._pika_engine, 1, None, self._prefetch_count,
            incoming_message_class=incoming_message_class_mock
        )
        unused = object()
        method = object()
        properties = object()
        body = object()
        poller.start(on_incoming_callback)
        poller._on_message_with_ack_callback(
            unused, method, properties, body
        )
        self.assertEqual(1, len(res))
        self.assertEqual([incoming_message_class_mock.return_value], res[0])
        # The wrapper must be constructed with engine, channel and the raw
        # frame parts (the first callback argument is ignored).
        incoming_message_class_mock.assert_called_once_with(
            self._pika_engine, self._poller_channel_mock, method, properties,
            body
        )
        self.assertTrue(self._pika_engine.create_connection.called)
        self.assertTrue(self._poller_connection_mock.channel.called)
        self.assertTrue(declare_queue_binding_mock.called)
    @mock.patch("oslo_messaging._drivers.pika_driver.pika_poller.PikaPoller."
                "_declare_queue_binding")
    def test_message_processing_batch(self, declare_queue_binding_mock):
        # When batch size n is reached, all n messages must be delivered to
        # the callback in a single batch.
        incoming_message_class_mock = mock.Mock()
        n = 10
        params = []
        res = []
        def on_incoming_callback(incoming):
            res.append(incoming)
        poller = pika_poller.PikaPoller(
            self._pika_engine, n, None, self._prefetch_count,
            incoming_message_class=incoming_message_class_mock
        )
        for i in range(n):
            params.append((object(), object(), object(), object()))
        poller.start(on_incoming_callback)
        for i in range(n):
            poller._on_message_with_ack_callback(
                *params[i]
            )
        self.assertEqual(1, len(res))
        self.assertEqual(10, len(res[0]))
        self.assertEqual(n, incoming_message_class_mock.call_count)
        for i in range(n):
            self.assertEqual(incoming_message_class_mock.return_value,
                             res[0][i])
            self.assertEqual(
                (self._pika_engine, self._poller_channel_mock) + params[i][1:],
                incoming_message_class_mock.call_args_list[i][0]
            )
        self.assertTrue(self._pika_engine.create_connection.called)
        self.assertTrue(self._poller_connection_mock.channel.called)
        self.assertTrue(declare_queue_binding_mock.called)
    @mock.patch("oslo_messaging._drivers.pika_driver.pika_poller.PikaPoller."
                "_declare_queue_binding")
    def test_message_processing_batch_with_timeout(self,
                                                   declare_queue_binding_mock):
        # If fewer than n messages arrive, the batch must still be flushed
        # once the batch timeout (driven by add_timeout above) elapses.
        incoming_message_class_mock = mock.Mock()
        n = 10
        timeout = 1
        res = []
        evt = threading.Event()
        def on_incoming_callback(incoming):
            res.append(incoming)
            evt.set()
        poller = pika_poller.PikaPoller(
            self._pika_engine, n, timeout, self._prefetch_count,
            incoming_message_class=incoming_message_class_mock
        )
        params = []
        success_count = 5
        poller.start(on_incoming_callback)
        for i in range(n):
            params.append((object(), object(), object(), object()))
        for i in range(success_count):
            poller._on_message_with_ack_callback(
                *params[i]
            )
        self.assertTrue(evt.wait(timeout * 2))
        self.assertEqual(1, len(res))
        self.assertEqual(success_count, len(res[0]))
        self.assertEqual(success_count, incoming_message_class_mock.call_count)
        for i in range(success_count):
            self.assertEqual(incoming_message_class_mock.return_value,
                             res[0][i])
            self.assertEqual(
                (self._pika_engine, self._poller_channel_mock) + params[i][1:],
                incoming_message_class_mock.call_args_list[i][0]
            )
        self.assertTrue(self._pika_engine.create_connection.called)
        self.assertTrue(self._poller_connection_mock.channel.called)
        self.assertTrue(declare_queue_binding_mock.called)
# Tests for RpcServicePikaPoller: verifies the six queue bindings (no-ack and
# ack variants of the topic-wide, per-server and worker fanout queues) that an
# RPC service poller must declare at start-up.
class RpcServicePikaPollerTestCase(unittest.TestCase):
    def setUp(self):
        self._pika_engine = mock.Mock()
        self._poller_connection_mock = mock.Mock()
        self._poller_channel_mock = mock.Mock()
        self._poller_connection_mock.channel.return_value = (
            self._poller_channel_mock
        )
        self._pika_engine.create_connection.return_value = (
            self._poller_connection_mock
        )
        # Deterministic queue-name stub: "<topic>_<server>_<no_ack>_<worker>"
        # so the expected queue/routing-key strings below are predictable.
        self._pika_engine.get_rpc_queue_name.side_effect = (
            lambda topic, server, no_ack, worker=False:
                "_".join([topic, str(server), str(no_ack), str(worker)])
        )
        self._pika_engine.get_rpc_exchange_name.side_effect = (
            lambda exchange: exchange
        )
        self._prefetch_count = 123
        self._target = mock.Mock(exchange="exchange", topic="topic",
                                 server="server")
        self._pika_engine.rpc_queue_expiration = 12345
    @mock.patch("oslo_messaging._drivers.pika_driver.pika_message."
                "RpcPikaIncomingMessage")
    def test_declare_rpc_queue_bindings(self, rpc_pika_incoming_message_mock):
        poller = pika_poller.RpcServicePikaPoller(
            self._pika_engine, self._target, 1, None,
            self._prefetch_count
        )
        poller.start(None)
        self.assertTrue(self._pika_engine.create_connection.called)
        self.assertTrue(self._poller_connection_mock.channel.called)
        declare_queue_binding_by_channel_mock = (
            self._pika_engine.declare_queue_binding_by_channel
        )
        # Three queues (topic, topic.server, worker) x two ack modes = 6.
        self.assertEqual(
            6, declare_queue_binding_by_channel_mock.call_count
        )
        declare_queue_binding_by_channel_mock.assert_has_calls((
            mock.call(
                channel=self._poller_channel_mock, durable=False,
                exchange="exchange",
                exchange_type='direct',
                queue="topic_None_True_False",
                queue_expiration=12345,
                routing_key="topic_None_True_False"
            ),
            mock.call(
                channel=self._poller_channel_mock, durable=False,
                exchange="exchange",
                exchange_type='direct',
                queue="topic_server_True_False",
                queue_expiration=12345,
                routing_key="topic_server_True_False"
            ),
            mock.call(
                channel=self._poller_channel_mock, durable=False,
                exchange="exchange",
                exchange_type='direct',
                queue="topic_server_True_True",
                queue_expiration=12345,
                routing_key="topic_all_workers_True_False"
            ),
            mock.call(
                channel=self._poller_channel_mock, durable=False,
                exchange="exchange",
                exchange_type='direct',
                queue="topic_None_False_False",
                queue_expiration=12345,
                routing_key="topic_None_False_False"
            ),
            mock.call(
                channel=self._poller_channel_mock, durable=False,
                exchange="exchange",
                exchange_type='direct',
                queue="topic_server_False_False",
                queue_expiration=12345,
                routing_key='topic_server_False_False'
            ),
            mock.call(
                channel=self._poller_channel_mock, durable=False,
                exchange="exchange",
                exchange_type='direct',
                queue="topic_server_False_True",
                queue_expiration=12345,
                routing_key='topic_all_workers_False_False'
            )
        ))
# Tests for RpcReplyPikaPoller: a reply poller binds exactly one queue, whose
# name doubles as its routing key, on the reply exchange.
class RpcReplyServicePikaPollerTestCase(unittest.TestCase):
    def setUp(self):
        self._pika_engine = mock.Mock()
        self._poller_connection_mock = mock.Mock()
        self._poller_channel_mock = mock.Mock()
        self._poller_connection_mock.channel.return_value = (
            self._poller_channel_mock
        )
        self._pika_engine.create_connection.return_value = (
            self._poller_connection_mock
        )
        self._prefetch_count = 123
        self._exchange = "rpc_reply_exchange"
        self._queue = "rpc_reply_queue"
        # Retry/expiration knobs read by the poller during start()/stop().
        self._pika_engine.rpc_reply_retry_delay = 12132543456
        self._pika_engine.rpc_queue_expiration = 12345
        self._pika_engine.rpc_reply_retry_attempts = 3
    def test_declare_rpc_reply_queue_binding(self):
        poller = pika_poller.RpcReplyPikaPoller(
            self._pika_engine, self._exchange, self._queue, 1, None,
            self._prefetch_count,
        )
        poller.start(None)
        poller.stop()
        declare_queue_binding_by_channel_mock = (
            self._pika_engine.declare_queue_binding_by_channel
        )
        # Exactly one binding: queue name == routing key on the reply exchange.
        self.assertEqual(
            1, declare_queue_binding_by_channel_mock.call_count
        )
        declare_queue_binding_by_channel_mock.assert_called_once_with(
            channel=self._poller_channel_mock, durable=False,
            exchange='rpc_reply_exchange', exchange_type='direct',
            queue='rpc_reply_queue', queue_expiration=12345,
            routing_key='rpc_reply_queue'
        )
# Tests for NotificationPikaPoller: one binding per (target, priority) pair,
# with queue names "<topic>.<priority>" by default, or a single shared custom
# queue name bound once per routing key when one is supplied.
class NotificationPikaPollerTestCase(unittest.TestCase):
    def setUp(self):
        self._pika_engine = mock.Mock()
        self._poller_connection_mock = mock.Mock()
        self._poller_channel_mock = mock.Mock()
        self._poller_connection_mock.channel.return_value = (
            self._poller_channel_mock
        )
        self._pika_engine.create_connection.return_value = (
            self._poller_connection_mock
        )
        self._prefetch_count = 123
        # Three (target, priority) pairs spanning two exchanges.
        self._target_and_priorities = (
            (
                mock.Mock(exchange="exchange1", topic="topic1",
                          server="server1"), 1
            ),
            (
                mock.Mock(exchange="exchange1", topic="topic1"), 2
            ),
            (
                mock.Mock(exchange="exchange2", topic="topic2",), 1
            ),
        )
        # Sentinel: the poller must pass this through as `durable` untouched.
        self._pika_engine.notification_persistence = object()
    def test_declare_notification_queue_bindings_default_queue(self):
        poller = pika_poller.NotificationPikaPoller(
            self._pika_engine, self._target_and_priorities, 1, None,
            self._prefetch_count, None
        )
        poller.start(None)
        self.assertTrue(self._pika_engine.create_connection.called)
        self.assertTrue(self._poller_connection_mock.channel.called)
        declare_queue_binding_by_channel_mock = (
            self._pika_engine.declare_queue_binding_by_channel
        )
        self.assertEqual(
            3, declare_queue_binding_by_channel_mock.call_count
        )
        declare_queue_binding_by_channel_mock.assert_has_calls((
            mock.call(
                channel=self._poller_channel_mock,
                durable=self._pika_engine.notification_persistence,
                exchange="exchange1",
                exchange_type='direct',
                queue="topic1.1",
                queue_expiration=None,
                routing_key="topic1.1"
            ),
            mock.call(
                channel=self._poller_channel_mock,
                durable=self._pika_engine.notification_persistence,
                exchange="exchange1",
                exchange_type='direct',
                queue="topic1.2",
                queue_expiration=None,
                routing_key="topic1.2"
            ),
            mock.call(
                channel=self._poller_channel_mock,
                durable=self._pika_engine.notification_persistence,
                exchange="exchange2",
                exchange_type='direct',
                queue="topic2.1",
                queue_expiration=None,
                routing_key="topic2.1"
            )
        ))
    def test_declare_notification_queue_bindings_custom_queue(self):
        poller = pika_poller.NotificationPikaPoller(
            self._pika_engine, self._target_and_priorities, 1, None,
            self._prefetch_count, "custom_queue_name"
        )
        poller.start(None)
        self.assertTrue(self._pika_engine.create_connection.called)
        self.assertTrue(self._poller_connection_mock.channel.called)
        declare_queue_binding_by_channel_mock = (
            self._pika_engine.declare_queue_binding_by_channel
        )
        self.assertEqual(
            3, declare_queue_binding_by_channel_mock.call_count
        )
        # Same routing keys as the default case, but every binding targets
        # the single user-supplied queue.
        declare_queue_binding_by_channel_mock.assert_has_calls((
            mock.call(
                channel=self._poller_channel_mock,
                durable=self._pika_engine.notification_persistence,
                exchange="exchange1",
                exchange_type='direct',
                queue="custom_queue_name",
                queue_expiration=None,
                routing_key="topic1.1"
            ),
            mock.call(
                channel=self._poller_channel_mock,
                durable=self._pika_engine.notification_persistence,
                exchange="exchange1",
                exchange_type='direct',
                queue="custom_queue_name",
                queue_expiration=None,
                routing_key="topic1.2"
            ),
            mock.call(
                channel=self._poller_channel_mock,
                durable=self._pika_engine.notification_persistence,
                exchange="exchange2",
                exchange_type='direct',
                queue="custom_queue_name",
                queue_expiration=None,
                routing_key="topic2.1"
            )
        ))
7957bee9e2d547669d8f4cccf3172443848683df | 399 | py | Python | Backend/open_garden/open_garden/wsgi.py | nananananate/open-garden-py | c0d729f99f3c73e43b22f373b54e170374f0d057 | [
"MIT"
] | 7 | 2020-10-02T22:08:48.000Z | 2022-01-29T17:09:25.000Z | Backend/open_garden/open_garden/wsgi.py | nananananate/open-garden-py | c0d729f99f3c73e43b22f373b54e170374f0d057 | [
"MIT"
] | 12 | 2020-09-30T21:49:06.000Z | 2020-11-01T06:52:49.000Z | Backend/open_garden/open_garden/wsgi.py | nananananate/open-garden-py | c0d729f99f3c73e43b22f373b54e170374f0d057 | [
"MIT"
] | 7 | 2020-10-01T04:07:23.000Z | 2022-01-29T17:09:26.000Z | """
WSGI config for open_garden project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "open_garden.settings")
application = get_wsgi_application()
| 23.470588 | 78 | 0.789474 |
7957bf64e90b18c27c69f1bab344da305b36b946 | 2,450 | py | Python | var/spack/repos/builtin/packages/tioga/package.py | klevzoff/spack | 396936d24173254ecf4148bc460702185e4c99e5 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-10T13:47:48.000Z | 2019-04-17T13:05:17.000Z | var/spack/repos/builtin/packages/tioga/package.py | klevzoff/spack | 396936d24173254ecf4148bc460702185e4c99e5 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 17 | 2019-03-21T15:54:00.000Z | 2022-03-29T19:34:28.000Z | var/spack/repos/builtin/packages/tioga/package.py | Kerilk/spack | e027942b55407a4a5fe323b93d8e57200c873a43 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2018-04-06T09:04:11.000Z | 2020-01-24T12:52:12.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
from spack import *
class Tioga(CMakePackage, CudaPackage):
    """Topology Independent Overset Grid Assembly (TIOGA)"""
    homepage = "https://github.com/jsitaraman/tioga"
    git = "https://github.com/jsitaraman/tioga.git"
    maintainers = ['jsitaraman', 'sayerhs']
    version('develop', branch='exawind')
    version('master', branch='master')
    # Shared libs default off on macOS, on elsewhere.
    variant('shared', default=sys.platform != 'darwin',
            description="Build shared libraries")
    variant('pic', default=True,
            description="Position independent code")
    variant('nodegid', default=True,
            description="Enable support for global Node IDs")
    variant('timers', default=False,
            description="Enable timers")
    variant('stats', default=False,
            description="Enable output of holecut stats")
    variant('cxxstd', default='11',
            values=('11', '14'), multi=False,
            description="C++ standard to use")
    depends_on('mpi')
    depends_on('cuda@9.0.0:', when='+cuda')
    # Tioga has the fortran module file problem with parallel builds
    parallel = False
    def cmake_args(self):
        """Translate the package variants into TIOGA's CMake options."""
        spec = self.spec
        args = [
            self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
            self.define_from_variant('CMAKE_POSITION_INDEPENDENT_CODE', 'pic'),
            self.define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'),
            self.define_from_variant('TIOGA_HAS_NODEGID', 'nodegid'),
            self.define_from_variant('TIOGA_ENABLE_TIMERS', 'timers'),
            self.define_from_variant('TIOGA_OUTPUT_STATS', 'stats'),
            self.define_from_variant('TIOGA_ENABLE_CUDA', 'cuda'),
        ]
        if '+cuda' in self.spec:
            args.append(self.define('CMAKE_CUDA_SEPARABLE_COMPILATION', True))
            # Currently TIOGA only supports one device arch during specialization
            cuda_arch = self.spec.variants['cuda_arch'].value
            if cuda_arch:
                # Pick the highest requested compute capability.
                arch_sorted = list(sorted(cuda_arch, reverse=True))
                args.append(self.define('TIOGA_CUDA_SM', arch_sorted[0]))
        if 'darwin' in spec.architecture:
            args.append(self.define('CMAKE_MACOSX_RPATH', True))
        return args
7957bf8c8d9186e9d93c7d17b03384f70ac3f03c | 7,234 | py | Python | vedaseg/models/utils/conv_module.py | E18301194/vedaseg | c62c8ea46dbba12f03262452dd7bed22969cfe4e | [
"Apache-2.0"
] | 2 | 2020-07-15T02:36:46.000Z | 2021-03-08T03:18:26.000Z | vedaseg/models/utils/conv_module.py | E18301194/vedaseg | c62c8ea46dbba12f03262452dd7bed22969cfe4e | [
"Apache-2.0"
] | null | null | null | vedaseg/models/utils/conv_module.py | E18301194/vedaseg | c62c8ea46dbba12f03262452dd7bed22969cfe4e | [
"Apache-2.0"
] | 1 | 2021-09-16T09:40:12.000Z | 2021-09-16T09:40:12.000Z | # modify from mmcv and mmdetection
import warnings
import torch.nn as nn
from .norm import build_norm_layer
from .act import build_act_layer
from .registry import UTILS
# Registry of supported convolution layer types, keyed by the `type` value of
# a conv config dict.
conv_cfg = {
    'Conv': nn.Conv2d,
    # TODO: octave conv
}


def build_conv_layer(cfg, *args, **kwargs):
    """Build a convolution layer from a config dict.

    Args:
        cfg (dict): Must contain the key ``type`` naming a layer registered
            in ``conv_cfg``; every remaining key is forwarded to the layer
            constructor as a keyword argument.
        *args: Positional arguments for the layer constructor
            (e.g. in_channels, out_channels, kernel_size).
        **kwargs: Additional keyword arguments for the layer constructor.

    Returns:
        nn.Module: The instantiated convolution layer.

    Raises:
        KeyError: If ``cfg['type']`` is not a registered conv layer type.
    """
    assert isinstance(cfg, dict) and 'type' in cfg
    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in conv_cfg:
        # Bug fix: the message previously said "norm type" (copied from the
        # norm builder); this function builds *conv* layers.
        raise KeyError('Unrecognized conv type {}'.format(layer_type))
    conv_layer = conv_cfg[layer_type]
    layer = conv_layer(*args, **kwargs, **cfg_)
    return layer
@UTILS.register_module
class ConvModule(nn.Module):
    """A conv block that contains conv/norm/activation layers.

    The three sub-layers are applied in the sequence given by ``order``;
    an optional 2d dropout is always applied last.

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int or tuple[int]): Same as nn.Conv2d.
        padding (int or tuple[int]): Same as nn.Conv2d.
        dilation (int or tuple[int]): Same as nn.Conv2d.
        groups (int): Same as nn.Conv2d.
        bias (bool or str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
            False.
        conv_cfg (dict): Config dict for convolution layer.
        norm_cfg (dict): Config dict for normalization layer; None disables
            normalization.
        act_cfg (str or None): Config dict for activation layer; None
            disables activation.
        order (tuple[str]): The order of conv/norm/activation layers. It is a
            sequence of "conv", "norm" and "act". Examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").
        dropout (float or None): Dropout2d probability applied after the
            ordered layers; None disables dropout.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias='auto',
                 conv_cfg=dict(type='Conv'),
                 norm_cfg=None,
                 act_cfg=dict(type='Relu', inplace=True),
                 order=('conv', 'norm', 'act'),
                 dropout=None):
        super(ConvModule, self).__init__()
        assert isinstance(conv_cfg, dict)
        assert norm_cfg is None or isinstance(norm_cfg, dict)
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.order = order
        # `order` must be a permutation of exactly these three layer names.
        assert isinstance(self.order, tuple) and len(self.order) == 3
        assert set(order) == set(['conv', 'norm', 'act'])
        self.with_norm = norm_cfg is not None
        self.with_act = act_cfg is not None
        self.with_dropout = dropout is not None
        # if the conv layer is before a norm layer, bias is unnecessary.
        if bias == 'auto':
            bias = False if self.with_norm else True
        self.with_bias = bias
        if self.with_norm and self.with_bias:
            warnings.warn('ConvModule has norm and bias at the same time')
        # build convolution layer
        self.conv = build_conv_layer(conv_cfg,
                                     in_channels,
                                     out_channels,
                                     kernel_size,
                                     stride=stride,
                                     padding=padding,
                                     dilation=dilation,
                                     groups=groups,
                                     bias=bias)
        # export the attributes of self.conv to a higher level for convenience
        self.in_channels = self.conv.in_channels
        self.out_channels = self.conv.out_channels
        self.kernel_size = self.conv.kernel_size
        self.stride = self.conv.stride
        self.padding = self.conv.padding
        self.dilation = self.conv.dilation
        self.transposed = self.conv.transposed
        self.output_padding = self.conv.output_padding
        self.groups = self.conv.groups
        # build normalization layers
        if self.with_norm:
            # norm layer is after conv layer
            if order.index('norm') > order.index('conv'):
                norm_channels = out_channels
            else:
                norm_channels = in_channels
            self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
            self.add_module(self.norm_name, norm)
        # build activation layer
        if self.with_act:
            # activate layer is after conv layer
            if order.index('act') > order.index('conv'):
                act_channels = out_channels
            else:
                act_channels = in_channels
            self.act_name, act = build_act_layer(act_cfg, act_channels)
            self.add_module(self.act_name, act)
        if self.with_dropout:
            self.dropout = nn.Dropout2d(p=dropout)
    @property
    def norm(self):
        # Resolve the norm submodule by the name chosen at build time.
        return getattr(self, self.norm_name)
    @property
    def activate(self):
        # Resolve the activation submodule by the name chosen at build time.
        return getattr(self, self.act_name)
    def forward(self, x, activate=True, norm=True):
        # Apply the configured layers in `order`; callers may skip norm or
        # activation for this call via the flags.  Dropout (if any) is last.
        for layer in self.order:
            if layer == 'conv':
                x = self.conv(x)
            elif layer == 'norm' and norm and self.with_norm:
                x = self.norm(x)
            elif layer == 'act' and activate and self.with_act:
                x = self.activate(x)
        if self.with_dropout:
            x = self.dropout(x)
        return x
@UTILS.register_module
class ConvModules(nn.Module):
    """Sequential stack of ``num_convs`` :class:`ConvModule` blocks.

    The first block maps ``in_channels`` to ``out_channels``; every following
    block keeps ``out_channels``. All blocks share the same conv/norm/act
    configuration. ``dropouts`` may supply one dropout probability per block
    (``None`` entries disable dropout for that block).
    """

    # Sentinel that lets us drop the mutable ``dict`` default arguments while
    # still allowing callers to pass ``act_cfg=None`` explicitly to disable
    # the activation stage.
    _UNSET = object()

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias='auto',
                 conv_cfg=_UNSET,
                 norm_cfg=None,
                 act_cfg=_UNSET,
                 order=('conv', 'norm', 'act'),
                 dropouts=None,
                 num_convs=1):
        super().__init__()
        # Fix for the mutable-default-argument footgun: the previous
        # ``conv_cfg=dict(type='Conv')`` / ``act_cfg=dict(...)`` defaults were
        # single dicts shared by *all* instances, so mutating them downstream
        # would silently affect unrelated modules. Fresh dicts are built per
        # instance instead; explicit caller values pass through untouched.
        if conv_cfg is ConvModules._UNSET:
            conv_cfg = dict(type='Conv')
        if act_cfg is ConvModules._UNSET:
            act_cfg = dict(type='Relu', inplace=True)
        if dropouts is not None:
            assert num_convs == len(dropouts)
            per_block_dropout = list(dropouts)
        else:
            per_block_dropout = [None] * num_convs
        layers = []
        for ii in range(num_convs):
            # Only the first block changes the channel count.
            block_in = in_channels if ii == 0 else out_channels
            layers.append(
                ConvModule(block_in, out_channels, kernel_size, stride,
                           padding, dilation, groups, bias, conv_cfg,
                           norm_cfg, act_cfg, order, per_block_dropout[ii]))
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        """Run ``x`` through the stacked conv blocks."""
        feat = self.block(x)
        return feat
| 33.962441 | 88 | 0.545065 |
7957bfdafbb2e97e18b76a15e3314af9f0bf1aef | 656 | py | Python | Tests/JsonProcTest.py | RippeR37/PyPong | 601db4346f7c27c88226ce79317008941cbc5754 | [
"MIT"
] | 1 | 2018-12-06T06:16:49.000Z | 2018-12-06T06:16:49.000Z | Tests/JsonProcTest.py | RippeR37/PyPong | 601db4346f7c27c88226ce79317008941cbc5754 | [
"MIT"
] | 10 | 2016-01-07T19:22:44.000Z | 2016-01-10T14:32:37.000Z | Tests/JsonProcTest.py | RippeR37/PyPong | 601db4346f7c27c88226ce79317008941cbc5754 | [
"MIT"
] | null | null | null | from Systems.Network.Messages.JsonProc import JsonProc
import unittest
import json
class JsonProcTest(unittest.TestCase):
    """Serialization and round-trip tests for JsonProc."""

    PROC_NAME = "proc_name"

    def test_constructor(self):
        message = JsonProc(self.PROC_NAME)
        self.assertEqual(message.proc, self.PROC_NAME)

    def test_to_json(self):
        message = JsonProc(self.PROC_NAME)
        self.assertEqual(message.to_json(), '{"proc": "proc_name"}')

    def test_to_json_from_json(self):
        original = JsonProc(self.PROC_NAME)
        decoded = JsonProc("").from_json(json.loads(original.to_json()))
        self.assertEqual(original.proc, decoded.proc)
7957c111fda9814e04db2b546ea80f028575d9ae | 936 | py | Python | access360/api/model/payment/paymentModel.py | Kokoserver/ac | 8226a7e212aef60c0903204ca2b0602fef06f133 | [
"MIT"
] | null | null | null | access360/api/model/payment/paymentModel.py | Kokoserver/ac | 8226a7e212aef60c0903204ca2b0602fef06f133 | [
"MIT"
] | null | null | null | access360/api/model/payment/paymentModel.py | Kokoserver/ac | 8226a7e212aef60c0903204ca2b0602fef06f133 | [
"MIT"
] | null | null | null | from datetime import datetime
from mongoengine import *
class Invoice(Document):
    """MongoDB document describing a user's course-purchase invoice."""

    # ObjectId of the owning user.
    userId = ObjectIdField(required=True)
    # ObjectIds of the purchased courses.
    courseList = ListField(ObjectIdField(), required=True)
    total_price = DecimalField(default=0, required=True)
    paymentStatus = StringField(choices=("success", "pending", "failed"), required=True)
    # The callable (not its result) is the default, so each new document gets
    # its own timestamp. NOTE(review): datetime.utcnow yields a *naive* UTC
    # value -- confirm consumers expect naive timestamps.
    issued_date = DateField(default=datetime.utcnow)

    def to_json(self, *args, **kwargs):
        # Plain-dict summary of the invoice; *args/**kwargs are accepted for
        # signature compatibility but ignored.
        # NOTE(review): the keys "course_List" and "issued_data" look like
        # typos (course_list / issued_date) but are kept as-is because API
        # consumers may already depend on them -- confirm before renaming.
        return {
            "userid":str(self.userId),
            "course_List":self.courseList,
            "course_count":len(self.courseList),
            "paymentStatus":self.paymentStatus,
            "issued_data":str(self.issued_date)
        }

    @queryset_manager
    def getUserInvoice(doc_cls, queryset, userId) -> dict:
        # Returns the user's invoices (a queryset) when any exist, otherwise
        # False. NOTE(review): the ``-> dict`` annotation matches neither
        # return value, and mixing queryset/False forces truthiness checks at
        # every call site -- consider returning an empty queryset instead.
        userInvoice = queryset.filter(userId=userId).all()
        if userInvoice:
            return userInvoice
        return False
class Payment(Document):
    # Placeholder collection: payment schema/behaviour not implemented yet.
    pass
| 26 | 88 | 0.655983 |
7957c220e3eb1147a1407779a291f94997729e47 | 31,713 | py | Python | main.py | yukuzntcva/Denoising-drone-rotors | 0122b020fc959dd3869b3863989fee3520aede73 | [
"MIT"
] | 19 | 2018-10-02T02:11:16.000Z | 2021-05-14T12:39:38.000Z | main.py | yukuzntcva/Denoising-drone-rotors | 0122b020fc959dd3869b3863989fee3520aede73 | [
"MIT"
] | 1 | 2018-10-23T02:43:18.000Z | 2018-10-23T02:43:18.000Z | main.py | yukuzntcva/Denoising-drone-rotors | 0122b020fc959dd3869b3863989fee3520aede73 | [
"MIT"
] | 5 | 2019-04-11T13:40:31.000Z | 2020-07-19T14:44:12.000Z | import argparse
import csv
import logging
import os
import sys
from ast import literal_eval
from datetime import datetime
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import MultiStepLR
from tqdm import tqdm, trange
from collections import OrderedDict
import actquant
import models
from clr import CyclicLR # Until it will be included in official PyTorch release
from data import get_dataset
from logger import CsvLogger
from preprocess import get_transform
from utils.log import save_checkpoint
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ConvNet Training')
parser.add_argument('--results_dir', metavar='RESULTS_DIR', default='./results', help='results dir')
parser.add_argument('--datapath', metavar='DATA_PATH', default='./results', help='datasets dir')
parser.add_argument('--save', metavar='SAVE', default='', help='saved folder')
parser.add_argument('--dataset', metavar='DATASET', default='imagenet', help='dataset name or folder')
parser.add_argument('--model', '-a', metavar='MODEL', default='alexnet', choices=model_names,
help='model architecture: ' + ' | '.join(model_names) + ' (default: alexnet)')
parser.add_argument('--input_size', type=int, default=None, help='image input size')
parser.add_argument('--model_config', default='', help='additional architecture configuration')
parser.add_argument('--type', default='float32', help='type of tensor - e.g float16')
parser.add_argument('--gpus', default='0', help='gpus used for training - e.g 0,1,3')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--epochs', default=200, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--optimizer', default='SGD', type=str, metavar='OPT', help='optimizer function used')
parser.add_argument('--print-freq', '-p', default=10, type=int, metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', type=str, metavar='FILE', help='evaluate model FILE on validation set')
parser.add_argument('--no-quantization', action='store_true', default=False, help='disables quantization')
parser.add_argument('--no-noise', action='store_true', default=False, help='noise')
parser.add_argument('--bitwidth', default=32, type=int, metavar='N', help='Quantization bitwidth (default: 5)')
parser.add_argument('--scale', default=1, type=float, metavar='N', help='scale of MobileNet2')
parser.add_argument('--step', default=2, type=int, metavar='N',
help='portion of net to be quantized at second stage(default: 2)')
parser.add_argument('--depth', default=18, type=int, metavar='N', help='depth of the model(default: 18)')
parser.add_argument('--act-bitwidth', default=32, type=int, metavar='N',
help='Quantization activation bitwidth (default: 5)')
parser.add_argument('--no-act-quantization', action='store_true', default=False, help='disables quantization')
parser.add_argument('--start-from-zero', action='store_true', default=False, help='Start from epoch 0')
parser.add_argument('--no-quant-edges', action='store_true', default=False,
help='no quantization for first and last layers')
#parser.add_argument('--step-setup', default='15,9', help='start of steps and interval- e.g 0,1')
parser.add_argument('--quant_start_stage', default=0, type=int, metavar='N', help='from which level of quant to start')
parser.add_argument('--quant_epoch_step', type=float, default=1.0, help='hot often to change state of quant')
# CLR
parser.add_argument('--clr', dest='clr', action='store_true', help='Use CLR')
parser.add_argument('--min-lr', type=float, default=1e-5, help='Minimal LR for CLR.')
parser.add_argument('--max-lr', type=float, default=1, help='Maximal LR for CLR.')
parser.add_argument('--epochs-per-step', type=int, default=20,
help='Number of epochs per step in CLR, recommended to be between 2 and 10.')
parser.add_argument('--mode', default='triangular2', help='CLR mode. One of {triangular, triangular2, exp_range}')
parser.add_argument('--find-clr', dest='find_clr', action='store_true',
help='Run search for optimal LR in range (min_lr, max_lr)')
# Optimization options
parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The learning rate.')
parser.add_argument('--momentum', '-m', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float, default=4e-5, help='Weight decay (L2 penalty).')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma at scheduled epochs.')
parser.add_argument('--schedule', type=int, nargs='+', default=[81, 122, 164],
help='Decrease learning rate at these epochs.')
parser.add_argument('--val_batch_size', default=64, type=int , help='val mini-batch size (default: 64)')
# NICE
parser.add_argument('--param-std-cutoff', type=float, default=3, help='how many std to include before cutoff')
parser.add_argument('--quant-dataloader', action='store_true', default=False, help='Load quantized data loader')
parser.add_argument('-sb', '--act_stats_batch_size', default=64, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--no_pre_process_normalize', action='store_true', default=False, help='normalize in the preprocess')
parser.add_argument('--noise_mask', type=float, default=0.05, help='Probability to add noise')
clamp_stats_dict = {}
cos_loss_dict = {}
def load_model(model, checkpoint):
    """Load ``checkpoint['state_dict']`` into ``model``.

    Keys are normalised by stripping a leading ``'module.'`` prefix (added
    when the checkpoint was produced by a ``DataParallel``-wrapped model).
    Scalar/1-d tensors -- except the integer ``num_batches_tracked`` counters
    -- are multiplied by a 1-element ones tensor so they load as proper
    1-element tensors. Loading is non-strict, so checkpoints that lack some
    entries (e.g. running statistics) still load.
    """
    remapped = OrderedDict()
    for key, value in checkpoint['state_dict'].items():
        stripped = key[7:] if key.startswith('module.') else key
        if value.dim() > 1 or 'num_batches_tracked' in stripped:
            remapped[stripped] = value
        else:
            remapped[stripped] = value * value.new_ones(1)
    model.load_state_dict(remapped, strict=False)
def main():
    """Entry point: parse CLI args, build/restore the model, optionally run a
    pre-training statistics pass, then train (or just evaluate) the network.

    Fix vs. original: ``args.save is ''`` and ``args.model_config is not ''``
    compared strings by *identity*, which only works due to CPython string
    interning and raises a SyntaxWarning on Python >= 3.8; they now use
    equality.
    """
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    global args, best_prec1
    best_prec1 = 0
    args = parser.parse_args()
    time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if args.evaluate:
        args.results_dir = '/tmp'
    if args.save == '':  # was: `is ''` (identity comparison on a literal)
        args.save = time_stamp
    save_path = os.path.join(args.results_dir, args.save)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # Positive feature flags derived from the --no-* switches.
    args.noise = not args.no_noise
    args.quant = not args.no_quantization
    args.act_quant = not args.no_act_quantization
    args.quant_edges = not args.no_quant_edges

    logging.info("saving to %s", save_path)
    logging.debug("run arguments: %s", args)

    if args.gpus is not None:
        args.gpus = [int(i) for i in args.gpus.split(',')]
        device = 'cuda:' + str(args.gpus[0])
        cudnn.benchmark = True
    else:
        device = 'cpu'
    dtype = torch.float32

    args.step_setup = None

    model = models.__dict__[args.model]
    model_config = {'scale': args.scale, 'input_size': args.input_size, 'dataset': args.dataset,
                    'bitwidth': args.bitwidth, 'quantize': args.quant, 'noise': args.noise, 'step': args.step,
                    'depth': args.depth, 'act_bitwidth': args.act_bitwidth, 'act_quant': args.act_quant,
                    'quant_edges': args.quant_edges, 'step_setup': args.step_setup,
                    'quant_epoch_step': args.quant_epoch_step, 'quant_start_stage': args.quant_start_stage,
                    'normalize': args.no_pre_process_normalize,
                    'noise_mask': args.noise_mask}

    if args.model_config != '':  # was: `is not ''` (identity comparison)
        model_config = dict(model_config, **literal_eval(args.model_config))

    # create model
    model = model(**model_config)
    logging.info("creating model %s", args.model)

    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    print("number of parameters: ", params)
    logging.info("created model with configuration: %s", model_config)
    print(model)

    data = None
    checkpoint_epoch=0
    # optionally resume from a checkpoint
    if args.evaluate:
        if not os.path.isfile(args.evaluate):
            parser.error('invalid checkpoint: {}'.format(args.evaluate))
        checkpoint = torch.load(args.evaluate, map_location=device)
        load_model(model, checkpoint)
        logging.info("loaded checkpoint '%s' (epoch %s)",
                     args.evaluate, checkpoint['epoch'])
        print("loaded checkpoint {0} (epoch {1})".format(args.evaluate, checkpoint['epoch']))
    elif args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=device)
            if not args.start_from_zero:
                args.start_epoch = checkpoint['epoch'] - 1
            best_test = checkpoint['best_prec1']
            checkpoint_epoch = checkpoint['epoch']
            load_model(model, checkpoint)
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        elif os.path.isdir(args.resume):
            # Resuming from a results directory: checkpoint + logged CSV rows.
            checkpoint_path = os.path.join(args.resume, 'checkpoint.pth.tar')
            csv_path = os.path.join(args.resume, 'results.csv')
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            checkpoint = torch.load(checkpoint_path, map_location=device)
            best_test = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
            data = []
            with open(csv_path) as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    data.append(row)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.gpus is not None:
        model = torch.nn.DataParallel(model, [args.gpus[0]])  # Statistics need to be calculated on single GPU to be consistant with data among multiplr GPUs

    # Data loading code
    default_transform = {
        'train': get_transform(args.dataset, input_size=args.input_size, augment=True,
                               integer_values=args.quant_dataloader, norm=not args.no_pre_process_normalize),
        'eval': get_transform(args.dataset, input_size=args.input_size, augment=False,
                              integer_values=args.quant_dataloader, norm=not args.no_pre_process_normalize)
    }
    transform = getattr(model.module, 'input_transform', default_transform)

    val_data = get_dataset(args.dataset, 'val', transform['eval'], datasets_path=args.datapath)
    val_loader = torch.utils.data.DataLoader(
        val_data,
        batch_size=args.val_batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    train_data = get_dataset(args.dataset, 'train', transform['train'], datasets_path=args.datapath)
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    # Separate loader (smaller batch) used only for the statistics pass below.
    statistics_train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.act_stats_batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.decay,
                                nesterov=True)
    model, criterion = model.to(device, dtype), criterion.to(device, dtype)
    if args.clr:
        scheduler = CyclicLR(optimizer, base_lr=args.min_lr, max_lr=args.max_lr,
                             step_size=args.epochs_per_step * len(train_loader), mode=args.mode)
    else:
        scheduler = MultiStepLR(optimizer, milestones=args.schedule, gamma=args.gamma)

    csv_logger = CsvLogger(filepath=save_path, data=data)
    csv_logger.save_params(sys.argv, args)

    csv_logger_training_stats = os.path.join(save_path, 'training_stats.csv')

    # pre-training activation and parameters statistics calculation ####
    if check_if_need_to_collect_statistics(model):
        for layer in model.modules():
            if isinstance(layer, actquant.ActQuantBuffers):
                layer.pre_training_statistics = True  # Turn on pre-training activation statistics calculation
        model.module.statistics_phase = True

        validate(statistics_train_loader, model, criterion, device, epoch=0, num_of_batches=80, stats_phase=True)  # Run validation on training set for statistics
        model.module.quantize.get_act_max_value_from_pre_calc_stats(list(model.modules()))
        _ = model.module.quantize.set_weight_basis(list(model.modules()), None)

        for layer in model.modules():
            if isinstance(layer, actquant.ActQuantBuffers):
                layer.pre_training_statistics = False  # Turn off pre-training activation statistics calculation
        model.module.statistics_phase = False
    else:  # Maximal activation values still need to be derived from loaded stats
        model.module.quantize.assign_act_clamp_during_val(list(model.modules()), print_clamp_val=True)
        model.module.quantize.assign_weight_clamp_during_val(list(model.modules()), print_clamp_val=True)
        # model.module.quantize.get_act_max_value_from_pre_calc_stats(list(model.modules()))

    if args.gpus is not None:  # Return to Multi-GPU after statistics calculations
        model = torch.nn.DataParallel(model.module, args.gpus)
        model, criterion = model.to(device, dtype), criterion.to(device, dtype)
    # pre-training activation statistics calculation ####

    if args.evaluate:
        val_loss, val_prec1, val_prec5 = validate(val_loader, model, criterion, device, epoch=0)
        print("val_prec1: ", val_prec1)
        return

    # fast forward to curr stage
    for i in range(args.quant_start_stage):
        model.module.switch_stage(0)

    for epoch in trange(args.start_epoch, args.epochs + 1):
        if not isinstance(scheduler, CyclicLR):
            scheduler.step()
        #     scheduler.optimizer = optimizer
        train_loss, train_prec1, train_prec5 = train(
            train_loader, model, criterion, device, epoch, optimizer, scheduler,
            training_stats_logger=csv_logger_training_stats)

        for layer in model.modules():
            if isinstance(layer, actquant.ActQuantBuffers):
                layer.print_clamp()

        # evaluate on validation set
        val_loss, val_prec1, val_prec5 = validate(
            val_loader, model, criterion, device, epoch)

        # remember best prec@1 and save checkpoint
        is_best = val_prec1 > best_prec1
        best_prec1 = max(val_prec1, best_prec1)

        save_checkpoint({
            'epoch': epoch + 1,
            'model': args.model,
            'config': args.model_config,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'layers_b_dict': model.module.layers_b_dict #TODO this doesn't work for multi gpu - need to del
        }, is_best, path=save_path)
        # New type of logging
        csv_logger.write({'epoch': epoch + 1, 'val_error1': 1 - val_prec1, 'val_error5': 1 - val_prec5,
                          'val_loss': val_loss, 'train_error1': 1 - train_prec1,
                          'train_error5': 1 - train_prec5, 'train_loss': train_loss})
        csv_logger.plot_progress(title=args.model+str(args.depth))
        csv_logger.write_text('Epoch {}: Best accuracy is {:.2f}% top-1'.format(epoch + 1, best_prec1 * 100.))
def check_if_need_to_collect_statistics(model):
    """Return True unless some ActQuantBuffers layer already holds a non-zero
    ``running_std`` (i.e. activation statistics were restored from a
    checkpoint and the pre-training statistics pass can be skipped)."""
    return not any(
        isinstance(layer, actquant.ActQuantBuffers)
        and hasattr(layer, 'running_std')
        and float(layer.running_std) != 0
        for layer in model.modules()
    )
def forward(data_loader, model, criterion, device, epoch=0, num_of_batches=None, training=True, optimizer=None,
            scheduler=None, training_stats_logger=None, stats_phase=False):
    """Run one pass over ``data_loader`` (train, validation, or the
    statistics phase) and return ``(last_batch_loss, top1_acc, top5_acc)``.

    Note: the returned loss is the *final batch's* loss, not an epoch
    average; accuracies are divided by the full dataset length.
    """
    # Epoch-wide correct counts (top-1 / top-5).
    correct1, correct5 = 0, 0
    # Counts reset at every print interval (for the "since last print" stats).
    print_correct_1 , print_correct_5 = 0, 0
    print_batch_counter = 0
    # Counts reset whenever the quantization stage switches.
    quant_stage_counter = 0
    quant_stage_correct_1 = 0
    t = time.time()
    for batch_idx, (inputs, target) in enumerate(tqdm(data_loader)):
        if num_of_batches:
            if batch_idx > num_of_batches:  # Debug
                break
        if isinstance(scheduler, CyclicLR):
            # CLR updates the learning rate per batch, not per epoch.
            scheduler.batch_step()
        inputs, target = inputs.to(device=device), target.to(device=device)
        if (training):
            # Fractional epoch drives gradual quantization stage switching.
            epoch_progress = epoch + batch_idx/len(data_loader)
            stage_switch = model.module.switch_stage(epoch_progress)
            if stage_switch:
                quant_stage_counter = 0
                quant_stage_correct_1 = 0
        # compute output
        output = model(inputs)
        loss = criterion(output, target)
        if type(output) is list:
            output = output[0]
        # measure accuracy and record loss
        corr = correct(output, target, topk=(1, 5))
        correct1 += corr[0]
        correct5 += corr[1]
        if training:
            # compute gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        quant_stage_correct_1 += corr[0]
        print_correct_1 += corr[0]
        print_correct_5 += corr[1]
        print_batch_counter += output.shape[0]
        quant_stage_counter += output.shape[0]
        if batch_idx % args.print_freq == 0:
            # NOTE(review): running-average denominators below use the
            # configured batch size, so the last (smaller) batch skews them
            # slightly.
            if stats_phase:
                tqdm.write('Stats phase : [{}/{} ({:.0f}%)]\tLoss: {:.6f}. Top-1 accuracy: {:.2f}%({:.2f}%). '
                           'Top-5 accuracy: '
                           '{:.2f}%({:.2f}%).'.format(batch_idx, len(data_loader),
                                                      100. * batch_idx / len(data_loader), loss.item(),
                                                      100. * print_correct_1 / print_batch_counter,
                                                      100. * correct1 / (args.act_stats_batch_size * (batch_idx + 1)),
                                                      100. * print_correct_5 / print_batch_counter,
                                                      100. * correct5 / (args.act_stats_batch_size * (batch_idx + 1))))
            elif training:
                tqdm.write('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}. Top-1 accuracy: {:.2f}%({:.2f}%). '
                           'Top-5 accuracy: {:.2f}%({:.2f}%). \t'
                           'lr: {:.2e}.'.format(epoch, batch_idx, len(data_loader),
                                                100. * batch_idx / len(data_loader), loss.item(),
                                                100. * print_correct_1 / print_batch_counter,
                                                100. * correct1 / (args.batch_size * (batch_idx + 1)),
                                                100. * print_correct_5 / print_batch_counter,
                                                100. * correct5 / (args.batch_size * (batch_idx + 1)),
                                                scheduler.get_lr()[0] if scheduler is not None else 0))
                dur = time.time() - t
                # Append one CSV row of training stats per print interval.
                with open(training_stats_logger, 'a') as f: #TODO add title
                    f.write('{},{},{},{},{},{},{},{},{},{},{},{},{} \n'.format(epoch, batch_idx, len(data_loader),
                                                epoch * len(data_loader) + batch_idx,
                                                100. * batch_idx / len(data_loader), loss.item(),
                                                100. * print_correct_1 / print_batch_counter,
                                                100. * correct1 / (args.batch_size * (batch_idx + 1)),
                                                100. * print_correct_5 / print_batch_counter,
                                                100. * correct5 / (args.batch_size * (batch_idx + 1)),
                                                scheduler.get_lr()[0] if scheduler is not None else 0,
                                                dur ,
                                                100. * quant_stage_correct_1 / quant_stage_counter,
                                                )
                            )
            else:
                tqdm.write('Validation Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}. Top-1 accuracy: {:.2f}%({:.2f}%). '
                           'Top-5 accuracy: '
                           '{:.2f}%({:.2f}%).'.format(epoch, batch_idx, len(data_loader),
                                                      100. * batch_idx / len(data_loader), loss.item(),
                                                      100. * print_correct_1 / print_batch_counter,
                                                      100. * correct1 / (args.val_batch_size * (batch_idx + 1)),
                                                      100. * print_correct_5 / print_batch_counter,
                                                      100. * correct5 / (args.val_batch_size * (batch_idx + 1))))
            # Reset the per-interval counters after every report.
            print_correct_1, print_correct_5 = 0 , 0
            print_batch_counter = 0
    return loss.item(), correct1 / len(data_loader.dataset), correct5 / len(data_loader.dataset)
def train(data_loader, model, criterion, device, epoch, optimizer, scheduler,
          training_stats_logger=None, num_of_batches=None):
    """One training epoch: switch the model to train mode and run `forward`."""
    model.train()
    return forward(
        data_loader, model, criterion, device, epoch,
        num_of_batches=num_of_batches,
        training=True,
        optimizer=optimizer,
        scheduler=scheduler,
        training_stats_logger=training_stats_logger,
    )
def validate(data_loader, model, criterion, device, epoch, num_of_batches=None, stats_phase=False):
    """One evaluation pass: switch the model to eval mode and run `forward`."""
    model.eval()
    return forward(
        data_loader, model, criterion, device, epoch,
        num_of_batches=num_of_batches,
        training=False,
        optimizer=None,
        scheduler=None,
        stats_phase=stats_phase,
    )
# TODO: separate file
def correct(output, target, topk=(1,)):
    """Return the number of correctly classified samples for each k in ``topk``.

    Args:
        output: model scores/logits of shape (batch, num_classes).
        target: ground-truth class indices of shape (batch,).
        topk: iterable of k values; a sample counts as correct for a given k
            when its target appears among the k highest-scoring classes.

    Returns:
        List of float counts, one per entry in ``topk`` (same order).
    """
    maxk = max(topk)
    # Indices of the maxk highest scores per sample, transposed to
    # (maxk, batch) so row r holds every sample's r-th best guess.
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t().type_as(target)
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): the sliced tensor is not guaranteed
        # to be contiguous, and view() raises on non-contiguous input in
        # recent PyTorch versions (same fix torchvision applied to accuracy()).
        correct_k = correct[:k].reshape(-1).float().sum(0).item()
        res.append(correct_k)
    return res
def plot_bn_statistic(model):
    """Save a histogram of every BatchNorm2d parameter and buffer.

    Figures are written to ./plot_stats/<tensor_name>_layer_<idx>.png, where
    idx counts BatchNorm2d layers in module-traversal order.
    """
    def _save_histogram(tensor, tag):
        # One PNG per tensor; the output directory is created lazily.
        values = tensor.view(-1).cpu().detach().numpy()
        plt.hist(values, bins=256)
        directory = './plot_stats'
        if not os.path.isdir(directory):
            os.mkdir(directory)
        plt.savefig(os.path.join(directory, tag + '.png'))
        plt.close()

    bn_index = 0
    for module in model.module.modules():
        if isinstance(module, torch.nn.BatchNorm2d):
            for name in module._parameters:
                if module._parameters[name] is not None:
                    _save_histogram(module._parameters[name], name + '_layer_' + str(bn_index))
            for name in module._buffers:
                if module._buffers[name] is not None:
                    _save_histogram(module._buffers[name], name + '_layer_' + str(bn_index))
            bn_index += 1
def migrate_models(model, target_model, best_epoch, model_name='marvis_mobilenet_multi_gpu'):
    """Copy Conv/Linear/BatchNorm weights and buffers from ``model`` into
    ``target_model`` (a newer architecture with extra buffers) and save the
    result under ./trained_models/<model_name>/.

    Layers are matched purely by traversal order, so both models must declare
    their Conv2d/Linear/BatchNorm2d layers in the same sequence.
    """
    _KINDS = (torch.nn.Conv2d, torch.nn.Linear, torch.nn.BatchNorm2d)

    def _transferable_layers(net):
        # All layers whose parameters/buffers we migrate, in traversal order.
        return [m for m in net.modules() if isinstance(m, _KINDS)]

    sources = _transferable_layers(model)
    if args.gpus is not None:
        target_model = torch.nn.DataParallel(target_model, args.gpus)
    targets = _transferable_layers(target_model)

    for idx, src in enumerate(sources):
        dst = targets[idx]
        for name, param in src._parameters.items():
            if param is not None:
                dst._parameters[name].data = param.data.clone()
        for name, buf in src._buffers.items():  # e.g. batchnorm running stats
            if buf is not None:
                dst._buffers[name].data = buf.data.clone()

    save_dir = os.path.join('./trained_models', model_name)
    if not os.path.isdir(save_dir):
        os.mkdir(save_dir)
    save_checkpoint({
        'epoch': best_epoch,
        'model': args.model,
        'config': args.model_config,
        'state_dict': target_model.state_dict(),
        'best_prec1': best_epoch
    }, True, path=save_dir)
def gather_clamp_statistic(model):
    """Append the current activation and weight clamp values to the
    module-level ``clamp_stats_dict`` (one list per layer, one entry per call).
    """
    # Activation clamps are read directly from the ActQuantBuffers layers.
    act_layer_num = 0
    for layer in list(model.modules()):
        if isinstance(layer, actquant.ActQuantBuffers):
            if layer.clamp_val.data is not None:
                key = 'Activation_{}_clamp_val'.format(act_layer_num)
                clamp_stats_dict.setdefault(key, []).append(layer.clamp_val.data.item())
            act_layer_num += 1

    # Weight clamps are reconstructed from each layer's quantization basis:
    # (2^(bitwidth-1) - 1) * layer_basis * layer_b.
    conv_linear_layer_num = 0
    for layer in list(model.modules()):
        if isinstance(layer, (torch.nn.Conv2d, torch.nn.Linear)):
            lb = layer._parameters.get('layer_basis')
            if lb is not None:
                key = 'Conv_Fc_{}_clamp_val'.format(conv_linear_layer_num)
                clamp_val = (2 ** (args.bitwidth - 1) - 1) * lb * layer.layer_b
                clamp_stats_dict.setdefault(key, []).append(clamp_val.item())
                conv_linear_layer_num += 1
def plot_clamp_statistic(stats_dict, save_path):
    """Plot each clamp-value series against its epoch index and save one PNG
    per series under <save_path>/clamp_plot_stats/."""
    out_dir = os.path.join(save_path, 'clamp_plot_stats')
    for name, values in stats_dict.items():
        plt.plot(list(range(len(values))), values, '.')
        if not os.path.isdir(out_dir):
            os.mkdir(out_dir)
        plt.savefig(os.path.join(out_dir, name + '.png'))
        plt.close()
def plot_clamp_statistic_from_file(dict_file, act_layers_list, save_path):
    """Overlay the per-epoch activation-clamp curves for the layer indices in
    ``act_layers_list`` (read from an ``np.save``-d stats dict) and write
    <save_path>/unified_activation_clamp.png."""
    plt.figure()
    stats = np.load(dict_file).item()
    keys = list(stats.keys())
    for layer_idx in act_layers_list:
        series = stats[keys[layer_idx]]
        plt.plot(list(range(len(series))), series)
    plt.xlabel('epoch')
    plt.ylabel('Clamp Value')
    plt.savefig(os.path.join(save_path, 'unified_activation_clamp.png'))
    plt.show()
def plot_cos_loss(stats_dict, save_path):
    """Plot each cosine-loss series against its epoch index and save one PNG
    per series under <save_path>/cos_loss/."""
    out_dir = os.path.join(save_path, 'cos_loss')
    for name, values in stats_dict.items():
        plt.plot(list(range(len(values))), values, '.')
        if not os.path.isdir(out_dir):
            os.mkdir(out_dir)
        plt.savefig(os.path.join(out_dir, name + '.png'))
        plt.close()
def gather_cos_loss(model):
    """Append each layer's cosine-similarity loss, plus their mean, to the
    module-level ``cos_loss_dict`` (one entry per layer per call)."""
    losses = model.module.quantize.cosine_sim_loss
    running_total = 0
    for idx, per_layer_loss in enumerate(losses.values()):
        running_total += per_layer_loss
        cos_loss_dict.setdefault("cos_loss_layer_{}".format(idx), []).append(per_layer_loss)
    # Mean over all layers, recorded under a fixed key.
    cos_loss_dict.setdefault('total_cosine_loss', []).append(running_total / len(losses))
    return
def plot_act_quant_error_statistic(model, save_path):
    """Histogram each ActQuantBuffers layer's activation quantization error
    into <save_path>/act_quant_error_stats/layer_<n>.png (n = layer.layer_num).
    """
    out_dir = os.path.join(save_path, 'act_quant_error_stats')
    for layer in model.module.modules():
        if not isinstance(layer, actquant.ActQuantBuffers):
            continue
        plt.hist(layer.quant_error, bins=256)
        if not os.path.isdir(out_dir):
            os.mkdir(out_dir)
        plt.savefig(os.path.join(out_dir, 'layer_' + str(layer.layer_num) + '.png'))
        plt.close()
    return
def plot_weight_quant_error_statistic(model, save_path):
    """Histogram the weight quantization error of every Conv2d/Linear layer
    into <save_path>/weight_quant_error_stats/layer_<n>.png."""
    out_dir = os.path.join(save_path, 'weight_quant_error_stats')
    plotted = 0
    for layer, stats in model.module.quantize.quant_error.items():
        if isinstance(layer, (torch.nn.Conv2d, torch.nn.Linear)):
            plt.hist(np.concatenate(stats), bins=256)
            if not os.path.isdir(out_dir):
                os.mkdir(out_dir)
            plt.savefig(os.path.join(out_dir, 'layer_' + str(plotted) + '.png'))
            plt.close()
            plotted += 1
    return
if __name__ == '__main__':
    # Entry point: parse CLI arguments and launch training/evaluation.
    main()
| 46.228863 | 162 | 0.61634 |
7957c2823e86b078434e2c853af14fbd9e713d31 | 1,803 | py | Python | test/test_command_graph.py | Fuhrmann/qtile | 9760feed411582eeb0379afe7b4b54f73220c1ad | [
"MIT"
] | null | null | null | test/test_command_graph.py | Fuhrmann/qtile | 9760feed411582eeb0379afe7b4b54f73220c1ad | [
"MIT"
] | null | null | null | test/test_command_graph.py | Fuhrmann/qtile | 9760feed411582eeb0379afe7b4b54f73220c1ad | [
"MIT"
] | null | null | null | import pytest
from libqtile.command_graph import CommandGraphCall, CommandGraphObject, CommandGraphRoot
def test_root_path():
node = CommandGraphRoot()
assert node.selectors == []
assert node.selector is None
assert node.parent is None
def test_resolve_nodes():
root_node = CommandGraphRoot()
node_1 = root_node.navigate("layout", None) \
.navigate("screen", None)
assert node_1.selectors == [("layout", None), ("screen", None)]
assert isinstance(node_1, CommandGraphObject)
node_2 = node_1.navigate("layout", None) \
.navigate("window", None) \
.navigate("group", None)
assert node_2.selectors == [
("layout", None), ("screen", None), ("layout", None), ("window", None), ("group", None)
]
assert isinstance(node_2, CommandGraphObject)
with pytest.raises(KeyError, match="Given node is not an object"):
node_1.navigate("widget", None)
def test_resolve_selections():
    """String selections are recorded alongside the object names."""
    root = CommandGraphRoot()
    selected = root.navigate("layout", None).navigate("screen", "1")
    assert selected.selectors == [("layout", None), ("screen", "1")]
    assert isinstance(selected, CommandGraphObject)
def test_resolve_command():
root_node = CommandGraphRoot()
command_1 = root_node.call("cmd_name")
assert command_1.selectors == []
assert command_1.name == "cmd_name"
assert isinstance(command_1, CommandGraphCall)
command_2 = root_node.navigate("layout", None) \
.navigate("screen", None) \
.call("cmd_name")
assert command_2.name == "cmd_name"
assert command_2.selectors == [("layout", None), ("screen", None)]
assert isinstance(command_2, CommandGraphCall)
| 32.196429 | 95 | 0.636717 |
7957c2facbc963da74787115fc05601d2956fda0 | 33,768 | py | Python | Lib/asyncio/tasks.py | diogommartins/cinder | 79103e9119cbecef3b085ccf2878f00c26e1d175 | [
"CNRI-Python-GPL-Compatible"
] | 1 | 2021-11-04T07:39:43.000Z | 2021-11-04T07:39:43.000Z | Lib/asyncio/tasks.py | diogommartins/cinder | 79103e9119cbecef3b085ccf2878f00c26e1d175 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/asyncio/tasks.py | diogommartins/cinder | 79103e9119cbecef3b085ccf2878f00c26e1d175 | [
"CNRI-Python-GPL-Compatible"
] | 1 | 2021-05-11T05:20:30.000Z | 2021-05-11T05:20:30.000Z | """Support for tasks, coroutines and the scheduler."""
__all__ = (
'Task', 'create_task',
'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
'wait', 'wait_for', 'as_completed', 'sleep',
'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe',
'current_task', 'all_tasks',
'_register_task', '_unregister_task', '_enter_task', '_leave_task',
)
import concurrent.futures
import contextvars
import functools
import inspect
import itertools
import types
import warnings
import weakref
from . import base_tasks
from . import coroutines
from . import events
from . import exceptions
from . import futures
from .coroutines import _is_coroutine
_ASYNC_LAZY_VALUE_TYPE = None
# Helper to generate new task names
# This uses itertools.count() instead of a "+= 1" operation because the latter
# is not thread safe. See bpo-11866 for a longer explanation.
_task_name_counter = itertools.count(1).__next__
def current_task(loop=None):
    """Return the task currently executing on *loop*.

    Defaults to the running event loop; returns None when no task is
    active on that loop.
    """
    target = events.get_running_loop() if loop is None else loop
    return _current_tasks.get(target)
def all_tasks(loop=None):
    """Return the set of not-yet-finished tasks bound to *loop*."""
    if loop is None:
        loop = events.get_running_loop()
    # _all_tasks is a WeakSet that other threads may mutate while we copy
    # it, which can make list() raise RuntimeError.  Retry a bounded number
    # of times; see bpo-34970 and bpo-36607 for the background.
    for attempt in itertools.count(1):
        try:
            snapshot = list(_all_tasks)
        except RuntimeError:
            if attempt >= 1000:
                raise
        else:
            break
    return {task for task in snapshot
            if futures._get_loop(task) is loop and not task.done()}
_isawaitable = inspect.isawaitable
def _set_task_name(task, name):
if name is not None:
try:
set_name = task.set_name
except AttributeError:
pass
else:
set_name(name)
class Task(futures._PyFuture):  # Inherit Python Task implementation
                                # from a Python Future implementation.
    """A coroutine wrapped in a Future."""
    # An important invariant maintained while a Task not done:
    #
    # - Either _fut_waiter is None, and _step() is scheduled;
    # - or _fut_waiter is some Future, and _step() is *not* scheduled.
    #
    # The only transition from the latter to the former is through
    # _wakeup(). When _fut_waiter is not None, one of its callbacks
    # must be _wakeup().
    # If False, don't log a message if the task is destroyed whereas its
    # status is still pending
    _log_destroy_pending = True
    @classmethod
    def current_task(cls, loop=None):
        """Return the currently running task in an event loop or None.
        By default the current task for the current event loop is returned.
        None is returned when called not in the context of a Task.
        """
        warnings.warn("Task.current_task() is deprecated since Python 3.7, "
                      "use asyncio.current_task() instead",
                      DeprecationWarning,
                      stacklevel=2)
        if loop is None:
            loop = events.get_event_loop()
        return current_task(loop)
    @classmethod
    def all_tasks(cls, loop=None):
        """Return a set of all tasks for an event loop.
        By default all tasks for the current event loop are returned.
        """
        warnings.warn("Task.all_tasks() is deprecated since Python 3.7, "
                      "use asyncio.all_tasks() instead",
                      DeprecationWarning,
                      stacklevel=2)
        if loop is None:
            loop = events.get_event_loop()
        # NB: set(_all_tasks) is required to protect
        # from https://bugs.python.org/issue34970 bug
        return {t for t in list(_all_tasks) if futures._get_loop(t) is loop}
    def __init__(self, coro, *, loop=None, name=None):
        super().__init__(loop=loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        if not coroutines.iscoroutine(coro):
            # raise after Future.__init__(), attrs are required for __del__
            # prevent logging for pending task in __del__
            self._log_destroy_pending = False
            raise TypeError(f"a coroutine was expected, got {coro!r}")
        if name is None:
            self._name = f'Task-{_task_name_counter()}'
        else:
            self._name = str(name)
        self._must_cancel = False
        self._fut_waiter = None
        self._coro = coro
        self._context = contextvars.copy_context()
        # Schedule the first step only when the coroutine has not already
        # been started (the pure-Python _is_coro_suspended fallback always
        # returns False, so without the native hook this always schedules).
        if not _is_coro_suspended(coro):
            self._loop.call_soon(self.__step, context=self._context)
        _register_task(self)
    def __del__(self):
        if self._state == futures._PENDING and self._log_destroy_pending:
            context = {
                'task': self,
                'message': 'Task was destroyed but it is pending!',
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
        super().__del__()
    def _repr_info(self):
        return base_tasks._task_repr_info(self)
    def get_coro(self):
        return self._coro
    def get_name(self):
        return self._name
    def set_name(self, value):
        self._name = str(value)
    def set_result(self, result):
        # A Task derives its result from its coroutine; external callers
        # must not set it directly.
        raise RuntimeError('Task does not support set_result operation')
    def set_exception(self, exception):
        raise RuntimeError('Task does not support set_exception operation')
    def get_stack(self, *, limit=None):
        """Return the list of stack frames for this task's coroutine.
        If the coroutine is not done, this returns the stack where it is
        suspended. If the coroutine has completed successfully or was
        cancelled, this returns an empty list. If the coroutine was
        terminated by an exception, this returns the list of traceback
        frames.
        The frames are always ordered from oldest to newest.
        The optional limit gives the maximum number of frames to
        return; by default all available frames are returned. Its
        meaning differs depending on whether a stack or a traceback is
        returned: the newest frames of a stack are returned, but the
        oldest frames of a traceback are returned. (This matches the
        behavior of the traceback module.)
        For reasons beyond our control, only one stack frame is
        returned for a suspended coroutine.
        """
        return base_tasks._task_get_stack(self, limit)
    def print_stack(self, *, limit=None, file=None):
        """Print the stack or traceback for this task's coroutine.
        This produces output similar to that of the traceback module,
        for the frames retrieved by get_stack(). The limit argument
        is passed to get_stack(). The file argument is an I/O stream
        to which the output is written; by default output is written
        to sys.stderr.
        """
        return base_tasks._task_print_stack(self, limit, file)
    def cancel(self):
        """Request that this task cancel itself.
        This arranges for a CancelledError to be thrown into the
        wrapped coroutine on the next cycle through the event loop.
        The coroutine then has a chance to clean up or even deny
        the request using try/except/finally.
        Unlike Future.cancel, this does not guarantee that the
        task will be cancelled: the exception might be caught and
        acted upon, delaying cancellation of the task or preventing
        cancellation completely. The task may also return a value or
        raise a different exception.
        Immediately after this method is called, Task.cancelled() will
        not return True (unless the task was already cancelled). A
        task will be marked as cancelled when the wrapped coroutine
        terminates with a CancelledError exception (even if cancel()
        was not called).
        """
        self._log_traceback = False
        if self.done():
            return False
        if self._fut_waiter is not None:
            if self._fut_waiter.cancel():
                # Leave self._fut_waiter; it may be a Task that
                # catches and ignores the cancellation so we may have
                # to cancel it again later.
                return True
        # It must be the case that self.__step is already scheduled.
        self._must_cancel = True
        return True
    def __step(self, exc=None):
        # Run one "step" of the wrapped coroutine: resume it, and dispatch
        # on how it finished or what it yielded.
        if self.done():
            raise exceptions.InvalidStateError(
                f'_step(): already done: {self!r}, {exc!r}')
        if self._must_cancel:
            if not isinstance(exc, exceptions.CancelledError):
                exc = exceptions.CancelledError()
            self._must_cancel = False
        coro = self._coro
        self._fut_waiter = None
        # Mark this task as the one currently running on the loop.
        _enter_task(self._loop, self)
        # Call either coro.throw(exc) or coro.send(None).
        try:
            if exc is None:
                # We use the `send` method directly, because coroutines
                # don't have `__iter__` and `__next__` methods.
                result = coro.send(None)
            else:
                result = coro.throw(exc)
        except StopIteration as exc:
            if self._must_cancel:
                # Task is cancelled right before coro stops.
                self._must_cancel = False
                super().cancel()
            else:
                super().set_result(exc.value)
        except exceptions.CancelledError as e:
            super().cancel()  # I.e., Future.cancel(self).
        except (KeyboardInterrupt, SystemExit) as exc:
            super().set_exception(exc)
            raise
        except BaseException as exc:
            super().set_exception(exc)
        else:
            # Coroutine suspended: decide what to do with what it yielded.
            self._set_fut_waiter(result)
        finally:
            _leave_task(self._loop, self)
            self = None  # Needed to break cycles when an exception occurs.
    def _set_fut_waiter(self, result):
        # 'result' is whatever the coroutine yielded: a Future to wait on,
        # a bare None (relinquish for one iteration), or something bogus.
        blocking = getattr(result, '_asyncio_future_blocking', None)
        if blocking is not None:
            # Yielded Future must come from Future.__iter__().
            if futures._get_loop(result) is not self._loop:
                new_exc = RuntimeError(
                    f'Task {self!r} got Future '
                    f'{result!r} attached to a different loop')
                self._loop.call_soon(
                    self.__step, new_exc, context=self._context)
            elif blocking:
                if result is self:
                    new_exc = RuntimeError(
                        f'Task cannot await on itself: {self!r}')
                    self._loop.call_soon(
                        self.__step, new_exc, context=self._context)
                else:
                    result._asyncio_future_blocking = False
                    result.add_done_callback(
                        self.__wakeup, context=self._context)
                    self._fut_waiter = result
                    if self._must_cancel:
                        if self._fut_waiter.cancel():
                            self._must_cancel = False
            else:
                new_exc = RuntimeError(
                    f'yield was used instead of yield from '
                    f'in task {self!r} with {result!r}')
                self._loop.call_soon(
                    self.__step, new_exc, context=self._context)
        elif result is None:
            # Bare yield relinquishes control for one event loop iteration.
            self._loop.call_soon(self.__step, context=self._context)
        elif inspect.isgenerator(result):
            # Yielding a generator is just wrong.
            new_exc = RuntimeError(
                f'yield was used instead of yield from for '
                f'generator in task {self!r} with {result!r}')
            self._loop.call_soon(
                self.__step, new_exc, context=self._context)
        else:
            # Yielding something else is an error.
            new_exc = RuntimeError(f'Task got bad yield: {result!r}')
            self._loop.call_soon(
                self.__step, new_exc, context=self._context)
    # Needed to be compatible with the C version
    _step = __step
    def __wakeup(self, future):
        try:
            future.result()
        except BaseException as exc:
            # This may also be a cancellation.
            self.__step(exc)
        else:
            # Don't pass the value of `future.result()` explicitly,
            # as `Future.__iter__` and `Future.__await__` don't need it.
            # If we call `_step(value, None)` instead of `_step()`,
            # Python eval loop would use `.send(value)` method call,
            # instead of `__next__()`, which is slower for futures
            # that return non-generator iterators from their `__iter__`.
            self.__step()
        self = None  # Needed to break cycles when an exception occurs.
_PyTask = Task
try:
import _asyncio
except ImportError:
pass
else:
# _CTask is needed for tests.
Task = _CTask = _asyncio.Task
def create_task(coro, *, name=None):
    """Schedule *coro* as a Task on the running event loop.

    Returns the new Task; *name*, when given, is applied to it via
    _set_task_name().
    """
    running_loop = events.get_running_loop()
    new_task = running_loop.create_task(coro)
    _set_task_name(new_task, name)
    return new_task
# wait() and as_completed() similar to those in PEP 3148.
FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
async def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the Futures and coroutines given by fs to complete.
    The sequence futures must not be empty.
    Coroutines will be wrapped in Tasks.
    Returns two sets of Future: (done, pending).
    Usage:
        done, pending = await asyncio.wait(fs)
    Note: This does not raise TimeoutError! Futures that aren't done
    when the timeout occurs are returned in the second set.
    """
    if futures.isfuture(fs) or coroutines.iscoroutine(fs):
        raise TypeError(f"expect a list of futures, not {type(fs).__name__}")
    if not fs:
        raise ValueError('Set of coroutines/Futures is empty.')
    if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
        raise ValueError(f'Invalid return_when value: {return_when}')
    if loop is None:
        loop = events.get_running_loop()
    else:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning, stacklevel=2)
    # Deduplicate and wrap every element into a future bound to this loop.
    fs = {ensure_future(f, loop=loop) for f in set(fs)}
    return await _wait(fs, timeout, return_when, loop)
def _release_waiter(waiter, *args):
if not waiter.done():
waiter.set_result(None)
async def wait_for(fut, timeout, *, loop=None):
    """Wait for the single Future or coroutine to complete, with timeout.
    Coroutine will be wrapped in Task.
    Returns result of the Future or coroutine. When a timeout occurs,
    it cancels the task and raises TimeoutError. To avoid the task
    cancellation, wrap it in shield().
    If the wait is cancelled, the task is also cancelled.
    This function is a coroutine.
    """
    if loop is None:
        loop = events.get_running_loop()
    else:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning, stacklevel=2)
    if timeout is None:
        return await fut
    if timeout <= 0:
        # With a non-positive timeout only an already-completed future can
        # succeed; otherwise cancel it and time out immediately.
        fut = ensure_future(fut, loop=loop)
        if fut.done():
            return fut.result()
        fut.cancel()
        raise exceptions.TimeoutError()
    # The waiter is released either by the timeout firing or by *fut*
    # completing (via the done callback).
    waiter = loop.create_future()
    timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
    cb = functools.partial(_release_waiter, waiter)
    fut = ensure_future(fut, loop=loop)
    fut.add_done_callback(cb)
    try:
        # wait until the future completes or the timeout
        try:
            await waiter
        except exceptions.CancelledError:
            fut.remove_done_callback(cb)
            fut.cancel()
            raise
        if fut.done():
            return fut.result()
        else:
            fut.remove_done_callback(cb)
            # We must ensure that the task is not running
            # after wait_for() returns.
            # See https://bugs.python.org/issue32751
            await _cancel_and_wait(fut, loop=loop)
            raise exceptions.TimeoutError()
    finally:
        timeout_handle.cancel()
async def _wait(fs, timeout, return_when, loop):
    """Internal helper for wait().
    The fs argument must be a collection of Futures.
    """
    assert fs, 'Set of Futures is empty.'
    waiter = loop.create_future()
    timeout_handle = None
    if timeout is not None:
        timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
    counter = len(fs)
    def _on_completion(f):
        # Per-future completion callback: release the waiter once the
        # return_when policy is satisfied.  Note that 'and' binds tighter
        # than 'or' in the condition below.
        nonlocal counter
        counter -= 1
        if (counter <= 0 or
                return_when == FIRST_COMPLETED or
                return_when == FIRST_EXCEPTION and (not f.cancelled() and
                                                    f.exception() is not None)):
            if timeout_handle is not None:
                timeout_handle.cancel()
            if not waiter.done():
                waiter.set_result(None)
    for f in fs:
        f.add_done_callback(_on_completion)
    try:
        await waiter
    finally:
        # Always detach our callbacks, even when the wait was cancelled.
        if timeout_handle is not None:
            timeout_handle.cancel()
        for f in fs:
            f.remove_done_callback(_on_completion)
    done, pending = set(), set()
    for f in fs:
        if f.done():
            done.add(f)
        else:
            pending.add(f)
    return done, pending
async def _cancel_and_wait(fut, loop):
    """Cancel the *fut* future or task and wait until it completes."""
    # A separate waiter future is completed by fut's done callback.
    waiter = loop.create_future()
    cb = functools.partial(_release_waiter, waiter)
    fut.add_done_callback(cb)
    try:
        fut.cancel()
        # We cannot wait on *fut* directly to make
        # sure _cancel_and_wait itself is reliably cancellable.
        await waiter
    finally:
        fut.remove_done_callback(cb)
# This is *not* a @coroutine! It is just an iterator (yielding Futures).
def as_completed(fs, *, loop=None, timeout=None):
    """Return an iterator whose values are coroutines.
    When waiting for the yielded coroutines you'll get the results (or
    exceptions!) of the original Futures (or coroutines), in the order
    in which and as soon as they complete.
    This differs from PEP 3148; the proper way to use this is:
        for f in as_completed(fs):
            result = await f  # The 'await' may raise.
            # Use result.
    If a timeout is specified, the 'await' will raise
    TimeoutError when the timeout occurs before all Futures are done.
    Note: The futures 'f' are not necessarily members of fs.
    """
    if futures.isfuture(fs) or coroutines.iscoroutine(fs):
        raise TypeError(f"expect a list of futures, not {type(fs).__name__}")
    from .queues import Queue  # Import here to avoid circular import problem.
    done = Queue(loop=loop)
    if loop is None:
        loop = events.get_event_loop()
    else:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning, stacklevel=2)
    todo = {ensure_future(f, loop=loop) for f in set(fs)}
    timeout_handle = None
    def _on_timeout():
        # Detach from all remaining futures and queue one dummy value per
        # still-pending future so every outstanding _wait_for_one() raises.
        for f in todo:
            f.remove_done_callback(_on_completion)
            done.put_nowait(None)  # Queue a dummy value for _wait_for_one().
        todo.clear()  # Can't do todo.remove(f) in the loop.
    def _on_completion(f):
        if not todo:
            return  # _on_timeout() was here first.
        todo.remove(f)
        done.put_nowait(f)
        if not todo and timeout_handle is not None:
            timeout_handle.cancel()
    async def _wait_for_one():
        f = await done.get()
        if f is None:
            # Dummy value from _on_timeout().
            raise exceptions.TimeoutError
        return f.result()  # May raise f.exception().
    for f in todo:
        f.add_done_callback(_on_completion)
    if todo and timeout is not None:
        timeout_handle = loop.call_later(timeout, _on_timeout)
    for _ in range(len(todo)):
        yield _wait_for_one()
@types.coroutine
def __sleep0():
    """Skip one event loop run cycle.
    This is a private helper for 'asyncio.sleep()', used
    when the 'delay' is set to 0. It uses a bare 'yield'
    expression (which Task.__step knows how to handle)
    instead of creating a Future object.
    """
    # A bare yield suspends once; Task._set_fut_waiter handles a yielded
    # None by rescheduling the task with call_soon().
    yield
async def sleep(delay, result=None, *, loop=None):
    """Coroutine that completes after a given time (in seconds)."""
    if delay <= 0:
        # Non-positive delay: yield to the loop exactly once, then return.
        await __sleep0()
        return result
    if loop is None:
        loop = events.get_running_loop()
    else:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning, stacklevel=2)
    future = loop.create_future()
    h = loop.call_later(delay,
                        futures._set_result_unless_cancelled,
                        future, result)
    try:
        return await future
    finally:
        # Cancel the timer if we were cancelled before it fired.
        h.cancel()
def ensure_future(coro_or_future, *, loop=None):
    """Wrap a coroutine or an awaitable in a future.

    Futures are returned unchanged; coroutines are scheduled as tasks;
    other awaitables are wrapped in a coroutine first.
    """
    # Non-standard hook: AsyncLazyValue objects build their own future.
    if _ASYNC_LAZY_VALUE_TYPE is not None and type(coro_or_future) is _ASYNC_LAZY_VALUE_TYPE:
        return coro_or_future.ensure_future(loop)
    if coroutines.iscoroutine(coro_or_future):
        target_loop = loop if loop is not None else events.get_event_loop()
        task = target_loop.create_task(coro_or_future)
        if task._source_traceback:
            # Drop this helper's frame from the captured traceback.
            del task._source_traceback[-1]
        return task
    if futures.isfuture(coro_or_future):
        if loop is not None and loop is not futures._get_loop(coro_or_future):
            raise ValueError('The future belongs to a different loop than '
                             'the one specified as the loop argument')
        return coro_or_future
    if _isawaitable(coro_or_future):
        return ensure_future(_wrap_awaitable(coro_or_future), loop=loop)
    raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
                    'required')
@types.coroutine
def _wrap_awaitable(awaitable):
    """Helper for asyncio.ensure_future().
    Wraps awaitable (an object with __await__) into a coroutine
    that will later be wrapped in a Task by ensure_future().
    """
    return (yield from awaitable.__await__())
# Tag the wrapper with the marker from asyncio.coroutines so it is
# recognised as a coroutine function by the iscoroutine machinery.
_wrap_awaitable._is_coroutine = _is_coroutine
def _is_coro_suspended(coro):
# this version will be used if _asyncio module
# does not export native version of gather
# so coroutine will always be not started
return False
class _GatheringFuture(futures.Future):
    """Helper for gather().

    Overrides cancel() so that cancelling the aggregate future cancels
    every child, mirroring Task.cancel() semantics: the gathering future
    does not immediately mark itself as cancelled.
    """
    def __init__(self, children, *, loop=None):
        super().__init__(loop=loop)
        self._children = children
        self._cancel_requested = False
    def cancel(self):
        if self.done():
            return False
        any_cancelled = False
        for child in self._children:
            # Try to cancel every child, not just the first that accepts.
            any_cancelled = child.cancel() or any_cancelled
        if any_cancelled:
            # If any child tasks were actually cancelled, we should
            # propagate the cancellation request regardless of
            # *return_exceptions* argument. See issue 32684.
            self._cancel_requested = True
        return any_cancelled
def gather(*coros_or_futures, loop=None, return_exceptions=False):
    """Return a future aggregating results from the given coroutines/futures.
    Coroutines will be wrapped in a future and scheduled in the event
    loop. They will not necessarily be scheduled in the same order as
    passed in.
    All futures must share the same event loop. If all the tasks are
    done successfully, the returned future's result is the list of
    results (in the order of the original sequence, not necessarily
    the order of results arrival). If *return_exceptions* is True,
    exceptions in the tasks are treated the same as successful
    results, and gathered in the result list; otherwise, the first
    raised exception will be immediately propagated to the returned
    future.
    Cancellation: if the outer Future is cancelled, all children (that
    have not completed yet) are also cancelled. If any child is
    cancelled, this is treated as if it raised CancelledError --
    the outer Future is *not* cancelled in this case. (This is to
    prevent the cancellation of one child to cause other children to
    be cancelled.)
    If *return_exceptions* is False, cancelling gather() after it
    has been marked done won't cancel any submitted awaitables.
    For instance, gather can be marked done after propagating an
    exception to the caller, therefore, calling ``gather.cancel()``
    after catching an exception (raised by one of the awaitables) from
    gather won't cancel any other awaitables.
    """
    if not coros_or_futures:
        # Nothing to gather: return an already-completed future.
        if loop is None:
            loop = events.get_event_loop()
        else:
            warnings.warn("The loop argument is deprecated since Python 3.8, "
                          "and scheduled for removal in Python 3.10.",
                          DeprecationWarning, stacklevel=2)
        outer = loop.create_future()
        outer.set_result([])
        return outer
    def _done_callback(fut):
        # Runs once per unique child; completes 'outer' when appropriate.
        nonlocal nfinished
        nfinished += 1
        if outer.done():
            if not fut.cancelled():
                # Mark exception retrieved.
                fut.exception()
            return
        if not return_exceptions:
            if fut.cancelled():
                # Check if 'fut' is cancelled first, as
                # 'fut.exception()' will *raise* a CancelledError
                # instead of returning it.
                exc = exceptions.CancelledError()
                outer.set_exception(exc)
                return
            else:
                exc = fut.exception()
                if exc is not None:
                    outer.set_exception(exc)
                    return
        if nfinished == nfuts:
            # All futures are done; create a list of results
            # and set it to the 'outer' future.
            results = []
            for fut in children:
                if fut.cancelled():
                    # Check if 'fut' is cancelled first, as
                    # 'fut.exception()' will *raise* a CancelledError
                    # instead of returning it.
                    res = exceptions.CancelledError()
                else:
                    res = fut.exception()
                    if res is None:
                        res = fut.result()
                results.append(res)
            if outer._cancel_requested:
                # If gather is being cancelled we must propagate the
                # cancellation regardless of *return_exceptions* argument.
                # See issue 32684.
                outer.set_exception(exceptions.CancelledError())
            else:
                outer.set_result(results)
    # arg_to_fut deduplicates arguments; 'children' keeps argument order
    # (including duplicates) so results line up with the inputs.
    arg_to_fut = {}
    children = []
    nfuts = 0
    nfinished = 0
    for arg in coros_or_futures:
        if arg not in arg_to_fut:
            fut = ensure_future(arg, loop=loop)
            if loop is None:
                loop = futures._get_loop(fut)
            if fut is not arg:
                # 'arg' was not a Future, therefore, 'fut' is a new
                # Future created specifically for 'arg'. Since the caller
                # can't control it, disable the "destroy pending task"
                # warning.
                fut._log_destroy_pending = False
            nfuts += 1
            arg_to_fut[arg] = fut
            fut.add_done_callback(_done_callback)
        else:
            # There's a duplicate Future object in coros_or_futures.
            fut = arg_to_fut[arg]
        children.append(fut)
    outer = _GatheringFuture(children, loop=loop)
    return outer
def shield(arg, *, loop=None):
    """Wait for a future, shielding it from cancellation.
    The statement
        res = await shield(something())
    is exactly equivalent to the statement
        res = await something()
    *except* that if the coroutine containing it is cancelled, the
    task running in something() is not cancelled. From the POV of
    something(), the cancellation did not happen. But its caller is
    still cancelled, so the yield-from expression still raises
    CancelledError. Note: If something() is cancelled by other means
    this will still cancel shield().
    If you want to completely ignore cancellation (not recommended)
    you can combine shield() with a try/except clause, as follows:
        try:
            res = await shield(something())
        except CancelledError:
            res = None
    """
    if loop is not None:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning, stacklevel=2)
    inner = ensure_future(arg, loop=loop)
    if inner.done():
        # Shortcut.
        return inner
    loop = futures._get_loop(inner)
    # 'outer' is what the caller awaits; cancelling it does not cancel
    # 'inner' -- the two futures are linked only through these callbacks.
    outer = loop.create_future()
    def _inner_done_callback(inner):
        if outer.cancelled():
            if not inner.cancelled():
                # Mark inner's result as retrieved.
                inner.exception()
            return
        if inner.cancelled():
            outer.cancel()
        else:
            exc = inner.exception()
            if exc is not None:
                outer.set_exception(exc)
            else:
                outer.set_result(inner.result())
    def _outer_done_callback(outer):
        if not inner.done():
            inner.remove_done_callback(_inner_done_callback)
    inner.add_done_callback(_inner_done_callback)
    outer.add_done_callback(_outer_done_callback)
    return outer
def run_coroutine_threadsafe(coro, loop):
    """Submit a coroutine object to a given event loop.
    Return a concurrent.futures.Future to access the result.
    """
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required')
    future = concurrent.futures.Future()
    def callback():
        # Runs inside the loop's thread; any failure while scheduling the
        # coroutine is reported back through the concurrent future.
        try:
            futures._chain_future(ensure_future(coro, loop=loop), future)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            raise
    loop.call_soon_threadsafe(callback)
    return future
# WeakSet containing all alive tasks.
_all_tasks = weakref.WeakSet()
# Dictionary containing tasks that are currently active in
# all running event loops. {EventLoop: Task}
_current_tasks = {}
def _register_task(task):
    """Register a new task in asyncio as executed by loop."""
    # _all_tasks is a WeakSet, so finished tasks can still be collected
    # even if they are never explicitly unregistered.
    _all_tasks.add(task)
def _enter_task(loop, task):
    """Mark *task* as the task currently running on *loop*.

    Raises RuntimeError if another task is already marked as running
    there -- only one task may execute on a loop at a time.
    """
    running = _current_tasks.get(loop)
    if running is not None:
        raise RuntimeError(f"Cannot enter into task {task!r} while another "
                           f"task {running!r} is being executed.")
    _current_tasks[loop] = task
def _leave_task(loop, task):
    """Clear *task* as the current task of *loop*.

    Raises RuntimeError if *task* is not the task currently recorded
    as running on that loop.
    """
    running = _current_tasks.get(loop)
    if running is not task:
        raise RuntimeError(f"Leaving task {task!r} does not match "
                           f"the current task {running!r}.")
    del _current_tasks[loop]
def _unregister_task(task):
    """Unregister a task."""
    # discard() (not remove()) so unregistering twice is harmless.
    _all_tasks.discard(task)
_py_register_task = _register_task
_py_unregister_task = _unregister_task
_py_enter_task = _enter_task
_py_leave_task = _leave_task
_py_all_tasks = all_tasks
_py_task_all_tasks = _PyTask.all_tasks
try:
from _asyncio import (_register_task, _unregister_task,
_enter_task, _leave_task,
_current_tasks,
all_tasks,
Task as CTask,
AsyncLazyValue as _ASYNC_LAZY_VALUE_TYPE,
ensure_future,
gather,
_is_coro_suspended)
# ensure that all_tasks on PyTask uses the same datasource
# that _register/_unregister (since import shadows them)
_PyTask.all_tasks = CTask.all_tasks
except ImportError:
pass
else:
_c_register_task = _register_task
_c_unregister_task = _unregister_task
_c_enter_task = _enter_task
_c_leave_task = _leave_task
_c_all_tasks = all_tasks
_c_task_all_tasks = CTask.all_tasks
| 34.212766 | 93 | 0.618574 |
7957c3305b1c939ecb79fdaeed8f9867f6248db9 | 2,661 | py | Python | utils/input_pipeline.py | TropComplique/trained-ternary-quantization | 4cd4132124c30e0e868a78eb1b2a2798df5e2a90 | [
"MIT"
] | 97 | 2017-11-06T10:12:44.000Z | 2022-02-09T04:14:29.000Z | utils/input_pipeline.py | ljh-xjtu/trained-ternary-quantization | 4cd4132124c30e0e868a78eb1b2a2798df5e2a90 | [
"MIT"
] | 8 | 2017-09-25T03:49:51.000Z | 2019-02-14T07:04:05.000Z | utils/input_pipeline.py | ljh-xjtu/trained-ternary-quantization | 4cd4132124c30e0e868a78eb1b2a2798df5e2a90 | [
"MIT"
] | 24 | 2017-11-08T12:45:43.000Z | 2021-09-24T05:53:27.000Z | import numpy as np
from PIL import Image, ImageEnhance
from torchvision.datasets import ImageFolder
import torchvision.transforms as transforms
TRAIN_DIR = '/home/ubuntu/data/tiny-imagenet-200/training'
VAL_DIR = '/home/ubuntu/data/tiny-imagenet-200/validation'
"""It assumes that training image data is in the following form:
TRAIN_DIR/class4/image44.jpg
TRAIN_DIR/class4/image12.jpg
...
TRAIN_DIR/class55/image33.jpg
TRAIN_DIR/class55/image543.jpg
...
TRAIN_DIR/class1/image6.jpg
TRAIN_DIR/class1/image99.jpg
...
And the same for validation data.
"""
def get_image_folders():
    """
    Build an input pipeline for training and evaluation.
    For training data it does data augmentation.

    Returns:
        (train_folder, val_folder): torchvision ImageFolder datasets for
        TRAIN_DIR and VAL_DIR with the transforms below attached.
    """
    # PIL enhancement ops keyed by an index; 'f' is the enhancement factor
    # where 1.0 means "unchanged".
    enhancers = {
        0: lambda image, f: ImageEnhance.Color(image).enhance(f),
        1: lambda image, f: ImageEnhance.Contrast(image).enhance(f),
        2: lambda image, f: ImageEnhance.Brightness(image).enhance(f),
        3: lambda image, f: ImageEnhance.Sharpness(image).enhance(f)
    }
    # intensities of enhancers
    # (clipped Gaussians centered at 1.0, i.e. biased toward "no change")
    factors = {
        0: lambda: np.clip(np.random.normal(1.0, 0.3), 0.4, 1.6),
        1: lambda: np.clip(np.random.normal(1.0, 0.15), 0.7, 1.3),
        2: lambda: np.clip(np.random.normal(1.0, 0.15), 0.7, 1.3),
        3: lambda: np.clip(np.random.normal(1.0, 0.3), 0.4, 1.6),
    }
    # randomly change color of an image
    def enhance(image):
        order = [0, 1, 2, 3]
        np.random.shuffle(order)
        # random enhancers in random order
        for i in order:
            f = factors[i]()
            image = enhancers[i](image, f)
        return image
    # random rotation: sigma 15 degrees, hard-clipped to +/-40 degrees
    def rotate(image):
        degree = np.clip(np.random.normal(0.0, 15.0), -40.0, 40.0)
        return image.rotate(degree, Image.BICUBIC)
    # training data augmentation on the fly
    # NOTE(review): 56x56 crops from what appear to be 64x64 Tiny ImageNet
    # images -- confirm the source image size matches this assumption.
    train_transform = transforms.Compose([
        transforms.Lambda(rotate),
        transforms.RandomCrop(56),
        transforms.RandomHorizontalFlip(),
        transforms.Lambda(enhance),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        ),
    ])
    # for validation data (deterministic: center crop only, no augmentation)
    val_transform = transforms.Compose([
        transforms.CenterCrop(56),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        ),
    ])
    # mean and std are taken from here:
    # http://pytorch.org/docs/master/torchvision/models.html
    train_folder = ImageFolder(TRAIN_DIR, train_transform)
    val_folder = ImageFolder(VAL_DIR, val_transform)
    return train_folder, val_folder
| 29.566667 | 70 | 0.639233 |
7957c3564d52a828fd4f7e5d954eef2ec2b984a5 | 1,796 | py | Python | app/user/serializers.py | ricardolira/recipe-app-api | 749c39af92383ecfa208636fc2b09deaaefeeb2a | [
"MIT"
] | null | null | null | app/user/serializers.py | ricardolira/recipe-app-api | 749c39af92383ecfa208636fc2b09deaaefeeb2a | [
"MIT"
] | null | null | null | app/user/serializers.py | ricardolira/recipe-app-api | 749c39af92383ecfa208636fc2b09deaaefeeb2a | [
"MIT"
] | null | null | null | from django.contrib.auth import authenticate, get_user_model
# This is for easing translation if page is going live in several languages
# from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    """Serializer for the user object (email login, hashed password)."""
    class Meta:
        model = get_user_model()
        fields = ('email', 'password', 'name')
        # write_only: the password is accepted on input but never echoed
        # back in API responses.
        extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
    def create(self, validated_data):
        """Create a new user with encrypted password and return it"""
        return get_user_model().objects.create_user(**validated_data)
    def update(self, instance, validated_data):
        """Update a user, set the password correctly and return it."""
        # Pop the password so ModelSerializer.update() does not write it
        # as plain text; hash it afterwards via set_password().
        password = validated_data.pop('password', None)
        user = super().update(instance, validated_data)
        if password:
            user.set_password(password)
            user.save()
        return user
class AuthTokenSerializer(serializers.Serializer):
    """Serializer for the user authentication (token login) payload."""
    email = serializers.CharField()
    password = serializers.CharField(
        style={'input_type': 'password'},
        trim_whitespace=False)

    def validate(self, attrs):
        """Validate the credentials and attach the authenticated user to attrs."""
        email = attrs.get('email')
        password = attrs.get('password')
        # authenticate() yields a falsy result on bad credentials, which is
        # translated into a ValidationError below instead of propagating.
        user = authenticate(
            request=self.context.get('request'),
            username=email,
            password=password
        )
        if not user:
            msg = 'Unable to authenticate with provided credentials'
            raise serializers.ValidationError(msg, code='authentication')
        # Downstream views read the authenticated user from attrs['user'].
        attrs['user'] = user
        return attrs
| 31.508772 | 75 | 0.653118 |
7957c429e51a0b6b30c40826e2cebec4afbc7b05 | 9,429 | py | Python | redash/query_runner/mysql.py | keywoshilishuang/redash | 7def30435e7c09c1bb76286f11ebafb3ad34e204 | [
"BSD-2-Clause"
] | null | null | null | redash/query_runner/mysql.py | keywoshilishuang/redash | 7def30435e7c09c1bb76286f11ebafb3ad34e204 | [
"BSD-2-Clause"
] | null | null | null | redash/query_runner/mysql.py | keywoshilishuang/redash | 7def30435e7c09c1bb76286f11ebafb3ad34e204 | [
"BSD-2-Clause"
] | null | null | null | import logging
import os
import threading
import json
import time
from redash.query_runner import TYPE_FLOAT, TYPE_INTEGER, TYPE_DATETIME, TYPE_STRING, TYPE_DATE, BaseSQLQueryRunner, InterruptException, register
from redash.settings import parse_boolean
from redash.utils import json_dumps, json_loads
try:
import MySQLdb
enabled = True
except ImportError:
enabled = False
logger = logging.getLogger(__name__)
# Maps MySQL protocol field-type codes (cf. MySQLdb.constants.FIELD_TYPE) to
# redash column types; codes absent from the map fall back to None in
# _run_query's fetch_columns call.
types_map = {
    0: TYPE_FLOAT,       # DECIMAL
    1: TYPE_INTEGER,     # TINY
    2: TYPE_INTEGER,     # SHORT
    3: TYPE_INTEGER,     # LONG
    4: TYPE_FLOAT,       # FLOAT
    5: TYPE_FLOAT,       # DOUBLE
    7: TYPE_DATETIME,    # TIMESTAMP
    8: TYPE_INTEGER,     # LONGLONG
    9: TYPE_INTEGER,     # INT24
    10: TYPE_DATE,       # DATE
    12: TYPE_DATETIME,   # DATETIME
    15: TYPE_STRING,     # VARCHAR
    16: TYPE_INTEGER,    # BIT
    246: TYPE_FLOAT,     # NEWDECIMAL
    253: TYPE_STRING,    # VAR_STRING
    254: TYPE_STRING,    # STRING
}
class Result(object):
    """Mutable holder for a query outcome.

    run_query()/_run_query() attach ``json_data`` and ``error`` attributes to
    instances after construction; the class itself carries no state.
    """
    def __init__(self):
        pass
class Mysql(BaseSQLQueryRunner):
    """Query runner that executes SQL against a MySQL server.

    Queries run on a worker thread so the caller can interrupt them; a
    cancellation issues ``KILL <thread_id>`` over a second connection.
    """
    noop_query = "SELECT 1"

    @classmethod
    def configuration_schema(cls):
        """Return the JSON schema describing this data source's settings.

        SSL-related fields are hidden when the MYSQL_SHOW_SSL_SETTINGS
        environment variable is set to a false value (default: shown).
        """
        show_ssl_settings = parse_boolean(
            os.environ.get('MYSQL_SHOW_SSL_SETTINGS', 'true'))

        schema = {
            'type': 'object',
            'properties': {
                'host': {
                    'type': 'string',
                    'default': '127.0.0.1'
                },
                'user': {
                    'type': 'string'
                },
                'passwd': {
                    'type': 'string',
                    'title': 'Password'
                },
                'db': {
                    'type': 'string',
                    'title': 'Database name'
                },
                'port': {
                    'type': 'number',
                    'default': 3306,
                }
            },
            "order": ['host', 'port', 'user', 'passwd', 'db'],
            'required': ['db'],
            'secret': ['passwd']
        }

        if show_ssl_settings:
            schema['properties'].update({
                'use_ssl': {
                    'type': 'boolean',
                    'title': 'Use SSL'
                },
                'ssl_cacert': {
                    'type':
                    'string',
                    'title':
                    'Path to CA certificate file to verify peer against (SSL)'
                },
                'ssl_cert': {
                    'type': 'string',
                    'title': 'Path to client certificate file (SSL)'
                },
                'ssl_key': {
                    'type': 'string',
                    'title': 'Path to private key file (SSL)'
                }
            })

        return schema

    @classmethod
    def name(cls):
        return "MySQL"

    @classmethod
    def enabled(cls):
        # MySQLdb is an optional dependency; the runner is unavailable when
        # the module-level import failed.
        return enabled

    def _connection(self):
        """Open and return a new MySQLdb connection from the configuration."""
        params = dict(host=self.configuration.get('host', ''),
                      user=self.configuration.get('user', ''),
                      passwd=self.configuration.get('passwd', ''),
                      db=self.configuration['db'],
                      port=self.configuration.get('port', 3306),
                      charset='utf8',
                      use_unicode=True,
                      connect_timeout=60)

        ssl_options = self._get_ssl_parameters()
        if ssl_options:
            params['ssl'] = ssl_options

        connection = MySQLdb.connect(**params)
        return connection

    def _get_tables(self, schema):
        """Populate *schema* with table/column metadata from information_schema.

        Tables outside the configured database are qualified as "db.table".
        Raises Exception when the metadata query fails.
        """
        query = """
        SELECT col.table_schema as table_schema,
               col.table_name as table_name,
               col.column_name as column_name
        FROM `information_schema`.`columns` col
        WHERE col.table_schema NOT IN ('information_schema', 'performance_schema', 'mysql', 'sys');
        """

        results, error = self.run_query(query, None)

        if error is not None:
            raise Exception("Failed getting schema.")

        # Fix: this dump used to be logged twice at ERROR level; schema
        # retrieval is routine, so log once at DEBUG.
        logger.debug("mysql get tables result is:%s", results)
        results = json_loads(results)

        for row in results['rows']:
            if row['table_schema'] != self.configuration['db']:
                table_name = u'{}.{}'.format(row['table_schema'],
                                             row['table_name'])
            else:
                table_name = row['table_name']

            if table_name not in schema:
                schema[table_name] = {'name': table_name, 'columns': []}

            schema[table_name]['columns'].append(row['column_name'])

        return schema.values()

    def run_query(self, query, user):
        """Execute *query* on a worker thread; return (json_data, error).

        A KeyboardInterrupt/InterruptException while waiting cancels the
        server-side query via KILL.
        """
        ev = threading.Event()
        thread_id = None
        r = Result()
        t = None
        try:
            connection = self._connection()
            thread_id = connection.thread_id()
            t = threading.Thread(target=self._run_query,
                                 args=(query, user, connection, r, ev))
            t.start()
            # Poll in 1s slices so KeyboardInterrupt can be delivered while
            # waiting for the worker to finish.
            while not ev.wait(1):
                pass
        except (KeyboardInterrupt, InterruptException):
            logger.error("MySQL Query cancelled by user.")
            error = self._cancel(thread_id)
            # Fix: the interrupt can arrive before the worker thread exists;
            # joining None raised AttributeError here.
            if t is not None:
                t.join()
            r.json_data = None
            r.error = "Query cancelled by user."
            if error is not None:
                r.error = error

        return r.json_data, r.error

    def _run_query(self, query, user, connection, r, ev):
        """Worker-thread body: run the query and fill the shared Result *r*.

        Always sets *ev* and closes *connection*, even on error.
        """
        # Fix: initialize so the except branch cannot hit NameError when
        # connection.cursor() itself raises.
        cursor = None
        try:
            cursor = connection.cursor()
            logger.debug("MySQL running query: %s", query)
            cursor.execute(query)

            data = cursor.fetchall()
            desc = cursor.description

            # For multi-statement queries keep the last result set that has a
            # description (i.e. the last statement returning rows).
            while cursor.nextset():
                if cursor.description is not None:
                    data = cursor.fetchall()
                    desc = cursor.description

            # Fix: replaced leftover Python-2-only debug ``print`` statements
            # with proper logging.
            logger.debug("mysql query result description: %r", desc)

            # TODO - very similar to pg.py
            if desc is not None:
                columns = self.fetch_columns([(i[0], types_map.get(i[1], None))
                                              for i in desc])
                rows = [
                    dict(zip((c['name'] for c in columns), row))
                    for row in data
                ]

                data = {'columns': columns, 'rows': rows}
                r.json_data = json_dumps(data)
                r.error = None
            else:
                r.json_data = None
                r.error = "No data was returned."

            cursor.close()
        except MySQLdb.Error as e:
            if cursor:
                cursor.close()
            r.json_data = None
            r.error = e.args[1]
        finally:
            ev.set()
            if connection:
                connection.close()

    def _get_ssl_parameters(self):
        """Map the ssl_* configuration keys onto MySQLdb's ``ssl`` dict, or None."""
        if not self.configuration.get('use_ssl'):
            return None

        ssl_params = {}

        if self.configuration.get('use_ssl'):
            config_map = dict(ssl_cacert='ca', ssl_cert='cert', ssl_key='key')
            for key, cfg in config_map.items():
                val = self.configuration.get(key)
                if val:
                    ssl_params[cfg] = val

        return ssl_params

    def _cancel(self, thread_id):
        """KILL the server-side thread running the query.

        Returns the MySQL error message on failure, otherwise None.
        """
        connection = None
        cursor = None
        error = None

        try:
            connection = self._connection()
            cursor = connection.cursor()
            query = "KILL %s" % (thread_id)
            logging.debug(query)
            cursor.execute(query)
        except MySQLdb.Error as e:
            if cursor:
                cursor.close()
            error = e.args[1]
        finally:
            if connection:
                connection.close()

        return error
class RDSMySQL(Mysql):
    """MySQL query runner variant preconfigured for Amazon RDS instances."""

    @classmethod
    def name(cls):
        return "MySQL (Amazon RDS)"

    @classmethod
    def type(cls):
        return 'rds_mysql'

    @classmethod
    def configuration_schema(cls):
        """RDS schema: host/user/passwd are required and SSL is a plain toggle
        (the CA bundle is fixed, see _get_ssl_parameters below)."""
        return {
            'type': 'object',
            'properties': {
                'host': {
                    'type': 'string',
                },
                'user': {
                    'type': 'string'
                },
                'passwd': {
                    'type': 'string',
                    'title': 'Password'
                },
                'db': {
                    'type': 'string',
                    'title': 'Database name'
                },
                'port': {
                    'type': 'number',
                    'default': 3306,
                },
                'use_ssl': {
                    'type': 'boolean',
                    'title': 'Use SSL'
                }
            },
            "order": ['host', 'port', 'user', 'passwd', 'db'],
            'required': ['db', 'user', 'passwd', 'host'],
            'secret': ['passwd']
        }

    def _get_ssl_parameters(self):
        # When SSL is on, verify the server against the Amazon RDS CA bundle
        # shipped with this package instead of user-supplied certificate paths.
        if self.configuration.get('use_ssl'):
            ca_path = os.path.join(os.path.dirname(__file__),
                                   './files/rds-combined-ca-bundle.pem')
            return {'ca': ca_path}

        return None
register(Mysql)
register(RDSMySQL)
| 29.012308 | 145 | 0.467918 |
7957c4950a679af57f45732c7facc114468d171a | 1,476 | py | Python | setup.py | JunShern/lm-evaluation-harness | 84aa15c6e4cb65adf39c2dccf91a799cc7e6440a | [
"MIT"
] | 203 | 2021-01-08T16:39:09.000Z | 2022-03-31T06:03:16.000Z | setup.py | JunShern/lm-evaluation-harness | 84aa15c6e4cb65adf39c2dccf91a799cc7e6440a | [
"MIT"
] | 183 | 2020-12-27T03:41:08.000Z | 2022-03-19T21:56:53.000Z | setup.py | JunShern/lm-evaluation-harness | 84aa15c6e4cb65adf39c2dccf91a799cc7e6440a | [
"MIT"
] | 73 | 2021-01-05T22:37:01.000Z | 2022-03-29T10:14:53.000Z | import setuptools
# The long description shown on PyPI is taken verbatim from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="lm_eval",
    version="0.0.1",
    author="Leo Gao",
    author_email="lg@eleuther.ai",
    description="A framework for evaluating autoregressive language models",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/EleutherAI/lm-evaluation-harness",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    # Mostly exact pins -- presumably to keep evaluation results reproducible
    # across installs (TODO confirm with maintainers before relaxing).
    install_requires=[
        "black==20.8b1",
        "best_download>=0.0.6",
        "datasets>=1.2.1",
        "click>=7.1",
        "scikit-learn>=0.24.1",
        "torch>=1.7",
        "transformers>=4.1",
        "sqlitedict==1.6.0",
        "pytablewriter==0.58.0",
        "sacrebleu==1.5.0",
        "pycountry==20.7.3",
        "numexpr==2.7.2",
        "lm_dataformat==0.0.19",
        "pytest==6.2.3",
        "pybind11==2.6.2",
        "tqdm-multiprocess==0.0.11",
        "zstandard==0.15.2",
        "jsonlines==2.0.0",
        "mock==4.0.3",
        "openai==0.6.4",
        "jieba==0.42.1",
        "nagisa==0.2.7",
        "t5==0.7.1",
        # bleurt is not on PyPI; it is installed straight from a pinned
        # GitHub archive.
        "bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt"
    ]
)
| 30.122449 | 122 | 0.573171 |
7957c5ca8fe5a8ee76cc88bf7c396e587883e694 | 8,566 | py | Python | PixivBatchHandler.py | itskenny0/PixivUtil2 | 3ff352b18800a129db77715882d61dd78798a428 | [
"BSD-2-Clause"
] | null | null | null | PixivBatchHandler.py | itskenny0/PixivUtil2 | 3ff352b18800a129db77715882d61dd78798a428 | [
"BSD-2-Clause"
] | null | null | null | PixivBatchHandler.py | itskenny0/PixivUtil2 | 3ff352b18800a129db77715882d61dd78798a428 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import demjson
import PixivArtistHandler
import PixivHelper
import PixivImageHandler
import PixivTagsHandler
import PixivUtil2
_default_batch_filename = "./batch_job.json"
class JobOption(object):
    """Per-job download options.

    Every option is seeded from the global configuration object and may then
    be overridden by the job's optional "option" mapping.
    """
    filenameFormat = ""
    filenameMangaFormat = ""
    filenameInfoFormat = ""
    filenameMangaInfoFormat = ""
    avatarNameFormat = ""
    rootDirectory = ""
    useTagsAsDir = False
    r18mode = False

    def __init__(self, job, _config):
        if _config is None:
            raise Exception("Cannot get default configuration, aborting...")

        option_names = (
            "filenameFormat",
            "filenameMangaFormat",
            "filenameInfoFormat",
            "filenameMangaInfoFormat",
            "avatarNameFormat",
            "rootDirectory",
            "useTagsAsDir",
            "r18mode",
        )

        # Seed every option from the global configuration.
        for attr in option_names:
            setattr(self, attr, getattr(_config, attr))

        # Job-local overrides (if any) win over the global defaults; only
        # keys actually present in the job's "option" mapping are applied.
        if "option" in job and job["option"] is not None:
            option_data = job["option"]
            for attr in option_names:
                if attr in option_data:
                    setattr(self, attr, option_data[attr])
def handle_members(caller, job, job_name, job_option):
    """Run a member-download job for one or many Pixiv member ids.

    The job dict accepts either "member_ids" (list) or "member_id" (single);
    optional keys: start_page, end_page, from_bookmark, tags.
    """
    member_ids = list()
    if "member_ids" in job:
        print("Multi Member IDs")
        member_ids = job["member_ids"]
    elif "member_id" in job:
        member_id = job["member_id"]
        member_ids.append(member_id)
    else:
        # Nothing to process without at least one member id.
        print(f"No member_id or member_ids found in {job_name}!")
        return

    start_page = 1
    if "start_page" in job:
        start_page = int(job["start_page"])
    # end_page stays 0 when absent -- presumably "no upper bound" downstream;
    # TODO confirm against PixivArtistHandler.process_member.
    end_page = 0
    if "end_page" in job:
        end_page = int(job["end_page"])

    from_bookmark = False
    if "from_bookmark" in job:
        from_bookmark = bool(job["from_bookmark"])

    tags = None
    if "tags" in job and len(job["tags"]) > 0:
        tags = job["tags"]

    for member_id in member_ids:
        PixivArtistHandler.process_member(caller,
                                          member_id=member_id,
                                          user_dir=job_option.rootDirectory,
                                          page=start_page,
                                          end_page=end_page,
                                          bookmark=from_bookmark,
                                          tags=tags,
                                          title_prefix=job_name,
                                          job_option=job_option)
def handle_images(caller: PixivUtil2, job, job_name, job_option):
    """Run an image-download job for one or many Pixiv illustration ids.

    The job dict accepts either "image_ids" (list) or "image_id" (single).
    """
    image_ids = list()
    if "image_ids" in job:
        image_ids = job["image_ids"]
        print(f"Found multiple images: {len(image_ids)}")
    elif "image_id" in job:
        image_id = job["image_id"]
        image_ids.append(image_id)
    else:
        print(f"No image_id or image_ids found in {job_name}!")
        return
    for image_id in image_ids:
        PixivImageHandler.process_image(caller,
                                        image_id=image_id,
                                        user_dir=job_option.rootDirectory,
                                        title_prefix=job_name,
                                        job_option=job_option)
    print("done.")
def handle_tags(caller, job, job_name, job_option):
    """Run a tag-search download job.

    Required job key: "tags" (non-empty). Optional keys: start_page, end_page,
    wild_card, title_caption, start_date/end_date (YYYY-MM-DD), member_id,
    bookmark_count, oldest_first, type_mode ('a', 'i' or 'm').

    Raises Exception on malformed dates or an unknown type_mode.
    """
    if "tags" in job and len(job["tags"]) > 0:
        tags = job["tags"]
    else:
        print(f"No tags found or empty tags in {job_name}!")
        # Bug fix: bail out here -- previously execution fell through and
        # crashed with NameError because ``tags`` was never assigned.
        return

    start_page = 1
    if "start_page" in job:
        start_page = int(job["start_page"])
    end_page = 0
    if "end_page" in job:
        end_page = int(job["end_page"])
    wild_card = True
    if "wild_card" in job:
        wild_card = bool(job["wild_card"])
    title_caption = False
    if "title_caption" in job:
        title_caption = bool(job["title_caption"])

    start_date = None
    if "start_date" in job and len(job["start_date"]) == 10:
        try:
            start_date = PixivHelper.check_date_time(job["start_date"])
        except BaseException:
            raise Exception(f"Invalid start_date: {job['start_date']} in {job_name}.")
    end_date = None
    if "end_date" in job and len(job["end_date"]) == 10:
        try:
            end_date = PixivHelper.check_date_time(job["end_date"])
        except BaseException:
            raise Exception(f"Invalid end_date: {job['end_date']} in {job_name}.")

    member_id = None
    if "member_id" in job:
        # Bug fix: this used to be ``bool(job["member_id"])``, which collapsed
        # the actual member id to True/False before it reached process_tags.
        member_id = job["member_id"]
    bookmark_count = None
    if "bookmark_count" in job:
        bookmark_count = int(job["bookmark_count"])
    oldest_first = False
    if "oldest_first" in job:
        oldest_first = bool(job["oldest_first"])
    type_mode = "a"
    if "type_mode" in job:
        if job["type_mode"] in {'a', 'i', 'm'}:
            type_mode = job["type_mode"]
        else:
            raise Exception(f"Invalid type_mode: {job['type_mode']} in {job_name}.")

    PixivTagsHandler.process_tags(caller,
                                  tags,
                                  page=start_page,
                                  end_page=end_page,
                                  wild_card=wild_card,
                                  title_caption=title_caption,
                                  start_date=start_date,
                                  end_date=end_date,
                                  use_tags_as_dir=job_option.useTagsAsDir,
                                  member_id=member_id,
                                  bookmark_count=bookmark_count,
                                  oldest_first=oldest_first,
                                  type_mode=type_mode,
                                  job_option=job_option)
def process_batch_job(caller: PixivUtil2):
    """Read batch_job.json and dispatch each enabled job to its handler.

    job_type mapping: '1' = members, '2' = images, '3' = tags.
    Jobs without "enabled": true or without a "job_type" are skipped.
    """
    caller.set_console_title("Batch Menu")
    if os.path.exists(_default_batch_filename):
        # NOTE(review): jobs_file is never closed; consider a with-block.
        jobs_file = open(_default_batch_filename, encoding="utf-8")
        # demjson tolerates relaxed JSON (comments, trailing commas, ...).
        jobs = demjson.decode(jobs_file.read())
        for job_name in jobs["jobs"]:
            print(f"Processing {job_name}")
            curr_job = jobs["jobs"][job_name]
            # Jobs must opt in explicitly with "enabled": true.
            if "enabled" not in curr_job or not bool(curr_job["enabled"]):
                print(f"Skipping {job_name} because not enabled.")
                continue
            if "job_type" not in curr_job:
                print(f"Cannot find job_type in {job_name}")
                continue
            # Merge global config with the job's "option" overrides.
            job_option = JobOption(curr_job, caller.__config__)
            if curr_job["job_type"] == '1':
                handle_members(caller, curr_job, job_name, job_option)
            elif curr_job["job_type"] == '2':
                handle_images(caller, curr_job, job_name, job_option)
            elif curr_job["job_type"] == '3':
                handle_tags(caller, curr_job, job_name, job_option)
            else:
                print(f"Unsupported job_type {curr_job['job_type']} in {job_name}")
    else:
        print(f"Cannot found {_default_batch_filename} in the application folder, see https://github.com/Nandaka/PixivUtil2/wiki/Using-Batch-Job-(Experimental) for example. ")
# restore original method
# PixivHelper.print_and_log = temp_printer
def notifier(level, msg, exception=None, newline=True, end=None):
    """Single-line console progress printer.

    Flattens *msg* onto one padded line and rewrites it in place with a
    carriage return. Messages at "debug" level are suppressed. The
    *exception*, *newline* and *end* parameters are accepted only for
    signature compatibility with the logger it stands in for.
    """
    tag = "" if level is None else level
    if tag == "debug":
        return
    flattened = msg.replace("\n", "")
    line = "{0:5} - {1}".format(tag, flattened).ljust(150)
    print(line, end='\r')
if __name__ == '__main__':
import PixivUtil2
process_batch_job(PixivUtil2)
| 37.735683 | 175 | 0.576815 |
7957c5f1a19a83c4ab3f87a3d345f55baffab2ef | 2,647 | py | Python | surveys/forms/users.py | GeorgeVelikov/Surffee | 1d89f423d9275aa34c5b51ebbf5457078cdc4d71 | [
"MIT"
] | null | null | null | surveys/forms/users.py | GeorgeVelikov/Surffee | 1d89f423d9275aa34c5b51ebbf5457078cdc4d71 | [
"MIT"
] | null | null | null | surveys/forms/users.py | GeorgeVelikov/Surffee | 1d89f423d9275aa34c5b51ebbf5457078cdc4d71 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from ..models import Researcher
class ResearcherCreationForm(UserCreationForm):
    """Registration form for Researcher users.

    Collects a username, an e-mail address and a double-entered password;
    the password is stored hashed via ``set_password``.
    """
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)

    def __init__(self, *args, **kwargs):
        # first call parent's constructor
        super(ResearcherCreationForm, self).__init__(*args, **kwargs)
        # E-mail must be filled in; also push minimum lengths to the widgets
        # for client-side validation.
        self.fields['email'].required = True
        self.fields['username'].widget.attrs['minlength'] = 6
        self.fields['password1'].widget.attrs['minlength'] = 8

    class Meta(UserCreationForm.Meta):
        # Bug fix: Meta must extend ``UserCreationForm.Meta`` (the options
        # holder), not the ``UserCreationForm`` form class itself.
        model = Researcher
        fields = ('username', 'password1', 'password2', 'email',)
        help_texts = {
            'username': None,
        }

    def clean_password2(self):
        """Check that the two password entries match; return the confirmation."""
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Passwords don't match")
        return password2

    def save(self, commit=True):
        """Save the provided password in hashed format before persisting."""
        user = super(ResearcherCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class ResearcherChangeForm(UserChangeForm):
    """Form to change a Researcher user's password.

    NOTE(review): this subclasses UserChangeForm yet re-declares the two
    password fields and re-hashes the password on save -- confirm this is the
    intended base class rather than a password-change form.
    """
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)

    class Meta:
        model = Researcher
        # password1/password2 here are form-declared fields, not model fields;
        # presumably Django resolves them from the declarations above -- verify.
        fields = ('username', 'password1', 'password2', 'email', )
        help_texts = {
            'username': None,
        }

    def clean_password2(self):
        # Check that the two password entries match
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Passwords don't match")
        return password2

    def save(self, commit=True):
        # Save the provided password in hashed format
        user = super(ResearcherChangeForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
| 34.828947 | 90 | 0.653948 |
7957c7418d536c63c3c424f2bdcd96d23b2f14c6 | 264 | py | Python | Pacote de Conteudo/Mundo 02/Exercicios/ex027.py | Michelle-On/ExerciciosPython_2020-21 | 61615237a310c15923138575ebc65f6a15b301ab | [
"MIT"
] | null | null | null | Pacote de Conteudo/Mundo 02/Exercicios/ex027.py | Michelle-On/ExerciciosPython_2020-21 | 61615237a310c15923138575ebc65f6a15b301ab | [
"MIT"
] | null | null | null | Pacote de Conteudo/Mundo 02/Exercicios/ex027.py | Michelle-On/ExerciciosPython_2020-21 | 61615237a310c15923138575ebc65f6a15b301ab | [
"MIT"
] | null | null | null | nome = str(input('Digite seu nome completo: ')).strip()
n = nome.split()
print('Prazer em te conhecer {}'.format(nome))
print('Seu primeiro nome é {}'.format(n[0]))
print('Seu ultimo nome é {} '.format(n[len(n)-1]))
#ou print('Seu ultimo nome é {} '.format(n[-1])) | 44 | 55 | 0.643939 |
7957c79b2b10df815f54dcda10f1d4a016e54093 | 7,617 | py | Python | wb_animation_action/competition.py | cesc-folch/webots-animation-action | fb0636b88413bc04963f55484f9a9e180c8bccd8 | [
"Apache-2.0"
] | 11 | 2020-12-14T10:22:06.000Z | 2022-01-03T09:41:06.000Z | wb_animation_action/competition.py | cesc-folch/webots-animation-action | fb0636b88413bc04963f55484f9a9e180c8bccd8 | [
"Apache-2.0"
] | 12 | 2020-10-05T16:04:09.000Z | 2021-09-03T13:33:46.000Z | wb_animation_action/competition.py | cesc-folch/webots-animation-action | fb0636b88413bc04963f55484f9a9e180c8bccd8 | [
"Apache-2.0"
] | 2 | 2021-04-15T07:04:20.000Z | 2021-05-19T10:39:24.000Z | #!/usr/bin/env python3
#
# Copyright 1996-2020 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import json
import time
import random
import string
import subprocess
from glob import glob
from shutil import copyfile
import wb_animation_action.utils
from distutils.dir_util import copy_tree
from wb_animation_action.config import COMPETITION_TIMEOUT, RESOURCES_DIRECTORY, ADD_DUMMY_TO_COMPETITION
from wb_animation_action.animation import generate_animation_for_world
from wb_animation_action.utils.webots import compile_controllers
from wb_animation_action.utils.github import accept_all_invitations
class Competitor:
    """One entrant in the competition ladder.

    When *git* is a GitHub URL, the owner and repository name are parsed out
    of it; otherwise the competitor is the built-in "dummy" participant.
    """
    def __init__(self, git, rank, controller_name=None):
        self.git = git
        self.rank = rank
        self.username = None
        self.repository_name = None
        if git:
            # Take the first "owner/repo" pair appearing in the URL.
            matches = re.findall(
                r'github\.com\/([a-zA-Z0-9\-\_]*)\/([a-zA-Z0-9\-\_]*)', git
            )
            self.username, self.repository_name = matches[0]
        self.controller_name = (
            self.__get_controller_name() if controller_name is None
            else controller_name
        )

    def __get_id(self):
        has_repo = self.username and self.repository_name
        return f'{self.username}_{self.repository_name}' if has_repo else 'dummy'

    def __get_controller_name(self):
        # Random 5-char suffix keeps controller directories unique per run.
        alphabet = string.ascii_uppercase + string.digits + string.ascii_lowercase
        suffix = ''.join(random.choice(alphabet) for _ in range(5))
        return f'wb_{self.__get_id()}_{suffix}'

    def get_dict(self):
        """Serializable summary used in the published results.json."""
        return {
            'id': self.__get_id(),
            'rank': self.rank,
            'username': self.username,
            'repository_name': self.repository_name
        }

    def __str__(self):
        return self.__get_id()
def _get_competitors():
    """Build the ordered competitor list.

    Ranks start at 1. When ADD_DUMMY_TO_COMPETITION is set, the built-in dummy
    competitor takes rank 1 and real entrants (one GitHub URL per line in
    competitors.txt) are ranked after it.
    """
    competitors = []
    rank_start = 1
    if ADD_DUMMY_TO_COMPETITION:
        competitors.append(
            Competitor(
                git=None,
                rank=rank_start,
                controller_name='dummy'
            )
        )
        rank_start += 1
    with open('competitors.txt', 'r') as f:
        for rank, competitor_url in enumerate(f.readlines()):
            competitors.append(
                Competitor(
                    git=competitor_url.strip(),
                    rank=rank+rank_start
                )
            )
    return competitors
def _set_controller_name_to_world(world_file, robot_name, controller_name):
world_content = None
with open(world_file, 'r') as f:
world_content = f.read()
controller_expression = re.compile(rf'(DEF {robot_name}.*?controller\ \")(.*?)(\")', re.MULTILINE | re.DOTALL)
new_world_content = re.sub(controller_expression, rf'\1{controller_name}\3', world_content)
with open(world_file, 'w') as f:
f.write(new_world_content)
def _clone_controllers(competitors):
    """Clone each competitor's GitHub repository into controllers/<name>.

    Authenticates with the bot account credentials taken from BOT_USERNAME /
    BOT_PAT_KEY environment variables.
    """
    # Clone controller content
    for competitor in competitors:
        if competitor.git is not None:
            controller_path = os.path.join('controllers', competitor.controller_name)
            # NOTE(review): the PAT is embedded in a shell command string
            # (shell=True); consider an argument list or a credential helper
            # to keep the token out of shell history / process listings.
            repo = 'https://{}:{}@github.com/{}/{}'.format(
                os.environ['BOT_USERNAME'],
                os.environ['BOT_PAT_KEY'],
                competitor.username,
                competitor.repository_name
            )
            subprocess.check_output(f'git clone {repo} {controller_path}', shell=True)

            # Update controller's internal name (Python)
            python_filename = os.path.join(controller_path, 'participant_controller.py')
            if os.path.exists(python_filename):
                os.rename(python_filename, os.path.join(controller_path, f'{competitor.controller_name}.py'))
def generate_competition(competition_config):
    """Run a ladder competition and publish animations plus results.

    Plays adjacent competitors against each other from the bottom of the
    ranking upwards; after each match the winner holds the better rank.
    Artifacts are pushed to git branches via push_directory_to_branch.
    """
    world_file = competition_config['world']
    competitors = _get_competitors()
    matches = []

    # Accept all invitations
    accept_all_invitations(os.environ['BOT_PAT_KEY'])

    # Prepare directories
    os.makedirs('/tmp/output', exist_ok=True)

    # Prepare controllers
    _clone_controllers(competitors)
    compile_controllers()

    lower_competitor_index = len(competitors) - 1
    while lower_competitor_index > 0:
        competitor_a = competitors[lower_competitor_index - 1]
        competitor_b = competitors[lower_competitor_index]

        # Add two participants to the world
        _set_controller_name_to_world(world_file, 'R0', competitor_a.controller_name)
        _set_controller_name_to_world(world_file, 'R1', competitor_b.controller_name)

        # Run match
        match_directory = f'{competitor_a.controller_name}_vs_{competitor_b.controller_name}'
        destination_directory = os.path.join(
            '/tmp',
            'animation',
            match_directory
        )
        generate_animation_for_world(world_file, COMPETITION_TIMEOUT, destination_directory=destination_directory)
        # Rename the generated artifacts after the match and drop the HTML viewer.
        json_file = glob(os.path.join(destination_directory, '*.json')).pop()
        os.rename(json_file, os.path.join(destination_directory, match_directory + '.json'))
        x3d_file = glob(os.path.join(destination_directory, '*.x3d')).pop()
        os.rename(x3d_file, os.path.join(destination_directory, match_directory + '.x3d'))
        html_file = glob(os.path.join(destination_directory, '*.html')).pop()
        os.remove(html_file)
        copy_tree(destination_directory, '/tmp/output')

        # Update ranks
        winner = None
        points = []
        # The simulation is expected to leave a "key:value" report in
        # /tmp/results.txt ("winner" and "points" lines; '#' lines ignored).
        with open('/tmp/results.txt', 'r') as f:
            for line in f.readlines():
                pair = line.split(':')
                if len(pair) != 2 or line.startswith('#'):
                    continue
                key, value = pair
                if key == 'winner':
                    winner = int(value)
                elif key == 'points':
                    points = [float(x) for x in value.split(',')]
        if winner == 1:
            # Competitor B won: swap the two adjacent ranks.
            competitor_a.rank, competitor_b.rank = competitor_b.rank, competitor_a.rank
        competitors = sorted(competitors, key=lambda c: c.rank)

        # Store the results
        matches.append({
            'id': match_directory,
            'competitor_a': str(competitor_a),
            'competitor_b': str(competitor_b),
            'winner': 'competitor_b' if winner == 1 else 'competitor_a',
            'points': points
        })

        # Prepare next iteration
        lower_competitor_index -= 1

    # Write animation
    wb_animation_action.utils.git.push_directory_to_branch('/tmp/output', clean=True)

    # Write results
    os.makedirs('/tmp/results', exist_ok=True)
    results = {
        'ranking': [c.get_dict() for c in competitors],
        'matches': matches,
        'timestamp': time.time()
    }
    with open(os.path.join('/tmp/results', 'results.json'), 'w') as f:
        f.write(json.dumps(results))
    copyfile(os.path.join(RESOURCES_DIRECTORY, 'competition.html'), '/tmp/results/index.html')
    wb_animation_action.utils.git.push_directory_to_branch('/tmp/results')
| 36.444976 | 114 | 0.642641 |
7957c79ec717a3f40f22ff057c0191968712aee4 | 6,360 | py | Python | cvxpy/constraints/constraint.py | Hennich/cvxpy | 4dfd6d69ace76abf57d8b1d63db0556dee96e24f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cvxpy/constraints/constraint.py | Hennich/cvxpy | 4dfd6d69ace76abf57d8b1d63db0556dee96e24f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cvxpy/constraints/constraint.py | Hennich/cvxpy | 4dfd6d69ace76abf57d8b1d63db0556dee96e24f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy.utilities as u
import cvxpy.lin_ops.lin_utils as lu
from cvxpy.expressions import cvxtypes
import abc
import numpy as np
class Constraint(u.Canonical):
    """The base class for constraints.

    A constraint is an equality, inequality, or more generally a generalized
    inequality that is imposed upon a mathematical expression or a list of
    thereof.

    Parameters
    ----------
    args : list
        A list of expression trees.
    constr_id : int
        A unique id for the constraint.
    """

    # Kept for Python 2 compatibility; the class also defines both
    # __nonzero__ (py2) and __bool__ (py3) below.
    __metaclass__ = abc.ABCMeta

    def __init__(self, args, constr_id=None):
        # TODO cast constants.
        # self.args = [cvxtypes.expression().cast_to_const(arg) for arg in args]
        self.args = args
        if constr_id is None:
            self.constr_id = lu.get_id()
        else:
            self.constr_id = constr_id
        # One dual variable per constrained expression, shape-matched to it.
        self.dual_variables = [cvxtypes.variable()(arg.shape) for arg in args]
        super(Constraint, self).__init__()

    def __str__(self):
        """Returns a string showing the mathematical constraint.
        """
        return self.name()

    def __repr__(self):
        """Returns a string with information about the constraint.
        """
        return "%s(%s)" % (self.__class__.__name__,
                           repr(self.args[0]))

    @property
    def shape(self):
        """int : The shape of the constrained expression."""
        return self.args[0].shape

    @property
    def size(self):
        """int : The size of the constrained expression."""
        return self.args[0].size

    def is_real(self):
        """Is the Leaf real valued?
        """
        return not self.is_complex()

    def is_imag(self):
        """Is the Leaf imaginary?
        """
        return all(arg.is_imag() for arg in self.args)

    def is_complex(self):
        """Is the Leaf complex valued?
        """
        return any(arg.is_complex() for arg in self.args)

    @abc.abstractmethod
    def is_dcp(self):
        """Checks whether the constraint is DCP.

        Returns
        -------
        bool
            True if the constraint is DCP, False otherwise.
        """
        return NotImplemented

    @abc.abstractproperty
    def residual(self):
        """The residual of the constraint.

        Returns
        -------
        NumPy.ndarray
            The residual, or None if the constrained expression does not have
            a value.
        """
        return NotImplemented

    def violation(self):
        """The numeric residual of the constraint.

        The violation is defined as the distance between the constrained
        expression's value and its projection onto the domain of the
        constraint:

        .. math::

            ||\Pi(v) - v||_2^2

        where :math:`v` is the value of the constrained expression and
        :math:`\\Pi` is the projection operator onto the constraint's domain .

        Returns
        -------
        NumPy.ndarray
            The residual value.

        Raises
        ------
        ValueError
            If the constrained expression does not have a value associated
            with it.
        """
        residual = self.residual
        if residual is None:
            raise ValueError("Cannot compute the violation of an constraint "
                             "whose expression is None-valued.")
        return residual

    def value(self, tolerance=1e-8):
        """Checks whether the constraint violation is less than a tolerance.

        Parameters
        ----------
        tolerance : float
            The absolute tolerance to impose on the violation.

        Returns
        -------
        bool
            True if the violation is less than ``tolerance``, False
            otherwise.

        Raises
        ------
        ValueError
            If the constrained expression does not have a value associated
            with it.
        """
        residual = self.residual
        if residual is None:
            raise ValueError("Cannot compute the value of an constraint "
                             "whose expression is None-valued.")
        return np.all(residual <= tolerance)

    @property
    def id(self):
        """Wrapper for compatibility with variables.
        """
        return self.constr_id

    def get_data(self):
        """Data needed to copy.
        """
        return [self.id]

    def __nonzero__(self):
        """Raises an exception when called.

        Python 2 version.

        Called when evaluating the truth value of the constraint.
        Raising an error here prevents writing chained constraints.
        """
        return self._chain_constraints()

    def _chain_constraints(self):
        """Raises an error due to chained constraints.
        """
        raise Exception(
            ("Cannot evaluate the truth value of a constraint or "
             "chain constraints, e.g., 1 >= x >= 0.")
        )

    def __bool__(self):
        """Raises an exception when called.

        Python 3 version.

        Called when evaluating the truth value of the constraint.
        Raising an error here prevents writing chained constraints.
        """
        return self._chain_constraints()

    # The value of the dual variable.
    @property
    def dual_value(self):
        """NumPy.ndarray : The value of the dual variable.
        """
        return self.dual_variables[0].value

    # TODO(akshayka): Rename to save_dual_value to avoid collision with
    # value as defined above.
    def save_value(self, value):
        """Save the value of the dual variable for the constraint's parent.

        Args:
            value: The value of the dual variable.
        """
        self.dual_variables[0].save_value(value)
| 28.520179 | 80 | 0.59827 |
7957c7ba4ce6a5d34fa81576399cf0d5958e2a63 | 805 | py | Python | StudentManager/student.py | sudeep0901/python | 7a50af12e72d21ca4cad7f2afa4c6f929552043f | [
"MIT"
] | null | null | null | StudentManager/student.py | sudeep0901/python | 7a50af12e72d21ca4cad7f2afa4c6f929552043f | [
"MIT"
] | 3 | 2019-12-26T05:13:55.000Z | 2020-03-07T06:59:56.000Z | StudentManager/student.py | sudeep0901/python | 7a50af12e72d21ca4cad7f2afa4c6f929552043f | [
"MIT"
] | null | null | null | students = []
# all methods are public no access modifiers
class Student:
pass
# Class Attribute
schoolName = "New Delhli Public School"
# adding methods to classs
# Constructer Method
def __init__(self, name, student_id=1000):
# student = {"name": name, "student_id": student_id}
self.name = name
self.studentId = student_id
students.append(self)
# def add_student(self, name, student_id=1000):
# student = {"name": name, "student_id": student_id}
# students.append(student)
def __str__(self):
return "Students name {0} and student ID {1}".format(self.name, self.studentId)
def get_name_captalize(self):
return self.name.capitalize()
def get_School_name(self):
return self.schoolName
| 25.967742 | 87 | 0.645963 |
7957c7d80674027cca9cf267e8c49474d9fc1c49 | 1,401 | py | Python | stac_fastapi_sqlalchemy/setup.py | TomAugspurger/stac-fastapi | d660438dd0f6d4e0ecb69bf806d6af07fb31fc7f | [
"MIT"
] | null | null | null | stac_fastapi_sqlalchemy/setup.py | TomAugspurger/stac-fastapi | d660438dd0f6d4e0ecb69bf806d6af07fb31fc7f | [
"MIT"
] | null | null | null | stac_fastapi_sqlalchemy/setup.py | TomAugspurger/stac-fastapi | d660438dd0f6d4e0ecb69bf806d6af07fb31fc7f | [
"MIT"
] | null | null | null | """stac-fastapi sqlalchemy submodule."""
import os
from glob import glob
from imp import load_source
from os.path import basename, splitext
from setuptools import find_namespace_packages, setup
name = "stac-sqlalchemy-sqlalchemy"
description = "Sqlalchemy subpackage of fastapi-stac, contains a postgres backend implementation using sqlalchemy."
__version__ = load_source(
"stac_fastapi.sqlalchemy.version",
os.path.join(os.path.dirname(__file__), "stac_fastapi/sqlalchemy/version.py"),
).__version__ # type:ignore
install_requires = [
"stac-fastapi-types",
"sqlakeyset",
"geoalchemy2<0.8.0",
"sqlalchemy==1.3.23",
"shapely",
"psycopg2-binary",
]
with open(
os.path.join(os.path.abspath(os.path.dirname(__file__)), "README.md")
) as readme_file:
readme = readme_file.read()
setup(
name=name,
description=description,
version=__version__,
long_description=readme,
long_description_content_type="text/markdown",
author=u"Arturo Engineering",
author_email="engineering@arturo.ai",
url="https://github.com/stac-utils/stac-fastapi.git",
packages=find_namespace_packages(),
py_modules=[splitext(basename(path))[0] for path in glob("stac_fastapi/*.py")],
include_package_data=False,
install_requires=install_requires,
license="MIT",
keywords=["stac", "fastapi", "imagery", "raster", "catalog", "STAC"],
)
| 29.808511 | 115 | 0.723769 |
7957c979266bb6a6d63c2167ad4118516aef6a89 | 1,282 | py | Python | web_app/fta_solutions_app/migrations/0025_auto_20170620_1508.py | mycyzs/fta | a50a3c498c39b14e7df4a0a960c2a1499b1ec6bb | [
"Apache-2.0"
] | null | null | null | web_app/fta_solutions_app/migrations/0025_auto_20170620_1508.py | mycyzs/fta | a50a3c498c39b14e7df4a0a960c2a1499b1ec6bb | [
"Apache-2.0"
] | null | null | null | web_app/fta_solutions_app/migrations/0025_auto_20170620_1508.py | mycyzs/fta | a50a3c498c39b14e7df4a0a960c2a1499b1ec6bb | [
"Apache-2.0"
] | 2 | 2020-02-12T03:59:28.000Z | 2021-05-27T05:34:01.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alters ``Advice.status`` to a two-choice CharField (handled / unhandled)."""

    dependencies = [
        ('fta_solutions_app', '0024_auto_20170620_1440'),
    ]

    operations = [
        migrations.AlterField(
            model_name='advice',
            name='status',
            # Choices: b'ok' -> "risk already handled offline", b'no' -> "unhandled".
            field=models.CharField(max_length=32, verbose_name='\u4f18\u5316\u9879\u72b6\u6001', choices=[(b'ok', '\u5df2\u7ecf\u7ebf\u4e0b\u5904\u7406\u8be5\u98ce\u9669'), (b'no', '\u672a\u5904\u7406')]),
        ),
    ]
| 47.481481 | 305 | 0.728549 |
7957ca04f58a8d32518e92aabb7f3bb03f95c425 | 26,053 | py | Python | falcon/api.py | swistakm/falcon | 229918c6e34aece9234466fa255ae8ac2d4a7c16 | [
"Apache-2.0"
] | null | null | null | falcon/api.py | swistakm/falcon | 229918c6e34aece9234466fa255ae8ac2d4a7c16 | [
"Apache-2.0"
] | null | null | null | falcon/api.py | swistakm/falcon | 229918c6e34aece9234466fa255ae8ac2d4a7c16 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 by Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import re
import six
from falcon import api_helpers as helpers
from falcon import DEFAULT_MEDIA_TYPE
from falcon.http_error import HTTPError
from falcon.http_status import HTTPStatus
from falcon.request import Request, RequestOptions
from falcon.response import Response
import falcon.responders
from falcon import routing
import falcon.status_codes as status
class API(object):
"""This class is the main entry point into a Falcon-based app.
Each API instance provides a callable WSGI interface and a routing engine.
Args:
media_type (str, optional): Default media type to use as the value for
the Content-Type header on responses (default 'application/json').
middleware(object or list, optional): One or more objects
(instantiated classes) that implement the following middleware
component interface::
class ExampleComponent(object):
def process_request(self, req, resp):
\"""Process the request before routing it.
Args:
req: Request object that will eventually be
routed to an on_* responder method.
resp: Response object that will be routed to
the on_* responder.
\"""
def process_resource(self, req, resp, resource, params):
\"""Process the request and resource *after* routing.
Note:
This method is only called when the request matches
a route to a resource.
Args:
req: Request object that will be passed to the
routed responder.
resp: Response object that will be passed to the
responder.
resource: Resource object to which the request was
routed. May be None if no route was found for
the request.
params: A dict-like object representing any
additional params derived from the route's URI
template fields, that will be passed to the
resource's responder method as keyword
arguments.
\"""
def process_response(self, req, resp, resource)
\"""Post-processing of the response (after routing).
Args:
req: Request object.
resp: Response object.
resource: Resource object to which the request was
routed. May be None if no route was found
for the request.
\"""
See also :ref:`Middleware <middleware>`.
request_type (Request, optional): ``Request``-like class to use instead
of Falcon's default class. Among other things, this feature
affords inheriting from ``falcon.request.Request`` in order
to override the ``context_type`` class variable.
(default ``falcon.request.Request``)
response_type (Response, optional): ``Response``-like class to use
instead of Falcon's default class. (default
``falcon.response.Response``)
router (object, optional): An instance of a custom router
to use in lieu of the default engine.
See also: :ref:`Routing <routing>`.
Attributes:
req_options (RequestOptions): A set of behavioral options related to
incoming requests.
"""
# PERF(kgriffs): Reference via self since that is faster than
# module global...
_BODILESS_STATUS_CODES = set([
status.HTTP_100,
status.HTTP_101,
status.HTTP_204,
status.HTTP_304
])
_STREAM_BLOCK_SIZE = 8 * 1024 # 8 KiB
__slots__ = ('_request_type', '_response_type',
'_error_handlers', '_media_type', '_router', '_sinks',
'_serialize_error', 'req_options', '_middleware')
def __init__(self, media_type=DEFAULT_MEDIA_TYPE,
request_type=Request, response_type=Response,
middleware=None, router=None):
self._sinks = []
self._media_type = media_type
# set middleware
self._middleware = helpers.prepare_middleware(middleware)
self._router = router or routing.DefaultRouter()
self._request_type = request_type
self._response_type = response_type
self._error_handlers = []
self._serialize_error = helpers.default_serialize_error
self.req_options = RequestOptions()
    def __call__(self, env, start_response):
        """WSGI `app` method.

        Makes instances of API callable from a WSGI server. May be used to
        host an API or called directly in order to simulate requests when
        testing the API.

        See also PEP 3333.

        Args:
            env (dict): A WSGI environment dictionary
            start_response (callable): A WSGI helper function for setting
                status and headers on a response.

        Returns:
            An iterable yielding the response body (per PEP 3333).
        """
        req = self._request_type(env, options=self.req_options)
        resp = self._response_type()
        resource = None
        middleware_stack = []  # Keep track of executed components
        params = {}
        try:
            # NOTE(kgriffs): Using an inner try..except in order to
            # address the case when err_handler raises HTTPError.
            # NOTE(kgriffs): Coverage is giving false negatives,
            # so disabled on relevant lines. All paths are tested
            # afaict.
            try:
                # NOTE(ealogar): The execution of request middleware should be
                # before routing. This will allow request mw to modify path.
                self._call_req_mw(middleware_stack, req, resp)
                # NOTE(warsaw): Moved this to inside the try except because it
                # is possible when using object-based traversal for
                # _get_responder() to fail. An example is a case where an
                # object does not have the requested next-hop child resource.
                # In that case, the object being asked to dispatch to its
                # child will raise an HTTP exception signalling the problem,
                # e.g. a 404.
                responder, params, resource = self._get_responder(req)
                # NOTE(kgriffs): If the request did not match any route,
                # a default responder is returned and the resource is
                # None.
                if resource is not None:
                    self._call_rsrc_mw(middleware_stack, req, resp, resource,
                                       params)
                responder(req, resp, **params)
                self._call_resp_mw(middleware_stack, req, resp, resource)
            except Exception as ex:
                # Give registered error handlers first crack at the
                # exception; the most recently registered matching type
                # wins (handlers are prepended in add_error_handler).
                for err_type, err_handler in self._error_handlers:
                    if isinstance(ex, err_type):
                        err_handler(ex, req, resp, params)
                        self._call_resp_mw(middleware_stack, req, resp,
                                           resource)
                        break
                else:
                    # PERF(kgriffs): This will propagate HTTPError to
                    # the handler below. It makes handling HTTPError
                    # less efficient, but that is OK since error cases
                    # don't need to be as fast as the happy path, and
                    # indeed, should perhaps be slower to create
                    # backpressure on clients that are issuing bad
                    # requests.
                    # NOTE(ealogar): This will executed remaining
                    # process_response when no error_handler is given
                    # and for whatever exception. If an HTTPError is raised
                    # remaining process_response will be executed later.
                    self._call_resp_mw(middleware_stack, req, resp, resource)
                    raise
        except HTTPStatus as ex:
            self._compose_status_response(req, resp, ex)
            self._call_resp_mw(middleware_stack, req, resp, resource)
        except HTTPError as ex:
            self._compose_error_response(req, resp, ex)
            self._call_resp_mw(middleware_stack, req, resp, resource)
        #
        # Set status and headers
        #
        # HEAD responses and bodiless status codes never carry a body.
        if req.method == 'HEAD' or resp.status in self._BODILESS_STATUS_CODES:
            body = []
        else:
            body, length = self._get_body(resp, env.get('wsgi.file_wrapper'))
            if length is not None:
                resp._headers['content-length'] = str(length)
        # NOTE(kgriffs): Based on wsgiref.validate's interpretation of
        # RFC 2616, as commented in that module's source code. The
        # presence of the Content-Length header is not similarly
        # enforced.
        if resp.status in (status.HTTP_204, status.HTTP_304):
            media_type = None
        else:
            media_type = self._media_type
        headers = resp._wsgi_headers(media_type)
        # Return the response per the WSGI spec
        start_response(resp.status, headers)
        return body
def add_route(self, uri_template, resource, *args, **kwargs):
"""Associates a templatized URI path with a resource.
A resource is an instance of a class that defines various
"responder" methods, one for each HTTP method the resource
allows. Responder names start with `on_` and are named according to
which HTTP method they handle, as in `on_get`, `on_post`, `on_put`,
etc.
If your resource does not support a particular
HTTP method, simply omit the corresponding responder and
Falcon will reply with "405 Method not allowed" if that
method is ever requested.
Responders must always define at least two arguments to receive
request and response objects, respectively. For example::
def on_post(self, req, resp):
pass
In addition, if the route's template contains field
expressions, any responder that desires to receive requests
for that route must accept arguments named after the respective
field names defined in the template. A field expression consists
of a bracketed field name.
For example, given the following template::
/user/{name}
A PUT request to "/user/kgriffs" would be routed to::
def on_put(self, req, resp, name):
pass
Individual path segments may contain one or more field expressions.
For example::
/repos/{org}/{repo}/compare/{usr0}:{branch0}...{usr1}:{branch1}
Args:
uri_template (str): A templatized URI. Care must be
taken to ensure the template does not mask any sink
patterns, if any are registered (see also `add_sink`).
resource (instance): Object which represents a REST
resource. Falcon will pass "GET" requests to on_get,
"PUT" requests to on_put, etc. If any HTTP methods are not
supported by your resource, simply don't define the
corresponding request handlers, and Falcon will do the right
thing.
Note:
Any additional args and kwargs not defined above are passed
through to the underlying router's ``add_route()`` method. The
default router does not expect any additional arguments, but
custom routers may take advantage of this feature to receive
additional options when setting up routes.
"""
# NOTE(richardolsson): Doing the validation here means it doesn't have
# to be duplicated in every future router implementation.
if not isinstance(uri_template, six.string_types):
raise TypeError('uri_template is not a string')
if not uri_template.startswith('/'):
raise ValueError("uri_template must start with '/'")
if '//' in uri_template:
raise ValueError("uri_template may not contain '//'")
method_map = routing.create_http_method_map(resource)
self._router.add_route(uri_template, method_map, resource, *args,
**kwargs)
def add_sink(self, sink, prefix=r'/'):
"""Registers a sink method for the API.
If no route matches a request, but the path in the requested URI
matches a sink prefix, Falcon will pass control to the
associated sink, regardless of the HTTP method requested.
Using sinks, you can drain and dynamically handle a large number
of routes, when creating static resources and responders would be
impractical. For example, you might use a sink to create a smart
proxy that forwards requests to one or more backend services.
Args:
sink (callable): A callable taking the form ``func(req, resp)``.
prefix (str): A regex string, typically starting with '/', which
will trigger the sink if it matches the path portion of the
request's URI. Both strings and precompiled regex objects
may be specified. Characters are matched starting at the
beginning of the URI path.
Note:
Named groups are converted to kwargs and passed to
the sink as such.
Warning:
If the prefix overlaps a registered route template,
the route will take precedence and mask the sink
(see also `add_route`).
"""
if not hasattr(prefix, 'match'):
# Assume it is a string
prefix = re.compile(prefix)
# NOTE(kgriffs): Insert at the head of the list such that
# in the case of a duplicate prefix, the last one added
# is preferred.
self._sinks.insert(0, (prefix, sink))
def add_error_handler(self, exception, handler=None):
"""Registers a handler for a given exception error type.
Args:
exception (type): Whenever an error occurs when handling a request
that is an instance of this exception class, the associated
handler will be called.
handler (callable): A function or callable object taking the form
``func(ex, req, resp, params)``.
If not specified explicitly, the handler will default to
``exception.handle``, where ``exception`` is the error
type specified above, and ``handle`` is a static method
(i.e., decorated with @staticmethod) that accepts
the same params just described. For example::
class CustomException(CustomBaseException):
@staticmethod
def handle(ex, req, resp, params):
# TODO: Log the error
# Convert to an instance of falcon.HTTPError
raise falcon.HTTPError(falcon.HTTP_792)
Note:
A handler can either raise an instance of ``HTTPError``
or modify `resp` manually in order to communicate
information about the issue to the client.
"""
if handler is None:
try:
handler = exception.handle
except AttributeError:
raise AttributeError('handler must either be specified '
'explicitly or defined as a static'
'method named "handle" that is a '
'member of the given exception class.')
# Insert at the head of the list in case we get duplicate
# adds (will cause the most recently added one to win).
self._error_handlers.insert(0, (exception, handler))
def set_error_serializer(self, serializer):
"""Override the default serializer for instances of HTTPError.
When a responder raises an instance of HTTPError, Falcon converts
it to an HTTP response automatically. The default serializer
supports JSON and XML, but may be overridden by this method to
use a custom serializer in order to support other media types.
The ``falcon.HTTPError`` class contains helper methods, such as
`to_json()` and `to_dict()`, that can be used from within
custom serializers. For example::
def my_serializer(req, resp, exception):
representation = None
preferred = req.client_prefers(('application/x-yaml',
'application/json'))
if preferred is not None:
if preferred == 'application/json':
representation = exception.to_json()
else:
representation = yaml.dump(exception.to_dict(),
encoding=None)
resp.body = representation
resp.content_type = preferred
Note:
If a custom media type is used and the type includes a
"+json" or "+xml" suffix, the default serializer will
convert the error to JSON or XML, respectively. If this
is not desirable, a custom error serializer may be used
to override this behavior.
Args:
serializer (callable): A function taking the form
``func(req, resp, exception)``, where `req` is the request
object that was passed to the responder method, `resp` is
the response object, and `exception` is an instance of
``falcon.HTTPError``.
"""
if len(inspect.getargspec(serializer).args) == 2:
serializer = helpers.wrap_old_error_serializer(serializer)
self._serialize_error = serializer
    # ------------------------------------------------------------------------
    # Helpers that require self
    # ------------------------------------------------------------------------
    def _get_responder(self, req):
        """Searches routes for a matching responder.

        Args:
            req: The request object.

        Returns:
            A 3-member tuple consisting of a responder callable,
            a ``dict`` containing parsed path fields (if any were specified in
            the matching route's URI template), and a reference to the
            responder's resource instance.

        Note:
            If a responder was matched to the given URI, but the HTTP
            method was not found in the method_map for the responder,
            the responder callable element of the returned tuple will be
            `falcon.responder.bad_request`.

            Likewise, if no responder was matched for the given URI, then
            the responder callable element of the returned tuple will be
            `falcon.responder.path_not_found`
        """
        path = req.path
        method = req.method
        route = self._router.find(path)
        if route is not None:
            resource, method_map, params = route
        else:
            # NOTE(kgriffs): Older routers may indicate that no route
            # was found by returning (None, None, None). Therefore, we
            # normalize resource as the flag to indicate whether or not
            # a route was found, for the sake of backwards-compat.
            resource = None
        if resource is not None:
            # A route matched; dispatch on the HTTP method.
            try:
                responder = method_map[method]
            except KeyError:
                responder = falcon.responders.bad_request
        else:
            # No route matched; fall back to the first sink whose prefix
            # matches the path, or 404 if none does.
            params = {}
            for pattern, sink in self._sinks:
                m = pattern.match(path)
                if m:
                    params = m.groupdict()
                    responder = sink
                    break
            else:
                responder = falcon.responders.path_not_found
        return (responder, params, resource)
    def _compose_status_response(self, req, resp, http_status):
        """Composes a response for the given HTTPStatus instance.

        Copies status, headers and (possibly None) body from
        *http_status* onto *resp*.
        """
        # PERF(kgriffs): The code to set the status and headers is identical
        # to that used in _compose_error_response(), but refactoring in the
        # name of DRY isn't worth the extra CPU cycles.
        resp.status = http_status.status
        if http_status.headers is not None:
            resp.set_headers(http_status.headers)
        # NOTE(kgriffs): If http_status.body is None, that's OK because
        # it's acceptable to set resp.body to None (to indicate no body).
        resp.body = http_status.body
    def _compose_error_response(self, req, resp, error):
        """Composes a response for the given HTTPError instance.

        Unlike HTTPStatus, an HTTPError's body is produced by the
        configured error serializer (see set_error_serializer), and only
        when the error declares it has a representation.
        """
        resp.status = error.status
        if error.headers is not None:
            resp.set_headers(error.headers)
        if error.has_representation:
            self._serialize_error(req, resp, error)
def _call_req_mw(self, stack, req, resp):
"""Run process_request middleware methods."""
for component in self._middleware:
process_request, _, _ = component
if process_request is not None:
process_request(req, resp)
# Put executed component on the stack
stack.append(component) # keep track from outside
def _call_rsrc_mw(self, stack, req, resp, resource, params):
"""Run process_resource middleware methods."""
for component in self._middleware:
_, process_resource, _ = component
if process_resource is not None:
process_resource(req, resp, resource, params)
def _call_resp_mw(self, stack, req, resp, resource):
"""Run process_response middleware."""
while stack:
_, _, process_response = stack.pop()
if process_response is not None:
process_response(req, resp, resource)
    # PERF(kgriffs): Moved from api_helpers since it is slightly faster
    # to call using self, and this function is called for most
    # requests.
    def _get_body(self, resp, wsgi_file_wrapper=None):
        """Converts resp content into an iterable as required by PEP 333

        Args:
            resp: Instance of falcon.Response
            wsgi_file_wrapper: Reference to wsgi.file_wrapper from the
                WSGI environ dict, if provided by the WSGI server. Used
                when resp.stream is a file-like object (default None).

        Returns:
            A two-member tuple of the form (iterable, content_length).
            The length is returned as ``None`` when unknown. The
            iterable is determined as follows:

                * If resp.body is not ``None``, returns [resp.body],
                  encoded as UTF-8 if it is a Unicode string.
                  Bytestrings are returned as-is.
                * If resp.data is not ``None``, returns [resp.data]
                * If resp.stream is not ``None``, returns resp.stream
                  iterable using wsgi.file_wrapper, if possible.
                * Otherwise, returns []
        """
        # Precedence: body, then data, then stream.
        body = resp.body
        if body is not None:
            if not isinstance(body, bytes):
                body = body.encode('utf-8')
            return [body], len(body)
        data = resp.data
        if data is not None:
            return [data], len(data)
        stream = resp.stream
        if stream is not None:
            # NOTE(kgriffs): Heuristic to quickly check if stream is
            # file-like. Not perfect, but should be good enough until
            # proven otherwise.
            if hasattr(stream, 'read'):
                if wsgi_file_wrapper is not None:
                    # TODO(kgriffs): Make block size configurable at the
                    # global level, pending experimentation to see how
                    # useful that would be. See also the discussion on
                    # this GitHub PR: http://goo.gl/XGrtDz
                    iterable = wsgi_file_wrapper(stream,
                                                 self._STREAM_BLOCK_SIZE)
                else:
                    # Fallback: read fixed-size chunks until EOF (b'').
                    iterable = iter(
                        lambda: stream.read(self._STREAM_BLOCK_SIZE),
                        b''
                    )
            else:
                iterable = stream
            # NOTE(kgriffs): If resp.stream_len is None, content_length
            # will be as well; the caller of _get_body must handle this
            # case by not setting the Content-Length header.
            return iterable, resp.stream_len
        return [], 0
| 40.963836 | 79 | 0.577784 |
7957ca11dfd6c1bf7c370652ff5f3778d4bde7fe | 23,849 | py | Python | e2e_tests/E2ETests.py | ucgmsim/slurm_gm_workflow | 6fd7e11f3c3163dbd219b6783c32fa8085db5d35 | [
"MIT"
] | null | null | null | e2e_tests/E2ETests.py | ucgmsim/slurm_gm_workflow | 6fd7e11f3c3163dbd219b6783c32fa8085db5d35 | [
"MIT"
] | 114 | 2018-10-11T02:49:32.000Z | 2022-03-30T01:28:21.000Z | e2e_tests/E2ETests.py | ucgmsim/slurm_gm_workflow | 6fd7e11f3c3163dbd219b6783c32fa8085db5d35 | [
"MIT"
] | 2 | 2021-10-05T07:10:20.000Z | 2022-03-16T23:26:51.000Z | """Contains class and helper functions for end to end test"""
import signal
import sys
import os
import json
import shutil
import time
import glob
import subprocess
from collections import namedtuple
from typing import List
from threading import Thread
from queue import Queue, Empty
import numpy.random as nprdm
import pandas as pd
import sqlite3 as sql
from pandas.testing import assert_frame_equal
from scripts.management.db_helper import connect_db_ctx
from scripts.management.MgmtDB import SchedulerTask, MgmtDB
import qcore.constants as const
import qcore.simulation_structure as sim_struct
from qcore.shared import non_blocking_exe, exe
from scripts.schedulers.scheduler_factory import Scheduler
def get_sim_dirs(runs_dir):
"""Gets all simualation dirs under the specified Runs dir.
Also returns the fault dirs. Full paths.
"""
sim_dirs = []
fault_dirs = get_faults(runs_dir)
for fault in fault_dirs:
fault_name = os.path.basename(fault)
entries = os.listdir(fault)
for entry in entries:
entry_path = os.path.join(fault, entry)
if entry.startswith(fault_name) and os.path.isdir(entry_path):
sim_dirs.append(entry_path)
return fault_dirs, sim_dirs
def get_faults(runs_dir: str):
"""Gets all the fault directories in the specified Runs dir.
Full path.
"""
return [
os.path.join(runs_dir, entry)
for entry in os.listdir(runs_dir)
if os.path.isdir(os.path.join(runs_dir, entry))
]
# Lightweight records pairing a pipeline location with the issue observed.
# NOTE: ``Warning`` shadows the builtin Warning class within this module.
Error = namedtuple("Error", ["location", "error"])
Warning = namedtuple("Warning", ["location", "warning"])
class E2ETests(object):
"""Class responsible for setting up, running and checking end-to-end tests
based on the input config file
"""
# Config keys
cf_test_dir_key = "test_dir"
cf_data_dir_key = "data_dir"
cf_cybershake_config_key = "cybershake_config"
cf_fault_list_key = "fault_list"
cf_bench_folder_key = "bench_dir"
cf_version_key = "version"
test_checkpoint_key = "test_checkpoint"
timeout_key = "timeout"
# Benchmark folders
bench_IM_csv_folder = "IM_csv"
# Log files
install_out_file = "install_out_log.txt"
install_err_file = "install_err_log.txt"
submit_out_file = "submit_out_log.txt"
submit_err_file = "submit_err_log.txt"
warnings_file = "warnings_log.txt"
errors_file = "errors_log.txt"
# Error Keywords
error_keywords = ["error", "traceback", "exception"]
# Templates to check for
expected_templates = [
"run_bb_mpi.sl.template",
"run_emod3d.sl.template",
"run_hf_mpi.sl.template",
"sim_im_calc.sl.template",
"post_emod3d_merge_ts.sl.template",
]
    def __init__(self, config_file: str):
        """Constructor, reads input config."""
        # Fail fast if pandas is too old: the atol kwarg used here (and by
        # the result checks later) requires pandas >= 1.1.0.
        try:
            assert_frame_equal(pd.DataFrame([1]), pd.DataFrame([1]), atol=1e-03)
        except TypeError as e:
            print(
                "Please ensure pandas is at least version 1.1.0. "
                "The command 'pip install -U pandas' should help you. "
                "If this still occurs please contact the software team."
            )
            exit(1)
        with open(config_file, "r") as f:
            self.config_dict = json.load(f)
        self.version = self.config_dict[self.cf_version_key]
        # Add tmp directory (unique per run via the qcore timestamp)
        self.stage_dir = os.path.join(
            self.config_dict[self.cf_test_dir_key], "tmp_{}".format(const.timestamp)
        )
        self.im_bench_folder = os.path.join(
            self.config_dict[self.cf_bench_folder_key], self.bench_IM_csv_folder
        )
        # Config specifies minutes; convert to seconds.
        self.timeout = self.config_dict[self.timeout_key] * 60
        self.warnings, self.errors = [], []
        self.fault_dirs, self.sim_dirs = [], []
        self.runs_dir = None
        self._sim_passed, self._sim_failed = set(), set()
        self._stop_on_error, self._test_restart = None, None
        self.canceled_running = []
        # Resources that need to be dealt with on close
        self._processes = []
        self._files = []
    def run(
        self,
        user: str,
        sleep_time: int = 10,
        stop_on_error: bool = True,
        stop_on_warning: bool = False,
        no_clean_up: bool = False,
        test_restart: bool = False,
    ):
        """
        Runs the full automated workflow and checks that everything works as
        expected. Prints out a list of errors, if there are any.

        The test directory is deleted if there are no errors, unless no_clean_up
        is set.

        Parameters
        ----------
        user: str
            The username under which to run the tasks
        sleep_time: int
            Seconds between progress checks while the workflow runs.
        stop_on_error / stop_on_warning: bool
            Whether to abort when errors/warnings are recorded.
        no_clean_up: bool
            When True, the staging directory is kept even on success.
        test_restart: bool
            When True, also exercises the workflow's restart behavior.

        Returns
        -------
        bool : False when the run aborted early, True otherwise.
        """
        self._stop_on_error = stop_on_error
        self._test_restart = test_restart
        # Setup folder structure
        self.setup()
        # Run install script
        self.install()
        if self.warnings and stop_on_warning:
            print("Quitting due to warnings following warnings:")
            self.print_warnings()
            return False
        # Run automated workflow
        if not self._run_auto(user, sleep_time=sleep_time):
            return False
        # Only check that everything is completed, when auto submit does not
        # exit early
        else:
            self.check_mgmt_db()
        if self.errors:
            print("The following errors occurred during the automated workflow:")
            self.print_errors()
        else:
            print("It appears there were no errors during the automated workflow!")
            if not no_clean_up:
                self.teardown()
        return True
def print_warnings(self):
with open(os.path.join(self.stage_dir, self.warnings_file), "a") as f:
for warn in self.warnings:
text = "WARNING: {}, {}".format(warn.location, warn.warning)
print(text)
f.write(text)
def print_errors(self):
with open(os.path.join(self.stage_dir, self.errors_file), "a") as f:
for err in self.errors:
text = "ERROR: {}, {}\n".format(err.location, err.error)
print(text)
f.write(text)
def setup(self):
"""Setup for automatic workflow
Change this to use the qcore simulation structure functions!!
"""
print("Running setup...")
print("Using directory {}".format(self.stage_dir))
# Create tmp dir
os.mkdir(self.stage_dir)
# Data
data_dir = os.path.join(self.stage_dir, "Data")
shutil.copytree(self.config_dict[self.cf_data_dir_key], data_dir)
# Fault list
shutil.copy(self.config_dict[self.cf_fault_list_key], self.stage_dir)
# Create runs folder
os.mkdir(os.path.join(self.stage_dir, "Runs"))
# Mgmt queue
os.mkdir(os.path.join(self.stage_dir, "mgmt_db_queue"))
self.runs_dir = sim_struct.get_runs_dir(self.stage_dir)
def install(self):
"""Install the automated workflow
Runs install bash script, saves output into log files in the
staging directory. Also checks for error keywords in the output
and saves warnings accordingly.
"""
script_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../scripts/cybershake/install_cybershake.py",
)
cmd = "python {} {} {} {} --seed {} --stat_file_path {}".format(
script_path,
self.stage_dir,
os.path.join(
self.stage_dir,
os.path.basename(self.config_dict[self.cf_fault_list_key]),
),
self.version,
self.config_dict[const.RootParams.seed.value],
self.config_dict["stat_file"],
)
cmd = (
cmd + " --extended_period"
if self.config_dict.get("extended_period") is True
else cmd
)
cmd = (
cmd + " --keep_dup_stations"
if self.config_dict.get("keep_dup_stations") is True
else cmd
)
print("Running install...\nCmd: {}".format(cmd))
out_file = os.path.join(self.stage_dir, self.install_out_file)
err_file = os.path.join(self.stage_dir, self.install_err_file)
with open(out_file, "w") as out_f, open(err_file, "w") as err_f:
exe(cmd, debug=False, stdout=out_f, stderr=err_f)
# Check for errors
# Get these straight from execution?
output = open(out_file, "r").read()
error = open(err_file, "r").read()
if any(cur_str in output.lower() for cur_str in self.error_keywords):
msg = "There appears to be errors in the install. Error keyword found in stdout!"
print(msg)
print("##### INSTALL OUTPUT #####")
print(output)
print("##########################")
self.warnings.append(Warning("Install - Stdout", msg))
if any(cur_str in error.lower() for cur_str in self.error_keywords):
msg = "There appears to be errors in the install. Error keyword found in stderr!"
print(msg)
print("##### INSTALL OUTPUT #####")
print(error)
print("##########################")
self.errors.append(Error("Install - Stderr", msg))
self.fault_dirs, self.sim_dirs = get_sim_dirs(self.runs_dir)
def _check_true(self, check: bool, location: str, error_msg: str):
    """Record Error(location, error_msg) on self.errors when check is falsy."""
    if check:
        return
    self.errors.append(Error(location, error_msg))
def check_install(self):
    """Checks that all required templates exists, along with the yaml params"""
    # Build the full list of (file, directory, location, message-mask)
    # checks up front, then run them through _check_true in one pass.
    checks = (
        [("sim_params.yaml", cur_dir, "Install - Sim params",
          "Sim params file is missing in {}") for cur_dir in self.sim_dirs]
        + [("fault_params.yaml", cur_dir, "Install - Fault params",
            "Fault params are missing in {}") for cur_dir in self.fault_dirs]
        + [("root_params.yaml", self.runs_dir, "Install - root params",
            "Root params are missing in {}")]
    )
    for fname, directory, location, msg_mask in checks:
        self._check_true(
            fname in os.listdir(directory), location, msg_mask.format(directory)
        )
def _run_auto(self, user: str, sleep_time: int = 10):
    """
    Runs auto submit

    Parameters
    ----------
    user: str
        The username under which to run the tasks
    sleep_time: int
        Time (in seconds) between progress checks

    Returns
    -------
    bool: True when all tasks finished within the timeout, False on
    timeout, early completion-check failure, or (implicitly) exception.
    """
    submit_cmd = "python {} {} {} {} --sleep_time 2".format(
        os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "../scripts/cybershake/run_cybershake.py",
        ),
        self.stage_dir,
        user,
        os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            self.config_dict["wrapper_config"],
        ),
    )
    # Different process types for which canceling/resume is tested
    proc_type_cancel = None
    if self.config_dict[self.test_checkpoint_key]:
        proc_type_cancel = [
            const.ProcessType.EMOD3D,
            const.ProcessType.HF,
            const.ProcessType.BB,
        ]

    def run_wrapper(command: str):
        # Launch the wrapper process and attach non-blocking readers so its
        # stdout/stderr can be drained from the monitoring loop below.
        p_submit = non_blocking_exe(
            command,
            debug=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            encoding="utf-8",
        )
        self._processes.append(p_submit)
        p_submit_out_nbsr = NonBlockingStreamReader(p_submit.stdout)
        p_submit_err_nbsr = NonBlockingStreamReader(p_submit.stderr)
        # Create and open the log files (tracked on self._files so close()
        # can clean them up).
        out_submit_f = open(os.path.join(self.stage_dir, self.submit_out_file), "w")
        err_submit_f = open(os.path.join(self.stage_dir, self.submit_err_file), "w")
        self._files.extend((out_submit_f, err_submit_f))
        return (
            p_submit,
            [(out_submit_f, p_submit_out_nbsr), (err_submit_f, p_submit_err_nbsr)],
        )

    def restart_command(process: subprocess.Popen, command: str):
        # Interrupt the running wrapper (SIGINT), confirm it died, then
        # launch a fresh one to exercise restart/resume behaviour.
        print("Restarting command: {}".format(command))
        process.send_signal(signal.SIGINT)
        process.wait(5)
        if process.poll() is None:
            raise RuntimeError("Process {} would not die".format(process.args))
        return run_wrapper(command)

    def get_laps_till_restart():
        # Poisson-distributed number of monitoring laps between restarts.
        return nprdm.poisson(3)

    laps_till_restart = 5
    # Have to put this in a massive try block, to ensure that
    # the run_queue_and_auto_submit process is terminated on any errors.
    try:
        print("Starting cybershake wrapper...")
        p_submit, outputs_to_check = run_wrapper(submit_cmd)
        # Monitor mgmt db
        print("Progress: ")
        start_time = time.time()
        while time.time() - start_time < self.timeout:
            if self._test_restart:
                laps_till_restart -= 1
                if laps_till_restart < 1:
                    p_submit, outputs_to_check = restart_command(
                        p_submit, submit_cmd
                    )
                    laps_till_restart = get_laps_till_restart()
            try:
                (
                    total_count,
                    comp_count,
                    failed_count,
                ) = self.check_mgmt_db_progress()
                if not self.check_completed():
                    return False
            except sql.OperationalError as ex:
                # DB can be locked by the wrapper; back off and retry.
                print(
                    "Operational error while accessing database. "
                    "Retrying in {} seconds\n{}".format(sleep_time, ex)
                )
                time.sleep(sleep_time)
                continue
            print(
                "Completed: {}, Failed: {}, Total: {}".format(
                    comp_count, failed_count, total_count
                )
            )
            # Get the log data
            for file, reader in outputs_to_check:
                lines = reader.readlines()
                if lines:
                    file.writelines(lines)
                    file.flush()
            if proc_type_cancel:
                # Checkpoint testing: cancel one job per remaining type.
                proc_type_cancel = self.cancel_running(proc_type_cancel)
            if total_count == (comp_count + failed_count):
                break
            else:
                time.sleep(sleep_time)
        if time.time() - start_time >= self.timeout:
            print("The auto-submit timeout expired.")
            self.errors.append(
                Error("Auto-submit timeout", "The auto-submit timeout expired.")
            )
            return False
    # Still display the exception
    except Exception as ex:
        raise ex
    # Clean up
    finally:
        self.close()
    return True
def close(self):
    """Terminate all tracked child processes and close all tracked log files."""
    for proc in self._processes:
        if proc is None:
            continue
        proc.terminate()
    for handle in self._files:
        if handle is None:
            continue
        handle.close()
def cancel_running(self, proc_types: List[const.ProcessType]):
    """Looks for any running task of the specified process types
    and attempts to cancel one of each.

    Parameters
    ----------
    proc_types: List[const.ProcessType]
        Process types for which one running job should be cancelled.
        Successfully cancelled types are removed from this list in place.

    Returns
    -------
    The (mutated) list of process types still awaiting a cancellation.
    """
    # Get all running jobs in the mgmt db
    db = MgmtDB(sim_struct.get_mgmt_db(self.stage_dir))
    entries = db.command_builder(
        allowed_tasks=proc_types, allowed_states=[const.Status.running]
    )
    # Cancel one for each process type
    for entry in entries:
        if entry.proc_type in proc_types:
            # Fix: the continuation lines were plain string literals, so the
            # {entry.*} placeholders printed literally; every fragment of the
            # implicitly-concatenated message must carry the f prefix.
            print(
                f"Checkpoint testing: Cancelling job-id {entry.job_id} "
                f"for {entry.run_name} and process type {entry.proc_type}"
            )
            out, err = Scheduler.get_scheduler().cancel_job(entry.job_id)
            print("Scancel out: ", out, err)
            if "error" not in out.lower() and "error" not in err.lower():
                # Remember cancelled job ids so check_mgmt_db can ignore them.
                self.canceled_running.append(str(entry.job_id))
                proc_types.remove(entry.proc_type)
                print("Cancelled job-id {}".format(entry.job_id))
    return proc_types
def check_mgmt_db(self):
    """Create errors for all entries in management db that did not complete.

    Jobs that were deliberately cancelled during checkpoint testing
    (self.canceled_running) are excluded, as they are expected to fail.
    """
    base_proc_types = [
        const.ProcessType.EMOD3D,
        const.ProcessType.HF,
        const.ProcessType.BB,
        const.ProcessType.IM_calculation,
    ]
    db = MgmtDB(sim_struct.get_mgmt_db(self.stage_dir))
    entries = db.command_builder(
        allowed_tasks=base_proc_types,
        allowed_states=[const.Status.unknown, const.Status.failed],
        blocked_ids=self.canceled_running,
    )
    for entry in entries:
        self.errors.append(
            Error(
                "Slurm task",
                # Fixed: the message was missing its closing parenthesis.
                "Run {} did not complete task {} "
                "(Status {}, JobId {})".format(
                    entry.run_name,
                    const.ProcessType(entry.proc_type),
                    const.Status(entry.status),
                    entry.job_id,
                ),
            )
        )
def check_sim_result(self, sim_dir: str):
    """Checks that all the LF, HF and BB binaries are there and that the
    IM values match up with the benchmark IMs.

    Parameters
    ----------
    sim_dir: str
        The simulation directory to verify.

    Returns
    -------
    bool: True if every check passed; False otherwise (details are
    appended to self.errors).
    """
    result = True
    # Check HF binary
    hf_bin = sim_struct.get_hf_bin_path(sim_dir)
    if not os.path.isfile(hf_bin):
        self.errors.append(
            Error("HF - Binary", "The HF binary is not at {}".format(hf_bin))
        )
        result = False
    # Check BB binary
    bb_bin = sim_struct.get_bb_bin_path(sim_dir)
    if not os.path.isfile(bb_bin):
        self.errors.append(
            # Fixed copy-paste bug: the message previously reported hf_bin.
            Error("BB - Binary", "The BB binary is not at {}".format(bb_bin))
        )
        result = False
    # Check IM
    im_csv = sim_struct.get_IM_csv(sim_dir)
    if not os.path.isfile(im_csv):
        self.errors.append(
            Error(
                "IM_calc - CSV", "The IM_calc csv file is not at {}".format(im_csv)
            )
        )
        result = False
    else:
        # Compare computed IMs against the benchmark CSV with loose
        # tolerances (floating point results vary slightly between runs).
        bench_csv = os.path.join(
            self.im_bench_folder,
            "{}.csv".format(os.path.basename(sim_dir).split(".")[0]),
        )
        bench_df = pd.read_csv(bench_csv)
        cur_df = pd.read_csv(im_csv)
        try:
            assert_frame_equal(cur_df, bench_df, atol=1e-04, rtol=1e-03)
        except AssertionError:
            self.errors.append(
                Error(
                    "IM - Values",
                    "The IMs for {} are not equal to the benchmark {}".format(
                        im_csv, bench_csv
                    ),
                )
            )
            result = False
    return result
def check_mgmt_db_progress(self):
    """Checks auto submit progress in the management db"""
    base_proc_types = [
        const.ProcessType.EMOD3D,
        const.ProcessType.HF,
        const.ProcessType.BB,
        const.ProcessType.IM_calculation,
    ]
    db = MgmtDB(sim_struct.get_mgmt_db(self.stage_dir))

    def count_entries(states=None):
        # Count db entries for the base process types, optionally
        # restricted to the given states.
        kwargs = {"allowed_tasks": base_proc_types}
        if states is not None:
            kwargs["allowed_states"] = states
        return len(db.command_builder(**kwargs))

    total_count = count_entries()
    comp_count = count_entries([const.Status.completed])
    failed_count = count_entries([const.Status.failed, const.Status.unknown])
    return total_count, comp_count, failed_count
def check_completed(self):
    """Checks all simulations that have completed"""
    db = MgmtDB(sim_struct.get_mgmt_db(self.stage_dir))
    entries = db.command_builder(
        allowed_tasks=[const.ProcessType.IM_calculation],
        allowed_states=[const.Status.completed],
    )
    completed_sims = [sim_t.run_name for sim_t in entries]
    # Only check the ones that haven't been checked already
    completed_new = set(completed_sims) - (self._sim_passed | self._sim_failed)
    for sim in completed_new:
        sim_dir = os.path.join(
            self.runs_dir, sim_struct.get_fault_from_realisation(sim), sim
        )
        if self.check_sim_result(sim_dir):
            self._sim_passed.add(sim)
            continue
        self._sim_failed.add(sim)
        if self._stop_on_error:
            print("Quitting as the following errors occured: ")
            self.print_errors()
            return False
        print("The following error occured for simulation {}:".format(sim))
        print(
            "ERROR: {}, {}\n".format(
                self.errors[-1].location, self.errors[-1].error
            )
        )
    print(
        "Passed/Failed/Total simulations: {}/{}/{}, ".format(
            len(self._sim_passed), len(self._sim_failed), len(self.sim_dirs)
        )
    )
    return True
def teardown(self):
"""Remove all files created during the end-to-end test"""
print("Deleting everything under {}".format(self.stage_dir))
shutil.rmtree(self.stage_dir)
class NonBlockingStreamReader:
    """A non-blocking stream reader.

    Based on http://eyalarubas.com/python-subproc-nonblock.html
    """

    def __init__(self, stream):
        """
        stream: the stream to read from.
        Usually a process' stdout or stderr.
        """
        self._s = stream
        self._q = Queue()

        def _drain(src, sink):
            """Move lines from the stream 'src' into the queue 'sink'."""
            while True:
                cur = src.readline()
                if not cur:
                    # EOF: readline returned an empty string/bytes.
                    print("Stream has been closed.")
                    sys.exit()
                sink.put(cur)

        # Daemon thread so a still-open stream never blocks interpreter exit.
        self._t = Thread(target=_drain, args=(self._s, self._q), daemon=True)
        self._t.start()  # start collecting lines from the stream

    def readlines(self):
        """Reads the lines from the queue, returns None if the queue is empty"""
        collected = []
        while True:
            try:
                collected.append(self._q.get(block=False))
            except Empty:
                break
        return collected if collected else None
| 33.685028 | 93 | 0.554195 |
7957cbd5fa9a03fb753bb79313289edb105db546 | 10,229 | py | Python | cirq/ops/controlled_operation.py | matpompili/Cirq | b9ce387a7fc1f571b3d6e903c46543c3578677cb | [
"Apache-2.0"
] | 1 | 2020-07-16T07:09:10.000Z | 2020-07-16T07:09:10.000Z | cirq/ops/controlled_operation.py | matpompili/Cirq | b9ce387a7fc1f571b3d6e903c46543c3578677cb | [
"Apache-2.0"
] | 1 | 2020-08-11T15:45:17.000Z | 2020-08-11T15:45:17.000Z | cirq/ops/controlled_operation.py | matpompili/Cirq | b9ce387a7fc1f571b3d6e903c46543c3578677cb | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import (Any, cast, Collection, Dict, List, Optional, Sequence,
Tuple, Union, TYPE_CHECKING)
import itertools
import numpy as np
from cirq import protocols, qis, value
from cirq.ops import raw_types, gate_operation, controlled_gate
from cirq.type_workarounds import NotImplementedType
if TYPE_CHECKING:
import cirq
@value.value_equality
class ControlledOperation(raw_types.Operation):
    """Augments existing operations to have one or more control qubits.

    This object is typically created via `operation.controlled_by(*qubits)`.
    """

    def __init__(self,
                 controls: Sequence['cirq.Qid'],
                 sub_operation: 'cirq.Operation',
                 control_values: Optional[Sequence[
                     Union[int, Collection[int]]]] = None):
        # control_values holds, per control qubit, the tuple of values for
        # which the sub-operation is applied; default is (1,) per control.
        if control_values is None:
            control_values = ((1,),) * len(controls)
        if len(control_values) != len(controls):
            raise ValueError('len(control_values) != len(controls)')
        # Convert to sorted tuples
        self.control_values = cast(
            Tuple[Tuple[int, ...], ...],
            tuple((val,) if isinstance(val, int) else tuple(sorted(val))
                  for val in control_values))
        # Verify control values not out of bounds
        for q, val in zip(controls, self.control_values):
            if not all(0 <= v < q.dimension for v in val):
                raise ValueError(
                    'Control values <{!r}> outside of range for qubit '
                    '<{!r}>.'.format(val, q))
        if not isinstance(sub_operation, ControlledOperation):
            self.controls = tuple(controls)
            self.sub_operation = sub_operation
        else:
            # Auto-flatten nested controlled operations.
            self.controls = tuple(controls) + sub_operation.controls
            self.sub_operation = sub_operation.sub_operation
            self.control_values += sub_operation.control_values

    @property
    def gate(self) -> Optional['cirq.ControlledGate']:
        # Wrap the sub-operation's gate (if it has one) in an equivalent
        # ControlledGate carrying the same control values/shape.
        if self.sub_operation.gate is None:
            return None
        return controlled_gate.ControlledGate(
            self.sub_operation.gate,
            control_values=self.control_values,
            control_qid_shape=[q.dimension for q in self.controls])

    @property
    def qubits(self):
        # Control qubits come first, then the sub-operation's qubits.
        return self.controls + self.sub_operation.qubits

    def with_qubits(self, *new_qubits):
        # The first len(controls) new qubits become the controls; the rest
        # are forwarded to the sub-operation.
        n = len(self.controls)
        return ControlledOperation(
            new_qubits[:n], self.sub_operation.with_qubits(*new_qubits[n:]),
            self.control_values)

    def _decompose_(self):
        # Decompose the sub-operation once and re-apply the same controls
        # to every resulting operation.
        result = protocols.decompose_once(self.sub_operation, NotImplemented)
        if result is NotImplemented:
            return NotImplemented
        return [
            ControlledOperation(self.controls, op, self.control_values)
            for op in result
        ]

    def _value_equality_values_(self):
        # Control (qubit, values) pairs compare as a set: order-insensitive.
        return (frozenset(zip(self.controls,
                              self.control_values)), self.sub_operation)

    def _apply_unitary_(self, args: 'protocols.ApplyUnitaryArgs') -> np.ndarray:
        # Apply the sub-operation only to the slices of the state tensor
        # where every control axis holds one of its accepted values.
        n = len(self.controls)
        sub_n = len(args.axes) - n
        sub_axes = args.axes[n:]
        for control_vals in itertools.product(*self.control_values):
            active = (..., *(slice(v, v + 1) for v in control_vals),
                      *(slice(None),) * sub_n)
            target_view = args.target_tensor[active]
            buffer_view = args.available_buffer[active]
            result = protocols.apply_unitary(self.sub_operation,
                                             protocols.ApplyUnitaryArgs(
                                                 target_view, buffer_view,
                                                 sub_axes),
                                             default=NotImplemented)
            if result is NotImplemented:
                return NotImplemented
            if result is not target_view:
                # HACK: assume they didn't somehow escape the slice view and
                # edit the rest of target_tensor.
                target_view[...] = result
        return args.target_tensor

    def _has_unitary_(self) -> bool:
        return protocols.has_unitary(self.sub_operation)

    def _extend_matrix(self, sub_matrix: np.ndarray) -> np.ndarray:
        # Embed sub_matrix into an identity over the full qid space, writing
        # it into the blocks selected by the accepted control values.
        qid_shape = protocols.qid_shape(self)
        sub_n = len(qid_shape) - len(self.controls)
        tensor = qis.eye_tensor(qid_shape, dtype=sub_matrix.dtype)
        sub_tensor = sub_matrix.reshape(qid_shape[len(self.controls):] * 2)
        for control_vals in itertools.product(*self.control_values):
            active = (*(v for v in control_vals), *(slice(None),) * sub_n) * 2
            tensor[active] = sub_tensor
        return tensor.reshape((np.prod(qid_shape, dtype=int),) * 2)

    def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:
        sub_matrix = protocols.unitary(self.sub_operation, None)
        if sub_matrix is None:
            return NotImplemented
        return self._extend_matrix(sub_matrix)

    def _has_mixture_(self) -> bool:
        return protocols.has_mixture(self.sub_operation)

    def _mixture_(self) -> Optional[List[Tuple[float, np.ndarray]]]:
        # A controlled mixture extends each component matrix independently.
        sub_mixture = protocols.mixture(self.sub_operation, None)
        if sub_mixture is None:
            return None
        return [(p, self._extend_matrix(m)) for p, m in sub_mixture]

    def __str__(self) -> str:
        # One "C" per control; non-default control values are spelled out
        # after the C (e.g. "C02").
        if set(self.control_values) == {(1,)}:

            def get_prefix(control_vals):
                return 'C'
        else:

            def get_prefix(control_vals):
                control_vals_str = ''.join(map(str, sorted(control_vals)))
                return f'C{control_vals_str}'

        prefix = ''.join(map(get_prefix, self.control_values))
        if isinstance(self.sub_operation, gate_operation.GateOperation):
            qubits = ', '.join(map(str, self.qubits))
            return f'{prefix}{self.sub_operation.gate}({qubits})'
        controls = ', '.join(str(q) for q in self.controls)
        return f'{prefix}({controls}, {self.sub_operation})'

    def __repr__(self):
        # Prefer the compact `.controlled_by(...)` form when it round-trips.
        if all(q.dimension == 2 for q in self.controls):
            # NOTE(review): this compares control_values against
            # ((1, 1, ..., 1),) — a single tuple of ones — whereas
            # control_values is stored as one tuple per control, i.e.
            # ((1,), (1,), ...). Verify ((1,),) * len(self.controls) was
            # not intended; for a single control the two coincide.
            if self.control_values == ((1,) * len(self.controls),):
                if self == self.sub_operation.controlled_by(*self.controls):
                    qubit_args = ', '.join(repr(q) for q in self.controls)
                    return f'{self.sub_operation!r}.controlled_by({qubit_args})'
        return (f'cirq.ControlledOperation('
                f'sub_operation={self.sub_operation!r},'
                f'control_values={self.control_values!r},'
                f'controls={self.controls!r})')

    def _is_parameterized_(self) -> bool:
        return protocols.is_parameterized(self.sub_operation)

    def _resolve_parameters_(self, resolver) -> 'ControlledOperation':
        new_sub_op = protocols.resolve_parameters(self.sub_operation, resolver)
        return ControlledOperation(self.controls, new_sub_op,
                                   self.control_values)

    def _trace_distance_bound_(self) -> Optional[float]:
        if self._is_parameterized_():
            return None
        u = protocols.unitary(self.sub_operation, default=None)
        if u is None:
            return NotImplemented
        angle_list = np.append(np.angle(np.linalg.eigvals(u)), 0)
        return protocols.trace_distance_from_angle_list(angle_list)

    def __pow__(self, exponent: Any) -> 'ControlledOperation':
        # Exponentiation applies to the sub-operation; controls are kept.
        new_sub_op = protocols.pow(self.sub_operation,
                                   exponent,
                                   NotImplemented)
        if new_sub_op is NotImplemented:
            return NotImplemented
        return ControlledOperation(self.controls, new_sub_op,
                                   self.control_values)

    def _circuit_diagram_info_(self, args: 'cirq.CircuitDiagramInfoArgs'
                              ) -> Optional['protocols.CircuitDiagramInfo']:
        # Delegate to the sub-operation with the control qubits stripped off,
        # then prepend one control symbol per control qubit.
        n = len(self.controls)
        sub_args = protocols.CircuitDiagramInfoArgs(
            known_qubit_count=(args.known_qubit_count - n
                               if args.known_qubit_count is not None else None),
            known_qubits=(args.known_qubits[n:]
                          if args.known_qubits is not None else None),
            use_unicode_characters=args.use_unicode_characters,
            precision=args.precision,
            qubit_map=args.qubit_map)
        sub_info = protocols.circuit_diagram_info(self.sub_operation,
                                                  sub_args,
                                                  None)
        if sub_info is None:
            return NotImplemented

        def get_symbol(vals):
            # '@' is the conventional symbol for a value-1 control.
            if tuple(vals) == (1,):
                return '@'
            return '({})'.format(','.join(map(str, vals)))

        wire_symbols = (*(get_symbol(vals) for vals in self.control_values),
                        *sub_info.wire_symbols)
        return protocols.CircuitDiagramInfo(
            wire_symbols=wire_symbols,
            exponent=sub_info.exponent,
            exponent_qubit_index=None if sub_info.exponent_qubit_index is None
            else sub_info.exponent_qubit_index + 1)

    def _json_dict_(self) -> Dict[str, Any]:
        return {
            'cirq_type': self.__class__.__name__,
            'controls': self.controls,
            'control_values': self.control_values,
            'sub_operation': self.sub_operation,
        }
| 41.922131 | 80 | 0.607586 |
7957cc1fcde3dd32c7088f4c41669dfe7081f00a | 453 | py | Python | project/urls.py | flarno11/teslarent | 388b5fefdc7bca0a460c5db5ea24b233467e5183 | [
"Apache-2.0"
] | null | null | null | project/urls.py | flarno11/teslarent | 388b5fefdc7bca0a460c5db5ea24b233467e5183 | [
"Apache-2.0"
] | 13 | 2020-02-11T21:56:50.000Z | 2022-02-17T14:37:34.000Z | project/urls.py | flarno11/teslarent | 388b5fefdc7bca0a460c5db5ea24b233467e5183 | [
"Apache-2.0"
] | 2 | 2019-06-27T20:38:48.000Z | 2019-06-27T21:15:54.000Z | from django.contrib import admin
from django.urls import include, path
import teslarent.urls
from teslarent import manage_views
admin.autodiscover()
urlpatterns = [
path('', teslarent.views.index, name='index'),
path('metrics', manage_views.metrics, name='metrics'),
path('rental/', include('teslarent.urls')),
path('manage/', include(('teslarent.manage_urls', 'manage'), namespace='manage')),
path('admin/', admin.site.urls),
]
| 26.647059 | 86 | 0.706402 |
7957cd26f88f86e7c40e18ba368caa1460c41045 | 85,283 | py | Python | src/encoded/vis_defines.py | utsw-bicf/cognitionexplorer | df7cbca95870d3776d5b22d062b6a5be864f0948 | [
"MIT"
] | 1 | 2019-07-18T21:57:10.000Z | 2019-07-18T21:57:10.000Z | src/encoded/vis_defines.py | utsw-bicf/pandiseased | ecb2c305a5c4bf468b0964137984d1800c798f01 | [
"MIT"
] | 321 | 2019-08-20T19:32:17.000Z | 2021-10-15T20:00:02.000Z | src/encoded/vis_defines.py | Lattice-Data/encoded | 94bb4f7cb51970523715e0598d84699a28f90861 | [
"MIT"
] | 2 | 2019-09-20T19:58:08.000Z | 2020-01-28T15:04:31.000Z | from pyramid.response import Response
from pyramid.view import view_config
from pyramid.compat import bytes_
from snovault import Item
from collections import OrderedDict
from copy import deepcopy
import json
import os
from urllib.parse import (
parse_qs,
urlencode,
)
from snovault.elasticsearch.interfaces import ELASTIC_SEARCH
import time
from pkg_resources import resource_filename
import logging
log = logging.getLogger(__name__)
#log.setLevel(logging.DEBUG)
log.setLevel(logging.INFO)
# ihec requires pipeline and aligner information which is not easy to get
IHEC_DEEP_DIG = True
# Maps portal assay names to IHEC "library_strategy" vocabulary terms.
IHEC_LIB_STRATEGY = {
    'ATAC-seq': 'ATAC-seq',
    'ChIP-seq': 'ChIP-Seq',
    'DNase-seq': 'DNase-Hypersensitivity',
    'MeDIP-seq': 'MeDIP-Seq',
    'microRNA-seq': 'miRNA-Seq',
    'microRNA counts': 'miRNA-Seq',
    'MRE-seq': 'MRE-Seq',
    'RNA-seq': 'RNA-Seq',
    'RRBS': 'Bisulfite-Seq',
    'whole-genome shotgun bisulfite sequencing': 'Bisulfite-Seq'
}
ASSEMBLY_DETAILS = {
'GRCh38': { 'species': 'Homo sapiens', 'assembly_reference': 'GRCh38',
'common_name': 'human',
'ucsc_assembly': 'hg38',
'ensembl_host': 'www.ensembl.org',
'quickview': True,
'hic': True,
'comment': 'Ensembl works'
},
'GRCh38-minimal': { 'species': 'Homo sapiens', 'assembly_reference': 'GRCh38',
'common_name': 'human',
'ucsc_assembly': 'hg38',
'ensembl_host': 'www.ensembl.org',
},
'hg19': { 'species': 'Homo sapiens', 'assembly_reference': 'GRCh37',
'common_name': 'human',
'ucsc_assembly': 'hg19',
'NA_ensembl_host': 'grch37.ensembl.org',
'quickview': True,
'hic': True,
'comment': 'Ensembl DOES NOT WORK'
},
'mm10': { 'species': 'Mus musculus', 'assembly_reference': 'GRCm38',
'common_name': 'mouse',
'ucsc_assembly': 'mm10',
'ensembl_host': 'www.ensembl.org',
'quickview': True,
'comment': 'Ensembl works'
},
'mm10-minimal': { 'species': 'Mus musculus', 'assembly_reference': 'GRCm38',
'common_name': 'mouse',
'ucsc_assembly': 'mm10',
'ensembl_host': 'www.ensembl.org',
'quickview': True,
'comment': 'Should this be removed?'
},
'mm9': { 'species': 'Mus musculus', 'assembly_reference': 'NCBI37',
'common_name': 'mouse',
'ucsc_assembly': 'mm9',
'NA_ensembl_host': 'may2012.archive.ensembl.org',
'quickview': True,
'comment': 'Ensembl DOES NOT WORK'
},
'dm6': { 'species': 'Drosophila melanogaster', 'assembly_reference': 'BDGP6',
'common_name': 'fruit fly',
'ucsc_assembly': 'dm6',
'NA_ensembl_host': 'www.ensembl.org',
'quickview': True,
'comment': 'Ensembl DOES NOT WORK'
},
'dm3': { 'species': 'Drosophila melanogaster', 'assembly_reference': 'BDGP5',
'common_name': 'fruit fly',
'ucsc_assembly': 'dm3',
'NA_ensembl_host': 'dec2014.archive.ensembl.org',
'quickview': True,
'comment': 'Ensembl DOES NOT WORK'
},
'ce11': { 'species': 'Caenorhabditis elegans', 'assembly_reference': 'WBcel235',
'common_name': 'worm',
'ucsc_assembly': 'ce11',
'NA_ensembl_host': 'www.ensembl.org',
'quickview': True,
'comment': 'Ensembl DOES NOT WORK'
},
'ce10': { 'species': 'Caenorhabditis elegans', 'assembly_reference': 'WS220',
'common_name': 'worm',
'ucsc_assembly': 'ce10',
'quickview': True,
'comment': 'Never Ensembl'
},
'ce6': { 'species': 'Caenorhabditis elegans', 'assembly_reference': 'WS190',
'common_name': 'worm',
'ucsc_assembly': 'ce6',
'comment': 'Never Ensembl, not found in encoded'
},
'J02459.1': { 'species': 'Escherichia virus Lambda', 'assembly_reference': 'J02459.1',
'common_name': 'lambda phage',
'comment': 'Never visualized'
},
}
# File formats each supported genome browser can visualize.
BROWSER_FILE_TYPES = {
    'ucsc': {'bigWig', 'bigBed'},
    'ensembl': {'bigWig', 'bigBed'},
    'quickview': {'bigWig', 'bigBed'},
    'hic': {'hic'},
}
# Distinct from ASSEMBLY_DETAILS['ucsc_assembly'] as that defines allowed mappings
ASSEMBLY_TO_UCSC_ID = {
    'GRCh38-minimal': 'hg38',
    'GRCh38': 'hg38',
    'GRCh37': 'hg19',
    'mm10-minimal': 'mm10',
    'GRCm38': 'mm10',
    'NCBI37': 'mm9',
    'BDGP6': 'dm6',
    'BDGP5': 'dm3',
    'WBcel235': 'ce11'
}
# Object statuses that are never shown in quickview.
QUICKVIEW_STATUSES_BLOCKED = ["deleted", "revoked", "replaced"]
# Dataset/file statuses eligible for visualization.
VISIBLE_DATASET_STATUSES = ["released"]
VISIBLE_FILE_STATUSES = ["released"]
# File formats eligible for visualization, grouped by type.
BIGWIG_FILE_TYPES = ['bigWig']
BIGBED_FILE_TYPES = ['bigBed']
HIC_FILE_TYPES = ['hic']
VISIBLE_FILE_FORMATS = BIGBED_FILE_TYPES + BIGWIG_FILE_TYPES + HIC_FILE_TYPES
VISIBLE_DATASET_TYPES = ["Experiment", "Annotation"]
VISIBLE_DATASET_TYPES_LC = ["experiment", "annotation"]
# Supported tokens are the only tokens the code currently knows how to look up.
SUPPORTED_MASK_TOKENS = [
"{replicate}", # replicate that that will be displayed: ("rep1", "combined")
"{rep_tech}", # The rep_tech if desired ("rep1_1", "combined")
"{replicate_number}", # The replicate number displayed for visualized track: ("1", "0")
"{biological_replicate_number}",
"{technical_replicate_number}",
"{assay_title}",
"{assay_term_name}", # dataset.assay_term_name
"{annotation_type}", # some datasets have annotation type and not assay
"{output_type}", # files.output_type
"{accession}", "{experiment.accession}", # "{accession}" is assumed to be experiment.accession
"{file.accession}",
"{@id}", "{@type}", # dataset only
"{target}", "{target.label}", # Either is acceptible
"{target.title}",
"{target.name}", # Used in metadata URLs
"{target.investigated_as}",
"{biosample_term_name}", "{biosample_term_name|multiple}", # "|multiple": none means multiple
"{output_type_short_label}", # hard-coded translation from output_type to very
# short version
"{replicates.library.biosample.summary}", # Idan, Forrest and Cricket are conspiring to move
# to dataset.biosample_summary & make it shorter
"{replicates.library.biosample.summary|multiple}", # "|multiple": none means multiple
"{assembly}", # you don't need this in titles, but it is crucial
# variable and seems to not be being applied
# # correctly in the html generation
"{lab.title}", # In metadata
"{award.rfa}", # To distinguish vis_defs based upon award
# TODO "{software? or pipeline?}", # Cricket: "I am stumbling over the fact that we
# # can't distinguish tophat and star produced files"
# TODO "{phase}", # Cricket: "If we get to the point of being fancy
# # in the replication timing, then we need this,
# # otherwise it bundles up in the biosample summary now"
]
# Simple tokens are a straight lookup, no questions asked
SIMPLE_DATASET_TOKENS = ["{accession}", "{assay_title}",
"{assay_term_name}", "{annotation_type}", "{@id}", "{@type}"]
# static group defs are keyed by group title (or special token) and consist of
# tag: (optional) unique terse key for referencing group
# groups: (optional) { subgroups keyed by subgroup title }
# group_order: (optional) [ ordered list of subgroup titles ]
# other definitions
# live group defs are keyed by tag and are the transformed in memory version of static defs
# title: (required) same as the static group's key
# groups: (if appropriate) { subgroups keyed by subgroup tag }
# group_order: (if appropriate) [ ordered list of subgroup tags ]
VIS_DEFS_FOLDER = "static/vis_defs/"
VIS_DEFS_BY_TYPE = {}
VIS_DEFS_DEFAULT = {}
# vis_defs may not have the default experiment group defined
EXP_GROUP = "Experiment"
DEFAULT_EXPERIMENT_GROUP = {"tag": "EXP", "groups": {"one": {"title_mask": "{accession}",
"url_mask": "experiments/{accession}"}}}
# Pennants are flags that display at UCSC next to composite definitions
PENNANTS = {
"NHGRI": ("https://www.encodeproject.org/static/img/pennant-nhgri.png "
"https://www.encodeproject.org/ "
"\"This trackhub was automatically generated from the files and metadata found "
"at the ENCODE portal\""),
"ENCODE": ("https://www.encodeproject.org/static/img/pennant-encode.png "
"https://www.encodeproject.org/ "
"\"This trackhub was automatically generated from the ENCODE files and metadata "
"found at the ENCODE portal\""),
"modENCODE": ("https://www.encodeproject.org/static/img/pennant-encode.png "
"https://www.encodeproject.org/ "
"\"This trackhub was automatically generated from the modENCODE files and "
"metadata found at the ENCODE portal\""),
"GGR": ("https://www.encodeproject.org/static/img/pennant-ggr.png "
"https://www.encodeproject.org/ "
"\"This trackhub was automatically generated from the Genomics of "
"Gene Regulation files files and metadata found at the "
"ENCODE portal\""),
"REMC": ("https://www.encodeproject.org/static/img/pennant-remc.png "
"https://www.encodeproject.org/ "
"\"This trackhub was automatically generated from the Roadmap Epigentics files "
"and metadata found at the ENCODE portal\"")
# "Roadmap": "encodeThumbnail.jpg "
# "https://www.encodeproject.org/ "
# "\"This trackhub was automatically generated from the Roadmap files and "
# "metadata found at https://www.encodeproject.org/\"",
# "modERN": "encodeThumbnail.jpg "
# "https://www.encodeproject.org/ "
# "\"This trackhub was automatically generated from the modERN files and "
# "metadata found at https://www.encodeproject.org/\"",
}
# supported groups for arranging/sorting files in a visualization
SUPPORTED_SUBGROUPS = ["Biosample", "Targets", "Assay", "Replicates", "Views", EXP_GROUP]
# UCSC trackDb settings that are supported
SUPPORTED_TRACK_SETTINGS = [
"type", "visibility", "longLabel", "shortLabel", "color", "altColor", "allButtonPair", "html",
"scoreFilter", "spectrum", "minGrayLevel", "itemRgb", "viewLimits",
"autoScale", "negateValues", "maxHeightPixels", "windowingFunction", "transformFunc",
"signalFilter", "signalFilterLimits", "pValueFilter", "pValueFilterLimits",
"qValueFilter", "qValueFilterLimits" ]
VIEW_SETTINGS = SUPPORTED_TRACK_SETTINGS
# UCSC trackDb settings that are supported
COMPOSITE_SETTINGS = ["longLabel", "shortLabel", "visibility", "pennantIcon", "allButtonPair",
"html"]
# UCSC settings for individual files (tracks)
TRACK_SETTINGS = ["bigDataUrl", "longLabel", "shortLabel", "type", "color", "altColor"]
# This dataset terms (among others) are needed in vis_dataset formatting
ENCODED_DATASET_TERMS = ['biosample_ontology.term_name',
'biosample_ontology.term_id', 'biosample_summary',
'biosample_ontology.classification', 'assay_term_id',
'assay_term_name']
# This dataset terms (among others) are needed in vis_dataset formatting
ENCODED_DATASET_EMBEDDED_TERMS = {
'biosample_accession': 'replicates.library.biosample.accession',
'sex': 'replicates.library.biosample.sex',
'taxon_id': 'replicates.library.biosample.organism.taxon_id'
}
# Abbreviations for output_type to fit in UCSC shortLabel
# Abbreviations (8 chars or fewer) for file output_type values, used when building
# short track labels (see "{output_type_short_label}" in VisDefines.lookup_token).
# Commented-out entries are output_types deliberately excluded from visualization.
OUTPUT_TYPE_8CHARS = {
    # "idat green channel": "idat gr",     # raw data
    # "idat red channel": "idat rd",       # raw data
    # "reads":"reads",                     # raw data
    # "intensity values": "intnsty",       # raw data
    # "reporter code counts": "rcc",       # raw data
    # "alignments":"aln",                  # our plan is not to visualize alignments for now
    # "unfiltered alignments":"unflt aln", # our plan is not to visualize alignments for now
    # "transcriptome alignments":"tr aln", # our plan is not to visualize alignments for now
    "minus strand signal of all reads": "all -",
    "plus strand signal of all reads": "all +",
    "signal of all reads": "all sig",
    "normalized signal of all reads": "normsig",
    # "raw minus strand signal":"raw -",   # these are all now minus signal of all reads
    # "raw plus strand signal":"raw +",    # these are all now plus signal of all reads
    "raw signal": "raw sig",
    "raw normalized signal": "nraw",
    "read-depth normalized signal": "rdnorm",
    "control normalized signal": "ctlnorm",
    "minus strand signal of unique reads": "unq -",
    "plus strand signal of unique reads": "unq +",
    "signal of unique reads": "unq sig",
    "signal p-value": "pval sig",
    "fold change over control": "foldchg",
    "exon quantifications": "exon qt",
    "gene quantifications": "gene qt",
    "microRNA quantifications": "miRNA qt",
    "transcript quantifications": "trsct qt",
    "library fraction": "lib frac",
    "methylation state at CpG": "mth CpG",
    "methylation state at CHG": "mth CHG",
    "methylation state at CHH": "mth CHH",
    "enrichment": "enrich",
    "replication timing profile": "repli tm",
    "variant calls": "vars",
    "filtered SNPs": "f SNPs",
    "filtered indels": "f indel",
    "hotspots": "hotspt",
    "long range chromatin interactions": "lrci",
    "chromatin interactions": "ch int",
    "topologically associated domains": "tads",
    "genome compartments": "compart",
    "open chromatin regions": "open ch",
    "filtered peaks": "filt pk",
    "filtered regions": "filt reg",
    "DHS peaks": "DHS pk",
    "peaks": "peaks",
    "replicated peaks": "rep pk",
    "RNA-binding protein associated mRNAs": "RBP RNA",
    "splice junctions": "splice",
    "transcription start sites": "tss",
    "predicted enhancers": "pr enh",
    "candidate enhancers": "can enh",
    "candidate promoters": "can pro",
    "predicted forebrain enhancers": "fb enh",      # plan to fix these
    "predicted heart enhancers": "hrt enh",         # plan to fix these
    "predicted whole brain enhancers": "wb enh",    # plan to fix these
    "candidate Cis-Regulatory Elements": "cCRE",
    # "genome reference":"ref",             # references not to be viewed
    # "transcriptome reference":"tr ref",   # references not to be viewed
    # "transcriptome index":"tr rix",       # references not to be viewed
    # "tRNA reference":"tRNA",              # references not to be viewed
    # "miRNA reference":"miRNA",            # references not to be viewed
    # "snRNA reference":"snRNA",            # references not to be viewed
    # "rRNA reference":"rRNA",              # references not to be viewed
    # "TSS reference":"TSS",                # references not to be viewed
    # "reference variants":"var",           # references not to be viewed
    # "genome index":"ref ix",              # references not to be viewed
    # "female genome reference":"XX ref",   # references not to be viewed
    # "female genome index":"XX rix",       # references not to be viewed
    # "male genome reference":"XY ref",     # references not to be viewed
    # "male genome index":"XY rix",         # references not to be viewed
    # "spike-in sequence":"spike",          # references not to be viewed
    "IDR thresholded peaks": "IDRt pk",
    "optimal IDR thresholded peaks": "oIDR pk",
    "conservative IDR thresholded peaks": "cIDR pk",
    "enhancer validation": "enh val",
    "semi-automated genome annotation": "saga"
    }
# Track coloring is defined by biosample.
# Values are UCSC trackDb "color"/"altColor" settings: comma-separated RGB integers (0-255).
# FIX: "0.0,158,115"-style values contained a non-integer component ("0.0"), and
# "muscle organ" had a trailing space — both invalid for trackDb color settings.
BIOSAMPLE_COLOR = {
    "GM12878": {"color": "153,38,0", "altColor": "115,31,0"},               # Dark Orange-Red
    "H1-hESC": {"color": "0,107,27", "altColor": "0,77,20"},                # Dark Green
    "K562": {"color": "46,0,184", "altColor": "38,0,141"},                  # Dark Blue
    "keratinocyte": {"color": "179,0,134", "altColor": "154,0,113"},        # Darker Pink-Purple
    "HepG2": {"color": "189,0,157", "altColor": "189,76,172"},              # Pink-Purple
    "HeLa-S3": {"color": "0,119,158", "altColor": "0,94,128"},              # Greenish-Blue
    "HeLa": {"color": "0,119,158", "altColor": "0,94,128"},                 # Greenish-Blue
    "A549": {"color": "204,163,0", "altColor": "218,205,22"},               # Dark Yellow
    "endothelial cell of umbilical vein": {"color": "224,75,0",
                                           "altColor": "179,60,0"},         # Pink
    "MCF-7": {"color": "22,219,206", "altColor": "18,179,168"},             # Cyan
    "SK-N-SH": {"color": "255,115,7", "altColor": "218,98,7"},              # Orange
    "IMR-90": {"color": "6,62,218", "altColor": "5,52,179"},                # Blue
    # NOTE(review): comment said "Dark Orange-Red" but the RGB matches Baby Blue — confirm
    "CH12.LX": {"color": "86,180,233", "altColor": "76,157,205"},
    "MEL cell line": {"color": "46,0,184", "altColor": "38,0,141"},         # Dark Blue
    "brain": {"color": "105,105,105", "altColor": "77,77,77"},              # Grey
    "eye": {"color": "105,105,105", "altColor": "77,77,77"},                # Grey
    "spinal cord": {"color": "105,105,105", "altColor": "77,77,77"},        # Grey
    "olfactory organ": {"color": "105,105,105", "altColor": "77,77,77"},    # Grey
    "esophagus": {"color": "230,159,0", "altColor": "179,125,0"},           # Mustard
    "stomach": {"color": "230,159,0", "altColor": "179,125,0"},             # Mustard
    "liver": {"color": "230,159,0", "altColor": "179,125,0"},               # Mustard
    "pancreas": {"color": "230,159,0", "altColor": "179,125,0"},            # Mustard
    "large intestine": {"color": "230,159,0", "altColor": "179,125,0"},     # Mustard
    "small intestine": {"color": "230,159,0", "altColor": "179,125,0"},     # Mustard
    "gonad": {"color": "0,158,115", "altColor": "0,125,92"},                # Darker Aquamarine (was "0.0,...")
    "mammary gland": {"color": "0,158,115", "altColor": "0,125,92"},        # Darker Aquamarine (was "0.0,...")
    "prostate gland": {"color": "0,158,115", "altColor": "0,125,92"},       # Darker Aquamarine (was "0.0,...")
    "ureter": {"color": "204,121,167", "altColor": "166,98,132"},           # Grey-Pink
    "urinary bladder": {"color": "204,121,167", "altColor": "166,98,132"},  # Grey-Pink
    "kidney": {"color": "204,121,167", "altColor": "166,98,132"},           # Grey-Pink
    "muscle organ": {"color": "102,50,200", "altColor": "81,38,154"},       # Violet (trailing space removed)
    "tongue": {"color": "102,50,200", "altColor": "81,38,154"},             # Violet
    "adrenal gland": {"color": "189,0,157", "altColor": "154,0,128"},       # Pink-Purple
    "thyroid gland": {"color": "189,0,157", "altColor": "154,0,128"},       # Pink-Purple
    "lung": {"color": "145,235,43", "altColor": "119,192,35"},              # Mossy green
    "bronchus": {"color": "145,235,43", "altColor": "119,192,35"},          # Mossy green
    "trachea": {"color": "145,235,43", "altColor": "119,192,35"},           # Mossy green
    "nose": {"color": "145,235,43", "altColor": "119,192,35"},              # Mossy green
    "placenta": {"color": "153,38,0", "altColor": "102,27,0"},              # Orange-Brown
    "extraembryonic structure": {"color": "153,38,0",
                                 "altColor": "102,27,0"},                   # Orange-Brown
    "thymus": {"color": "86,180,233", "altColor": "71,148,192"},            # Baby Blue
    "spleen": {"color": "86,180,233", "altColor": "71,148,192"},            # Baby Blue
    "bone element": {"color": "86,180,233", "altColor": "71,148,192"},      # Baby Blue
    "blood": {"color": "86,180,233", "altColor": "71,148,192"},             # Baby Blue (red?)
    "blood vessel": {"color": "214,0,0", "altColor": "214,79,79"},          # Red
    "heart": {"color": "214,0,0", "altColor": "214,79,79"},                 # Red
    "lymphatic vessel": {"color": "214,0,0", "altColor": "214,79,79"},      # Red
    "skin of body": {"color": "74,74,21", "altColor": "102,102,44"},        # Brown
    }
VIS_CACHE_INDEX = "vis_cache"
class Sanitize(object):
    """Tools for sanitizing labels, titles, tags and names emitted into trackDb text.

    Each public method runs every character of its input through escape_char()
    with a different policy (which characters pass through, and whether the rest
    are html-encoded, numeralized, or dropped).
    """

    def escape_char(self, c, exceptions=['_'], htmlize=False, numeralize=False):
        '''Pass through for 0-9,A-Z.a-z,_, but then either html encodes, numeralizes or removes special
           characters.'''
        n = ord(c)
        # FIX: was `n >= 47`, which also passed '/' (ord 47) through untouched,
        # contradicting the documented 0-9 pass-through; digits begin at ord 48.
        if n >= 48 and n <= 57:   # 0-9
            return c
        if n >= 65 and n <= 90:   # A-Z
            return c
        if n >= 97 and n <= 122:  # a-z
            return c
        if c in exceptions:
            return c
        if n == 32:               # space becomes underscore
            return '_'
        if htmlize:
            return "&#%d;" % n
        if numeralize:
            return "%d" % n
        return ""

    def label(self, s):
        '''Encodes the string to swap special characters and leaves spaces alone.'''
        new_s = ""  # longLabel and shorLabel can have spaces and some special characters
        for c in s:
            new_s += self.escape_char(c, [' ', '_', '.', '-', '(', ')', '+'], htmlize=False)
        return new_s

    def title(self, s):
        '''Encodes the string to swap special characters and replace spaces with '_'.'''
        new_s = ""  # Titles appear in tag=title pairs and cannot have spaces
        for c in s:
            new_s += self.escape_char(c, ['_', '.', '-', '(', ')', '+'], htmlize=True)
        return new_s

    def tag(self, s):
        '''Encodes the string to swap special characters and remove spaces.'''
        new_s = ""
        first = True
        for c in s:
            new_s += self.escape_char(c, numeralize=True)
            if first:
                if new_s.isdigit():  # tags cannot start with digit.
                    new_s = 'z' + new_s
                first = False
        return new_s

    def name(self, s):
        '''Encodes the string to remove special characters swap spaces for underscores.'''
        new_s = ""
        for c in s:
            new_s += self.escape_char(c)
        return new_s
sanitize = Sanitize()
class VisDefines(object):
    """Loads vis_def static files and converts dataset/file metadata into visualization terms.

    A grab bag of helpers: choosing a vis_type for a dataset, resolving {token}
    masks against dataset/file objects, picking track colors by biosample, and
    rendering a UCSC trackDb composite from a prebuilt vis_format dict.
    """
    # Loads vis_def static files and other defines for vis formatting
    # This class is also a swiss army knife of vis formatting conversions
    def __init__(self, request, dataset=None):
        # Make these global so that the same files are not continually reloaded
        self._request = request
        global VIS_DEFS_BY_TYPE
        global VIS_DEFS_DEFAULT
        self.vis_defs = VIS_DEFS_BY_TYPE
        self.vis_def_default = VIS_DEFS_DEFAULT
        self.vis_type = "opaque"
        self.dataset = dataset
        # Lazily populate the module-level cache on first instantiation
        if not self.vis_defs:
            self.load_vis_defs()
    def load_vis_defs(self):
        '''Loads 'vis_defs' (visualization definitions by assay type) from a static files.'''
        #global VIS_DEFS_FOLDER
        global VIS_DEFS_BY_TYPE
        global VIS_DEFS_DEFAULT
        folder = resource_filename(__name__, VIS_DEFS_FOLDER)
        files = os.listdir(folder)
        for filename in files:
            if filename.endswith('.json'):
                # NOTE(review): folder and filename are concatenated directly, so
                # VIS_DEFS_FOLDER must end with a path separator — confirm.
                with open(folder + filename) as fh:
                    log.debug('Preparing to load %s' % (filename))
                    vis_def = json.load(fh)
                    # Could alter vis_defs here if desired.
                    if vis_def:
                        VIS_DEFS_BY_TYPE.update(vis_def)
        self.vis_defs = VIS_DEFS_BY_TYPE
        VIS_DEFS_DEFAULT = self.vis_defs.get("opaque",{})
        self.vis_def_default = VIS_DEFS_DEFAULT
    def get_vis_type(self):
        '''returns the best visualization definition type, based upon dataset.'''
        assert(self.dataset is not None)
        assay = self.dataset.get("assay_term_name", 'none')
        if isinstance(assay, list):
            if len(assay) == 1:
                assay = assay[0]
            else:
                log.debug("assay_term_name for %s is unexpectedly a list %s" %
                          (self.dataset['accession'], str(assay)))
                return "opaque"
        # simple rule defined in most vis_defs
        for vis_type in sorted(self.vis_defs.keys(), reverse=True):  # Reverse pushes anno to bottom
            if "rule" in self.vis_defs[vis_type]:
                rule = self.vis_defs[vis_type]["rule"].replace('{assay_term_name}', assay)
                if rule.find('{') != -1:
                    rule = self.convert_mask(rule)
                # NOTE(review): rules come from the trusted static vis_def json files
                # loaded above; eval() here must never see user-supplied input.
                if eval(rule):
                    self.vis_type = vis_type
                    return self.vis_type
        # Ugly rules:
        vis_type = None
        if assay in ["RNA-seq", "PAS-seq", "microRNA-seq", \
                     "shRNA knockdown followed by RNA-seq", \
                     "CRISPR genome editing followed by RNA-seq", \
                     "CRISPRi followed by RNA-seq", \
                     "single-cell RNA sequencing assay", \
                     "siRNA knockdown followed by RNA-seq"]:
            # Distinguish long (LRNA) vs short (SRNA) RNA by the library size_range
            reps = self.dataset.get("replicates", [])  # NOTE: overly cautious
            if len(reps) < 1:
                log.debug("Could not distinguish between long and short RNA for %s because there are "
                          "no replicates. Defaulting to short." % (self.dataset.get("accession")))
                vis_type = "SRNA"  # this will be more noticed if there is a mistake
            else:
                size_range = reps[0].get("library", {}).get("size_range", "")
                if size_range.startswith('>'):
                    # NOTE(review): bare except below swallows any parse failure
                    try:
                        min_size = int(size_range[1:])
                        max_size = min_size
                    except:
                        log.debug("Could not distinguish between long and short RNA for %s. "
                                  "Defaulting to short." % (self.dataset.get("accession")))
                        vis_type = "SRNA"  # this will be more noticed if there is a mistake
                elif size_range.startswith('<'):
                    try:
                        max_size = int(size_range[1:]) - 1
                        min_size = 0
                    except:
                        log.debug("Could not distinguish between long and short RNA for %s. "
                                  "Defaulting to short." % (self.dataset.get("accession")))
                        self.vis_type = "SRNA"  # this will be more noticed if there is a mistake
                        return self.vis_type
                else:
                    try:
                        sizes = size_range.split('-')
                        min_size = int(sizes[0])
                        max_size = int(sizes[1])
                    except:
                        log.debug("Could not distinguish between long and short RNA for %s. "
                                  "Defaulting to short." % (self.dataset.get("accession")))
                        vis_type = "SRNA"  # this will be more noticed if there is a mistake
                if vis_type is None:
                    if min_size == 120 and max_size == 200:  # Another ugly exception!
                        vis_type = "LRNA"
                    elif max_size <= 200 and max_size != min_size:
                        vis_type = "SRNA"
                    elif min_size >= 150:
                        vis_type = "LRNA"
                    elif (min_size + max_size)/2 >= 235:  # This is some wicked voodoo (SRNA:108-347=227; LRNA:155-315=235)
                        vis_type = "SRNA"
        if vis_type is None:
            log.debug("%s (assay:'%s') has undefined vis_type" % (self.dataset['accession'], assay))
            vis_type = "opaque"  # This becomes a dict key later so None is not okay
        self.vis_type = vis_type
        return self.vis_type
    def get_vis_def(self, vis_type=None):
        '''returns the visualization definition set, based upon dataset.'''
        if vis_type is None:
            vis_type = self.vis_type
        vis_def = self.vis_defs.get(vis_type, self.vis_def_default)
        # Ensure the experiment group is always present and included in sortOrder
        if "other_groups" in vis_def and EXP_GROUP not in vis_def["other_groups"]["groups"]:
            vis_def["other_groups"]["groups"][EXP_GROUP] = DEFAULT_EXPERIMENT_GROUP
        if "sortOrder" in vis_def and EXP_GROUP not in vis_def["sortOrder"]:
            vis_def["sortOrder"].append(EXP_GROUP)
        return vis_def
    def visible_file_statuses(self):
        '''Returns the file statuses that are eligible for visualization.'''
        return VISIBLE_FILE_STATUSES
    def supported_subgroups(self):
        '''Returns the composite subgroup tags supported by vis formatting.'''
        return SUPPORTED_SUBGROUPS
    def encoded_dataset_terms(self):
        '''Returns the dataset terms (embedded and simple) needed for vis formatting.'''
        return list(ENCODED_DATASET_EMBEDDED_TERMS.keys()) + ENCODED_DATASET_TERMS
    def pennants(self, project):
        '''Returns the pennantIcon definition for a project, defaulting to NHGRI.'''
        return PENNANTS.get(project, PENNANTS['NHGRI'])
    def find_pennent(self):
        '''Returns an appropriate pennantIcon given dataset's award'''
        assert(self.dataset is not None)
        project = self.dataset.get("award", {}).get("project", "NHGRI")
        return self.pennants(project)
    def lookup_colors(self):
        '''Using the mask, determine which color table to use.'''
        assert(self.dataset is not None)
        color = None
        altColor = None
        coloring = {}
        ontology = self.dataset.get('biosample_ontology')
        term = "unknown"  # really only seen in test data!
        if ontology is not None:
            if not isinstance(ontology, list):
                ontology = [ontology]
            if len(ontology) == 1:
                if isinstance(ontology[0], dict):
                    term = ontology[0]['term_name']
            else:
                log.debug("%s has biosample_ontology %s that is unexpectedly a list",
                          self.dataset['accession'],
                          str([bo['@id'] for bo in ontology]))
            coloring = BIOSAMPLE_COLOR.get(term, {})
            if not coloring:
                # Fall back to the first organ_slim that has a color defined
                for organ_slim in (os for bo in ontology
                                   for os in bo['organ_slims']):
                    coloring = BIOSAMPLE_COLOR.get(organ_slim, {})
                    if coloring:
                        break
        if coloring:
            assert("color" in coloring)
            if "altColor" not in coloring:
                # Default altColor to half-intensity of color
                color = coloring["color"]
                shades = color.split(',')
                red = int(shades[0]) / 2
                green = int(shades[1]) / 2
                blue = int(shades[2]) / 2
                altColor = "%d,%d,%d" % (red, green, blue)
                coloring["altColor"] = altColor
        return coloring
    def add_living_color(self, live_format):
        '''Adds color and altColor. Note that altColor is only added if color is found.'''
        colors = self.lookup_colors()
        if colors and "color" in colors:
            live_format["color"] = colors["color"]
            if "altColor" in colors:
                live_format["altColor"] = colors["altColor"]
    def rep_for_file(self, a_file):
        '''Determines best rep_tech or rep for a file.'''
        # Starting with a little cheat for rare cases where techreps are compared instead of bioreps
        if a_file.get("file_format_type", "none") in ["idr_peak"]:
            return "combined"
        if a_file['output_type'].endswith("IDR thresholded peaks"):
            return "combined"
        bio_rep = 0
        tech_rep = 0
        if "replicate" in a_file:
            bio_rep = a_file["replicate"]["biological_replicate_number"]
            tech_rep = a_file["replicate"]["technical_replicate_number"]
        elif "tech_replicates" in a_file:
            # Do we want to make rep1_1.2.3 ? Not doing it now
            tech_reps = a_file["tech_replicates"]
            if len(tech_reps) == 1:
                bio_rep = int(tech_reps[0].split('_')[0])
                # NOTE(review): slicing [2:] assumes a single-digit biological rep
                # (e.g. "1_2" -> "2") — confirm rep numbers never reach 10.
                tech_reps = tech_reps[0][2:]
                if len(tech_reps) == 1:
                    tech_rep = int(tech_reps)
            elif len(tech_reps) > 1:
                # Use the common bio rep only if all tech_reps agree on it
                bio = 0
                for tech in tech_reps:
                    if bio == 0:
                        bio = int(tech.split('_')[0])
                    elif bio != int(tech.split('_')[0]):
                        bio = 0
                        break
                if bio > 0:
                    bio_rep = bio
        elif "biological_replicates" in a_file:
            bio_reps = a_file["biological_replicates"]
            if len(bio_reps) == 1:
                bio_rep = bio_reps[0]
        if bio_rep == 0:
            return "combined"
        rep = "rep%d" % bio_rep
        if tech_rep > 0:
            rep += "_%d" % tech_rep
        return rep
    def lookup_embedded_token(self, name, obj):
        '''Resolves a dotted embedded-object path (e.g. '{a.b.c}') against obj,
           returning the leaf value or None; for lists, only the first member is followed.'''
        token = ENCODED_DATASET_EMBEDDED_TERMS.get(name, name)
        if token[0] == '{' and token[-1] == '}':
            token = token[1:-1]
        terms = token.split('.')
        cur_obj = obj
        while len(terms) > 0:
            term = terms.pop(0)
            cur_obj = cur_obj.get(term)
            if len(terms) == 0 or cur_obj is None:
                return cur_obj
            if isinstance(cur_obj,list):
                if len(cur_obj) == 0:
                    return None
                cur_obj = cur_obj[0]  # Can't presume to use any but first
        return None
    def lookup_token(self, token, dataset, a_file=None):
        '''Returns a display value for a single supported {token}, resolved against
           dataset and (for file-level tokens) a_file.'''
        # dataset might not be self.dataset
        if token not in SUPPORTED_MASK_TOKENS:
            # NOTE(review): Logger.warn is deprecated in stdlib logging in favor of
            # warning() — confirm the log object used here.
            log.warn("Attempting to look up unexpected token: '%s'" % token)
            return "unknown token"
        if token in SIMPLE_DATASET_TOKENS:
            term = dataset.get(token[1:-1])
            if term is None:
                return "Unknown " + token[1:-1].split('_')[0].capitalize()
            elif isinstance(term,list) and len(term) > 3:
                return "Collection of %d %ss" % (len(term),token[1:-1].split('_')[0].capitalize())
            return term
        elif token == "{experiment.accession}":
            return dataset['accession']
        elif token in ["{target}", "{target.label}", "{target.name}", "{target.title}", "{target.investigated_as}"]:
            if token == '{target}':
                token = '{target.label}'
            term = self.lookup_embedded_token(token, dataset)
            if term is None and token == '{target.name}':
                term = self.lookup_embedded_token('{target.label}', dataset)
            if term is not None:
                if isinstance(term, list) and len(term) > 0:
                    return term[0]
                return term
            return "Unknown Target"
        elif token in ["{replicates.library.biosample.summary}",
                       "{replicates.library.biosample.summary|multiple}"]:
            term = self.lookup_embedded_token('{replicates.library.biosample.summary}', dataset)
            if term is None:
                # NOTE(review): the braces in this key look wrong — dataset keys are
                # unbraced, so this get() can never succeed; was "biosample_term_name"
                # meant? Confirm.
                term = dataset.get("{biosample_term_name}")
            if term is not None:
                return term
            if token.endswith("|multiple}"):
                return "multiple biosamples"
            return "Unknown Biosample"
        elif token == "{biosample_term_name}":
            biosample_ontology = dataset.get('biosample_ontology')
            if biosample_ontology is None:
                return "Unknown Biosample"
            if isinstance(biosample_ontology, dict):
                return biosample_ontology['term_name']
            if isinstance(biosample_ontology, list) and len(biosample_ontology) > 3:
                return "Collection of %d Biosamples" % (len(biosample_ontology))
            # The following got complicated because general Dataset objects
            # cannot have biosample_ontology embedded properly. As a base class,
            # some of the children, PublicationData, Project and 8 Series
            # objects, have biosample_ontology embedded as array of objects,
            # while experiment and annotation have it embedded as one single
            # object. This becomes a problem when File object linkTo Dataset in
            # general rather than one specific type. Current embedding system
            # don't know how to map a property with type = ["array", "string"]
            # in elasticsearch. Therefore, it is possible the
            # "biosample_ontology" we got here is @id which should be embedded
            # with the following code.
            if not isinstance(biosample_ontology, list):
                biosample_ontology = [biosample_ontology]
            term_names = []
            for type_obj in biosample_ontology:
                if isinstance(type_obj, str):
                    term_names.append(
                        self._request.embed(type_obj, '@@object')['term_name']
                    )
                elif 'term_name' in type_obj:
                    term_names.append(type_obj['term_name'])
            if len(term_names) == 1:
                return term_names[0]
            else:
                return term_names
        elif token == "{biosample_term_name|multiple}":
            biosample_ontology = dataset.get('biosample_ontology')
            if biosample_ontology is None:
                return "multiple biosamples"
            return biosample_ontology.get('term_name')
        # TODO: rna_species
        # elif token == "{rna_species}":
        #     if replicates.library.nucleic_acid = polyadenylated mRNA
        #         rna_species = "polyA RNA"
        #     elif replicates.library.nucleic_acid == "RNA":
        #         if "polyadenylated mRNA" in replicates.library.depleted_in_term_name
        #             rna_species = "polyA depleted RNA"
        #         else
        #             rna_species = "total RNA"
        elif a_file is not None:
            if token == "{file.accession}":
                return a_file['accession']
            #elif token == "{output_type}":
            #    return a_file['output_type']
            elif token == "{output_type_short_label}":
                output_type = a_file['output_type']
                return OUTPUT_TYPE_8CHARS.get(output_type, output_type)
            elif token == "{replicate}":
                rep_tag = a_file.get("rep_tag")
                if rep_tag is not None:
                    # Strip leading zeros from the rep number (e.g. "rep01" -> "rep1")
                    while len(rep_tag) > 4:
                        if rep_tag[3] != '0':
                            break
                        rep_tag = rep_tag[0:3] + rep_tag[4:]
                    return rep_tag
                rep_tech = a_file.get("rep_tech")
                if rep_tech is not None:
                    return rep_tech.split('_')[0]  # Should truncate tech_rep
                rep_tech = self.rep_for_file(a_file)
                return rep_tech.split('_')[0]  # Should truncate tech_rep
            elif token == "{replicate_number}":
                rep_tag = a_file.get("rep_tag", a_file.get("rep_tech", self.rep_for_file(a_file)))
                if not rep_tag.startswith("rep"):
                    return "0"
                return rep_tag[3:].split('_')[0]
            elif token == "{biological_replicate_number}":
                rep_tech = a_file.get("rep_tech", self.rep_for_file(a_file))
                if not rep_tech.startswith("rep"):
                    return "0"
                return rep_tech[3:].split('_')[0]
            elif token == "{technical_replicate_number}":
                rep_tech = a_file.get("rep_tech", self.rep_for_file(a_file))
                if not rep_tech.startswith("rep"):
                    return "0"
                return rep_tech.split('_')[1]
            elif token == "{rep_tech}":
                return a_file.get("rep_tech", self.rep_for_file(a_file))
            else:
                val = self.lookup_embedded_token(token, a_file)
                if val is not None and isinstance(val, str):
                    return val
                return ""
        else:
            val = self.lookup_embedded_token(token, dataset)
            if val is not None and isinstance(val, str):
                return val
        log.debug('Untranslated token: "%s"' % token)
        return "unknown"
    def convert_mask(self, mask, dataset=None, a_file=None):
        '''Given a mask with one or more known {term_name}s, replaces with values.'''
        working_on = mask
        # dataset might not be self.dataset
        if dataset is None:
            dataset = self.dataset
        chars = len(working_on)
        while chars > 0:
            beg_ix = working_on.find('{')
            if beg_ix == -1:
                break
            end_ix = working_on.find('}')
            if end_ix == -1:
                break
            term = self.lookup_token(working_on[beg_ix:end_ix+1], dataset, a_file=a_file)
            new_mask = []
            if beg_ix > 0:
                new_mask = working_on[0:beg_ix]
            # new_mask may be a list (beg_ix == 0) or a str here; both support += with
            # a string, and ''.join() below rebuilds the string either way.
            new_mask += "%s%s" % (term, working_on[end_ix+1:])
            chars = len(working_on[end_ix+1:])
            working_on = ''.join(new_mask)
        return working_on
    def ucsc_single_composite_trackDb(self, vis_format, title):
        '''Given a single vis_format (vis_dataset or vis_by_type dict, returns single UCSC trackDb composite text'''
        if vis_format is None or len(vis_format) == 0:
            return "# Empty composite for %s. It cannot be visualized at this time.\n" % title
        blob = ""
        # First the composite structure
        blob += "track %s\n" % vis_format["name"]
        blob += "compositeTrack on\n"
        blob += "type bed 3\n"
        for var in COMPOSITE_SETTINGS:
            val = vis_format.get(var)
            if val:
                blob += "%s %s\n" % (var, val)
        views = vis_format.get("view", [])
        if len(views) > 0:
            blob += "subGroup1 view %s" % views["title"]
            for view_tag in views["group_order"]:
                view_title = views["groups"][view_tag]["title"]
                blob += " %s=%s" % (view_tag, sanitize.title(view_title))
            blob += '\n'
        dimA_checked = vis_format.get("dimensionAchecked", "all")
        dimA_tag = ""
        if dimA_checked == "first":  # All will leave dimA_tag & dimA_checked empty, default to all on
            dimA_tag = vis_format.get("dimensions", {}).get("dimA", "")
            dimA_checked = None
        subgroup_ix = 2
        for group_tag in vis_format["group_order"]:
            group = vis_format["groups"][group_tag]
            blob += "subGroup%d %s %s" % (subgroup_ix, group_tag, sanitize.title(group["title"]))
            subgroup_ix += 1
            subgroup_order = None  # group.get("group_order")
            if subgroup_order is None or not isinstance(subgroup_order, list):
                subgroup_order = sorted(group["groups"].keys())
            for subgroup_tag in subgroup_order:
                subgroup_title = group["groups"][subgroup_tag]["title"]
                blob += " %s=%s" % (subgroup_tag, sanitize.title(subgroup_title))
                # When dimA is checked "first", remember the first subgroup of that dimension
                if group_tag == dimA_tag and dimA_checked is None:
                    dimA_checked = subgroup_tag
            blob += '\n'
        # sortOrder
        sort_order = vis_format.get("sortOrder")
        if sort_order:
            blob += "sortOrder"
            for sort_tag in sort_order:
                if title.startswith("ENCSR") and sort_tag == "EXP":
                    continue  # Single exp composites do not need to sort on EMP
                blob += " %s=+" % sort_tag
            blob += '\n'
        # dimensions
        actual_group_tags = ["view"]  # Not all groups will be used in composite, depending upon content
        dimensions = vis_format.get("dimensions", {})
        if dimensions:
            pairs = ""
            XY_skipped = []
            XY_added = []
            for dim_tag in sorted(dimensions.keys()):
                group = vis_format["groups"].get(dimensions[dim_tag])
                if group is None:  # e.g. "Targets" may not exist
                    continue
                if dimensions[dim_tag] != "REP":
                    if len(group.get("groups", {})) <= 1:
                        if dim_tag[-1] in ['X', 'Y']:
                            XY_skipped.append(dim_tag)
                        continue
                    elif dim_tag[-1] in ['X', 'Y']:
                        XY_added.append(dim_tag)
                pairs += " %s=%s" % (dim_tag, dimensions[dim_tag])
                actual_group_tags.append(dimensions[dim_tag])
            # Getting too fancy for our own good:
            # If one XY dimension has more than one member then we must add both X and Y
            if len(XY_skipped) > 0 and len(XY_added) > 0:
                for dim_tag in XY_skipped:
                    pairs += " %s=%s" % (dim_tag, dimensions[dim_tag])
                    actual_group_tags.append(dimensions[dim_tag])
            if len(pairs) > 0:
                blob += "dimensions%s\n" % pairs
        # filterComposite
        filter_composite = vis_format.get("filterComposite")
        if filter_composite:
            filterfish = ""
            for filter_tag in sorted(filter_composite.keys()):
                group = vis_format["groups"].get(filter_composite[filter_tag])
                if group is None or len(group.get("groups", {})) <= 1:  # e.g. "Targets" may not exist
                    continue
                filterfish += " %s" % filter_tag
                if filter_composite[filter_tag] == "one":
                    filterfish += "=one"
            if len(filterfish) > 0:
                blob += 'filterComposite%s\n' % filterfish
        elif dimA_checked is not None:
            blob += 'dimensionAchecked %s\n' % dimA_checked
        blob += '\n'
        # Now cycle through views
        # NOTE(review): if vis_format lacks a "view" key, views is [] here and
        # views["group_order"] raises TypeError — presumably vis_formats always
        # carry views by this point; confirm.
        for view_tag in views["group_order"]:
            view = views["groups"][view_tag]
            tracks = view.get("tracks", [])
            if len(tracks) == 0:
                continue
            blob += " track %s_%s_view\n" % (vis_format["name"], view["tag"])
            blob += " parent %s on\n" % vis_format["name"]
            blob += " view %s\n" % view["tag"]
            for var in VIEW_SETTINGS:
                val = view.get(var)
                if val:
                    blob += " %s %s\n" % (var, val)
            blob += '\n'
            # Now cycle through tracks in view
            for track in tracks:
                blob += " track %s\n" % (track["name"])
                blob += " parent %s_%s_view" % (vis_format["name"], view["tag"])
                dimA_subgroup = track.get("membership", {}).get(dimA_tag)
                if dimA_subgroup is not None and dimA_subgroup != dimA_checked:
                    blob += " off\n"
                else:
                    # Can set individual tracks off. Used when remodelling
                    blob += " %s\n" % track.get("checked", "on")
                if "type" not in track:
                    blob += " type %s\n" % (view["type"])
                for var in TRACK_SETTINGS:
                    val = track.get(var)
                    if val:
                        blob += " %s %s\n" % (var, val)
                # Now membership
                membership = track.get("membership")
                if membership:
                    blob += " subGroups"
                    for member_tag in sorted(membership):
                        blob += " %s=%s" % (member_tag, membership[member_tag])
                    blob += '\n'
                # metadata line?
                metadata_pairs = track.get("metadata_pairs")
                if metadata_pairs is not None:
                    metadata_line = ""
                    for meta_tag in sorted(metadata_pairs.keys()):
                        metadata_line += ' %s=%s' % (meta_tag.lower(), metadata_pairs[meta_tag])
                    if len(metadata_line) > 0:
                        blob += " metadata%s\n" % metadata_line
                blob += '\n'
        blob += '\n'
        return blob
class IhecDefines(object):
# Defines and formatting code for IHEC JSON
    def __init__(self, request):
        '''Stashes the request and sets up per-instance caches used when building IHEC JSON.'''
        self._request = request
        self.samples = {}  # biosample accession -> ihec sample dict (memoized by sample())
        self.vis_defines = None  # lazily constructed VisDefines (see sample())
def molecule(self, dataset):
# ["total RNA", "polyA RNA", "cytoplasmic RNA", "nuclear RNA", "genomic DNA", "protein", "other"]
replicates = dataset.get("replicates", [])
if len(replicates) == 0:
return None
molecule = replicates[0].get("library", {}).get("nucleic_acid_term_name")
if not molecule:
return None
if molecule == "DNA":
return "genomic DNA"
if molecule == "RNA":
# TODO: Can/should we distinguish "cytoplasmic RNA" and "nuclear RNA"
#descr = dataset.get('assay_term_name', '').lower()
#if 'nuclear' in descr:
# return "nuclear RNA"
#if 'cyto' in descr:
# return "cytoplasmic RNA"
return "total RNA"
if molecule == "polyadenylated mRNA":
return "polyA RNA"
if molecule == "miRNA":
return "other" # TODO: should this be something else
if molecule == "protein":
return "protein"
return "genomic DNA" # default
def lineage(self, biosample, default=None):
# TODO: faking lineage
dev_slims = biosample.get("developmental_slims",[])
if len(dev_slims) > 0:
return ','.join(dev_slims)
return default
def differentiation(self, biosample, default=None):
# TODO: faking differentiation
diff_slims = biosample.get("organ_slims",[])
if len(diff_slims) > 0:
return '.'.join(diff_slims)
return default
    def exp_type(self, vis_type, dataset):
        '''Maps a vis_type plus dataset metadata onto a simple IHEC experiment type, or None.'''
        # For IHEC, a simple experiment type is needed:
        # TODO: EpiRR experiment type: ChIP-Seq Input, Histone H3K27ac, mRNA-Seq, total-RNA-Seq, Stranded Total RNA-Seq
        # /Ihec_metadata_specification.md: Chromatin Accessibility, Bisulfite-Seq, MeDIP-Seq, MRE-Seq, ChIP-Seq, mRNA-Seq, smRNA-Seq
        # DNA Methylation --> DNA Methylation
        # DNA accessibility --> Chromatin Accessibility
        # if assay_slims=Transcription, get assay_title
        #   polyA RNA-seq --> mRNA-Seq
        #   total RNA-seq --> total-RNA-Seq
        #   small RNA-seq --> smRNA-Seq
        #   microRNA-seq/transcription profiling by array assay/microRNA counts/ - I have to ask them
        # if assay_slims=DNA Binding, then get the target.label
        #   control --> ChIP-Seq Input
        #   if not control, then look at target.investigated_as to contain 'histone' or 'transcription factor'
        #   Then either 'Histone <target.label>' or 'Transcription Factor <target.label>' (example: 'Histone H3K4me3')
        if vis_type in ["ChIP", "GGRChIP", "HIST"]:
            # Controls have no visualizable files so we shouldn't see them, however...
            # Chip-seq Controls would have target.investigated_as=Control
            if dataset.get('control_type'):
                return "ChIP-Seq Input"
            target = dataset.get('target',{}).get('label','unknown')
            if vis_type == "HIST":
                return "Histone " + target
            if target == "unknown":
                return "ChIP-seq"
            # NOTE(review): the "ChIP-Seq Input:" prefix on a non-control TF ChIP looks
            # questionable given the comment above ('Transcription Factor <label>') —
            # confirm against the IHEC experiment_type vocabulary.
            return "ChIP-Seq Input: Transcription factor " + target
        if vis_type == "DNASE":
            return "Chromatin Accessibility"
        if vis_type == "ATAC":
            return "Chromatin Accessibility"  # TODO Confirm
        if vis_type == "WGBS":
            return "DNA Methylation"
        # IHEC only allow smRNA for microRNA-seq which is different from our
        # get_vis_type logic
        if 'assay_term_name' not in dataset:
            return None
        assay = dataset['assay_term_name']
        if assay == 'microRNA-seq':
            return 'smRNA-Seq'
        if assay == 'polyA plus RNA-seq':
            return 'mRNA-Seq'
        if assay == 'RNA-seq':
            assay_title = dataset.get('assay_title')
            if assay_title == 'total RNA-seq':
                return 'total-RNA-Seq'
            return 'RNA-Seq'
        #if vis_type == "ChIA":
        #    return "ChIA-pet"
        #if vis_type == "HiC":
        #    return "Hi-C"
        #if vis_type == "TSS":
        #    return "Rampage"
        #if vis_type == "eCLIP":
        #    return "e-CLIP"
        #if vis_type == "ANNO":
        #    return "Annotation"
        return None  # vis_dataset.get('assay_term_name','Unknown')
    def experiment_attributes(self, vis_dataset):
        '''Builds the IHEC experiment_attributes dict for a vis_dataset; {} when not possible.'''
        assay_id = vis_dataset.get('assay_term_id')
        if not assay_id:
            return {}
        attributes = {}
        experiment_type = vis_dataset.get('ihec_exp_type')
        if experiment_type is None:
            return {}
        attributes["experiment_type"] = experiment_type
        # OBO PURLs use the underscore form of the term id (e.g. "OBI:0000716" -> "OBI_0000716")
        attributes["experiment_ontology_uri"] = 'http://purl.obolibrary.org/obo/' + assay_id.replace(':','_')
        assay_name = vis_dataset.get('assay_term_name')
        if assay_name:
            attributes["assay_type"] = assay_name
            # NOTE(review): raises KeyError if assay_name is missing from
            # IHEC_LIB_STRATEGY — confirm the mapping covers all visualized assays.
            attributes['library_strategy'] = IHEC_LIB_STRATEGY[assay_name]
        # Find released reference epigenome(s) containing this dataset to recover
        # the IHEC registry id from their dbxrefs.
        query = (
            '/search/?type=ReferenceEpigenome&related_datasets.accession={}'
            '&status=released&field=dbxrefs&limit=all'
        ).format(vis_dataset['name'])
        for ref_epi in self._request.embed(query)['@graph']:
            for dbxref in ref_epi.get('dbxrefs', []):
                if dbxref.startswith('IHEC:IHECRE'):
                    # "IHEC:IHECRE00000001.3" -> "IHECRE00000001"
                    attributes['reference_registry_id'] = dbxref[5:].split('.')[0]
                    # NOTE(review): break exits only the inner loop, so a later
                    # reference epigenome can overwrite this id — confirm intended.
                    break
        return attributes
    def analysis_attributes(self, vis_dataset):
        '''Builds IHEC analysis_attributes for a vis_dataset, with ENCODE-wide fallbacks.'''
        # find/create analysis_attributes:
        # WARNING: This could go crazy!
        # NOTE: single pipeline version AND single aligner only allowed for the whole exp!
        # NOTE: Ugly defaults
        attributes = { "analysis_software": 'ENCODE',
                       "analysis_group": 'ENCODE DCC',
                       "analysis_software_version": '1',
                       "alignment_software": 'unknown',
                       "alignment_software_version": '1'
                       }
        # Only dig pipeline/aligner details out of the vis_dataset when the
        # module-level IHEC_DEEP_DIG flag is set.
        if IHEC_DEEP_DIG:
            pipeline = vis_dataset.get('pipeline')
            if pipeline and 'title' in pipeline:
                attributes["analysis_software"] = pipeline['title']
                attributes["analysis_group"] = pipeline.get('lab')
                attributes["analysis_software_version"] = pipeline.get('version')
            aligner = vis_dataset.get('aligner')
            if aligner:
                attributes["alignment_software"] = aligner.get('name')
                attributes["alignment_software_version"] = aligner.get('version')
        return attributes
def biomaterial_type(self, biosample_type):
# For IHEC, biomaterial type: "Cell Line", "Primary Cell" "Primary Cell Culture" "Primary Tissue"
if biosample_type:
biosample_type = biosample_type.lower()
if biosample_type in ["tissue", "whole organism"]: # "whole organism" (but really they should add another enum) - hitz
return "Primary Tissue"
if biosample_type in ["primary cell"]:
return "Primary Cell Culture"
return "Cell Line"
def sample(self, dataset, vis_defines=None):
    '''Return (and cache) the IHEC "sample" dict for the dataset's biosample.

    Looks up the biosample via replicates.library.biosample; returns {}
    when no biosample or molecule can be determined.  Results are cached
    in self.samples keyed by biosample accession.
    '''
    if vis_defines is None:
        if self.vis_defines is None:
            self.vis_defines = VisDefines(self._request)
        vis_defines = self.vis_defines
    sample = {}
    biosample = vis_defines.lookup_embedded_token('replicates.library.biosample', dataset)
    if biosample is None:
        return {}
    sample_id = biosample['accession']
    if sample_id in self.samples:
        return self.samples[sample_id]
    molecule = self.molecule(dataset)
    if molecule is None:
        return {}
    sample['molecule'] = molecule
    sample['lineage'] = self.lineage(biosample, 'unknown')
    sample['differentiation_stage'] = self.differentiation(biosample, 'unknown')
    term_id = biosample.get('biosample_ontology', {}).get('term_id')
    if term_id:
        sample["sample_ontology_uri"] = term_id
    # ["Cell Line", "Primary Cell Culture", "Primary Tissue"]
    sample["biomaterial_type"] = self.biomaterial_type(biosample.get('biosample_ontology', {}).get('classification'))
    sample["line"] = biosample.get('biosample_ontology', {}).get('term_name', 'none')
    sample["medium"] = "unknown"  # We don't have this
    sample["disease"] = biosample.get('health_status', "Healthy").capitalize()  # assume all samples are healthy - hitz
    if sample["disease"] == "Healthy":
        sample["disease_ontology_uri"] = "http://ncit.nci.nih.gov/ncitbrowser/ConceptReport.jsp?dictionary=NCI_Thesaurus&code=C115935&ns=NCI_Thesaurus"
    else:
        # Note only term for disease ontology is healthy=C115935. No search url syntax known
        sample["disease_ontology_uri"] = "https://ncit.nci.nih.gov/ncitbrowser/pages/multiple_search.jsf?nav_type=terminologies"
    sample["sex"] = biosample.get('sex', 'unknown').capitalize()
    if sample["biomaterial_type"] in ["Primary Tissue", "Primary Cell Culture"]:
        # Donor-level attributes are only required for primary material.
        sample["donor_sex"] = sample["sex"]
        donor = biosample.get('donor')
        if donor is not None:
            sample["donor_id"] = donor['accession']
            if donor.get('age', 'NA').isdigit():
                sample["donor_age"] = int(donor['age'])
            elif donor.get('age', 'NA') == 'unknown':
                sample["donor_age"] = 'NA'
            else:
                sample["donor_age"] = donor.get('age', 'NA')
            sample["donor_age_unit"] = donor.get('age_units', 'year')  # unknown is not supported
            sample["donor_life_stage"] = donor.get('life_stage', 'unknown')
            sample["donor_health_status"] = sample["disease"]
            if donor.get('organism', {}).get('name', 'unknown') == 'human':
                sample["donor_ethnicity"] = donor.get('ethnicity', 'unknown')
            else:
                sample["donor_ethnicity"] = 'NA'
        if sample["biomaterial_type"] == "Primary Tissue":
            sample["tissue_type"] = sample["line"]
            sample["tissue_depot"] = biosample.get('source', {}).get('description', 'unknown')
        elif sample["biomaterial_type"] == "Primary Cell Culture":
            sample["cell_type"] = sample["line"]
            # FIX: was the misspelled literal "unknwon".
            # applied_modifications=[], treatments=[], genetic_modifications=[], characterizations=[]
            sample["culture_conditions"] = "unknown"
    self.samples[sample_id] = sample
    return sample
def view_type(self, view, track):
    '''Map a track/view pair onto an IHEC track view type.

    Peak-style formats (bigBed/bigNarrowPeak) become 'peak_calls';
    otherwise the view title decides between the stranded and
    unstranded signal types.  See
    https://github.com/IHEC/ihec-ecosystems/blob/master/docs/minimum_required_track_types.md
    '''
    base_format = track.get('type', '').split()[0]
    if base_format in ('bigBed', 'bigNarrowPeak'):
        return 'peak_calls'
    title = view.get('title').lower()
    for marker, vtype in (('minus signal', 'signal_reverse'),
                          ('plus signal', 'signal_forward')):
        if marker in title:
            return vtype
    return 'signal_unstranded'
def remodel_to_json(self, host_url, vis_datasets):
    '''Formats this collection of vis_datasets into IHEC hub json structure.

    Returns {'hub_description': ..., 'datasets': ..., 'samples': ...} or {}
    when there is nothing to format.  ChIP-seq experiments become one
    dataset each; non-ChIP experiments are split into one dataset per
    replicate.
    '''
    if not vis_datasets:
        return {}
    # ihec_json = {
    #     'hub_description': { ... }, similar to hub.txt/genome.txt
    #     'datasets': { ... }, one per experiment for ChIP
    #                          and one per replicate for non-ChIP
    #     'samples': { ... } one per biosample
    # }
    self.samples = {}
    # {
    #     'sample_id_1': {  # one per biosample
    #         'sample_ontology_uri': '...',  # UBERON or CL
    #         'molecule': '...',
    #         # [
    #         #     'total RNA',
    #         #     'polyA RNA',
    #         #     'cytoplasmic RNA',
    #         #     'nuclear RNA',
    #         #     'genomic DNA',
    #         #     'protein',
    #         #     'other'
    #         # ]
    #         'disease': '...',  # TODO
    #         'disease_ontology_uri': '...',  # TODO
    #         'biomaterial_type': '...',
    #         # [
    #         #     'Cell Line',
    #         #     'Primary Cell',
    #         #     'Primary Cell Culture',
    #         #     'Primary Tissue'
    #         # ]
    #         'line': '...',  # biosample_term_name
    #         'lineage': '?',
    #         'differentiation_stage': '?',
    #         'medium': '?',
    #         'sex': '...',  # experiment.replicate.library.biosample.sex
    #     },
    #     'sample_id_2': {...}
    # }
    datasets = {}
    # {
    #     'experiment_1': {
    #         # one per experiment for ChIP and one per replicate for non-ChIP
    #         'sample_id': '...',  # biosample accession
    #         'experiment_attributes': {
    #             'experiment_type': '...',
    #             'assay_type': '...',  # assay_term_name (e.g. 'DNA Methylation')
    #             'experiment_ontology_uri': '...',  # assay_term_id (e.g. OBI:0000716)
    #             'reference_registry_id': '...',  # EpiRR ID
    #         },
    #         'analysis_attributes': {
    #             'analysis_software': '...',  # pipeline
    #             'analysis_software_version': '...',  # pipeline version
    #             'analysis_group': '...',  # pipeline laboratory
    #             'alignment_software': '...',  # software ugly lookup; one per experiment
    #             'alignment_software_version': '...',  # software_version
    #         },
    #         'browser': {
    #             'signal_forward': [  # view
    #                 {
    #                     'big_data_url': '...',
    #                     'description_url': '...',
    #                     'md5sum': '...',
    #                     'subtype': '...',
    #                     'sample_source': '...',
    #                     'primary': '?',
    #                 },
    #                 {...}
    #             ],
    #             'signal_reverse': [{...}],
    #         }
    #     },
    #     'experiment_2': {...},
    # }
    # TODO: If properties aren't found then warn and skip dataset!
    assembly = ''
    taxon_id = 0
    included_accessions = []
    for accession in vis_datasets.keys():
        vis_dataset = vis_datasets[accession]
        if vis_dataset is None or len(vis_dataset) == 0:
            continue
        # From any vis_dataset, update these:
        assembly = vis_dataset.get('ucsc_assembly') or assembly
        taxon_id = vis_dataset.get('taxon_id') or taxon_id
        dataset = {}
        analysis_attributes = self.analysis_attributes(vis_dataset)
        if analysis_attributes:
            dataset['analysis_attributes'] = analysis_attributes
        else:
            log.warn('Could not determine IHEC analysis attributes for %s', accession)
        # Check if experiment is IHEC-able first
        experiment_attributes = self.experiment_attributes(vis_dataset)
        if experiment_attributes:
            dataset['experiment_attributes'] = experiment_attributes
        else:
            log.warn('Could not determine IHEC experiment attributes for %s', accession)
        # Find/create sample:
        biosample_accession = vis_dataset.get('biosample_accession')
        if biosample_accession is None:
            log.warn('vis_dataset %s is missing biosample', accession)
        else:
            dataset['sample_id'] = biosample_accession
            if biosample_accession not in self.samples:
                sample = vis_dataset.get('ihec_sample', {})
                if not sample:
                    log.warn('vis_dataset %s is missing sample', accession)
                else:
                    self.samples[biosample_accession] = sample
        # create browser, which holds tracks:
        browser = {}
        # FIX: the default used to be [], which crashed with TypeError the
        # moment it was indexed with the string key below; {} plus
        # .get('group_order', []) makes a missing 'view' a clean no-op.
        views = vis_dataset.get('view', {})
        for view_tag in views.get('group_order', []):
            view = views['groups'][view_tag]
            # Add tracks to views
            tracks = view.get('tracks', [])
            if len(tracks) == 0:
                continue
            for track in tracks:
                ihec_track = {
                    'big_data_url': track['bigDataUrl'],
                    'description_url': '{}/experiments/{}/'.format(
                        host_url, accession
                    ),
                    # "primary" is required;
                    # default to False first and worry about it later
                    'primary': False,
                    'subtype': track['longLabel'],
                }
                md5sum = track.get('md5sum')
                if md5sum:
                    ihec_track['md5sum'] = md5sum
                # TODO: clean up the following logic
                # rep_membership = track.get('membership', {}).get('REP')
                # rep_group = vis_dataset.get('groups', {}).get('REP')
                # if rep_membership and rep_group:
                #     if rep_membership in rep_group:
                #         ihec_track['sample_source'] = rep_group[rep_membership]['title']
                #         subgroup_order = sorted(rep_group['groups'].keys())
                #         ihec_track['primary'] = (rep_membership == subgroup_order[0])
                # extra fields
                for term in ['type', 'color', 'altColor']:
                    if term in track:
                        ihec_track[term] = track[term]
                ihec_track['view'] = self.view_type(view, track)
                metadata_pairs = track.get('metadata_pairs', {})
                for meta_key in metadata_pairs:
                    # NOTE(review): replace(' ', ' ') is a no-op as written;
                    # possibly mangled from an escaped-space entity — confirm.
                    ihec_track[meta_key.replace(' ', ' ')] = metadata_pairs[meta_key][1:-1]
                # Add IHEC tracks:
                # For ChIP-seq experiments, label the first track as
                # primary for each track type.
                # For non-ChIP-seq experiments, split experiments as one
                # dataset per replicate.
                if vis_dataset.get('assay_term_name', '') == 'ChIP-seq':
                    if ihec_track['view'] not in browser.keys():
                        browser[ihec_track['view']] = []
                    browser[ihec_track['view']].append(ihec_track)
                else:
                    # FIX: the fallback lookup used the same key twice;
                    # the plural 'replicates (bio_tech)' fallback matches
                    # the parallel lookups in the ChIP-seq branch below.
                    rep = (
                        ihec_track.get('replicate (bio_tech)')
                        or ihec_track.get('replicates (bio_tech)', '')
                    )
                    experiment_key = '{}_rep{}'.format(accession, rep)
                    if experiment_key not in datasets:
                        datasets[experiment_key] = deepcopy(dataset)
                        datasets[experiment_key]['browser'] = {}
                    if ihec_track['view'] not in datasets[experiment_key][
                            'browser'
                    ]:
                        # Tracks are sorted based on "group_order" in
                        # vis_defs. So the first track for a certain track
                        # type should be primary
                        ihec_track['primary'] = True
                        datasets[experiment_key]['browser'][
                            ihec_track['view']
                        ] = [ihec_track]
                    else:
                        datasets[experiment_key]['browser'][
                            ihec_track['view']
                        ].append(ihec_track)
        # Add ChIP-seq tracks and assign one primary track
        if vis_dataset.get('assay_term_name', '') == 'ChIP-seq':
            # For experiment like ENCSR000ALI, there are no peak_calls but
            # only signals according to vis_defs. In another word, there
            # is no peak to guide selecting primary track. Thus simply
            # select the first track.
            primary_rep_val = ''
            if 'peak_calls' in browser:
                browser['peak_calls'][0]['primary'] = True
                primary_rep_val = (
                    browser['peak_calls'][0].get('replicate (bio_tech)')
                    or browser['peak_calls'][0].get('replicates (bio_tech)')
                    or ''
                )
            for track_type in browser:
                for track in browser[track_type]:
                    track_rep_val = (
                        track.get('replicate (bio_tech)')
                        or track.get('replicates (bio_tech)', '')
                    )
                    if (
                            not primary_rep_val
                            or track_rep_val == primary_rep_val
                    ):
                        track['primary'] = True
                        break
            dataset['browser'] = browser
            datasets[accession] = dataset
        included_accessions.append(accession)
    hub_description = {  # similar to hub.txt/genome.txt
        'publishing_group': 'ENCODE',
        'name': 'ENCODE reference epigenomes',
        'description': 'ENCODE reference epigenomes',
        'description_url': '{}/search/?type=ReferenceEpigenome'.format(
            host_url
        ),
        'email': 'encode-help@lists.stanford.edu',
        'date': time.strftime('%Y-%m-%d', time.gmtime()),
        # 'taxon_id': ...,  # Species taxonomy id. (human: 9606, mouse: 10090)
        # 'assembly': '...',  # UCSC: hg19, hg38
    }
    if assembly:
        hub_description['assembly'] = assembly
    if taxon_id:
        hub_description['taxon_id'] = int(taxon_id)
    # Find corresponding reference epigenome
    query = (
        '/search/?type=ReferenceEpigenome'
        '&{}&status=released&field=accession&limit=all'
    ).format(
        '&'.join(
            'related_datasets.accession={}'.format(acc)
            for acc in included_accessions
        )
    )
    # NOTE(review): ref_epi_accs is computed but never used below —
    # possibly leftover from an earlier filtering step; confirm.
    ref_epi_accs = [
        ref_epi['accession']
        for ref_epi in self._request.embed(query)['@graph']
    ]
    return {
        'hub_description': hub_description,
        'datasets': datasets,
        'samples': self.samples,
    }
# TODO: move to separate vis_cache module?
class VisCache(object):
    '''Stores and recalls vis_dataset formatted json to/from the es vis_cache.'''

    def __init__(self, request):
        self.request = request
        # May be None when the registry has no elasticsearch client configured.
        self.es = self.request.registry.get(ELASTIC_SEARCH, None)
        self.index = VIS_CACHE_INDEX

    def create_cache(self):
        '''Create the vis cache index (single shard, unmapped docs) if absent.'''
        if not self.es:
            return None
        if not self.es.indices.exists(self.index):
            one_shard = {'index': {'number_of_shards': 1, 'max_result_window': 99999}}
            # Documents are stored opaque: nothing is indexed for search.
            mapping = {'default': {"enabled": False}}
            self.es.indices.create(index=self.index, body=one_shard, wait_for_active_shards=1)
            self.es.indices.put_mapping(index=self.index, doc_type='default', body=mapping)
            log.debug("created %s index" % self.index)

    def add(self, vis_id, vis_dataset):
        '''Adds a vis_dataset (aka vis_blob) json object to elastic-search'''
        if not self.es:
            return
        if not self.es.indices.exists(self.index):
            self.create_cache()  # Only bother creating on add
        self.es.index(index=self.index, doc_type='default', body=vis_dataset, id=vis_id)

    def get(self, vis_id=None, accession=None, assembly=None):
        '''Returns the vis_dataset json object from elastic-search, or None if not found.'''
        if vis_id is None and accession is not None and assembly is not None:
            vis_id = accession + '_' + ASSEMBLY_TO_UCSC_ID.get(assembly, assembly)
        if self.es:
            try:
                result = self.es.get(index=self.index, doc_type='default', id=vis_id)
                return result['_source']
            # FIX: was a bare ``except:`` which also swallowed SystemExit/
            # KeyboardInterrupt; a missing index/doc still returns None.
            except Exception:
                pass
        return None

    def search(self, accessions, assembly):
        '''Returns a dict of composites keyed by vis_id, or {} if none found.'''
        if self.es:
            ucsc_assembly = ASSEMBLY_TO_UCSC_ID.get(assembly, assembly)  # Normalized accession
            vis_ids = [accession + "_" + ucsc_assembly for accession in accessions]
            try:
                query = {"query": {"ids": {"values": vis_ids}}}
                res = self.es.search(body=query, index=self.index, doc_type='default', size=99999)  # size=200?
                hits = res.get("hits", {}).get("hits", [])
                results = {}
                for hit in hits:
                    results[hit["_id"]] = hit["_source"]  # make this a generator? No... len(results)
                log.debug("ids found: %d" % (len(results)))
                return results
            # FIX: was a bare ``except:``; any ES failure still yields {}.
            except Exception:
                pass
        return {}
# Not referenced in any other module
def visualizable_assemblies(
        assemblies,
        files,
        visible_statuses=VISIBLE_FILE_STATUSES
):
    '''Return only those assemblies that have at least one visualizable file.'''
    wanted = set(assemblies)  # sets for cheap comparison below
    found = set()
    for candidate in files:
        candidate_assembly = candidate.get('assembly')
        # Skip files with no assembly, and assemblies already confirmed.
        if candidate_assembly is None or candidate_assembly in found:
            continue
        if (candidate['status'] in visible_statuses and
                candidate.get('file_format', '') in VISIBLE_FILE_FORMATS):
            found.add(candidate_assembly)
            if found == wanted:
                break  # Try not to go through the whole file list!
    return list(found)
def is_file_visualizable(file):
    '''Determine whether a file can be visualized in a genome browser.

    A file qualifies when it is a bigWig/bigBed, is not one of the
    non-displayable bed subtypes, and has a visible status.
    Needs to be kept in sync with isFileVisualizable in objectutils.js.

    Keyword arguments:
    file -- file object including props to test for visualizability
    '''
    visual_formats = ('bigWig', 'bigBed')
    excluded_format_types = (
        'bedMethyl',
        'bedLogR',
        'idr_peak',
        'tss_peak',
        'pepMap',
        'modPepMap',
    )
    visible_statuses = ('released', 'in progress', 'archived')
    return (
        file.get('file_format') in visual_formats
        and file.get('file_format_type') not in excluded_format_types
        and file.get('status') in visible_statuses
    )
def _file_to_format(process_file):
'''Used with map to convert list of files to their types'''
return process_file['file_format']
# Currently called in types/shared_calculated_properties.py
def browsers_available(
        status,
        assemblies,
        types,
        item_type=None,
        files=None,
        accession=None,
        request=None
):
    '''Returns list of browsers based upon vis_blobs or else files list.

    Checks each assembly against its ASSEMBLY_DETAILS entry and the set of
    file formats each browser can display (BROWSER_FILE_TYPES), consulting
    the elasticsearch vis cache when a request/accession is supplied.
    '''
    # NOTES:When called by visualize calculated property,
    # vis_blob should be in vis_cache, but if not files are used.
    # When called by visindexer, neither vis_cache nor files are
    # used (could be called 'browsers_might_work').
    if "Dataset" not in types:
        return []
    if item_type is None:
        visualizable_types = set(VISIBLE_DATASET_TYPES)
        if visualizable_types.isdisjoint(types):
            return []
    elif item_type not in VISIBLE_DATASET_TYPES_LC:
        return []
    browsers = set()
    full_set = {'ucsc', 'ensembl', 'hic'}
    file_assemblies = None  # computed lazily, at most once
    file_types = None
    if request is not None:
        vis_cache = VisCache(request)
    if files is not None:
        # Make a set of all file types in all dataset files
        file_types = set(map(_file_to_format, files))
    for assembly in assemblies:
        mapped_assembly = ASSEMBLY_DETAILS.get(assembly)
        if not mapped_assembly:
            continue
        vis_blob = None
        if (request is not None
                and accession is not None
                and status in VISIBLE_FILE_STATUSES):
            # use of find_or_make_acc_composite() will recurse!
            vis_blob = vis_cache.get(accession=accession, assembly=assembly)
        if not vis_blob and file_assemblies is None and files is not None:
            file_assemblies = visualizable_assemblies(assemblies, files)
        # NOTE(review): when ``files`` is None, file_types stays None and every
        # iteration continues here, so a vis_blob hit alone never adds a
        # browser — confirm this is the intended visindexer behaviour.
        if file_types is None:
            continue
        if ('ucsc' not in browsers
                and 'ucsc_assembly' in mapped_assembly.keys()
                and not BROWSER_FILE_TYPES['ucsc'].isdisjoint(file_types)):
            if vis_blob or files is None or assembly in file_assemblies:
                browsers.add('UCSC')
        if ('ensembl' not in browsers
                and 'ensembl_host' in mapped_assembly.keys()
                and not BROWSER_FILE_TYPES['ensembl'].isdisjoint(file_types)):
            if vis_blob or files is None or assembly in file_assemblies:
                browsers.add('Ensembl')
        if ('hic' not in browsers
                and 'hic' in mapped_assembly.keys()
                and not BROWSER_FILE_TYPES['hic'].isdisjoint(file_types)):
            # hic has no vis_blob path: it strictly requires file evidence.
            if file_assemblies is not None and assembly in file_assemblies:
                browsers.add('hic')
        if browsers == full_set:  # No use continuing
            break
    return list(browsers)
# Currently called in visualization.py and in search.py
def object_is_visualizable(
        obj,
        assembly=None,
        check_files=False,
        exclude_quickview=False
):
    '''Returns true if it is likely that this object can be visualized.'''
    if 'accession' not in obj:
        return False
    # A caller-supplied assembly wins over the object's own list.
    assemblies = [assembly] if assembly is not None else obj.get('assembly', [])
    # Returning [] instead of None is important downstream.
    files = obj.get('files', []) if check_files else None
    browsers = browsers_available(
        obj.get('status', 'none'),
        assemblies,
        obj.get('@type', []),
        files=files,
    )
    # With quickview excluded, a second (real) browser must be available.
    minimum = 2 if exclude_quickview and 'quickview' in browsers else 1
    return len(browsers) >= minimum
# Currently called in search.py
def vis_format_url(browser, path, assembly, position=None):
    '''Given a url to hub.txt, returns the url to an external browser or None.

    browser -- 'ucsc', 'ensembl' or 'quickview'; anything else yields None.
    position -- optional region string appended in the browser's syntax.
    '''
    # FIX: was ASSEMBLY_DETAILS[assembly], which raised KeyError for an
    # unknown assembly before the guard below could ever return None
    # (cf. the .get() lookup used in browsers_available).
    mapped_assembly = ASSEMBLY_DETAILS.get(assembly)
    if not mapped_assembly:
        return None
    if browser == "ucsc":
        ucsc_assembly = mapped_assembly.get('ucsc_assembly')
        if ucsc_assembly is not None:
            external_url = 'http://genome.ucsc.edu/cgi-bin/hgTracks?hubClear='
            external_url += path + '&db=' + ucsc_assembly
            if position is not None:
                external_url += '&position={}'.format(position)
            return external_url
    elif browser == "ensembl":
        ensembl_host = mapped_assembly.get('ensembl_host')
        if ensembl_host is not None:
            external_url = 'http://' + ensembl_host + '/Trackhub?url='
            external_url += path + ';species=' + mapped_assembly.get('species').replace(' ', '_')
            ### TODO: remove redirect=no when Ensembl fixes their mirrors
            #external_url += ';redirect=no'
            ### TODO: remove redirect=no when Ensembl fixes their mirrors
            if position is not None:
                if position.startswith('chr'):
                    position = position[3:]  # ensembl position r=19:7069444-7087968
                external_url += '&r={}'.format(position)
            # GRCh38: http://www.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR596NOF/@@hub/hub.txt
            # GRCh38: http://www.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR596NOF/@@hub/hub.txt;species=Homo_sapiens
            # hg19/GRCh37: http://grch37.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR596NOF/@@hub/hub.txt;species=Homo_sapiens
            # mm10/GRCm38: http://www.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR475TDY@@hub/hub.txt;species=Mus_musculus
            # mm9/NCBIM37: http://may2012.archive.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR000CNV@@hub/hub.txt;species=Mus_musculus
            # BDGP6: http://www.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR040UNE@@hub/hub.txt;species=Drosophila_melanogaster
            # BDGP5: http://dec2014.archive.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR040UNE@@hub/hub.txt;species=Drosophila_melanogaster
            # ce11/WBcel235: http://www.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR475TDY@@hub/hub.txt;species=Caenorhabditis_elegans
            return external_url
    elif browser == "quickview":
        file_formats = '&file_format=bigBed&file_format=bigWig'
        file_inclusions = '&status=released&status=in+progress'
        return ('/search/?type=File&assembly=%s&dataset=%s%s%s#browser' % (assembly, path, file_formats, file_inclusions))
    #else:
    #    ERROR: not supported at this time
    return None
| 47.353137 | 170 | 0.539099 |
7957cd7cfef0c97e81e31844b6ae37001b015c9b | 3,379 | py | Python | reddit_pocket_sync.py | vihanggodbole/reddit-pocket-sync | 773eefc9fc652af46764f09a54958623df9034e5 | [
"MIT"
] | 43 | 2017-02-08T14:05:11.000Z | 2022-03-01T02:10:37.000Z | reddit_pocket_sync.py | vihanggodbole/reddit-pocket-sync | 773eefc9fc652af46764f09a54958623df9034e5 | [
"MIT"
] | 1 | 2018-03-28T05:02:07.000Z | 2018-03-28T17:38:24.000Z | reddit_pocket_sync.py | vihanggodbole/reddit-pocket-sync | 773eefc9fc652af46764f09a54958623df9034e5 | [
"MIT"
] | 4 | 2017-02-09T15:00:20.000Z | 2018-02-11T03:09:55.000Z | import praw
import prawcore
import argparse
import sys
from PocketUser import PocketUser
consumer_key = ''
def parse_args():
    """Parse command-line options; return the Pocket consumer key or None."""
    parser = argparse.ArgumentParser(
        description='Sync your reddit account with Pocket.')
    parser.add_argument(
        '--consumer', type=str, help='Consumer key for your Pocket app')
    options = parser.parse_args()
    # An absent (or empty) --consumer flag is reported as None.
    return options.consumer if options.consumer else None
def reddit_login():
    """Log in via OAuth 2.0 (praw.ini site 'mysettings') and return the Redditor.

    Prompts for the username on stdin; returns None when the stored
    credentials are rejected by reddit.
    """
    username = input('Username: ')
    user_agent = 'reddit_saved_posts_search: v1.0 (for /u/{})'.format(username)
    reddit = praw.Reddit('mysettings', user_agent=user_agent)
    try:
        return reddit.user.me()
    except prawcore.exceptions.Forbidden:
        print('\nIt seems your credentials are invalid. Please check whether your praw.ini file is properly setup.')
        return None
def get_consumer():
    '''Load the Pocket consumer key from the local ``pocket_config`` file.

    On success the module-level ``consumer_key`` global is updated and True
    is returned; False means no config file has been saved yet.
    '''
    global consumer_key
    try:
        with open('pocket_config', 'r') as config_file:
            consumer_key = config_file.read()
    except FileNotFoundError:
        return False
    return True
def main():
    """Drive the sync: read reddit saved items and push them into Pocket."""
    redditor = reddit_login()
    if redditor is None:
        print('\nStopping script...')
        return  # exit the script if unable to log in to reddit
    print('Welcome /u/{}. I will help you backup your saved posts on reddit to pocket :)'.format(redditor))
    saved = redditor.saved(limit=None)
    pocket_user = PocketUser(consumer_key)
    print('\nLogging into pocket...')
    pocket_user.login()
    # Single pass over the (lazy) listing, splitting posts from comments.
    saved_posts = []
    saved_comments = []
    for item in saved:
        if isinstance(item, praw.models.Submission):
            saved_posts.append(item)
        elif isinstance(item, praw.models.Comment):
            saved_comments.append(item)
    post_batch = [
        {'action': 'add', 'url': post.url, 'tags': 'reddit self post', 'item_id': None}
        for post in saved_posts
    ]
    print('Done creating a list of self posts...')
    pocket_user.batch_add(post_batch)
    print("Done syncing self posts with tag 'reddit self post'.")
    # A comment permalink is its submission URL plus the comment id.
    comment_batch = [
        {'action': 'add', 'url': comment.link_url + comment.id, 'tags': 'reddit comment', 'item_id': None}
        for comment in saved_comments
    ]
    print('Done creating a list of comments...')
    pocket_user.batch_add(comment_batch)
    print("Done syncing comments with tag 'reddit comment'.")
    print('Successfully synced all your saved posts to your pocket account!')
if __name__ == '__main__':
    # Prefer a consumer key previously saved to the local ``pocket_config``.
    consumer_present = get_consumer()
    if not consumer_present:
        # First run: the key must come from the --consumer CLI flag.
        consumer_key = parse_args()
        if not consumer_key:
            print('Could not find a consumer key locally. Please provide one if this is your first run.')
            sys.exit()  # exit if a key isn't provided on the first run.
        else:
            # Optionally persist the key so future runs need no flag.
            choice = input('This seems like your first run. Would you like to save your consumer key for future use? [Y/N]')
            if choice == 'Y' or choice == 'y':
                with open('pocket_config', 'w') as f:
                    f.write(consumer_key)
    main()
| 33.79 | 124 | 0.645753 |
7957cddeb86d26c034dd25acbf7ccd0a14590c29 | 175 | py | Python | models/state.py | Hezbon12/AirBnB_clone | c6a8b52a2d1279baef09245dcafd16ae1c87260c | [
"MIT"
] | null | null | null | models/state.py | Hezbon12/AirBnB_clone | c6a8b52a2d1279baef09245dcafd16ae1c87260c | [
"MIT"
] | null | null | null | models/state.py | Hezbon12/AirBnB_clone | c6a8b52a2d1279baef09245dcafd16ae1c87260c | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from models import *
class State(BaseModel):
name = ""
def __init__(self, *args, **kwargs):
super(State, self).__init__(*args, **kwargs)
| 17.5 | 52 | 0.622857 |
7957cf1d5942a505c6b0057854adbc84b207ebca | 25,532 | py | Python | ocio/museos/views.py | AntonioMelgar/X-Serv-Practica-Museos | 6423c2d17173035efeaa493b10a69be279c774ad | [
"Apache-2.0"
] | null | null | null | ocio/museos/views.py | AntonioMelgar/X-Serv-Practica-Museos | 6423c2d17173035efeaa493b10a69be279c774ad | [
"Apache-2.0"
] | null | null | null | ocio/museos/views.py | AntonioMelgar/X-Serv-Practica-Museos | 6423c2d17173035efeaa493b10a69be279c774ad | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from .models import Museo, Usuario, Comentario, Pagina_Personal
from lxml import etree
from django.template.loader import get_template
from django.template import Context
# Create your views here.
def extraer_elemento(dic, elemento):
    """Return ``dic[elemento]``, or the empty string when the key is absent.

    Used while parsing the museums XML so a missing attribute becomes ''
    instead of raising KeyError.
    """
    # dict.get replaces the original try/except KeyError with one lookup.
    return dic.get(elemento, "")
def guardar_datos(dic):
    '''Build a Museo row from the parsed attribute dict and persist it.

    Missing attributes arrive as '' via extraer_elemento; every new museum
    starts with zero comments.
    '''
    campos = ['ID_ENTIDAD', 'NOMBRE', 'DESCRIPCION_ENTIDAD', 'HORARIO',
              'TRANSPORTE', 'ACCESIBILIDAD', 'CONTENT_URL', 'NOMBRE_VIA',
              'CLASE_VIAL', 'TIPO_NUM', 'NUM', 'LOCALIDAD', 'CODIGO_POSTAL',
              'PLANTA', 'BARRIO', 'DISTRITO', 'COORDENADA_X', 'COORDENADA_Y',
              'LATITUD', 'LONGITUD', 'TELEFONO', 'FAX', 'EMAIL', 'EQUIPAMIENTO']
    valores = {campo: extraer_elemento(dic, campo) for campo in campos}
    museo = Museo(NUMERO_COMENTARIOS=0, **valores)
    museo.save()
    return None
def parsear(doc):
    """Walk the Madrid open-data XML tree and persist one Museo per entry.

    Each <contenido> holds an attribute list; grouped attributes
    (LOCALIZACION, DATOSCONTACTOS) are flattened into the same dict as the
    plain ones.  Hyphens in attribute names become underscores so they can
    be used as model field names.
    """
    contenidos = doc.getroot()
    # NOTE(review): starts at index 1 and the inner loop stops one short of
    # the end — presumably skipping header/trailer nodes of the feed;
    # confirm against the XML layout.
    for k in range(1, len(contenidos)):
        contenido = contenidos[k]
        atributos = contenido[1]
        dic = {}
        for i in range(0, len(atributos) - 1):
            atributo = atributos[i]
            nombre = atributo.attrib.get("nombre")
            # FIX: the LOCALIZACION and DATOSCONTACTOS branches were
            # byte-identical duplicates; merged into one.  replace() is a
            # no-op when there is no hyphen, so the find() guard is gone.
            if nombre in ("LOCALIZACION", "DATOSCONTACTOS"):
                # Grouped attribute: flatten its children into the dict.
                for hijo in atributo:
                    nombre_hijo = hijo.attrib.get("nombre").replace("-", "_")
                    dic[nombre_hijo] = hijo.text
            else:
                dic[nombre.replace("-", "_")] = atributo.text
        guardar_datos(dic)
    return None
def crear_database():
    '''Parse the bundled museums XML export and load it into the database.'''
    documento = etree.parse('museos/201132-0-museos.xml')
    parsear(documento)
    return None
@csrf_exempt
def mostrar_principal(request):
    """Render the home page: top-5 most-commented museums plus navigation.

    GET renders the index template; POST dispatches the navigation buttons
    (all museums, about, accessible-only view, pagination, and the one-off
    XML import offered while the database is still empty).
    """
    # Session banner and login/logout link.
    # NOTE(review): ``is_authenticated()`` is invoked as a method — pre-1.10
    # Django style; newer versions expose it as a property.
    if request.user.is_authenticated():
        logged = 'Logged in as ' + request.user.username + '.'
        name_link = 'Logout'
        link = '/logout'
    else:
        logged = 'Not logged in.'
        name_link = 'Login'
        link = '/login'
    # Offer the "load XML" button only while the Museo table is empty.
    list_mus = Museo.objects.all()
    if len(list_mus) != 0:
        mostrar_cargar = False
    else:
        mostrar_cargar = True
    # Commented museums only, most-commented first.
    # NOTE(review): ``__in="0"`` tests membership over the string's
    # characters; presumably ``exclude(NUMERO_COMENTARIOS=0)`` was the
    # intent — confirm the field type.
    lista_museos = Museo.objects.exclude(NUMERO_COMENTARIOS__in="0").order_by('-NUMERO_COMENTARIOS')
    if len(lista_museos) < 5:
        lista_museos = lista_museos[0:len(lista_museos)]
    else:
        lista_museos = lista_museos[0:5]
    cuatro = False
    tres = False
    dos = False
    uno = False
    cero = False
    # A trick to square up the interface dimensions: tell the template
    # exactly how many museums (0-4) fall short of a full page of 5.
    if len(lista_museos) == 0:
        cero = True
    elif len(lista_museos) == 1:
        uno = True
    elif len(lista_museos) == 2:
        dos = True
    elif len(lista_museos) == 3:
        tres = True
    elif len(lista_museos) == 4:
        cuatro = True
    paginas_usuarios = Pagina_Personal.objects.all()
    # FIX: safe default so unexpected methods/forms redirect home instead of
    # raising UnboundLocalError on ``respuesta``.
    respuesta = HttpResponseRedirect('/')
    if request.method == "GET":
        respuesta = render(request, 'museos/index.html', {'lista_museos': lista_museos, 'logged': logged, 'link': link, 'name_link': name_link, 'cero': cero, 'uno': uno, 'dos': dos, 'tres': tres, 'cuatro': cuatro, 'paginas_usuarios': paginas_usuarios, 'mostrar_cargar': mostrar_cargar})
    elif request.method == "POST":
        if 'Todos' in request.POST:
            respuesta = HttpResponseRedirect('/museos')
        elif 'About' in request.POST:
            respuesta = HttpResponseRedirect('/about')
        elif 'Accesibles' in request.POST:
            respuesta = HttpResponseRedirect('/accesibles')
        elif 'Next' in request.POST:
            respuesta = HttpResponseRedirect('/1')
        elif 'Cargar' in request.POST:
            # One-off import of the bundled XML into the database.
            crear_database()
            respuesta = HttpResponseRedirect('/')
    return respuesta
@csrf_exempt
def mostrar_principal_next(request, numero):
    """Render page ``numero`` (0-based, 5 museums per page) of the home list.

    ``volver`` marks the last page, so the Next button wraps back to '/'.
    """
    # Session banner and login/logout link (pre-1.10 Django method style).
    if request.user.is_authenticated():
        logged = 'Logged in as ' + request.user.username + '.'
        name_link = 'Logout'
        link = '/logout'
    else:
        logged = 'Not logged in.'
        name_link = 'Login'
        link = '/login'
    # Offer the "load XML" button only while the Museo table is empty.
    list_mus = Museo.objects.all()
    if len(list_mus) != 0:
        mostrar_cargar = False
    else:
        mostrar_cargar = True
    # NOTE(review): ``__in="0"`` iterates the string's characters;
    # presumably ``exclude(NUMERO_COMENTARIOS=0)`` was intended — confirm.
    lista_museos = Museo.objects.exclude(NUMERO_COMENTARIOS__in="0").order_by('-NUMERO_COMENTARIOS')
    # Slice the requested page; ``volver`` flags the final (partial) page.
    volver = False
    if len(lista_museos) <= 5:
        lista_museos = lista_museos[0:len(lista_museos)]
    elif len(lista_museos) - int(numero)*5 < 5:
        lista_museos = lista_museos[int(numero)*5:len(lista_museos)]
        volver = True
    else:
        lista_museos = lista_museos[int(numero)*5:(int(numero)*5+5)]
    cuatro = False
    tres = False
    dos = False
    uno = False
    cero = False
    # A trick to square up the interface dimensions: tell the template
    # exactly how many museums (0-4) fall short of a full page of 5.
    if len(lista_museos) == 0:
        cero = True
    elif len(lista_museos) == 1:
        uno = True
    elif len(lista_museos) == 2:
        dos = True
    elif len(lista_museos) == 3:
        tres = True
    elif len(lista_museos) == 4:
        cuatro = True
    paginas_usuarios = Pagina_Personal.objects.all()
    # FIX: safe default so unexpected methods/forms redirect home instead of
    # raising UnboundLocalError on ``respuesta``.
    respuesta = HttpResponseRedirect('/')
    if request.method == "GET":
        respuesta = render(request, 'museos/index.html', {'lista_museos': lista_museos, 'logged': logged, 'link': link, 'name_link': name_link, 'cero': cero, 'uno': uno, 'dos': dos, 'tres': tres, 'cuatro': cuatro, 'mostrar_cargar': mostrar_cargar, 'paginas_usuarios': paginas_usuarios})
    elif request.method == "POST":
        if 'Todos' in request.POST:
            respuesta = HttpResponseRedirect('/museos')
        elif 'About' in request.POST:
            respuesta = HttpResponseRedirect('/about')
        elif 'Accesibles' in request.POST:
            respuesta = HttpResponseRedirect('/accesibles')
        elif 'Next' in request.POST:
            # Wrap to the first page after the last one.
            if volver:
                respuesta = HttpResponseRedirect('/')
            else:
                respuesta = HttpResponseRedirect('/' + str(int(numero)+1))
        elif 'Cargar' in request.POST:
            # One-off import of the bundled XML into the database.
            crear_database()
            respuesta = HttpResponseRedirect('/')
    return respuesta
@csrf_exempt
def mostrar_principal_accesibles(request):
    """Render the home page restricted to accessible museums (ACCESIBILIDAD='1').

    Same layout as mostrar_principal; the 'Accesibles' button toggles back
    to the unfiltered view at '/'.
    """
    # Session banner and login/logout link (pre-1.10 Django method style).
    if request.user.is_authenticated():
        logged = 'Logged in as ' + request.user.username + '.'
        name_link = 'Logout'
        link = '/logout'
    else:
        logged = 'Not logged in.'
        name_link = 'Login'
        link = '/login'
    # Offer the "load XML" button only while the Museo table is empty.
    list_mus = Museo.objects.all()
    if len(list_mus) != 0:
        mostrar_cargar = False
    else:
        mostrar_cargar = True
    # Commented, accessible museums only, most-commented first.
    # NOTE(review): ``__in="0"`` iterates the string's characters;
    # presumably ``exclude(NUMERO_COMENTARIOS=0)`` was intended — confirm.
    lista_museos = Museo.objects.exclude(NUMERO_COMENTARIOS__in="0").filter(ACCESIBILIDAD = '1').order_by('-NUMERO_COMENTARIOS')
    if len(lista_museos) < 5:
        lista_museos = lista_museos[0:len(lista_museos)]
    else:
        lista_museos = lista_museos[0:5]
    cuatro = False
    tres = False
    dos = False
    uno = False
    cero = False
    # A trick to square up the interface dimensions: tell the template
    # exactly how many museums (0-4) fall short of a full page of 5.
    if len(lista_museos) == 0:
        cero = True
    elif len(lista_museos) == 1:
        uno = True
    elif len(lista_museos) == 2:
        dos = True
    elif len(lista_museos) == 3:
        tres = True
    elif len(lista_museos) == 4:
        cuatro = True
    paginas_usuarios = Pagina_Personal.objects.all()
    # FIX: safe default so unexpected methods/forms redirect home instead of
    # raising UnboundLocalError on ``respuesta``.
    respuesta = HttpResponseRedirect('/')
    if request.method == "GET":
        respuesta = render(request, 'museos/index.html', {'lista_museos': lista_museos, 'logged': logged, 'link': link, 'name_link': name_link, 'cero': cero, 'uno': uno, 'dos': dos, 'tres': tres, 'cuatro': cuatro, 'mostrar_cargar': mostrar_cargar, 'paginas_usuarios': paginas_usuarios})
    elif request.method == "POST":
        if 'Todos' in request.POST:
            respuesta = HttpResponseRedirect('/museos')
        elif 'About' in request.POST:
            respuesta = HttpResponseRedirect('/about')
        elif 'Accesibles' in request.POST:
            # Toggle back to the unfiltered home page.
            respuesta = HttpResponseRedirect('/')
        elif 'Next' in request.POST:
            respuesta = HttpResponseRedirect('/accesibles/1')
        elif 'Cargar' in request.POST:
            # One-off import of the bundled XML into the database.
            crear_database()
            respuesta = HttpResponseRedirect('/')
    return respuesta
@csrf_exempt
def mostrar_principal_accesibles_next(request, numero):
    """Page *numero* (1-based /accesibles/<n> URL) of the accessible listing.

    Shows five museums per page; once the last page is reached the 'Next'
    button wraps back to the first accessible page.
    """
    if request.user.is_authenticated():
        logged = 'Logged in as ' + request.user.username + '.'
        name_link = 'Logout'
        link = '/logout'
    else:
        logged = 'Not logged in.'
        name_link = 'Login'
        link = '/login'
    # Offer the "Cargar" (load database) button only while the DB is empty.
    mostrar_cargar = not Museo.objects.exists()
    lista_museos = Museo.objects.exclude(NUMERO_COMENTARIOS__in="0") \
        .filter(ACCESIBILIDAD='1').order_by('-NUMERO_COMENTARIOS')
    total = len(lista_museos)
    inicio = int(numero) * 5
    # 'volver' marks the last page so that 'Next' wraps to the start.
    if total <= 5:
        lista_museos = lista_museos[:total]
        # Bug fix: with a single page the original never set 'volver', so
        # 'Next' incremented the page number forever (cf. mostrar_usuario_next,
        # which handles the same case correctly).
        volver = True
    elif total - inicio < 5:
        lista_museos = lista_museos[inicio:total]
        volver = True
    else:
        lista_museos = lista_museos[inicio:inicio + 5]
        volver = False
    # Template layout tweak: flag exactly how many rows will be shown (<5).
    flag_names = ('cero', 'uno', 'dos', 'tres', 'cuatro')
    flags = dict((name, False) for name in flag_names)
    if len(lista_museos) < len(flag_names):
        flags[flag_names[len(lista_museos)]] = True
    paginas_usuarios = Pagina_Personal.objects.all()
    # Safe default so 'respuesta' is always bound.
    respuesta = HttpResponseRedirect('/')
    if request.method == "GET":
        context = {'lista_museos': lista_museos, 'logged': logged, 'link': link,
                   'name_link': name_link, 'mostrar_cargar': mostrar_cargar,
                   'paginas_usuarios': paginas_usuarios}
        context.update(flags)
        respuesta = render(request, 'museos/index.html', context)
    elif request.method == "POST":
        if 'Todos' in request.POST:
            respuesta = HttpResponseRedirect('/museos')
        elif 'About' in request.POST:
            respuesta = HttpResponseRedirect('/about')
        elif 'Accesibles' in request.POST:
            respuesta = HttpResponseRedirect('/')
        elif 'Next' in request.POST:
            if volver:
                respuesta = HttpResponseRedirect('/accesibles')
            else:
                respuesta = HttpResponseRedirect('/accesibles/' + str(int(numero) + 1))
        elif 'Cargar' in request.POST:
            crear_database()
            respuesta = HttpResponseRedirect('/')
    return respuesta
@csrf_exempt
def mostrar_museos(request):
    """Full museum listing with an optional district filter.

    GET shows all museums; POST 'Enviar' filters by the district typed by
    the user (upper-cased to match the stored values); the remaining
    buttons are plain navigation.
    """
    if request.user.is_authenticated():
        logged = 'Logged in as ' + request.user.username + '.'
        name_link = 'Logout'
        link = '/logout'
    else:
        logged = 'Not logged in.'
        name_link = 'Login'
        link = '/login'
    # 'mostrar' tells the template whether the unfiltered list is shown.
    mostrar = True
    # Fallback guarantees a response even for unexpected POST payloads
    # (the original left 'respuesta' unbound in that case).
    respuesta = HttpResponseRedirect('/')
    if request.method == "GET":
        lista_museos = Museo.objects.all()
        respuesta = render(request, 'museos/museos.html', {
            'lista_museos': lista_museos, 'logged': logged, 'link': link,
            'name_link': name_link, 'mostrar': mostrar})
    elif request.method == "POST":
        if 'Enviar' in request.POST:
            distrito = request.POST['Distrito'].upper()
            lista_museos = Museo.objects.filter(DISTRITO=distrito)
            mostrar = False
            respuesta = render(request, 'museos/museos.html', {
                'lista_museos': lista_museos, 'logged': logged, 'link': link,
                'name_link': name_link, 'mostrar': mostrar})
        elif 'Inicio' in request.POST:
            respuesta = HttpResponseRedirect('/')
        elif 'About' in request.POST:
            respuesta = HttpResponseRedirect('/about')
    return respuesta
@csrf_exempt
def mostrar_app_museo(request, identificador):
    """Detail page of a single museum.

    GET renders the museum together with its comments.  POST handles the
    navigation buttons, adds the museum to the logged-in user's personal
    list ('Añadir a lista'), or stores a new comment ('Enviar').
    """
    if request.user.is_authenticated():
        logged = 'Logged in as ' + request.user.username + '.'
        name_link = 'Logout'
        link = '/logout'
        # Only authenticated users see the "add to my list" control.
        mostrar_selec = True
    else:
        logged = 'Not logged in.'
        name_link = 'Login'
        link = '/login'
        mostrar_selec = False
    museo = Museo.objects.get(id=int(identificador))
    comentarios = Comentario.objects.filter(museo=museo)
    # Flag used by the template to show a "no comments yet" message.
    lista_vacia = False
    if len(comentarios) == 0:
        lista_vacia = True
    if request.method == "GET":
        respuesta = render(request, 'museos/museos_app.html', {'museo': museo, 'logged': logged, 'link': link, 'name_link': name_link, 'mostrar_selec': mostrar_selec, 'comentarios': comentarios, 'lista_vacia' : lista_vacia })
    elif request.method == "POST":
        if 'About' in request.POST:
            respuesta = HttpResponseRedirect('/about')
        elif 'Añadir a lista' in request.POST:
            museos_usuario = Usuario.objects.filter(nombre=request.user.username)
            try:
                nombre_pagina = Pagina_Personal.objects.get(nombre_usuario=request.user.username).nombre_pagina
            except Pagina_Personal.DoesNotExist:
                # First save for this user: create their personal page with
                # the default title and colours.
                nombre_pagina = "Página de " + request.user.username
                color_cuerpo = "#FFFFFF"
                color_cabecera = "#9E4528"
                pagina_personal = Pagina_Personal(nombre_pagina = nombre_pagina, nombre_usuario = request.user.username, color_cuerpo = color_cuerpo, color_cabecera = color_cabecera)
                pagina_personal.save()
            if len(museos_usuario.filter(museo=museo)) == 0:
                # Avoid duplicate entries in the user's museum list.
                g = Usuario(nombre = request.user.username, comentario = "", museo = museo)
                g.save()
            respuesta = render(request, 'museos/museos_app.html', {'museo': museo, 'logged': logged, 'link': link, 'name_link': name_link, 'mostrar_selec': mostrar_selec, 'comentarios': comentarios, 'lista_vacia' : lista_vacia })
        elif 'Inicio' in request.POST:
            respuesta = HttpResponseRedirect('/')
        elif 'Todos' in request.POST:
            respuesta = HttpResponseRedirect('/museos')
        elif 'Enviar' in request.POST:
            coment = request.POST['Comentario']
            if coment != "":
                # Store the comment and keep the denormalised per-museum
                # comment counter in sync.
                g = Comentario(text = request.POST['Comentario'], museo = museo)
                g.save()
                comentarios = Comentario.objects.filter(museo=museo)
                museo.NUMERO_COMENTARIOS = museo.NUMERO_COMENTARIOS + 1
                museo.save()
                lista_vacia = False
            respuesta = render(request, 'museos/museos_app.html', {'museo': museo, 'logged': logged, 'link': link, 'name_link': name_link, 'mostrar_selec': mostrar_selec, 'comentarios': comentarios, 'lista_vacia' : lista_vacia })
    return respuesta
@csrf_exempt
def mostrar_usuario(request, usuario):
    """Personal page of *usuario*: first five saved museums plus customisation.

    POST buttons let the page owner rename the page ('Modificar') and
    recolour the body/header by Spanish colour name; unknown users get a
    plain 'Página no encontrada' response.
    """
    # Spanish colour names (matched upper-cased) to hex codes; this table
    # replaces the two duplicated 10-branch if/elif chains of the original.
    colores = {'MORADO': '#BD8ACF', 'AZUL': '#706DC9', 'VERDE': '#4CE656',
               'NARANJA': '#E38914', 'AMARILLO': '#DBDB3B', 'ROJO': '#ED2828',
               'ROSA': '#E089BC', 'GRIS': '#9C9599', 'MARRON': '#D18D6B',
               'BLANCO': '#FFFFFF'}
    mostrar_selec = False
    if request.user.is_authenticated():
        logged = 'Logged in as ' + request.user.username + '.'
        name_link = 'Logout'
        link = '/logout'
        # Customisation controls are only shown to the page owner.
        mostrar_selec = request.user.username == usuario
    else:
        logged = 'Not logged in.'
        name_link = 'Login'
        link = '/login'
    # Fetch the page once instead of three separate .get() round trips.
    try:
        pagina = Pagina_Personal.objects.get(nombre_usuario=usuario)
    except Pagina_Personal.DoesNotExist:
        return HttpResponse('Página no encontrada')
    nombre_pagina = pagina.nombre_pagina
    color_cuerpo = pagina.color_cuerpo
    color_cabecera = pagina.color_cabecera
    # First page only: at most five saved museums.
    museos_usuario = Usuario.objects.filter(nombre=usuario)[:5]

    def _render():
        # Closure reads the current values of the customisation variables.
        return render(request, 'museos/usuario.html', {
            'lista_museos': museos_usuario, 'logged': logged, 'link': link,
            'name_link': name_link, 'nombre_pagina': nombre_pagina,
            'mostrar_selec': mostrar_selec, 'color_cuerpo': color_cuerpo,
            'color_cabecera': color_cabecera})

    # Safe default so 'respuesta' is always bound (the original raised
    # UnboundLocalError on a POST without a recognised button).
    respuesta = HttpResponseRedirect('/')
    if request.method == "GET":
        respuesta = _render()
    elif request.method == "POST":
        if 'Todos' in request.POST:
            respuesta = HttpResponseRedirect('/museos')
        elif 'About' in request.POST:
            respuesta = HttpResponseRedirect('/about')
        elif 'Inicio' in request.POST:
            respuesta = HttpResponseRedirect('/')
        elif 'Modificar' in request.POST:
            pagina.nombre_pagina = request.POST['Pagina']
            pagina.save()
            nombre_pagina = pagina.nombre_pagina
            respuesta = _render()
        elif 'color_cuerpo_boton' in request.POST:
            texto = request.POST['color_cuerpo_texto'].upper()
            # Unknown colour names keep the previously stored colour.
            pagina.color_cuerpo = colores.get(texto, pagina.color_cuerpo)
            pagina.save()
            color_cuerpo = pagina.color_cuerpo
            respuesta = _render()
        elif 'color_cabecera_boton' in request.POST:
            texto = request.POST['color_cabecera_texto'].upper()
            pagina.color_cabecera = colores.get(texto, pagina.color_cabecera)
            pagina.save()
            color_cabecera = pagina.color_cabecera
            respuesta = _render()
        elif 'Next' in request.POST:
            respuesta = HttpResponseRedirect('/' + usuario + '/1')
        elif 'Cargar' in request.POST:
            crear_database()
            respuesta = HttpResponseRedirect('/')
    return respuesta
@csrf_exempt
def mostrar_usuario_next(request, usuario, numero):
    """Page *numero* (1-based /<usuario>/<n> URL) of a user's museum list.

    Five museums per page; 'Next' on the last page wraps back to the user's
    main page.  Unlike mostrar_usuario, a missing personal page is created
    on the fly with default title and colours.
    """
    # Spanish colour names (matched upper-cased) to hex codes; replaces the
    # two duplicated 10-branch if/elif chains of the original.
    colores = {'MORADO': '#BD8ACF', 'AZUL': '#706DC9', 'VERDE': '#4CE656',
               'NARANJA': '#E38914', 'AMARILLO': '#DBDB3B', 'ROJO': '#ED2828',
               'ROSA': '#E089BC', 'GRIS': '#9C9599', 'MARRON': '#D18D6B',
               'BLANCO': '#FFFFFF'}
    mostrar_selec = False
    if request.user.is_authenticated():
        logged = 'Logged in as ' + request.user.username + '.'
        name_link = 'Logout'
        link = '/logout'
        # Customisation controls are only shown to the page owner.
        mostrar_selec = request.user.username == usuario
    else:
        logged = 'Not logged in.'
        name_link = 'Login'
        link = '/login'
    try:
        pagina = Pagina_Personal.objects.get(nombre_usuario=usuario)
    except Pagina_Personal.DoesNotExist:
        # First visit: create the page with the default title and colours.
        pagina = Pagina_Personal(
            nombre_pagina="Página de " + usuario, nombre_usuario=usuario,
            color_cuerpo="#FFFFFF", color_cabecera="#9E4528")
        pagina.save()
    nombre_pagina = pagina.nombre_pagina
    color_cuerpo = pagina.color_cuerpo
    color_cabecera = pagina.color_cabecera
    museos_usuario = Usuario.objects.filter(nombre=usuario)
    total = len(museos_usuario)
    inicio = int(numero) * 5
    # 'volver' marks the last page so that 'Next' wraps to the start.
    if total <= 5:
        museos_usuario = museos_usuario[:total]
        volver = True
    elif total - inicio < 5:
        museos_usuario = museos_usuario[inicio:total]
        volver = True
    else:
        museos_usuario = museos_usuario[inicio:inicio + 5]
        volver = False

    def _render():
        # Closure reads the current values of the customisation variables.
        return render(request, 'museos/usuario.html', {
            'lista_museos': museos_usuario, 'logged': logged, 'link': link,
            'name_link': name_link, 'nombre_pagina': nombre_pagina,
            'mostrar_selec': mostrar_selec, 'color_cuerpo': color_cuerpo,
            'color_cabecera': color_cabecera})

    # Safe default so 'respuesta' is always bound.
    respuesta = HttpResponseRedirect('/')
    if request.method == "GET":
        respuesta = _render()
    elif request.method == "POST":
        if 'Todos' in request.POST:
            respuesta = HttpResponseRedirect('/museos')
        elif 'About' in request.POST:
            respuesta = HttpResponseRedirect('/about')
        elif 'Inicio' in request.POST:
            respuesta = HttpResponseRedirect('/')
        elif 'Modificar' in request.POST:
            pagina.nombre_pagina = request.POST['Pagina']
            pagina.save()
            nombre_pagina = pagina.nombre_pagina
            respuesta = _render()
        elif 'color_cuerpo_boton' in request.POST:
            texto = request.POST['color_cuerpo_texto'].upper()
            # Unknown colour names keep the previously stored colour.
            pagina.color_cuerpo = colores.get(texto, pagina.color_cuerpo)
            pagina.save()
            color_cuerpo = pagina.color_cuerpo
            respuesta = _render()
        elif 'color_cabecera_boton' in request.POST:
            texto = request.POST['color_cabecera_texto'].upper()
            pagina.color_cabecera = colores.get(texto, pagina.color_cabecera)
            pagina.save()
            color_cabecera = pagina.color_cabecera
            respuesta = _render()
        elif 'Next' in request.POST:
            if volver:
                respuesta = HttpResponseRedirect('/' + usuario)
            else:
                respuesta = HttpResponseRedirect('/' + usuario + '/' + str(int(numero) + 1))
        elif 'Cargar' in request.POST:
            crear_database()
            respuesta = HttpResponseRedirect('/')
    return respuesta
@csrf_exempt
def mostrar_ayuda(request):
    """Static 'About' page sharing the common navigation header."""
    if request.user.is_authenticated():
        logged = 'Logged in as ' + request.user.username + '.'
        name_link = 'Logout'
        link = '/logout'
    else:
        logged = 'Not logged in.'
        name_link = 'Login'
        link = '/login'
    # Fallback guarantees a response even for unexpected POST payloads
    # (the original left 'respuesta' unbound in that case).
    respuesta = HttpResponseRedirect('/')
    if request.method == "GET":
        respuesta = render(request, 'museos/about.html',
                           {'logged': logged, 'link': link, 'name_link': name_link})
    elif request.method == "POST":
        if 'Todos' in request.POST:
            respuesta = HttpResponseRedirect('/museos')
        elif 'Inicio' in request.POST:
            respuesta = HttpResponseRedirect('/')
    return respuesta
def mostrar_xml(request, usuario):
    """XML feed ('canal.xml') with the museum list saved by *usuario*.

    The original only returned a response for GET and implicitly returned
    None for every other method, which crashes Django; now every method
    receives the rendered feed.
    """
    museos_usuario = Usuario.objects.filter(nombre=usuario)
    template = get_template('canal.xml')
    contenido = template.render(Context({'nombre_usuario': usuario,
                                         'lista_museos': museos_usuario}))
    return HttpResponse(contenido, content_type="text/xml")
| 36.164306 | 922 | 0.707739 |
7957cfb121776e34b4910ec717268648c282bbf1 | 516 | py | Python | rx/operators/observable/thendo.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2018-11-16T09:07:13.000Z | 2018-11-16T09:07:13.000Z | rx/operators/observable/thendo.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | rx/operators/observable/thendo.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-05-08T08:23:08.000Z | 2020-05-08T08:23:08.000Z | from rx.core.typing import Mapper
from rx.core import ObservableBase
from rx.joins import Pattern
def then_do(source: ObservableBase, mapper: Mapper) -> ObservableBase:
    """Match when the observable sequence has an available value and
    project that value.

    Args:
        mapper: Mapper invoked for each value of the source sequence.

    Returns:
        A Plan producing the projected values, to be fed (together with
        other plans) to the when operator.
    """
    pattern = Pattern([source])
    return pattern.then_do(mapper)
| 27.157895 | 70 | 0.728682 |
7957d036c2359d6180f85f64417914f452fed423 | 48,589 | py | Python | tests/integration/test_api_gateway.py | jaume-pinyol/localstack | a7eb1ef9c992dde53e2f3abe27d3fab94ba40bde | [
"Apache-2.0"
] | null | null | null | tests/integration/test_api_gateway.py | jaume-pinyol/localstack | a7eb1ef9c992dde53e2f3abe27d3fab94ba40bde | [
"Apache-2.0"
] | null | null | null | tests/integration/test_api_gateway.py | jaume-pinyol/localstack | a7eb1ef9c992dde53e2f3abe27d3fab94ba40bde | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import re
import json
import base64
import unittest
import xmltodict
from botocore.exceptions import ClientError
from jsonpatch import apply_patch
from requests.models import Response
from requests.structures import CaseInsensitiveDict
from localstack import config
from localstack.utils import testutil
from localstack.utils.aws import aws_stack
from localstack.constants import TEST_AWS_ACCOUNT_ID, HEADER_LOCALSTACK_REQUEST_URL
from localstack.utils.common import (
to_str, json_safe, clone, short_uid, get_free_tcp_port,
load_file, select_attributes, safe_requests as requests)
from localstack.services.infra import start_proxy
from localstack.services.generic_proxy import ProxyListener
from localstack.services.apigateway.helpers import (
get_rest_api_paths, get_resource_for_path, connect_api_gateway_to_sqs, gateway_request_url)
from localstack.services.awslambda.lambda_api import add_event_source
from localstack.services.awslambda.lambda_utils import LAMBDA_RUNTIME_PYTHON36
from .test_lambda import TEST_LAMBDA_PYTHON, TEST_LAMBDA_LIBS
THIS_FOLDER = os.path.dirname(os.path.realpath(__file__))
TEST_SWAGGER_FILE = os.path.join(THIS_FOLDER, 'files', 'swagger.json')
TEST_IMPORT_REST_API_FILE = os.path.join(THIS_FOLDER, 'files', 'pets.json')
TEST_LAMBDA_ECHO_FILE = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_echo.py')
class TestAPIGateway(unittest.TestCase):
    """Integration tests for the local API Gateway emulation.

    The class-level constants below hold the VTL request template, the
    REST resource paths and the fixture names shared by the tests.
    """
    # template used to transform incoming requests at the API Gateway (stream name to be filled in later)
    APIGATEWAY_DATA_INBOUND_TEMPLATE = """{
"StreamName": "%s",
"Records": [
#set( $numRecords = $input.path('$.records').size() )
#if($numRecords > 0)
#set( $maxIndex = $numRecords - 1 )
#foreach( $idx in [0..$maxIndex] )
#set( $elem = $input.path("$.records[${idx}]") )
#set( $elemJsonB64 = $util.base64Encode($elem.data) )
{
"Data": "$elemJsonB64",
"PartitionKey": #if( $elem.partitionKey != '')"$elem.partitionKey"
#else"$elemJsonB64.length()"#end
}#if($foreach.hasNext),#end
#end
#end
]
}"""
    # endpoint paths
    API_PATH_DATA_INBOUND = '/data'
    API_PATH_HTTP_BACKEND = '/hello_world'
    API_PATH_LAMBDA_PROXY_BACKEND = '/lambda/foo1'
    API_PATH_LAMBDA_PROXY_BACKEND_WITH_PATH_PARAM = '/lambda/{test_param1}'
    API_PATH_LAMBDA_PROXY_BACKEND_ANY_METHOD = '/lambda-any-method/foo1'
    API_PATH_LAMBDA_PROXY_BACKEND_ANY_METHOD_WITH_PATH_PARAM = '/lambda-any-method/{test_param1}'
    # name of Kinesis stream connected to API Gateway
    TEST_STREAM_KINESIS_API_GW = 'test-stream-api-gw'
    # deployment stage and Lambda fixture names reused across tests
    TEST_STAGE_NAME = 'testing'
    TEST_LAMBDA_PROXY_BACKEND = 'test_lambda_apigw_backend'
    TEST_LAMBDA_PROXY_BACKEND_WITH_PATH_PARAM = 'test_lambda_apigw_backend_path_param'
    TEST_LAMBDA_PROXY_BACKEND_ANY_METHOD = 'test_lambda_apigw_backend_any_method'
    TEST_LAMBDA_PROXY_BACKEND_ANY_METHOD_WITH_PATH_PARAM = 'test_lambda_apigw_backend_any_method_path_param'
    TEST_LAMBDA_SQS_HANDLER_NAME = 'lambda_sqs_handler'
    TEST_LAMBDA_AUTHORIZER_HANDLER_NAME = 'lambda_authorizer_handler'
    # fixed REST API id used by the authorizer CRUD test
    TEST_API_GATEWAY_ID = 'fugvjdxtri'
    # create_authorizer payload: a Lambda-backed TOKEN authorizer
    TEST_API_GATEWAY_AUTHORIZER = {
        'name': 'test',
        'type': 'TOKEN',
        'providerARNs': [
            'arn:aws:cognito-idp:us-east-1:123412341234:userpool/us-east-1_123412341'
        ],
        'authType': 'custom',
        'authorizerUri': 'arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/' +
            'arn:aws:lambda:us-east-1:123456789012:function:myApiAuthorizer/invocations',
        'authorizerCredentials': 'arn:aws:iam::123456789012:role/apigAwsProxyRole',
        'identitySource': 'method.request.header.Authorization',
        'identityValidationExpression': '.*',
        'authorizerResultTtlInSeconds': 300
    }
    # JSON-patch operations applied by update_authorizer in the CRUD test
    TEST_API_GATEWAY_AUTHORIZER_OPS = [
        {
            'op': 'replace',
            'path': '/name',
            'value': 'test1'
        }
    ]
    def test_api_gateway_kinesis_integration(self):
        """API Gateway REST endpoint with a Kinesis service integration.

        GET on the endpoint lists streams; POST forwards records into the
        target stream via the class-level VTL request template.
        """
        # create target Kinesis stream
        stream = aws_stack.create_kinesis_stream(self.TEST_STREAM_KINESIS_API_GW)
        stream.wait_for()
        # create API Gateway and connect it to the target stream
        result = self.connect_api_gateway_to_kinesis('test_gateway1', self.TEST_STREAM_KINESIS_API_GW)
        # generate test data
        test_data = {'records': [
            {'data': '{"foo": "bar1"}'},
            {'data': '{"foo": "bar2"}'},
            {'data': '{"foo": "bar3"}'}
        ]}
        url = gateway_request_url(
            api_id=result['id'], stage_name=self.TEST_STAGE_NAME, path=self.API_PATH_DATA_INBOUND)
        # list Kinesis streams via API Gateway
        result = requests.get(url)
        result = json.loads(to_str(result.content))
        self.assertIn('StreamNames', result)
        # post test data to Kinesis via API Gateway; every record must land
        result = requests.post(url, data=json.dumps(test_data))
        result = json.loads(to_str(result.content))
        self.assertEqual(result['FailedRecordCount'], 0)
        self.assertEqual(len(result['Records']), len(test_data['records']))
        # clean up
        kinesis = aws_stack.connect_to_service('kinesis')
        kinesis.delete_stream(StreamName=self.TEST_STREAM_KINESIS_API_GW)
    def test_api_gateway_sqs_integration_with_event_source(self):
        """API Gateway -> SQS integration plus a Lambda event-source mapping.

        Posting to the gateway must enqueue the message; the MD5 in the
        SendMessage XML response is checked against the known body hash.
        """
        # create target SQS stream
        queue_name = 'queue-%s' % short_uid()
        queue_url = aws_stack.create_sqs_queue(queue_name)['QueueUrl']
        # create API Gateway and connect it to the target queue
        result = connect_api_gateway_to_sqs(
            'test_gateway4', stage_name=self.TEST_STAGE_NAME,
            queue_arn=queue_name, path=self.API_PATH_DATA_INBOUND)
        # create event source for sqs lambda processor
        self.create_lambda_function(self.TEST_LAMBDA_SQS_HANDLER_NAME)
        event_source_data = {
            'FunctionName': self.TEST_LAMBDA_SQS_HANDLER_NAME,
            'EventSourceArn': aws_stack.sqs_queue_arn(queue_name),
            'Enabled': True
        }
        add_event_source(event_source_data)
        # generate test data
        test_data = {'spam': 'eggs & beans'}
        url = gateway_request_url(
            api_id=result['id'], stage_name=self.TEST_STAGE_NAME, path=self.API_PATH_DATA_INBOUND)
        result = requests.post(url, data=json.dumps(test_data))
        self.assertEqual(result.status_code, 200)
        # the gateway returns the raw SendMessage XML response from SQS
        parsed_json = xmltodict.parse(result.content)
        result = parsed_json['SendMessageResponse']['SendMessageResult']
        body_md5 = result['MD5OfMessageBody']
        self.assertEqual(body_md5, 'b639f52308afd65866c86f274c59033f')
        # clean up
        sqs_client = aws_stack.connect_to_service('sqs')
        sqs_client.delete_queue(QueueUrl=queue_url)
        lambda_client = aws_stack.connect_to_service('lambda')
        lambda_client.delete_function(FunctionName=self.TEST_LAMBDA_SQS_HANDLER_NAME)
    def test_api_gateway_sqs_integration(self):
        """Plain API Gateway -> SQS integration without a Lambda consumer."""
        # create target SQS stream
        queue_name = 'queue-%s' % short_uid()
        aws_stack.create_sqs_queue(queue_name)
        # create API Gateway and connect it to the target queue
        result = connect_api_gateway_to_sqs('test_gateway4', stage_name=self.TEST_STAGE_NAME,
            queue_arn=queue_name, path=self.API_PATH_DATA_INBOUND)
        # generate test data
        test_data = {'spam': 'eggs'}
        url = gateway_request_url(
            api_id=result['id'], stage_name=self.TEST_STAGE_NAME, path=self.API_PATH_DATA_INBOUND)
        result = requests.post(url, data=json.dumps(test_data))
        self.assertEqual(result.status_code, 200)
        # the integration template base64-encodes the body before enqueueing
        messages = aws_stack.sqs_receive_message(queue_name)['Messages']
        self.assertEqual(len(messages), 1)
        self.assertEqual(json.loads(base64.b64decode(messages[0]['Body'])), test_data)
def test_api_gateway_http_integrations(self):
self.run_api_gateway_http_integration('custom')
self.run_api_gateway_http_integration('proxy')
    def run_api_gateway_http_integration(self, int_type):
        """Drive one HTTP integration type ('custom' or 'proxy') end to end.

        Spins up a local HTTP backend, wires an API Gateway in front of it,
        and checks the CORS preflight, GET/POST payload passthrough and
        non-JSON content-type handling.
        """
        test_port = get_free_tcp_port()
        backend_url = 'http://localhost:%s%s' % (test_port, self.API_PATH_HTTP_BACKEND)
        # start test HTTP backend
        proxy = self.start_http_backend(test_port)
        # create API Gateway and connect it to the HTTP_PROXY/HTTP backend
        result = self.connect_api_gateway_to_http(
            int_type,
            'test_gateway2',
            backend_url,
            path=self.API_PATH_HTTP_BACKEND
        )
        url = gateway_request_url(
            api_id=result['id'], stage_name=self.TEST_STAGE_NAME, path=self.API_PATH_HTTP_BACKEND)
        # make sure CORS headers are present
        origin = 'localhost'
        result = requests.options(url, headers={'origin': origin})
        self.assertEqual(result.status_code, 200)
        # Allow-Origin may be a wildcard pattern; match it against the origin
        self.assertTrue(re.match(result.headers['Access-Control-Allow-Origin'].replace('*', '.*'), origin))
        self.assertIn('POST', result.headers['Access-Control-Allow-Methods'])
        self.assertIn('PATCH', result.headers['Access-Control-Allow-Methods'])
        # 'custom' integrations return this fixed backend payload, while
        # 'proxy' integrations echo the request body back
        custom_result = json.dumps({'foo': 'bar'})
        # make test GET request to gateway
        result = requests.get(url)
        self.assertEqual(result.status_code, 200)
        expected = custom_result if int_type == 'custom' else '{}'
        self.assertEqual(json.loads(to_str(result.content))['data'], expected)
        # make test POST request to gateway
        data = json.dumps({'data': 123})
        result = requests.post(url, data=data)
        self.assertEqual(result.status_code, 200)
        expected = custom_result if int_type == 'custom' else data
        self.assertEqual(json.loads(to_str(result.content))['data'], expected)
        # make test POST request with non-JSON content type
        data = 'test=123'
        ctype = 'application/x-www-form-urlencoded'
        result = requests.post(url, data=data, headers={'content-type': ctype})
        self.assertEqual(result.status_code, 200)
        content = json.loads(to_str(result.content))
        headers = CaseInsensitiveDict(content['headers'])
        expected = custom_result if int_type == 'custom' else data
        self.assertEqual(content['data'], expected)
        self.assertEqual(headers['content-type'], ctype)
        # clean up
        proxy.stop()
def test_api_gateway_lambda_proxy_integration(self):
self._test_api_gateway_lambda_proxy_integration(
self.TEST_LAMBDA_PROXY_BACKEND,
self.API_PATH_LAMBDA_PROXY_BACKEND)
def test_api_gateway_lambda_proxy_integration_with_path_param(self):
self._test_api_gateway_lambda_proxy_integration(
self.TEST_LAMBDA_PROXY_BACKEND_WITH_PATH_PARAM,
self.API_PATH_LAMBDA_PROXY_BACKEND_WITH_PATH_PARAM)
    def _test_api_gateway_lambda_proxy_integration(self, fn_name, path):
        """Shared body of the Lambda AWS_PROXY integration tests.

        Creates the Lambda, wires an API Gateway proxy integration to it
        and validates status/header passthrough, the Lambda request-context
        fields and non-ASCII body handling.
        """
        self.create_lambda_function(fn_name)
        # create API Gateway and connect it to the Lambda proxy backend
        lambda_uri = aws_stack.lambda_function_arn(fn_name)
        invocation_uri = 'arn:aws:apigateway:%s:lambda:path/2015-03-31/functions/%s/invocations'
        target_uri = invocation_uri % (aws_stack.get_region(), lambda_uri)
        result = testutil.connect_api_gateway_to_http_with_lambda_proxy(
            'test_gateway2', target_uri, path=path, stage_name=self.TEST_STAGE_NAME)
        api_id = result['id']
        path_map = get_rest_api_paths(api_id)
        _, resource = get_resource_for_path('/lambda/foo1', path_map)
        # make test request to gateway and check response
        path = path.replace('{test_param1}', 'foo1')
        path = path + '?foo=foo&bar=bar&bar=baz'
        url = gateway_request_url(api_id=api_id, stage_name=self.TEST_STAGE_NAME, path=path)
        data = {'return_status_code': 203, 'return_headers': {'foo': 'bar123'}}
        result = requests.post(url, data=json.dumps(data),
            headers={'User-Agent': 'python-requests/testing'})
        self.assertEqual(result.status_code, 203)
        self.assertEqual(result.headers.get('foo'), 'bar123')
        self.assertIn('set-cookie', result.headers)
        parsed_body = json.loads(to_str(result.content))
        self.assertEqual(parsed_body.get('return_status_code'), 203)
        self.assertDictEqual(parsed_body.get('return_headers'), {'foo': 'bar123'})
        # repeated query parameters must be collected into a list
        self.assertDictEqual(parsed_body.get('queryStringParameters'), {'foo': 'foo', 'bar': ['bar', 'baz']})
        request_context = parsed_body.get('requestContext')
        # sourceIp varies per run; pop it and only check the dotted-quad shape
        source_ip = request_context['identity'].pop('sourceIp')
        self.assertTrue(re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', source_ip))
        self.assertEqual(request_context['path'], '/' + self.TEST_STAGE_NAME + '/lambda/foo1')
        self.assertEqual(request_context.get('stageVariables'), None)
        self.assertEqual(request_context['accountId'], TEST_AWS_ACCOUNT_ID)
        self.assertEqual(request_context['resourceId'], resource.get('id'))
        self.assertEqual(request_context['stage'], self.TEST_STAGE_NAME)
        self.assertEqual(request_context['identity']['userAgent'], 'python-requests/testing')
        self.assertEqual(request_context['httpMethod'], 'POST')
        self.assertEqual(request_context['protocol'], 'HTTP/1.1')
        self.assertIn('requestTimeEpoch', request_context)
        self.assertIn('requestTime', request_context)
        # DELETE with a body yields 204 No Content
        result = requests.delete(url, data=json.dumps(data))
        self.assertEqual(result.status_code, 204)
        # send message with non-ASCII chars
        body_msg = '🙀 - 参よ'
        result = requests.post(url, data=json.dumps({'return_raw_body': body_msg}))
        self.assertEqual(to_str(result.content), body_msg)
def test_api_gateway_lambda_proxy_integration_any_method(self):
self._test_api_gateway_lambda_proxy_integration_any_method(
self.TEST_LAMBDA_PROXY_BACKEND_ANY_METHOD,
self.API_PATH_LAMBDA_PROXY_BACKEND_ANY_METHOD)
def test_api_gateway_lambda_proxy_integration_any_method_with_path_param(self):
self._test_api_gateway_lambda_proxy_integration_any_method(
self.TEST_LAMBDA_PROXY_BACKEND_ANY_METHOD_WITH_PATH_PARAM,
self.API_PATH_LAMBDA_PROXY_BACKEND_ANY_METHOD_WITH_PATH_PARAM)
    def test_api_gateway_authorizer_crud(self):
        """Create/read/update/delete lifecycle of an API Gateway authorizer."""
        apig = aws_stack.connect_to_service('apigateway')
        authorizer = apig.create_authorizer(
            restApiId=self.TEST_API_GATEWAY_ID,
            **self.TEST_API_GATEWAY_AUTHORIZER)
        authorizer_id = authorizer.get('id')
        create_result = apig.get_authorizer(
            restApiId=self.TEST_API_GATEWAY_ID,
            authorizerId=authorizer_id)
        # ignore boto3 stuff
        del create_result['ResponseMetadata']
        # the stored authorizer must equal the request payload plus its id
        create_expected = clone(self.TEST_API_GATEWAY_AUTHORIZER)
        create_expected['id'] = authorizer_id
        self.assertDictEqual(create_expected, create_result)
        apig.update_authorizer(
            restApiId=self.TEST_API_GATEWAY_ID,
            authorizerId=authorizer_id,
            patchOperations=self.TEST_API_GATEWAY_AUTHORIZER_OPS)
        update_result = apig.get_authorizer(
            restApiId=self.TEST_API_GATEWAY_ID,
            authorizerId=authorizer_id)
        # ignore boto3 stuff
        del update_result['ResponseMetadata']
        # applying the JSON-patch ops locally must reproduce the server state
        update_expected = apply_patch(create_expected, self.TEST_API_GATEWAY_AUTHORIZER_OPS)
        self.assertDictEqual(update_expected, update_result)
        apig.delete_authorizer(
            restApiId=self.TEST_API_GATEWAY_ID,
            authorizerId=authorizer_id)
        # fetching a deleted authorizer must raise
        self.assertRaises(
            Exception,
            apig.get_authorizer,
            self.TEST_API_GATEWAY_ID,
            authorizer_id
        )
    def test_apigateway_with_lambda_integration(self):
        """Full lifecycle of an AWS-type Lambda integration: create, inspect,
        deploy, invoke through the edge URL, and delete the integration."""
        apigw_client = aws_stack.connect_to_service('apigateway')

        # create Lambda function
        lambda_name = 'apigw-lambda-%s' % short_uid()
        self.create_lambda_function(lambda_name)
        lambda_uri = aws_stack.lambda_function_arn(lambda_name)
        target_uri = aws_stack.apigateway_invocations_arn(lambda_uri)

        # create REST API
        api = apigw_client.create_rest_api(name='test-api', description='')
        api_id = api['id']
        root_res_id = apigw_client.get_resources(restApiId=api_id)['items'][0]['id']
        api_resource = apigw_client.create_resource(restApiId=api_id, parentId=root_res_id, pathPart='test')

        apigw_client.put_method(
            restApiId=api_id,
            resourceId=api_resource['id'],
            httpMethod='GET',
            authorizationType='NONE'
        )

        rs = apigw_client.put_integration(
            restApiId=api_id,
            resourceId=api_resource['id'],
            httpMethod='GET',
            integrationHttpMethod='POST',
            type='AWS',
            uri=target_uri,
            timeoutInMillis=3000,
            contentHandling='CONVERT_TO_BINARY',
            requestTemplates={
                'application/json': '{"param1": "$input.params(\'param1\')"}'
            }
        )
        # the put_integration response must echo all integration attributes
        integration_keys = ['httpMethod', 'type', 'passthroughBehavior', 'cacheKeyParameters', 'uri', 'cacheNamespace',
                            'timeoutInMillis', 'contentHandling', 'requestParameters']
        self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
        for key in integration_keys:
            self.assertIn(key, rs)
        self.assertNotIn('responseTemplates', rs)

        apigw_client.create_deployment(restApiId=api_id, stageName=self.TEST_STAGE_NAME)

        rs = apigw_client.get_integration(
            restApiId=api_id,
            resourceId=api_resource['id'],
            httpMethod='GET'
        )
        self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
        self.assertEqual(rs['type'], 'AWS')
        self.assertEqual(rs['httpMethod'], 'POST')
        self.assertEqual(rs['uri'], target_uri)

        # invoke the gateway endpoint
        url = gateway_request_url(api_id=api_id, stage_name=self.TEST_STAGE_NAME, path='/test')
        response = requests.get('%s?param1=foobar' % url)
        self.assertLess(response.status_code, 400)
        content = json.loads(to_str(response.content))
        self.assertEqual(content.get('httpMethod'), 'GET')
        self.assertEqual(content.get('requestContext', {}).get('resourceId'), api_resource['id'])
        self.assertEqual(content.get('requestContext', {}).get('stage'), self.TEST_STAGE_NAME)
        # the request template above mapped query string param1 into the body
        self.assertEqual(content.get('body'), '{"param1": "foobar"}')

        # delete integration
        rs = apigw_client.delete_integration(
            restApiId=api_id,
            resourceId=api_resource['id'],
            httpMethod='GET',
        )
        self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)

        with self.assertRaises(ClientError) as ctx:
            # This call should not be successful as the integration is deleted
            apigw_client.get_integration(
                restApiId=api_id,
                resourceId=api_resource['id'],
                httpMethod='GET'
            )
        self.assertEqual(ctx.exception.response['Error']['Code'], 'BadRequestException')

        # clean up
        lambda_client = aws_stack.connect_to_service('lambda')
        lambda_client.delete_function(FunctionName=lambda_name)
        apigw_client.delete_rest_api(restApiId=api_id)
def test_api_gateway_handle_domain_name(self):
domain_name = '%s.example.com' % short_uid()
apigw_client = aws_stack.connect_to_service('apigateway')
rs = apigw_client.create_domain_name(
domainName=domain_name
)
self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
rs = apigw_client.get_domain_name(
domainName=domain_name
)
self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
self.assertEqual(rs['domainName'], domain_name)
# clean up
apigw_client.delete_domain_name(domainName=domain_name)
    def _test_api_gateway_lambda_proxy_integration_any_method(self, fn_name, path):
        """Shared helper: deploy an ANY-method Lambda proxy API at ``path`` and
        exercise it with each HTTP verb, checking the echoed method."""
        self.create_lambda_function(fn_name)

        # create API Gateway and connect it to the Lambda proxy backend
        lambda_uri = aws_stack.lambda_function_arn(fn_name)
        target_uri = aws_stack.apigateway_invocations_arn(lambda_uri)
        result = testutil.connect_api_gateway_to_http_with_lambda_proxy(
            'test_gateway3', target_uri, methods=['ANY'], path=path, stage_name=self.TEST_STAGE_NAME)

        # make test request to gateway and check response
        path = path.replace('{test_param1}', 'foo1')
        url = gateway_request_url(api_id=result['id'], stage_name=self.TEST_STAGE_NAME, path=path)

        data = {}
        for method in ('GET', 'POST', 'PUT', 'PATCH', 'DELETE', 'OPTIONS'):
            # only body-carrying verbs send a JSON payload
            body = json.dumps(data) if method in ('POST', 'PUT', 'PATCH') else None
            result = getattr(requests, method.lower())(url, data=body)
            if method != 'DELETE':
                self.assertEqual(result.status_code, 200)
                parsed_body = json.loads(to_str(result.content))
                self.assertEqual(parsed_body.get('httpMethod'), method)
            else:
                # DELETE responds with 204 No Content
                self.assertEqual(result.status_code, 204)
def test_apigateway_with_custom_authorization_method(self):
apigw_client = aws_stack.connect_to_service('apigateway')
# create Lambda function
lambda_name = 'apigw-lambda-%s' % short_uid()
self.create_lambda_function(lambda_name)
lambda_uri = aws_stack.lambda_function_arn(lambda_name)
# create REST API
api = apigw_client.create_rest_api(name='test-api', description='')
api_id = api['id']
root_res_id = apigw_client.get_resources(restApiId=api_id)['items'][0]['id']
# create authorizer at root resource
authorizer = apigw_client.create_authorizer(
restApiId=api_id,
name='lambda_authorizer',
type='TOKEN',
authorizerUri='arn:aws:apigateway:us-east-1:lambda:path/ \
2015-03-31/functions/{}/invocations'.format(lambda_uri),
identitySource='method.request.header.Auth'
)
# create method with custom authorizer
is_api_key_required = True
method_response = apigw_client.put_method(
restApiId=api_id, resourceId=root_res_id, httpMethod='GET', authorizationType='CUSTOM',
authorizerId=authorizer['id'], apiKeyRequired=is_api_key_required
)
self.assertEqual(authorizer['id'], method_response['authorizerId'])
# clean up
lambda_client = aws_stack.connect_to_service('lambda')
lambda_client.delete_function(FunctionName=lambda_name)
apigw_client.delete_rest_api(restApiId=api_id)
def test_create_model(self):
client = aws_stack.connect_to_service('apigateway')
response = client.create_rest_api(name='my_api', description='this is my api')
rest_api_id = response['id']
dummy_rest_api_id = '_non_existing_'
model_name = 'testModel'
description = 'test model'
content_type = 'application/json'
# success case with valid params
response = client.create_model(
restApiId=rest_api_id,
name=model_name,
description=description,
contentType=content_type,
)
self.assertEqual(response['name'], model_name)
self.assertEqual(response['description'], description)
try:
client.create_model(
restApiId=dummy_rest_api_id,
name=model_name,
description=description,
contentType=content_type,
)
self.fail('This call should not be successful as the rest api is not valid.')
except ClientError as e:
self.assertEqual(e.response['Error']['Code'], 'NotFoundException')
self.assertEqual(e.response['Error']['Message'], 'Invalid Rest API Id specified')
try:
client.create_model(
restApiId=dummy_rest_api_id,
name='',
description=description,
contentType=content_type,
)
self.fail('This call should not be successful as the model name is not specified.')
except ClientError as e:
self.assertEqual(e.response['Error']['Code'], 'BadRequestException')
self.assertEqual(e.response['Error']['Message'], 'No Model Name specified')
# clean up
client.delete_rest_api(restApiId=rest_api_id)
    def test_get_api_models(self):
        """get_models: empty before any model is created, populated after."""
        client = aws_stack.connect_to_service('apigateway')
        response = client.create_rest_api(name='my_api', description='this is my api')
        rest_api_id = response['id']
        model_name = 'testModel'
        description = 'test model'
        content_type = 'application/json'

        # when no models are present
        result = client.get_models(restApiId=rest_api_id)
        self.assertEqual(result['items'], [])

        # add a model
        client.create_model(
            restApiId=rest_api_id,
            name=model_name,
            description=description,
            contentType=content_type,
        )

        # get models after adding
        result = client.get_models(restApiId=rest_api_id)
        self.assertEqual(result['items'][0]['name'], model_name)
        self.assertEqual(result['items'][0]['description'], description)

        # clean up
        client.delete_rest_api(restApiId=rest_api_id)
    def test_request_validator(self):
        """CRUD round-trip for request validators on a REST API."""
        client = aws_stack.connect_to_service('apigateway')
        response = client.create_rest_api(name='my_api', description='this is my api')
        rest_api_id = response['id']

        # CREATE
        name = 'validator123'
        result = client.create_request_validator(restApiId=rest_api_id, name=name)
        self.assertEqual(result['ResponseMetadata']['HTTPStatusCode'], 200)
        validator_id = result['id']

        # LIST
        result = client.get_request_validators(restApiId=rest_api_id)
        self.assertEqual(result['ResponseMetadata']['HTTPStatusCode'], 200)
        self.assertEqual(result['items'], [{'id': validator_id, 'name': name}])

        # GET
        result = client.get_request_validator(restApiId=rest_api_id, requestValidatorId=validator_id)
        self.assertEqual(result['ResponseMetadata']['HTTPStatusCode'], 200)
        self.assertEqual(select_attributes(result, ['id', 'name']), {'id': validator_id, 'name': name})

        # UPDATE (empty patch - only verifies that the call succeeds)
        result = client.update_request_validator(restApiId=rest_api_id, requestValidatorId=validator_id,
                                                 patchOperations=[])

        # DELETE - subsequent GET/DELETE on the removed validator must raise
        client.delete_request_validator(restApiId=rest_api_id, requestValidatorId=validator_id)
        with self.assertRaises(Exception):
            client.get_request_validator(restApiId=rest_api_id, requestValidatorId=validator_id)
        with self.assertRaises(Exception):
            client.delete_request_validator(restApiId=rest_api_id, requestValidatorId=validator_id)

        # clean up
        client.delete_rest_api(restApiId=rest_api_id)
    def test_base_path_mapping(self):
        """CRUD round-trip for base path mappings on a custom domain."""
        client = aws_stack.connect_to_service('apigateway')
        response = client.create_rest_api(name='my_api', description='this is my api')
        rest_api_id = response['id']

        # CREATE
        domain_name = 'domain1.example.com'
        base_path = '/foo'
        result = client.create_base_path_mapping(
            domainName=domain_name, basePath=base_path, restApiId=rest_api_id, stage='dev')
        self.assertEqual(result['ResponseMetadata']['HTTPStatusCode'], 200)

        # LIST
        result = client.get_base_path_mappings(domainName=domain_name)
        self.assertEqual(result['ResponseMetadata']['HTTPStatusCode'], 200)
        expected = {'basePath': base_path, 'restApiId': rest_api_id, 'stage': 'dev'}
        self.assertEqual(result['items'], [expected])

        # GET
        result = client.get_base_path_mapping(domainName=domain_name, basePath=base_path)
        self.assertEqual(result['ResponseMetadata']['HTTPStatusCode'], 200)
        self.assertEqual(select_attributes(result, ['basePath', 'restApiId', 'stage']), expected)

        # UPDATE (empty patch - only verifies that the call succeeds)
        result = client.update_base_path_mapping(domainName=domain_name, basePath=base_path,
                                                 patchOperations=[])

        # DELETE - subsequent GET/DELETE on the removed mapping must raise
        client.delete_base_path_mapping(domainName=domain_name, basePath=base_path)
        with self.assertRaises(Exception):
            client.get_base_path_mapping(domainName=domain_name, basePath=base_path)
        with self.assertRaises(Exception):
            client.delete_base_path_mapping(domainName=domain_name, basePath=base_path)
        # NOTE(review): the REST API created above is not deleted here
    def test_api_account(self):
        """get_account/update_account: patch an extra value into 'features'."""
        client = aws_stack.connect_to_service('apigateway')
        response = client.create_rest_api(name='my_api', description='test 123')
        rest_api_id = response['id']

        result = client.get_account()
        self.assertIn('UsagePlans', result['features'])
        # append 'foobar' to the features list via a JSON-patch 'add' op
        result = client.update_account(patchOperations=[{'op': 'add', 'path': '/features/-', 'value': 'foobar'}])
        self.assertIn('foobar', result['features'])

        # clean up
        client.delete_rest_api(restApiId=rest_api_id)
    def test_get_model_by_name(self):
        """get_model returns a created model by name; invalid API id raises."""
        client = aws_stack.connect_to_service('apigateway')
        response = client.create_rest_api(name='my_api', description='this is my api')
        rest_api_id = response['id']
        dummy_rest_api_id = '_non_existing_'
        model_name = 'testModel'
        description = 'test model'
        content_type = 'application/json'

        # add a model
        client.create_model(
            restApiId=rest_api_id,
            name=model_name,
            description=description,
            contentType=content_type,
        )

        # get models after adding
        result = client.get_model(restApiId=rest_api_id, modelName=model_name)
        self.assertEqual(result['name'], model_name)
        self.assertEqual(result['description'], description)

        try:
            client.get_model(restApiId=dummy_rest_api_id, modelName=model_name)
            self.fail('This call should not be successful as the model is not created.')
        except ClientError as e:
            self.assertEqual(e.response['Error']['Code'], 'NotFoundException')
            self.assertEqual(e.response['Error']['Message'], 'Invalid Rest API Id specified')
        # NOTE(review): no delete_rest_api() cleanup here - the created API
        # outlives this test
def test_get_model_with_invalid_name(self):
client = aws_stack.connect_to_service('apigateway')
response = client.create_rest_api(name='my_api', description='this is my api')
rest_api_id = response['id']
# test with an invalid model name
try:
client.get_model(restApiId=rest_api_id, modelName='fake')
self.fail('This call should not be successful as the model is not created.')
except ClientError as e:
self.assertEqual(e.response['Error']['Code'], 'NotFoundException')
# clean up
client.delete_rest_api(restApiId=rest_api_id)
    def test_put_integration_dynamodb_proxy_validation_without_response_template(self):
        """Without a response template the DynamoDB proxy integration must 404."""
        api_id = self.create_api_gateway_and_deploy({})
        url = gateway_request_url(api_id=api_id, stage_name='staging', path='/')
        response = requests.put(
            url,
            json.dumps({'id': 'id1', 'data': 'foobar123'}),
        )
        self.assertEqual(response.status_code, 404)
    def test_put_integration_dynamodb_proxy_validation_with_response_template(self):
        """With a PutItem response template, a PUT must store the item in DynamoDB."""
        response_templates = {'application/json': json.dumps({'TableName': 'MusicCollection',
                                                              'Item': {'id': '$.Id', 'data': '$.data'}})}
        api_id = self.create_api_gateway_and_deploy(response_templates)
        url = gateway_request_url(api_id=api_id, stage_name='staging', path='/')
        response = requests.put(
            url,
            json.dumps({'id': 'id1', 'data': 'foobar123'}),
        )
        self.assertEqual(response.status_code, 200)

        # the item must have been written to the backing table
        dynamo_client = aws_stack.connect_to_resource('dynamodb')
        table = dynamo_client.Table('MusicCollection')
        result = table.get_item(Key={'id': 'id1'})
        self.assertEqual(result['Item']['data'], 'foobar123')
    def test_api_key_required_for_methods(self):
        """Requests to a method with apiKeyRequired=True must be rejected (403)
        without a valid key and accepted once a key is attached to a usage plan."""
        response_templates = {'application/json': json.dumps({'TableName': 'MusicCollection',
                                                              'Item': {'id': '$.Id', 'data': '$.data'}})}
        api_id = self.create_api_gateway_and_deploy(response_templates, True)
        url = gateway_request_url(api_id=api_id, stage_name='staging', path='/')

        # create a usage plan covering the deployed stage
        payload = {
            'name': 'TEST-PLAN-2',
            'description': 'Description',
            'quota': {'limit': 10, 'period': 'DAY', 'offset': 0},
            'throttle': {'rateLimit': 2, 'burstLimit': 1},
            'apiStages': [{'apiId': api_id, 'stage': 'staging'}],
            'tags': {'tag_key': 'tag_value'},
        }

        client = aws_stack.connect_to_service('apigateway')
        usage_plan_id = client.create_usage_plan(**payload)['id']

        key_name = 'testApiKey'
        key_type = 'API_KEY'
        api_key = client.create_api_key(name=key_name)

        # attach the key to the usage plan
        payload = {'usagePlanId': usage_plan_id, 'keyId': api_key['id'], 'keyType': key_type}
        client.create_usage_plan_key(**payload)

        response = requests.put(
            url,
            json.dumps({'id': 'id1', 'data': 'foobar123'}),
        )
        # when the api key is not passed as part of the header
        self.assertEqual(response.status_code, 403)

        response = requests.put(
            url,
            json.dumps({'id': 'id1', 'data': 'foobar123'}),
            headers={'X-API-Key': api_key['value']}
        )
        # when the api key is passed as part of the header
        self.assertEqual(response.status_code, 200)
    def test_multiple_api_keys_validate(self):
        """Any API key attached to any usage plan of the stage must be accepted."""
        response_templates = {'application/json': json.dumps({'TableName': 'MusicCollection',
                                                              'Item': {'id': '$.Id', 'data': '$.data'}})}
        api_id = self.create_api_gateway_and_deploy(response_templates, True)
        url = gateway_request_url(api_id=api_id, stage_name='staging', path='/')

        client = aws_stack.connect_to_service('apigateway')

        # Create multiple usage plans
        usage_plan_ids = []
        for i in range(2):
            payload = {
                'name': 'APIKEYTEST-PLAN-{}'.format(i),
                'description': 'Description',
                'quota': {'limit': 10, 'period': 'DAY', 'offset': 0},
                'throttle': {'rateLimit': 2, 'burstLimit': 1},
                'apiStages': [{'apiId': api_id, 'stage': 'staging'}],
                'tags': {'tag_key': 'tag_value'},
            }
            usage_plan_ids.append(client.create_usage_plan(**payload)['id'])

        api_keys = []
        key_type = 'API_KEY'
        # Create multiple API Keys in each usage plan
        for usage_plan_id in usage_plan_ids:
            for i in range(2):
                api_key = client.create_api_key(name='testMultipleApiKeys{}'.format(i))
                payload = {'usagePlanId': usage_plan_id, 'keyId': api_key['id'], 'keyType': key_type}
                client.create_usage_plan_key(**payload)
                api_keys.append(api_key['value'])

        response = requests.put(
            url,
            json.dumps({'id': 'id1', 'data': 'foobar123'}),
        )
        # when the api key is not passed as part of the header
        self.assertEqual(response.status_code, 403)

        # Check All API Keys work
        for key in api_keys:
            response = requests.put(
                url,
                json.dumps({'id': 'id1', 'data': 'foobar123'}),
                headers={'X-API-Key': key}
            )
            # when the api key is passed as part of the header
            self.assertEqual(response.status_code, 200)
    def test_import_rest_api(self):
        """put_rest_api(mode='overwrite') and import_rest_api from Swagger specs."""
        rest_api_name = 'restapi-%s' % short_uid()

        client = aws_stack.connect_to_service('apigateway')
        rest_api_id = client.create_rest_api(name=rest_api_name)['id']

        # overwrite an existing API from a Swagger file
        spec_file = load_file(TEST_SWAGGER_FILE)
        rs = client.put_rest_api(
            restApiId=rest_api_id, body=spec_file, mode='overwrite'
        )
        self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)

        rs = client.get_resources(restApiId=rest_api_id)
        self.assertEqual(len(rs['items']), 1)
        resource = rs['items'][0]
        self.assertEqual(resource['path'], '/test')
        self.assertIn('GET', resource['resourceMethods'])

        # clean up
        client.delete_rest_api(restApiId=rest_api_id)

        # import a brand-new API from a Swagger file
        spec_file = load_file(TEST_IMPORT_REST_API_FILE)
        rs = client.import_rest_api(
            body=spec_file
        )
        self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)

        rest_api_id = rs['id']
        rs = client.get_resources(restApiId=rest_api_id)
        resources = rs['items']
        self.assertEqual(len(resources), 2)

        paths = [res['path'] for res in resources]
        self.assertIn('/pets', paths)
        self.assertIn('/pets/{petId}', paths)

        # clean up
        client.delete_rest_api(restApiId=rest_api_id)
    def test_step_function_integrations(self):
        """Wire API Gateway to the StepFunctions StartExecution action, both
        with a request template (mapping the request body into the execution
        input) and without one (caller supplies the full payload)."""
        client = aws_stack.connect_to_service('apigateway')
        sfn_client = aws_stack.connect_to_service('stepfunctions')
        lambda_client = aws_stack.connect_to_service('lambda')

        state_machine_name = 'test'
        state_machine_def = {
            'Comment': 'Hello World example',
            'StartAt': 'step1',
            'States': {
                'step1': {
                    'Type': 'Task',
                    'Resource': '__tbd__',
                    'End': True
                },
            }
        }

        # create state machine
        fn_name = 'test-stepfunctions-apigw'
        testutil.create_lambda_function(
            handler_file=TEST_LAMBDA_ECHO_FILE, func_name=fn_name, runtime=LAMBDA_RUNTIME_PYTHON36)
        resp = lambda_client.list_functions()  # NOTE(review): result unused - looks like leftover debugging
        role_arn = aws_stack.role_arn('sfn_role')

        # substitute the echo Lambda's ARN into the state machine definition
        definition = clone(state_machine_def)
        lambda_arn_1 = aws_stack.lambda_function_arn(fn_name)
        definition['States']['step1']['Resource'] = lambda_arn_1
        definition = json.dumps(definition)
        sm_arn = 'arn:aws:states:%s:%s:stateMachine:%s' \
            % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, state_machine_name)
        sfn_client.create_state_machine(name=state_machine_name, definition=definition, roleArn=role_arn)

        rest_api = client.create_rest_api(name='test', description='test')
        resources = client.get_resources(restApiId=rest_api['id'])

        client.put_method(
            restApiId=rest_api['id'],
            resourceId=resources['items'][0]['id'],
            httpMethod='POST',
            authorizationType='NONE'
        )

        # integration WITH a request template: the VTL maps the request body
        # into the StartExecution 'input' field
        client.put_integration(
            restApiId=rest_api['id'],
            resourceId=resources['items'][0]['id'],
            httpMethod='POST',
            integrationHttpMethod='POST',
            type='AWS',
            uri='arn:aws:apigateway:%s:states:action/StartExecution' % aws_stack.get_region(),
            requestTemplates={
                'application/json': """
                #set($data = $util.escapeJavaScript($input.json('$')))
                {"input": "$data","stateMachineArn": "%s"}
                """ % sm_arn
            },
        )
        client.create_deployment(restApiId=rest_api['id'], stageName='dev')

        url = gateway_request_url(api_id=rest_api['id'], stage_name='dev', path='/')
        test_data = {'test': 'test-value'}
        resp = requests.post(url, data=json.dumps(test_data))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('executionArn', resp.content.decode())
        self.assertIn('startDate', resp.content.decode())

        # replace with an integration WITHOUT a request template: the caller
        # must then provide the full StartExecution payload itself
        client.delete_integration(
            restApiId=rest_api['id'],
            resourceId=resources['items'][0]['id'],
            httpMethod='POST',
        )
        client.put_integration(
            restApiId=rest_api['id'],
            resourceId=resources['items'][0]['id'],
            httpMethod='POST',
            integrationHttpMethod='POST',
            type='AWS',
            uri='arn:aws:apigateway:%s:states:action/StartExecution' % aws_stack.get_region(),
        )
        test_data = {
            'input': json.dumps({'test': 'test-value'}),
            'name': 'MyExecution',
            'stateMachineArn': '{}'.format(sm_arn)
        }
        resp = requests.post(url, data=json.dumps(test_data))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('executionArn', resp.content.decode())
        self.assertIn('startDate', resp.content.decode())

        # Clean up
        lambda_client.delete_function(FunctionName=fn_name)
        sfn_client.delete_state_machine(stateMachineArn=sm_arn)
        client.delete_rest_api(restApiId=rest_api['id'])
    def test_api_gateway_http_integration_with_path_request_parmeter(self):
        """Path parameters must be substituted into the HTTP integration URL."""
        client = aws_stack.connect_to_service('apigateway')
        test_port = get_free_tcp_port()
        backend_url = 'http://localhost:%s/person/{id}' % (test_port)

        # start test HTTP backend
        proxy = self.start_http_backend(test_port)

        # create rest api with resource path /person/{id}
        api_rest = client.create_rest_api(name='test')
        api_id = api_rest['id']
        parent_response = client.get_resources(restApiId=api_id)
        parent_id = parent_response['items'][0]['id']
        resource_1 = client.create_resource(restApiId=api_id, parentId=parent_id, pathPart='person')
        resource_1_id = resource_1['id']
        resource_2 = client.create_resource(restApiId=api_id, parentId=resource_1_id, pathPart='{id}')
        resource_2_id = resource_2['id']
        client.put_method(
            restApiId=api_id, resourceId=resource_2_id, httpMethod='GET', authorizationType='NONE',
            apiKeyRequired=False, requestParameters={'method.request.path.id': True})
        # map the method's path parameter onto the integration request path
        client.put_integration(
            restApiId=api_id,
            resourceId=resource_2_id,
            httpMethod='GET',
            integrationHttpMethod='GET',
            type='HTTP',
            uri=backend_url,
            timeoutInMillis=3000,
            contentHandling='CONVERT_TO_BINARY',
            requestParameters={'integration.request.path.id': 'method.request.path.id'})
        client.create_deployment(restApiId=api_id, stageName='test')

        url = f'http://localhost:{config.EDGE_PORT}/restapis/{api_id}/test/_user_request_/person/123'
        result = requests.get(url)
        content = json.loads(result._content)
        self.assertEqual(result.status_code, 200)
        # the backend echoes the URL it was called with; '123' must have been
        # substituted for the {id} placeholder
        self.assertEqual(content['headers'].get(HEADER_LOCALSTACK_REQUEST_URL),
                         f'http://localhost:{config.EDGE_PORT}/person/123')

        # clean up
        client.delete_rest_api(restApiId=api_id)
        proxy.stop()
# =====================================================================
# Helper methods
# =====================================================================
    def connect_api_gateway_to_kinesis(self, gateway_name, kinesis_stream):
        """Create an API gateway whose POST forwards to Kinesis PutRecords and
        whose GET invokes Kinesis ListStreams on the given stream."""
        resources = {}
        template = self.APIGATEWAY_DATA_INBOUND_TEMPLATE % kinesis_stream
        resource_path = self.API_PATH_DATA_INBOUND.replace('/', '')
        resources[resource_path] = [{
            'httpMethod': 'POST',
            'authorizationType': 'NONE',
            'integrations': [{
                'type': 'AWS',
                'uri': 'arn:aws:apigateway:%s:kinesis:action/PutRecords' % aws_stack.get_region(),
                'requestTemplates': {
                    'application/json': template
                }
            }]
        }, {
            'httpMethod': 'GET',
            'authorizationType': 'NONE',
            'integrations': [{
                'type': 'AWS',
                'uri': 'arn:aws:apigateway:%s:kinesis:action/ListStreams' % aws_stack.get_region(),
                'requestTemplates': {
                    'application/json': '{}'
                }
            }]
        }]
        return aws_stack.create_api_gateway(
            name=gateway_name,
            resources=resources,
            stage_name=self.TEST_STAGE_NAME
        )
def connect_api_gateway_to_http(self, int_type, gateway_name, target_url, methods=[], path=None):
if not methods:
methods = ['GET', 'POST']
if not path:
path = '/'
resources = {}
resource_path = path.replace('/', '')
resources[resource_path] = []
req_templates = {
'application/json': json.dumps({'foo': 'bar'})
} if int_type == 'custom' else {}
for method in methods:
resources[resource_path].append({
'httpMethod': method,
'integrations': [{
'type': 'HTTP' if int_type == 'custom' else 'HTTP_PROXY',
'uri': target_url,
'requestTemplates': req_templates,
'responseTemplates': {}
}]
})
return aws_stack.create_api_gateway(
name=gateway_name,
resources=resources,
stage_name=self.TEST_STAGE_NAME
)
    @staticmethod
    def create_lambda_function(fn_name):
        """Deploy the standard Python test Lambda under the given name."""
        testutil.create_lambda_function(
            handler_file=TEST_LAMBDA_PYTHON, libs=TEST_LAMBDA_LIBS, func_name=fn_name)
    @staticmethod
    def start_http_backend(test_port):
        """Start a local HTTP backend on ``test_port`` that echoes the request
        body and headers back as JSON with status 200; returns the proxy so
        callers can ``stop()`` it."""
        # test listener for target HTTP backend
        class TestListener(ProxyListener):

            def forward_request(self, **kwargs):
                response = Response()
                response.status_code = 200
                result = {
                    'data': kwargs.get('data') or '{}',
                    'headers': dict(kwargs.get('headers'))
                }
                response._content = json.dumps(json_safe(result))
                return response

        proxy = start_proxy(test_port, update_listener=TestListener())
        return proxy
    @staticmethod
    def create_api_gateway_and_deploy(response_template, is_api_key_required=False):
        """Create a REST API whose root PUT method is backed by a DynamoDB
        PutItem AWS_PROXY integration, deploy it to stage 'staging', and
        return the API id."""
        apigw_client = aws_stack.connect_to_service('apigateway')
        response = apigw_client.create_rest_api(name='my_api', description='this is my api')
        api_id = response['id']
        resources = apigw_client.get_resources(restApiId=api_id)
        root_resources = [resource for resource in resources['items'] if resource['path'] == '/']
        root_id = root_resources[0]['id']

        apigw_client.put_method(
            restApiId=api_id, resourceId=root_id, httpMethod='PUT', authorizationType='NONE',
            apiKeyRequired=is_api_key_required
        )
        apigw_client.put_method_response(
            restApiId=api_id, resourceId=root_id, httpMethod='PUT', statusCode='200',
        )

        # backing table for the PutItem integration
        aws_stack.create_dynamodb_table('MusicCollection', partition_key='id')

        # Ensure that it works fine when providing the integrationHttpMethod-argument
        apigw_client.put_integration(
            restApiId=api_id,
            resourceId=root_id,
            httpMethod='PUT',
            integrationHttpMethod='PUT',
            type='AWS_PROXY',
            uri='arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection',
        )
        apigw_client.put_integration_response(
            restApiId=api_id,
            resourceId=root_id,
            httpMethod='PUT',
            statusCode='200',
            selectionPattern='',
            responseTemplates=response_template)

        apigw_client.create_deployment(restApiId=api_id, stageName='staging')
        return api_id
| 42.032007 | 119 | 0.638828 |
7957d045aef512b56c642005e8a4bb6b9407701e | 1,769 | py | Python | django_blog/blog/views.py | IvanSEfimov/django-blog | bcf49fdb59aaad5db622e10cc5e7ceed575a0c85 | [
"MIT"
] | null | null | null | django_blog/blog/views.py | IvanSEfimov/django-blog | bcf49fdb59aaad5db622e10cc5e7ceed575a0c85 | [
"MIT"
] | null | null | null | django_blog/blog/views.py | IvanSEfimov/django-blog | bcf49fdb59aaad5db622e10cc5e7ceed575a0c85 | [
"MIT"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.models import User
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView
)
from .models import Post
class PostListView(ListView):
    """Home page: all posts, newest first, paginated 5 per page."""
    model = Post
    template_name = 'blog/home.html'
    context_object_name = 'posts'
    ordering = ['-date_posted']
    paginate_by = 5
class UserPostListView(ListView):
    """All posts of a single user, looked up from the ``username`` URL kwarg."""
    model = Post
    template_name = 'blog/user_posts.html'
    context_object_name = 'posts'
    paginate_by = 5

    def get_queryset(self):
        # 404 if the user does not exist; newest posts first
        user = get_object_or_404(User, username=self.kwargs.get('username'))
        return Post.objects.filter(author=user).order_by('-date_posted')
class PostDetailView(DetailView):
    """Detail page for a single post."""
    model = Post
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a post; only its author may delete it.

    ``LoginRequiredMixin`` is listed before ``UserPassesTestMixin`` so that
    anonymous users are redirected to the login page instead of having the
    author check evaluated (and failing with a 403) first.
    """
    model = Post
    success_url = '/'

    def test_func(self):
        # authorization check for UserPassesTestMixin: author-only access
        post = self.get_object()
        return self.request.user == post.author
class PostCreateView(LoginRequiredMixin, CreateView):
    """Create a post; the logged-in user is stamped as the author."""
    model = Post
    fields = ['title', 'content']

    def form_valid(self, form):
        # set the current user as author before the form saves the instance
        form.instance.author = self.request.user
        return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit a post; only its author may edit it.

    ``LoginRequiredMixin`` is listed before ``UserPassesTestMixin`` so that
    anonymous users are redirected to the login page instead of having the
    author check evaluated (and failing with a 403) first.
    """
    model = Post
    fields = ['title', 'content']

    def form_valid(self, form):
        # keep the current user as author before the form saves the instance
        form.instance.author = self.request.user
        return super().form_valid(form)

    def test_func(self):
        # authorization check for UserPassesTestMixin: author-only access
        post = self.get_object()
        return self.request.user == post.author
def about(request):
    """Render the static 'About' page."""
    return render(request, 'blog/about.html', {'title': 'About'})
| 24.915493 | 78 | 0.695873 |
7957d07a1220b313a0591e35c41e974e21c0768b | 606 | py | Python | potions/migrations/0002_potion_user.py | NievesBorrero/potionlab | 2ce4c97906bd6d8ea84e1d6e2a5afdad68182bd2 | [
"MIT"
] | 11 | 2020-01-28T10:46:13.000Z | 2020-02-10T20:20:08.000Z | potions/migrations/0002_potion_user.py | NievesBorrero/potionlab | 2ce4c97906bd6d8ea84e1d6e2a5afdad68182bd2 | [
"MIT"
] | null | null | null | potions/migrations/0002_potion_user.py | NievesBorrero/potionlab | 2ce4c97906bd6d8ea84e1d6e2a5afdad68182bd2 | [
"MIT"
] | null | null | null | # Generated by Django 2.0 on 2020-01-21 09:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds a nullable 'user' foreign key (to AUTH_USER_MODEL) on Potion.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('potions', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='potion',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 27.545455 | 154 | 0.674917 |
7957d1b1ab9c83bd79151c1a498645588e0563a3 | 1,062 | py | Python | h2o-py/h2o/model/anomaly_detection.py | koniecsveta/h2o-3 | b672bd80a08b0c899086b0ae24985ddb1c537de0 | [
"Apache-2.0"
] | null | null | null | h2o-py/h2o/model/anomaly_detection.py | koniecsveta/h2o-3 | b672bd80a08b0c899086b0ae24985ddb1c537de0 | [
"Apache-2.0"
] | null | null | null | h2o-py/h2o/model/anomaly_detection.py | koniecsveta/h2o-3 | b672bd80a08b0c899086b0ae24985ddb1c537de0 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from .model_base import ModelBase
from h2o.utils.shared_utils import can_use_pandas
class H2OAnomalyDetectionModel(ModelBase):

    def varsplits(self, use_pandas=False):
        """
        Retrieve per-variable split information for a given Isolation Forest model.

        :param use_pandas: If True, then the variable splits will be returned as a pandas data frame.

        :returns: The split table as a list of rows (or a pandas ``DataFrame``
            when requested), or ``None`` (after printing a warning) if the
            model carries no variable split information.
        """
        model = self._model_json["output"]
        # single lookup replaces the former `"variable_splits" in
        # list(model.keys())` check plus re-fetch; the table must be present
        # AND non-empty
        variable_splits = model.get("variable_splits")
        if variable_splits:
            vals = variable_splits.cell_values
            header = variable_splits.col_header
            if use_pandas and can_use_pandas():
                import pandas
                return pandas.DataFrame(vals, columns=header)
            return vals
        print("Warning: This model doesn't provide variable split information")
7957d33b4ebd365066ed8e2758ede568260257a7 | 25,342 | py | Python | neutron/agent/linux/iptables_firewall.py | venkataanil/juno_neutron | 2e62e150c264ccae2dd75fb78caae453eaa77e9f | [
"Apache-2.0"
] | null | null | null | neutron/agent/linux/iptables_firewall.py | venkataanil/juno_neutron | 2e62e150c264ccae2dd75fb78caae453eaa77e9f | [
"Apache-2.0"
] | null | null | null | neutron/agent/linux/iptables_firewall.py | venkataanil/juno_neutron | 2e62e150c264ccae2dd75fb78caae453eaa77e9f | [
"Apache-2.0"
] | null | null | null | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo.config import cfg
from neutron.agent import firewall
from neutron.agent.linux import ipset_manager
from neutron.agent.linux import iptables_manager
from neutron.common import constants
from neutron.common import ipv6_utils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# name of the top-level chain that per-port chains are wired into
SG_CHAIN = 'sg-chain'
INGRESS_DIRECTION = 'ingress'
EGRESS_DIRECTION = 'egress'
SPOOF_FILTER = 'spoof-filter'
# short prefixes used when deriving per-port iptables chain names
CHAIN_NAME_PREFIX = {INGRESS_DIRECTION: 'i',
                     EGRESS_DIRECTION: 'o',
                     SPOOF_FILTER: 's'}
# rule attribute that carries the remote IP prefix for each direction
DIRECTION_IP_PREFIX = {'ingress': 'source_ip_prefix',
                       'egress': 'dest_ip_prefix'}
# ipset match flag ('src'/'dst') used for each traffic direction
IPSET_DIRECTION = {INGRESS_DIRECTION: 'src',
                   EGRESS_DIRECTION: 'dst'}
# maximum length of a Linux network device name
LINUX_DEV_LEN = 14
IPSET_CHAIN_LEN = 17
# thresholds controlling when ipset member updates are done in bulk
IPSET_CHANGE_BULK_THRESHOLD = 10
IPSET_ADD_BULK_THRESHOLD = 5
class IptablesFirewallDriver(firewall.FirewallDriver):
"""Driver which enforces security groups through iptables rules."""
IPTABLES_DIRECTION = {INGRESS_DIRECTION: 'physdev-out',
EGRESS_DIRECTION: 'physdev-in'}
def __init__(self):
self.root_helper = cfg.CONF.AGENT.root_helper
self.iptables = iptables_manager.IptablesManager(
root_helper=self.root_helper,
use_ipv6=ipv6_utils.is_enabled())
# TODO(majopela, shihanzhang): refactor out ipset to a separate
# driver composed over this one
self.ipset = ipset_manager.IpsetManager(root_helper=self.root_helper)
# list of port which has security group
self.filtered_ports = {}
self._add_fallback_chain_v4v6()
self._defer_apply = False
self._pre_defer_filtered_ports = None
# List of security group rules for ports residing on this host
self.sg_rules = {}
self.pre_sg_rules = None
# List of security group member ips for ports residing on this host
self.sg_members = {}
self.pre_sg_members = None
self.ipset_chains = {}
self.enable_ipset = cfg.CONF.SECURITYGROUP.enable_ipset
@property
def ports(self):
return self.filtered_ports
def update_security_group_rules(self, sg_id, sg_rules):
LOG.debug("Update rules of security group (%s)", sg_id)
self.sg_rules[sg_id] = sg_rules
def update_security_group_members(self, sg_id, sg_members):
LOG.debug("Update members of security group (%s)", sg_id)
self.sg_members[sg_id] = sg_members
def prepare_port_filter(self, port):
LOG.debug(_("Preparing device (%s) filter"), port['device'])
self._remove_chains()
self.filtered_ports[port['device']] = port
# each security group has it own chains
self._setup_chains()
self.iptables.apply()
def update_port_filter(self, port):
LOG.debug(_("Updating device (%s) filter"), port['device'])
if port['device'] not in self.filtered_ports:
LOG.info(_('Attempted to update port filter which is not '
'filtered %s'), port['device'])
return
self._remove_chains()
self.filtered_ports[port['device']] = port
self._setup_chains()
self.iptables.apply()
def remove_port_filter(self, port):
LOG.debug(_("Removing device (%s) filter"), port['device'])
if not self.filtered_ports.get(port['device']):
LOG.info(_('Attempted to remove port filter which is not '
'filtered %r'), port)
return
self._remove_chains()
self.filtered_ports.pop(port['device'], None)
self._setup_chains()
self.iptables.apply()
def _setup_chains(self):
"""Setup ingress and egress chain for a port."""
if not self._defer_apply:
self._setup_chains_apply(self.filtered_ports)
def _setup_chains_apply(self, ports):
self._add_chain_by_name_v4v6(SG_CHAIN)
for port in ports.values():
self._setup_chain(port, INGRESS_DIRECTION)
self._setup_chain(port, EGRESS_DIRECTION)
self.iptables.ipv4['filter'].add_rule(SG_CHAIN, '-j ACCEPT')
self.iptables.ipv6['filter'].add_rule(SG_CHAIN, '-j ACCEPT')
def _remove_chains(self):
"""Remove ingress and egress chain for a port."""
if not self._defer_apply:
self._remove_chains_apply(self.filtered_ports)
def _remove_chains_apply(self, ports):
for port in ports.values():
self._remove_chain(port, INGRESS_DIRECTION)
self._remove_chain(port, EGRESS_DIRECTION)
self._remove_chain(port, SPOOF_FILTER)
self._remove_chain_by_name_v4v6(SG_CHAIN)
def _setup_chain(self, port, DIRECTION):
self._add_chain(port, DIRECTION)
self._add_rule_by_security_group(port, DIRECTION)
def _remove_chain(self, port, DIRECTION):
chain_name = self._port_chain_name(port, DIRECTION)
self._remove_chain_by_name_v4v6(chain_name)
def _add_fallback_chain_v4v6(self):
self.iptables.ipv4['filter'].add_chain('sg-fallback')
self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
self.iptables.ipv6['filter'].add_chain('sg-fallback')
self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
def _add_chain_by_name_v4v6(self, chain_name):
self.iptables.ipv6['filter'].add_chain(chain_name)
self.iptables.ipv4['filter'].add_chain(chain_name)
def _remove_chain_by_name_v4v6(self, chain_name):
self.iptables.ipv4['filter'].ensure_remove_chain(chain_name)
self.iptables.ipv6['filter'].ensure_remove_chain(chain_name)
def _add_rule_to_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules):
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule(chain_name, rule)
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule(chain_name, rule)
def _get_device_name(self, port):
return port['device']
def _add_chain(self, port, direction):
chain_name = self._port_chain_name(port, direction)
self._add_chain_by_name_v4v6(chain_name)
# Note(nati) jump to the security group chain (SG_CHAIN)
# This is needed because the packet may much two rule in port
# if the two port is in the same host
# We accept the packet at the end of SG_CHAIN.
# jump to the security group chain
device = self._get_device_name(port)
jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
'-j $%s' % (self.IPTABLES_DIRECTION[direction],
device,
SG_CHAIN)]
self._add_rule_to_chain_v4v6('FORWARD', jump_rule, jump_rule)
# jump to the chain based on the device
jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
'-j $%s' % (self.IPTABLES_DIRECTION[direction],
device,
chain_name)]
self._add_rule_to_chain_v4v6(SG_CHAIN, jump_rule, jump_rule)
if direction == EGRESS_DIRECTION:
self._add_rule_to_chain_v4v6('INPUT', jump_rule, jump_rule)
def _split_sgr_by_ethertype(self, security_group_rules):
ipv4_sg_rules = []
ipv6_sg_rules = []
for rule in security_group_rules:
if rule.get('ethertype') == constants.IPv4:
ipv4_sg_rules.append(rule)
elif rule.get('ethertype') == constants.IPv6:
if rule.get('protocol') == 'icmp':
rule['protocol'] = 'icmpv6'
ipv6_sg_rules.append(rule)
return ipv4_sg_rules, ipv6_sg_rules
def _select_sgr_by_direction(self, port, direction):
return [rule
for rule in port.get('security_group_rules', [])
if rule['direction'] == direction]
def _setup_spoof_filter_chain(self, port, table, mac_ip_pairs, rules):
if mac_ip_pairs:
chain_name = self._port_chain_name(port, SPOOF_FILTER)
table.add_chain(chain_name)
for mac, ip in mac_ip_pairs:
if ip is None:
# If fixed_ips is [] this rule will be added to the end
# of the list after the allowed_address_pair rules.
table.add_rule(chain_name,
'-m mac --mac-source %s -j RETURN'
% mac)
else:
table.add_rule(chain_name,
'-m mac --mac-source %s -s %s -j RETURN'
% (mac, ip))
table.add_rule(chain_name, '-j DROP')
rules.append('-j $%s' % chain_name)
def _build_ipv4v6_mac_ip_list(self, mac, ip_address, mac_ipv4_pairs,
mac_ipv6_pairs):
if netaddr.IPNetwork(ip_address).version == 4:
mac_ipv4_pairs.append((mac, ip_address))
else:
mac_ipv6_pairs.append((mac, ip_address))
def _spoofing_rule(self, port, ipv4_rules, ipv6_rules):
#Note(nati) allow dhcp or RA packet
ipv4_rules += ['-p udp -m udp --sport 68 --dport 67 -j RETURN']
ipv6_rules += ['-p icmpv6 -j RETURN']
ipv6_rules += ['-p udp -m udp --sport 546 --dport 547 -j RETURN']
mac_ipv4_pairs = []
mac_ipv6_pairs = []
if isinstance(port.get('allowed_address_pairs'), list):
for address_pair in port['allowed_address_pairs']:
self._build_ipv4v6_mac_ip_list(address_pair['mac_address'],
address_pair['ip_address'],
mac_ipv4_pairs,
mac_ipv6_pairs)
for ip in port['fixed_ips']:
self._build_ipv4v6_mac_ip_list(port['mac_address'], ip,
mac_ipv4_pairs, mac_ipv6_pairs)
if not port['fixed_ips']:
mac_ipv4_pairs.append((port['mac_address'], None))
mac_ipv6_pairs.append((port['mac_address'], None))
self._setup_spoof_filter_chain(port, self.iptables.ipv4['filter'],
mac_ipv4_pairs, ipv4_rules)
self._setup_spoof_filter_chain(port, self.iptables.ipv6['filter'],
mac_ipv6_pairs, ipv6_rules)
def _drop_dhcp_rule(self, ipv4_rules, ipv6_rules):
#Note(nati) Drop dhcp packet from VM
ipv4_rules += ['-p udp -m udp --sport 67 --dport 68 -j DROP']
ipv6_rules += ['-p udp -m udp --sport 547 --dport 546 -j DROP']
def _accept_inbound_icmpv6(self):
# Allow multicast listener, neighbor solicitation and
# neighbor advertisement into the instance
icmpv6_rules = []
for icmp6_type in constants.ICMPV6_ALLOWED_TYPES:
icmpv6_rules += ['-p icmpv6 --icmpv6-type %s -j RETURN' %
icmp6_type]
return icmpv6_rules
def _select_sg_rules_for_port(self, port, direction):
sg_ids = port.get('security_groups', [])
port_rules = []
fixed_ips = port.get('fixed_ips', [])
for sg_id in sg_ids:
for rule in self.sg_rules.get(sg_id, []):
if rule['direction'] == direction:
if self.enable_ipset:
port_rules.append(rule)
continue
remote_group_id = rule.get('remote_group_id')
if not remote_group_id:
port_rules.append(rule)
continue
ethertype = rule['ethertype']
for ip in self.sg_members[remote_group_id][ethertype]:
if ip in fixed_ips:
continue
ip_rule = rule.copy()
direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
ip_rule[direction_ip_prefix] = str(
netaddr.IPNetwork(ip).cidr)
port_rules.append(ip_rule)
return port_rules
def _get_remote_sg_ids(self, port, direction):
sg_ids = port.get('security_groups', [])
remote_sg_ids = []
for sg_id in sg_ids:
remote_sg_ids.extend([rule['remote_group_id']
for rule in self.sg_rules.get(sg_id, []) if
rule['direction'] == direction
and rule.get('remote_group_id')])
return remote_sg_ids
def _add_rule_by_security_group(self, port, direction):
chain_name = self._port_chain_name(port, direction)
# select rules for current direction
security_group_rules = self._select_sgr_by_direction(port, direction)
security_group_rules += self._select_sg_rules_for_port(port, direction)
if self.enable_ipset:
remote_sg_ids = self._get_remote_sg_ids(port, direction)
# update the corresponding ipset chain member
self._update_ipset_chain_member(remote_sg_ids)
# split groups by ip version
# for ipv4, iptables command is used
# for ipv6, iptables6 command is used
ipv4_sg_rules, ipv6_sg_rules = self._split_sgr_by_ethertype(
security_group_rules)
ipv4_iptables_rule = []
ipv6_iptables_rule = []
if direction == EGRESS_DIRECTION:
self._spoofing_rule(port,
ipv4_iptables_rule,
ipv6_iptables_rule)
self._drop_dhcp_rule(ipv4_iptables_rule, ipv6_iptables_rule)
if direction == INGRESS_DIRECTION:
ipv6_iptables_rule += self._accept_inbound_icmpv6()
ipv4_iptables_rule += self._convert_sgr_to_iptables_rules(
ipv4_sg_rules)
ipv6_iptables_rule += self._convert_sgr_to_iptables_rules(
ipv6_sg_rules)
self._add_rule_to_chain_v4v6(chain_name,
ipv4_iptables_rule,
ipv6_iptables_rule)
def _get_cur_sg_member_ips(self, sg_id, ethertype):
return self.sg_members.get(sg_id, {}).get(ethertype, [])
def _get_pre_sg_member_ips(self, sg_id, ethertype):
return self.pre_sg_members.get(sg_id, {}).get(ethertype, [])
def _get_new_sg_member_ips(self, sg_id, ethertype):
add_member_ips = (set(self._get_cur_sg_member_ips(sg_id, ethertype)) -
set(self._get_pre_sg_member_ips(sg_id, ethertype)))
return list(add_member_ips)
def _get_deleted_sg_member_ips(self, sg_id, ethertype):
del_member_ips = (set(self._get_pre_sg_member_ips(sg_id, ethertype)) -
set(self._get_cur_sg_member_ips(sg_id, ethertype)))
return list(del_member_ips)
def _bulk_set_ips_to_chain(self, chain_name, member_ips, ethertype):
self.ipset.refresh_ipset_chain_by_name(chain_name, member_ips,
ethertype)
self.ipset_chains[chain_name] = member_ips
def _add_ips_to_ipset_chain(self, chain_name, add_ips):
for ip in add_ips:
if ip not in self.ipset_chains[chain_name]:
self.ipset.add_member_to_ipset_chain(chain_name, ip)
self.ipset_chains[chain_name].append(ip)
def _del_ips_from_ipset_chain(self, chain_name, del_ips):
if chain_name in self.ipset_chains:
for del_ip in del_ips:
if del_ip in self.ipset_chains[chain_name]:
self.ipset.del_ipset_chain_member(chain_name, del_ip)
self.ipset_chains[chain_name].remove(del_ip)
    def _update_ipset_chain_member(self, security_group_ids):
        """Sync ipset chain membership for each given remote group.

        For every security group id and address family, reconcile the
        ipset chain with the current member list: create and bulk-fill
        a missing chain, apply a small diff incrementally, or rewrite
        the whole chain when the diff is large.
        """
        for sg_id in security_group_ids or []:
            for ethertype in ['IPv4', 'IPv6']:
                add_ips = self._get_new_sg_member_ips(sg_id, ethertype)
                del_ips = self._get_deleted_sg_member_ips(sg_id, ethertype)
                cur_member_ips = self._get_cur_sg_member_ips(sg_id, ethertype)
                # ipset names are length-limited; the group id is
                # truncated to IPSET_CHAIN_LEN characters.
                chain_name = 'NET' + ethertype + sg_id[:IPSET_CHAIN_LEN]
                if chain_name not in self.ipset_chains and cur_member_ips:
                    self.ipset_chains[chain_name] = []
                    self.ipset.create_ipset_chain(
                        chain_name, ethertype)
                    self._bulk_set_ips_to_chain(chain_name,
                                                cur_member_ips, ethertype)
                elif (len(add_ips) + len(del_ips)
                      < IPSET_CHANGE_BULK_THRESHOLD):
                    # Small diff: cheaper to add/delete individual
                    # members than to rewrite the entire set.
                    self._add_ips_to_ipset_chain(chain_name, add_ips)
                    self._del_ips_from_ipset_chain(chain_name, del_ips)
                else:
                    self._bulk_set_ips_to_chain(chain_name,
                                                cur_member_ips, ethertype)
def _generate_ipset_chain(self, sg_rule, remote_gid):
iptables_rules = []
args = self._protocol_arg(sg_rule.get('protocol'))
args += self._port_arg('sport',
sg_rule.get('protocol'),
sg_rule.get('source_port_range_min'),
sg_rule.get('source_port_range_max'))
args += self._port_arg('dport',
sg_rule.get('protocol'),
sg_rule.get('port_range_min'),
sg_rule.get('port_range_max'))
direction = sg_rule.get('direction')
ethertype = sg_rule.get('ethertype')
# the length of ipset chain name require less than 31
# characters
ipset_chain_name = ('NET' + ethertype + remote_gid[:IPSET_CHAIN_LEN])
if ipset_chain_name in self.ipset_chains:
args += ['-m set', '--match-set',
ipset_chain_name,
IPSET_DIRECTION[direction]]
args += ['-j RETURN']
iptables_rules += [' '.join(args)]
return iptables_rules
def _convert_sgr_to_iptables_rules(self, security_group_rules):
iptables_rules = []
self._allow_established(iptables_rules)
for rule in security_group_rules:
if self.enable_ipset:
remote_gid = rule.get('remote_group_id')
if remote_gid:
iptables_rules.extend(
self._generate_ipset_chain(rule, remote_gid))
continue
# These arguments MUST be in the format iptables-save will
# display them: source/dest, protocol, sport, dport, target
# Otherwise the iptables_manager code won't be able to find
# them to preserve their [packet:byte] counts.
args = self._ip_prefix_arg('s',
rule.get('source_ip_prefix'))
args += self._ip_prefix_arg('d',
rule.get('dest_ip_prefix'))
args += self._protocol_arg(rule.get('protocol'))
args += self._port_arg('sport',
rule.get('protocol'),
rule.get('source_port_range_min'),
rule.get('source_port_range_max'))
args += self._port_arg('dport',
rule.get('protocol'),
rule.get('port_range_min'),
rule.get('port_range_max'))
args += ['-j RETURN']
iptables_rules += [' '.join(args)]
self._drop_invalid_packets(iptables_rules)
iptables_rules += ['-j $sg-fallback']
return iptables_rules
def _drop_invalid_packets(self, iptables_rules):
# Always drop invalid packets
iptables_rules += ['-m state --state ' 'INVALID -j DROP']
return iptables_rules
def _allow_established(self, iptables_rules):
# Allow established connections
iptables_rules += ['-m state --state RELATED,ESTABLISHED -j RETURN']
return iptables_rules
def _protocol_arg(self, protocol):
if not protocol:
return []
iptables_rule = ['-p', protocol]
# iptables always adds '-m protocol' for udp and tcp
if protocol in ['udp', 'tcp']:
iptables_rule += ['-m', protocol]
return iptables_rule
def _port_arg(self, direction, protocol, port_range_min, port_range_max):
if (protocol not in ['udp', 'tcp', 'icmp', 'icmpv6']
or not port_range_min):
return []
if protocol in ['icmp', 'icmpv6']:
# Note(xuhanp): port_range_min/port_range_max represent
# icmp type/code when protocol is icmp or icmpv6
# icmp code can be 0 so we cannot use "if port_range_max" here
if port_range_max is not None:
return ['--%s-type' % protocol,
'%s/%s' % (port_range_min, port_range_max)]
return ['--%s-type' % protocol, '%s' % port_range_min]
elif port_range_min == port_range_max:
return ['--%s' % direction, '%s' % (port_range_min,)]
else:
return ['-m', 'multiport',
'--%ss' % direction,
'%s:%s' % (port_range_min, port_range_max)]
def _ip_prefix_arg(self, direction, ip_prefix):
#NOTE (nati) : source_group_id is converted to list of source_
# ip_prefix in server side
if ip_prefix:
return ['-%s' % direction, ip_prefix]
return []
    def _port_chain_name(self, port, direction):
        # Per-port chain name: direction prefix ('i'/'o'/'s') plus the
        # device id with its 3-character interface prefix stripped;
        # get_chain_name() truncates to iptables' chain-name limit.
        return iptables_manager.get_chain_name(
            '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'][3:]))
def filter_defer_apply_on(self):
if not self._defer_apply:
self.iptables.defer_apply_on()
self._pre_defer_filtered_ports = dict(self.filtered_ports)
self.pre_sg_members = dict(self.sg_members)
self.pre_sg_rules = dict(self.sg_rules)
self._defer_apply = True
def _remove_unused_security_group_info(self):
need_removed_ipset_chains = set()
need_removed_security_groups = set()
remote_group_ids = set()
cur_group_ids = set()
for port in self.filtered_ports.values():
source_groups = port.get('security_group_source_groups', [])
remote_group_ids.update(source_groups)
groups = port.get('security_groups', [])
cur_group_ids.update(groups)
need_removed_ipset_chains.update(
[x for x in self.pre_sg_members if x not in remote_group_ids])
need_removed_security_groups.update(
[x for x in self.pre_sg_rules if x not in cur_group_ids])
# Remove unused remote security group member ips
for remove_chain_id in need_removed_ipset_chains:
if remove_chain_id in self.sg_members:
self.sg_members.pop(remove_chain_id, None)
if self.enable_ipset:
for ethertype in ['IPv4', 'IPv6']:
removed_chain = (
'NET' + ethertype + remove_chain_id[:IPSET_CHAIN_LEN])
if removed_chain in self.ipset_chains:
self.ipset.destroy_ipset_chain_by_name(removed_chain)
self.ipset_chains.pop(removed_chain, None)
# Remove unused security group rules
for remove_group_id in need_removed_security_groups:
if remove_group_id in self.sg_rules:
self.sg_rules.pop(remove_group_id, None)
def filter_defer_apply_off(self):
if self._defer_apply:
self._defer_apply = False
self._remove_chains_apply(self._pre_defer_filtered_ports)
self._setup_chains_apply(self.filtered_ports)
self.iptables.defer_apply_off()
self._remove_unused_security_group_info()
self._pre_defer_filtered_ports = None
class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver):
    """Driver variant for the OVS hybrid plug model.

    Filtering happens on the intermediate 'tap' bridge device rather
    than on the port device itself.
    """
    OVS_HYBRID_TAP_PREFIX = constants.TAP_DEVICE_PREFIX
    def _port_chain_name(self, port, direction):
        # Unlike the base class, the full device id is used here
        # (no 3-character prefix is stripped).
        return iptables_manager.get_chain_name(
            '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device']))
    def _get_device_name(self, port):
        # The hybrid model plugs tap<port-id>; clamp to the maximum
        # Linux network device name length.
        return (self.OVS_HYBRID_TAP_PREFIX + port['device'])[:LINUX_DEV_LEN]
| 44.381786 | 79 | 0.599913 |
7957d447ebe05304fae6f654848e2cce0c44d7c2 | 9,049 | py | Python | SPViT_Swin/data/cached_image_folder.py | zhuang-group/SPViT | 74f08c6e55fb6adc0322722cedfd2c25ebdee999 | [
"Apache-2.0"
] | 69 | 2021-11-23T23:44:38.000Z | 2022-03-15T01:27:30.000Z | SPViT_Swin/data/cached_image_folder.py | zip-group/SPViT | 74f08c6e55fb6adc0322722cedfd2c25ebdee999 | [
"Apache-2.0"
] | null | null | null | SPViT_Swin/data/cached_image_folder.py | zip-group/SPViT | 74f08c6e55fb6adc0322722cedfd2c25ebdee999 | [
"Apache-2.0"
] | 10 | 2021-11-25T01:26:38.000Z | 2022-03-14T04:59:42.000Z | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
# Modifications copyright (c) 2021 Zhuang AI Group, Haoyu He
import io
import os
import time
import torch.distributed as dist
import torch.utils.data as data
from PIL import Image
from .zipreader import is_zip_path, ZipReader
def has_file_allowed_extension(filename, extensions):
    """Check whether *filename* ends with one of *extensions*.

    Args:
        filename (string): path to a file
        extensions (iterable of string): allowed lowercase suffixes

    Returns:
        bool: True if the (case-insensitive) name has an allowed suffix
    """
    lowered = filename.lower()
    for ext in extensions:
        if lowered.endswith(ext):
            return True
    return False
def find_classes(dir):
    """Map each class subdirectory of *dir* to an integer label.

    Returns:
        tuple: (sorted list of class names, {class name: index} dict)
    """
    classes = sorted(
        entry for entry in os.listdir(dir)
        if os.path.isdir(os.path.join(dir, entry)))
    class_to_idx = {name: idx for idx, name in enumerate(classes)}
    return classes, class_to_idx
def make_dataset(dir, class_to_idx, extensions):
    """Collect (file path, class index) samples under *dir*.

    Walks each class subdirectory in sorted order and keeps only files
    whose suffix is in *extensions*; labels come from *class_to_idx*.
    """
    samples = []
    dir = os.path.expanduser(dir)
    for target in sorted(os.listdir(dir)):
        class_dir = os.path.join(dir, target)
        if not os.path.isdir(class_dir):
            continue
        for root, _, fnames in sorted(os.walk(class_dir)):
            for fname in sorted(fnames):
                if not has_file_allowed_extension(fname, extensions):
                    continue
                samples.append((os.path.join(root, fname),
                                class_to_idx[target]))
    return samples
def make_dataset_with_ann(ann_file, img_prefix, extensions):
    """Build (image path, class index) samples from an annotation file.

    Each line of *ann_file* is '<relative path>\\t<class index>...'.
    Paths are joined onto *img_prefix*; an AssertionError is raised
    when a file's extension is not in *extensions*.
    """
    samples = []
    with open(ann_file, "r") as ann:
        for line in ann.readlines():
            fields = line.split('\t')
            rel_path = fields[0]
            class_index = int(fields[1])
            assert os.path.splitext(rel_path)[-1].lower() in extensions
            samples.append((os.path.join(img_prefix, rel_path), class_index))
    return samples
class DatasetFolder(data.Dataset):
    """A generic data loader where the samples are arranged in this way: ::
        root/class_x/xxx.ext
        root/class_x/xxy.ext
        root/class_x/xxz.ext
        root/class_y/123.ext
        root/class_y/nsdf3.ext
        root/class_y/asd932_.ext
    Args:
        root (string): Root directory path.
        loader (callable): A function to load a sample given its path.
        extensions (list[string]): A list of allowed extensions.
        transform (callable, optional): A function/transform that takes in
            a sample and returns a transformed version.
            E.g, ``transforms.RandomCrop`` for images.
        target_transform (callable, optional): A function/transform that takes
            in the target and transforms it.
    Attributes:
        samples (list): List of (sample path, class_index) tuples
    """
    def __init__(self, root, loader, extensions, ann_file='', img_prefix='', transform=None, target_transform=None,
                 cache_mode="no"):
        # cache_mode: "no" (load from disk every time), "part" (each
        # rank caches a 1/world_size slice) or "full" (cache all).
        # image folder mode
        if ann_file == '':
            _, class_to_idx = find_classes(root)
            samples = make_dataset(root, class_to_idx, extensions)
        # zip mode
        else:
            samples = make_dataset_with_ann(os.path.join(root, ann_file),
                                            os.path.join(root, img_prefix),
                                            extensions)
        if len(samples) == 0:
            raise (RuntimeError("Found 0 files in subfolders of: " + root + "\n" +
                                "Supported extensions are: " + ",".join(extensions)))
        self.root = root
        self.loader = loader
        self.extensions = extensions
        self.samples = samples
        # Per-sample integer labels and the distinct classes present.
        self.labels = [y_1k for _, y_1k in samples]
        self.classes = list(set(self.labels))
        self.transform = transform
        self.target_transform = target_transform
        self.cache_mode = cache_mode
        if self.cache_mode != "no":
            self.init_cache()
    def init_cache(self):
        # Replace (path, target) entries with (bytes, target) entries.
        # "full": every rank caches everything; "part": rank r caches
        # samples where index % world_size == r and keeps paths for the
        # rest.  Requires torch.distributed to be initialized.
        assert self.cache_mode in ["part", "full"]
        n_sample = len(self.samples)
        global_rank = dist.get_rank()
        world_size = dist.get_world_size()
        samples_bytes = [None for _ in range(n_sample)]
        start_time = time.time()
        for index in range(n_sample):
            # NOTE(review): n_sample // 10 is 0 for datasets smaller
            # than 10 samples, which would raise ZeroDivisionError —
            # confirm datasets are always larger than that.
            if index % (n_sample // 10) == 0:
                t = time.time() - start_time
                print(f'global_rank {dist.get_rank()} cached {index}/{n_sample} takes {t:.2f}s per block')
                start_time = time.time()
            path, target = self.samples[index]
            if self.cache_mode == "full":
                samples_bytes[index] = (ZipReader.read(path), target)
            elif self.cache_mode == "part" and index % world_size == global_rank:
                samples_bytes[index] = (ZipReader.read(path), target)
            else:
                samples_bytes[index] = (path, target)
        self.samples = samples_bytes
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (sample, target) where target is class_index of the target class.
        """
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target
    def __len__(self):
        return len(self.samples)
    def __repr__(self):
        # Human-readable summary: dataset size, root and transforms.
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
def pil_loader(path):
    """Load an image as an RGB ``PIL.Image``.

    *path* may be raw image bytes, a path into a zip archive (detected
    by ``is_zip_path`` and read with ``ZipReader``), or an ordinary
    filesystem path.

    Files are opened explicitly instead of handing the path to PIL to
    avoid a ResourceWarning
    (https://github.com/python-pillow/Pillow/issues/835).
    """
    if isinstance(path, bytes):
        img = Image.open(io.BytesIO(path))
    elif is_zip_path(path):
        data = ZipReader.read(path)
        img = Image.open(io.BytesIO(data))
    else:
        with open(path, 'rb') as f:
            # Image.open() is lazy: force the decode before the file
            # closes, otherwise convert() below can read a closed file.
            img = Image.open(f)
            return img.convert('RGB')
    return img.convert('RGB')
def accimage_loader(path):
    """Load *path* with the accimage backend, falling back to PIL."""
    import accimage
    try:
        return accimage.Image(path)
    except IOError:
        # Potentially a decoding problem, fall back to PIL.Image
        return pil_loader(path)
def default_img_loader(path):
    """Load *path* with the loader matching torchvision's image backend."""
    from torchvision import get_image_backend
    loader = (accimage_loader if get_image_backend() == 'accimage'
              else pil_loader)
    return loader(path)
class CachedImageFolder(DatasetFolder):
    """A generic data loader where the images are arranged in this way: ::
        root/dog/xxx.png
        root/dog/xxy.png
        root/dog/xxz.png
        root/cat/123.png
        root/cat/nsdf3.png
        root/cat/asd932_.png
    Args:
        root (string): Root directory path.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.
    Attributes:
        imgs (list): List of (image path, class_index) tuples
    """
    def __init__(self, root, ann_file='', img_prefix='', transform=None, target_transform=None,
                 loader=default_img_loader, cache_mode="no"):
        super(CachedImageFolder, self).__init__(root, loader, IMG_EXTENSIONS,
                                                ann_file=ann_file, img_prefix=img_prefix,
                                                transform=transform, target_transform=target_transform,
                                                cache_mode=cache_mode)
        # torchvision's ImageFolder exposes .imgs; keep that alias here.
        self.imgs = self.samples
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is class_index of the target class.
        """
        path, target = self.samples[index]
        image = self.loader(path)
        if self.transform is not None:
            img = self.transform(image)
        else:
            img = image
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
| 35.766798 | 115 | 0.590452 |
7957d44ea5c7acf1ec7c8b324c4d11a2df58fc2a | 550 | py | Python | emu/tests/test_wps_wordcounter.py | Ouranosinc/emu | f3b92f44555b9e85f8c62e8e34a8a59d420a1c67 | [
"Apache-2.0"
] | null | null | null | emu/tests/test_wps_wordcounter.py | Ouranosinc/emu | f3b92f44555b9e85f8c62e8e34a8a59d420a1c67 | [
"Apache-2.0"
] | 1 | 2020-11-11T03:21:22.000Z | 2020-11-11T03:21:22.000Z | emu/tests/test_wps_wordcounter.py | Ouranosinc/emu | f3b92f44555b9e85f8c62e8e34a8a59d420a1c67 | [
"Apache-2.0"
] | null | null | null | import pytest
from pywps import Service
from emu.tests.common import client_for, assert_response_success
from emu.processes.wps_wordcounter import WordCounter
@pytest.mark.online
def test_wps_wordcount():
    """Execute the wordcounter process on a live URL and expect success."""
    url = "https://en.wikipedia.org/wiki/Web_Processing_Service"
    service = Service(processes=[WordCounter()])
    client = client_for(service)
    resp = client.get(service='wps',
                      request='execute',
                      version='1.0.0',
                      identifier='wordcounter',
                      datainputs="text={0}".format(url))
    assert_response_success(resp)
| 28.947368 | 64 | 0.727273 |
7957d46cc9e244284883b5ead689554d76a74d9f | 2,698 | py | Python | tools/crawl_page/parse.py | willdonetang/taisi360 | 1b7dbc5269bc2ed87f59eab0bfa5c0f85db27072 | [
"Apache-2.0"
] | null | null | null | tools/crawl_page/parse.py | willdonetang/taisi360 | 1b7dbc5269bc2ed87f59eab0bfa5c0f85db27072 | [
"Apache-2.0"
] | null | null | null | tools/crawl_page/parse.py | willdonetang/taisi360 | 1b7dbc5269bc2ed87f59eab0bfa5c0f85db27072 | [
"Apache-2.0"
] | null | null | null | #!/bin/env
# -*- coding: utf-8 -*-
from langconv import *
from bs4 import BeautifulSoup
import re
import os
import json
# Input: unicode text; output: unicode text
def big2simple(line):
    """Convert Traditional Chinese *line* to Simplified Chinese."""
    # Convert Traditional Chinese characters to Simplified
    line = Converter('zh-hans').convert(line)
    return line
def save_img_url(img_url, num):
    """Append one '<num>\\t<img_url>' record to img_url.txt (UTF-8).

    *num* is the source page identifier (string).  The file is opened
    in binary append mode so the explicit UTF-8 encode works the same
    on Python 2 and Python 3, and the context manager guarantees the
    handle is closed (the original leaked it on write errors).
    """
    line = num + "\t" + img_url + "\n"
    with open("img_url.txt", "ab") as img_url_file:
        img_url_file.write(line.encode("utf-8"))
# path: HTML file path; num: identifier of the source file
def parse_html(path, num):
    """Parse one crawled article page into a dict of extracted fields.

    Returns a dict with 'id', 'keyword', optionally 'title' and
    'category', the cleaned body HTML under 'content' (None when the
    page has no entry-content div) and the body image URLs under
    'img_list'.  Returns None when *path* cannot be read.
    """
    # open() raises on failure instead of returning a falsy handle, so
    # the original `if not open(...)` check was dead code and leaked
    # two file handles; check existence explicitly and use `with`.
    if not os.path.isfile(path):
        print("%s open error" % path)
        return None
    with open(path, "r") as in_file:
        soup = BeautifulSoup(in_file)
    result = {}
    result['id'] = num
    # keyword: join all article:tag meta values, comma-terminated
    node = soup.find_all("meta", attrs={"property": "article:tag"})
    key = ""
    if node:
        for no in node:
            key = key + no['content'] + ","
    result['keyword'] = big2simple(key)
    print(result['keyword'])
    # title
    node = soup.find("h1", attrs={"class": "entry-title"})
    if node:
        result['title'] = big2simple(node.get_text())
    # category: the last (most specific) category link wins
    node = soup.find_all("a", attrs={"rel": "category"})
    if node:
        print(node)
        result['category'] = big2simple(node[-1].get_text())
    # body: strip scripts, iframes and the Twitter share button, then
    # keep the cleaned HTML; previously the image scan ran even when
    # entry-content was missing and crashed on None.find_all().
    content = None
    result['img_list'] = []
    node = soup.find("div", class_="entry-content")
    if node:
        for i in node.find_all("script"):
            i.decompose()
        for i in node.find_all("iframe"):
            i.decompose()
        tmp = node.find("a", class_="twitter-share-button")
        if tmp:
            tmp.decompose()
        content = big2simple(unicode(node))
        # body image URLs
        for img in node.find_all("img"):
            result['img_list'].append(img['src'])
    result['content'] = content
    return result
def save_result(file_path, ret):
    """Append *ret* to *file_path* as one JSON document per line."""
    # 'with' replaces the manual open/close pair: the handle is closed
    # even if json.dumps raises.
    with open(file_path, "a") as file_out:
        file_out.write(json.dumps(ret))
        file_out.write("\n")
def load_files_list(path):
    """Return the non-empty, stripped lines of *path* as a list.

    Also logs how many entries were loaded.
    """
    file_list = []
    # 'with' closes the handle; the bare open() iterator leaked it.
    with open(path) as list_file:
        for raw_line in list_file:
            line = raw_line.strip()
            if line:
                file_list.append(line)
    # Parenthesized single-arg print works on both Python 2 and 3.
    print("load_files_list ok, count=%d" % len(file_list))
    return file_list
if __name__ == "__main__":
    # Parse every crawled page listed in files.txt (stored under
    # pages/) and append the extracted records to ret.txt, one JSON
    # object per line.
    dir_path = r'pages'
    file_num_list = load_files_list("files.txt")
    for file_num in file_num_list:
        path = os.path.join(dir_path, file_num)
        print path
        ret = parse_html(path, file_num)
        save_result("ret.txt", ret)
| 24.527273 | 67 | 0.577094 |
7957d50969335a074a1d08c82d184c3fd9655994 | 8,607 | py | Python | Classifier.py | marcus-deans/Animal10-VGG-Classification | 84e3ea0dbfda277ff907b370f8659e7be39aed02 | [
"MIT"
] | 1 | 2021-12-30T17:37:41.000Z | 2021-12-30T17:37:41.000Z | Classifier.py | marcus-deans/Animal10-VGG-Classification | 84e3ea0dbfda277ff907b370f8659e7be39aed02 | [
"MIT"
] | null | null | null | Classifier.py | marcus-deans/Animal10-VGG-Classification | 84e3ea0dbfda277ff907b370f8659e7be39aed02 | [
"MIT"
] | 1 | 2021-04-29T14:06:48.000Z | 2021-04-29T14:06:48.000Z |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import keras
from keras import layers, applications, optimizers
from keras.layers import Input, Dense, Activation, MaxPool2D, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, Dropout
from keras.models import Sequential, Model, load_model
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from keras.callbacks import ReduceLROnPlateau
from sklearn.utils import class_weight, shuffle
import os
import random
import cv2
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import requests
from PIL import Image
from io import BytesIO
#The Original Kaggle dataset is in Italian, so to more easily interpret, we convert Italian labels to English
translate = {"cane": "Dog", "cavallo": "Horse", "elefante": "Elephant", "farfalla": "Butterfly", "gallina": "Chicken", "gatto": "Cat", "mucca": "Cow", "pecora": "Sheep", "scoiattolo": "Squirrel", "ragno": "Spider"}
#Directly for the animal image classification database from Kaggle
foldernames = os.listdir('/kaggle/input/animals10/raw-img/')
# files/target hold up to 1400 image paths/labels per class; files2/target2 hold the overflow
files, files2, target, target2 = [], [], [], []
#Iterate through the database and retrieve our relevant files
for i, folder in enumerate(foldernames):
    filenames = os.listdir("/kaggle/input/animals10/raw-img/" + folder);
    count = 0
    #Due to the specific nature of the database being used, there are 1446 images of a specific class (others are higher)
    #Hence use a maximum of 1400 images from a specific classes for consistency of data as well as brevity
    for file in filenames:
        if count < 1400:
            files.append("/kaggle/input/animals10/raw-img/" + folder + "/" + file)
            target.append(translate[folder])
        else:
            files2.append("/kaggle/input/animals10/raw-img/" + folder + "/" + file)
            target2.append(translate[folder])
        count += 1
#Create dataframes to read the images
df = pd.DataFrame({'Filepath':files, 'Target':target})
#Split into training, dev, and test sets with a 60/20/20 split respectively
# NOTE(review): `np` is used here but no numpy import is visible in this
# section — presumably `import numpy as np` appears earlier in the notebook; verify.
train, dev, test = np.split(df.sample(frac=1), [int(.6*len(df)), int(.8*len(df))])
#Perform data augmentation which will artifically grow dataset, allowing CNN to better generalize learning
#Images were sheared, zoomed, and shifted by up to 20% of the relevant factor, and rotated by up to 30 degrees
#Note that horizontal but not vertical flips were employed to simulate real-world conditions
#Simultaneously normalize by dividing by 255 within the image data generator
augdata = ImageDataGenerator(rescale=1./255,
        shear_range = 0.2,
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=True,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=30,  # randomly rotate images in the range (degrees, 0 to 180)
        zoom_range = 0.2, # Randomly zoom image
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images
#Create the test set which will not have augmented data (want real performance not on data of identical origin)
augdata_test = ImageDataGenerator(rescale=1./255, samplewise_center = True)
#Create image sets for the train, dev, and test sets. The training set has augmented data whereas the dev and test sets do not
#A standard image size of 224 is used for the VGG16 CNN. Use lanczos interpolation in frequency domain to reduce potential aliasing from resizing
train_flow = augdata.flow_from_dataframe(train, x_col = 'Filepath', y_col = 'Target', target_size=(224, 224), interpolation = 'lanczos', validate_filenames = False)
dev_flow = augdata_test.flow_from_dataframe(dev, x_col = 'Filepath', y_col = 'Target', target_size=(224, 224), interpolation = 'lanczos', validate_filenames = False)
test_flow = augdata_test.flow_from_dataframe(test, x_col = 'Filepath', y_col = 'Target', target_size=(224, 224), interpolation = 'lanczos', validate_filenames = False)
#Reducing learning rate of CNN during plateaus to continue progress. Learning rate is projected to be minute at end of CNN and hence min_lr=1e-8
learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', patience = 1, verbose=1,factor=0.2, min_delta=0.0001, min_lr=0.00000001)
#Use transfer learning with VGG16 CNN with previously determined image classification weights for faster learning and high accuracy
vgg16_model = applications.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
#Create a final classification neural network on the VGG16 output in order to determine which of the 10 animals the image is, dropout weights are standard
classification_model = Sequential() #Sequential model for ease of implementation
classification_model.add(Flatten(input_shape=vgg16_model.output_shape[1:])) #Simplify VGG16 output to 1D vector
classification_model.add(Dropout(0.1)) #Dropout layer to reduce parameters
classification_model.add(Dense(256, activation='relu')) #Relu function to clean up VGG16 output
classification_model.add(Dropout(0.1)) #Dropout layer to reduce parameters
classification_model.add(Dense(10, activation = 'softmax')) #Softmax for one of 10 possible classifications
#We create the final model using the Model command, taking the input as that of VGG16 and the output as the classification NN with input as output of VGG16
model = Model(inputs=vgg16_model.inputs, outputs=classification_model(vgg16_model.output))
#Create the model using standard, robust gradient descent optimizer having added momentum, and using categorical_crossentropy (instead of sparse since 2D target)
model.compile(loss = 'categorical_crossentropy', optimizer = optimizers.SGD(lr=1e-3, momentum=0.9), metrics = ['accuracy'])
model.summary() #Print model configuration for examination
#Fit CNN to training set and perform validation with the dev set, using previously established learning rate reduction parameters
# BUG FIX: monitor was 'val_acc', but this Keras version exposes the metric as
# 'val_accuracy' (the plotting code below reads history.history['val_accuracy']),
# so the old key could never match the logged metrics.
history = model.fit_generator(train_flow, epochs = 12, validation_data = dev_flow, callbacks=[ModelCheckpoint('VGG16.model', monitor='val_accuracy'), learning_rate_reduction])
#Create plots of the training performance using the monitors that were established for learning rate and the model fitting
epochs = range(1, len(history.history['accuracy'])+1) #compute total number of epochs
train_loss_values = history.history['loss'] #Loss values for training set
dev_loss_values = history.history['val_loss'] #Loss values for dev set
train_accuracy_values = history.history['accuracy'] #Accuracy values for training set
dev_accuracy_values = history.history['val_accuracy'] #Accuracy values for dev set
#Create two side-by-side subplots in order to visualize the model's training performance
f, ax = plt.subplots(nrows=1, ncols = 2, figsize=(20,5))
#Create first subplot for training and validation loss
ax[0].plot(epochs, train_loss_values, marker='v', color = 'magenta', label='Training Loss')
ax[0].plot(epochs, dev_loss_values, marker='v', color = 'green', label='Validation Loss')
ax[0].set_title('Training & Validation Loss')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('Loss')
ax[0].legend(loc='best')
ax[0].grid(True)
#Create second subplot for training and validation accuracy
ax[1].plot(epochs, train_accuracy_values, marker='^', color = 'magenta', label='Training Accuracy')
ax[1].plot(epochs, dev_accuracy_values, marker='^', color = 'green', label='Validation Accuracy')
ax[1].set_title('Training & Validation Accuracy')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Accuracy')
ax[1].legend(loc='best')
ax[1].grid(True)
#Save the plots for future reference as they will not be immediately shown during program run
#Both vector (eps) and raster (png) formats are written
f.savefig('AccuracyAndLossPlot.eps', format='eps')
f.savefig('AccuracyAndLossPlot.png', format='png')
#Delete values for clean up memory and for efficiency of program
del epochs, train_loss_values, dev_loss_values, train_accuracy_values, dev_accuracy_values
#Evaluate CNN accuracy on the final test set and print accuracy
score = model.evaluate(test_flow)
print("Test Accuracy ", score[1]*100, "%")
| 59.770833 | 214 | 0.765191 |
7957d6b0fbe90506c98074f57a7b7a5098b84346 | 983 | py | Python | test/test_resources_web_form_authentication.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 61 | 2018-05-17T05:57:09.000Z | 2022-03-08T13:59:21.000Z | test/test_resources_web_form_authentication.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 33 | 2018-06-26T16:21:14.000Z | 2022-03-03T20:55:47.000Z | test/test_resources_web_form_authentication.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 43 | 2018-02-24T05:45:53.000Z | 2022-03-31T22:15:16.000Z | # coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.resources_web_form_authentication import ResourcesWebFormAuthentication # noqa: E501
from swagger_client.rest import ApiException
class TestResourcesWebFormAuthentication(unittest.TestCase):
    """Unit-test stubs for the ResourcesWebFormAuthentication model."""

    def setUp(self):
        """No fixtures are required for these stub tests."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testResourcesWebFormAuthentication(self):
        """Test ResourcesWebFormAuthentication"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.resources_web_form_authentication.ResourcesWebFormAuthentication()  # noqa: E501
        pass
| 25.205128 | 120 | 0.748728 |
7957d7138c48071e39967039cf986988f790d46c | 2,900 | py | Python | momba/engine/time.py | koehlma/momba | 68d6431d2732570696d3c67a9e23006e6e3a7740 | [
"MIT"
] | 12 | 2021-01-18T14:38:32.000Z | 2022-01-17T09:16:52.000Z | momba/engine/time.py | koehlma/momba | 68d6431d2732570696d3c67a9e23006e6e3a7740 | [
"MIT"
] | 3 | 2021-05-16T15:26:34.000Z | 2022-02-21T20:46:55.000Z | momba/engine/time.py | koehlma/momba | 68d6431d2732570696d3c67a9e23006e6e3a7740 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
#
# Copyright (C) 2021, Saarland University
# Copyright (C) 2021, Maximilian Köhl <koehl@cs.uni-saarland.de>
from __future__ import annotations
import dataclasses as d
import typing as t
import abc
from .. import model
from ._engine import engine as _engine
from . import zones
from .translator import Translation, translate_network
if t.TYPE_CHECKING:
from .explore import Parameters
class InvalidModelType(Exception):
    """Raised when a network's model type is incompatible with the chosen time representation."""
    pass
T = t.TypeVar("T")
@d.dataclass(frozen=True)
class CompiledNetwork:
    """Pairs a network translation with the engine-internal explorer built from it."""

    # JANI translation produced by translate_network
    translation: Translation
    # opaque engine explorer object (type lives in the Rust extension)
    internal: t.Any
class TimeType(abc.ABC):
    """
    Base class for time representations.
    """

    @staticmethod
    @abc.abstractmethod
    def compile(
        network: model.Network, *, parameters: Parameters = None
    ) -> CompiledNetwork:
        """Translate *network* and build an engine explorer for this time type."""
        raise NotImplementedError()

    @classmethod
    @abc.abstractmethod
    def load_valuations(cls: t.Type[T], valuations: t.Any) -> T:
        """Construct an instance of this time type from raw engine valuations."""
        raise NotImplementedError()
class DiscreteTime(TimeType):
    """
    A representation of time without continuous-time clocks.
    """

    @staticmethod
    def compile(
        network: model.Network, *, parameters: Parameters = None
    ) -> CompiledNetwork:
        """Build a clock-free explorer; rejects networks with a timed model type."""
        # Note: translation runs before the model-type check below.
        translation = translate_network(network, parameters=parameters)
        if not network.ctx.model_type.is_untimed:
            raise InvalidModelType(
                f"{network.ctx.model_type} is not a discrete time model type"
            )
        return CompiledNetwork(
            translation, _engine.Explorer.new_no_clocks(translation.json_network)
        )

    @classmethod
    def load_valuations(cls, valuations: t.Any) -> DiscreteTime:
        """Discrete time carries no clock valuations, so this ignores its input."""
        return cls()
@d.dataclass(frozen=True)
class GlobalTime(TimeType):
    """Time representation that adds an implicit global clock to the network."""

    # float-valued difference-bound zone over the clocks
    zone: zones.Zone[float]

    @staticmethod
    def compile(
        network: model.Network, *, parameters: Parameters = None
    ) -> CompiledNetwork:
        """Translate *network* with an added global clock and build its explorer."""
        translation = translate_network(
            network, parameters=parameters, global_clock=True
        )
        return CompiledNetwork(
            translation, _engine.Explorer.new_global_time(translation.json_network)
        )

    @classmethod
    def load_valuations(cls, valuations: t.Any) -> GlobalTime:
        """Wrap raw engine valuations in a float zone."""
        return cls(zones._wrap_zone(valuations, zones.ZoneF64))
@d.dataclass(frozen=True)
class ZoneF64(TimeType):
    """Zone-based time representation using 64-bit float clock zones."""

    # float-valued difference-bound zone over the clocks
    zone: zones.Zone[float]

    @staticmethod
    def compile(
        network: model.Network, *, parameters: Parameters = None
    ) -> CompiledNetwork:
        """Translate *network* (no extra global clock) and build its explorer."""
        translation = translate_network(
            network, parameters=parameters, global_clock=False
        )
        # NOTE(review): this calls Explorer.new_global_time exactly like
        # GlobalTime.compile despite global_clock=False — confirm this is
        # intentional and not a copy-paste from GlobalTime.
        return CompiledNetwork(
            translation, _engine.Explorer.new_global_time(translation.json_network)
        )

    @classmethod
    def load_valuations(cls, valuations: t.Any) -> ZoneF64:
        """Wrap raw engine valuations in a float zone."""
        return cls(zones._wrap_zone(valuations, zones.ZoneF64))
| 24.786325 | 83 | 0.674483 |
7957d905f901c651a7ce6d385402c67a6204cf77 | 15,144 | py | Python | tests/chainer_tests/test_reporter.py | dydo0316/test2 | a9982a8b426dd07eb1ec4e7695a7bc546ecc6063 | [
"MIT"
] | null | null | null | tests/chainer_tests/test_reporter.py | dydo0316/test2 | a9982a8b426dd07eb1ec4e7695a7bc546ecc6063 | [
"MIT"
] | 2 | 2018-01-09T23:05:30.000Z | 2018-01-19T01:19:34.000Z | tests/chainer_tests/test_reporter.py | dydo0316/test2 | a9982a8b426dd07eb1ec4e7695a7bc546ecc6063 | [
"MIT"
] | null | null | null | import contextlib
import tempfile
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import configuration
from chainer import functions
from chainer import testing
from chainer.testing import attr
class TestReporter(unittest.TestCase):
    """Tests for chainer.Reporter: context nesting, scopes, and observers."""

    def test_empty_reporter(self):
        reporter = chainer.Reporter()
        self.assertEqual(reporter.observation, {})

    def test_enter_exit(self):
        # Nested `with` blocks should stack/unstack the current reporter.
        reporter1 = chainer.Reporter()
        reporter2 = chainer.Reporter()
        with reporter1:
            self.assertIs(chainer.get_current_reporter(), reporter1)
            with reporter2:
                self.assertIs(chainer.get_current_reporter(), reporter2)
            self.assertIs(chainer.get_current_reporter(), reporter1)

    def test_scope(self):
        # scope() swaps in a caller-provided observation dict for its duration.
        reporter1 = chainer.Reporter()
        reporter2 = chainer.Reporter()
        with reporter1:
            observation = {}
            with reporter2.scope(observation):
                self.assertIs(chainer.get_current_reporter(), reporter2)
                self.assertIs(reporter2.observation, observation)
            self.assertIs(chainer.get_current_reporter(), reporter1)
            self.assertIsNot(reporter2.observation, observation)

    def test_add_observer(self):
        # Values reported for a registered observer are prefixed with its name.
        reporter = chainer.Reporter()
        observer = object()
        reporter.add_observer('o', observer)

        reporter.report({'x': 1}, observer)

        observation = reporter.observation
        self.assertIn('o/x', observation)
        self.assertEqual(observation['o/x'], 1)
        self.assertNotIn('x', observation)

    def test_add_observers(self):
        reporter = chainer.Reporter()
        observer1 = object()
        reporter.add_observer('o1', observer1)
        observer2 = object()
        reporter.add_observer('o2', observer2)

        reporter.report({'x': 1}, observer1)
        reporter.report({'y': 2}, observer2)

        observation = reporter.observation
        self.assertIn('o1/x', observation)
        self.assertEqual(observation['o1/x'], 1)
        self.assertIn('o2/y', observation)
        self.assertEqual(observation['o2/y'], 2)
        self.assertNotIn('x', observation)
        self.assertNotIn('y', observation)
        self.assertNotIn('o1/y', observation)
        self.assertNotIn('o2/x', observation)

    def test_report_without_observer(self):
        # Without an observer, keys are stored unprefixed.
        reporter = chainer.Reporter()
        reporter.report({'x': 1})

        observation = reporter.observation
        self.assertIn('x', observation)
        self.assertEqual(observation['x'], 1)
class TestKeepGraphOnReportFlag(unittest.TestCase):
    """Tests that keep_graph_on_report controls whether reported variables retain creators."""

    @contextlib.contextmanager
    def _scope(self, flag):
        # If flag is None, return the nop context.
        # Otherwise, return the context in which
        # keep_graph_on_report is set temporarily.
        old = configuration.config.keep_graph_on_report
        if flag is not None:
            configuration.config.keep_graph_on_report = flag
        try:
            yield
        finally:
            configuration.config.keep_graph_on_report = old

    def test_keep_graph_default(self):
        # Default behavior: the computational graph is cut on report.
        x = chainer.Variable(numpy.array([1], numpy.float32))
        y = functions.sigmoid(x)
        reporter = chainer.Reporter()
        with self._scope(None):
            reporter.report({'y': y})
        self.assertIsNone(reporter.observation['y'].creator)

    def test_keep_graph(self):
        # With the flag on, the creator (graph) is preserved.
        x = chainer.Variable(numpy.array([1], numpy.float32))
        y = functions.sigmoid(x)
        reporter = chainer.Reporter()
        with self._scope(True):
            reporter.report({'y': y})
        assert reporter.observation['y'].creator is not None

    def test_not_keep_graph(self):
        x = chainer.Variable(numpy.array([1], numpy.float32))
        y = functions.sigmoid(x)
        reporter = chainer.Reporter()
        with self._scope(False):
            reporter.report({'y': y})
        self.assertIsNone(reporter.observation['y'].creator)
class TestReport(unittest.TestCase):
    """Tests for the module-level chainer.report / report_scope helpers."""

    def test_report_without_reporter(self):
        # Reporting outside any reporter context is a silent no-op.
        observer = object()
        chainer.report({'x': 1}, observer)

    def test_report(self):
        reporter = chainer.Reporter()
        with reporter:
            chainer.report({'x': 1})
        observation = reporter.observation
        self.assertIn('x', observation)
        self.assertEqual(observation['x'], 1)

    def test_report_with_observer(self):
        reporter = chainer.Reporter()
        observer = object()
        reporter.add_observer('o', observer)
        with reporter:
            chainer.report({'x': 1}, observer)
        observation = reporter.observation
        self.assertIn('o/x', observation)
        self.assertEqual(observation['o/x'], 1)

    def test_report_with_unregistered_observer(self):
        # Reporting for an observer that was never registered raises KeyError.
        reporter = chainer.Reporter()
        observer = object()
        with reporter:
            with self.assertRaises(KeyError):
                chainer.report({'x': 1}, observer)

    def test_report_scope(self):
        # report_scope redirects observations into a caller-provided dict.
        reporter = chainer.Reporter()
        observation = {}
        with reporter:
            with chainer.report_scope(observation):
                chainer.report({'x': 1})
        self.assertIn('x', observation)
        self.assertEqual(observation['x'], 1)
        self.assertNotIn('x', reporter.observation)
class TestSummary(unittest.TestCase):
    """Tests for chainer.reporter.Summary: running mean/std and serialization."""

    def setUp(self):
        self.summary = chainer.reporter.Summary()

    def test_numpy(self):
        self.summary.add(numpy.array(1, 'f'))
        self.summary.add(numpy.array(-2, 'f'))

        mean = self.summary.compute_mean()
        testing.assert_allclose(mean, numpy.array(-0.5, 'f'))

        mean, std = self.summary.make_statistics()
        testing.assert_allclose(mean, numpy.array(-0.5, 'f'))
        testing.assert_allclose(std, numpy.array(1.5, 'f'))

    @attr.gpu
    def test_cupy(self):
        xp = cuda.cupy
        self.summary.add(xp.array(1, 'f'))
        self.summary.add(xp.array(-2, 'f'))

        mean = self.summary.compute_mean()
        testing.assert_allclose(mean, numpy.array(-0.5, 'f'))

        mean, std = self.summary.make_statistics()
        testing.assert_allclose(mean, numpy.array(-0.5, 'f'))
        testing.assert_allclose(std, numpy.array(1.5, 'f'))

    def test_int(self):
        self.summary.add(1)
        self.summary.add(2)
        self.summary.add(3)

        mean = self.summary.compute_mean()
        testing.assert_allclose(mean, 2)

        mean, std = self.summary.make_statistics()
        testing.assert_allclose(mean, 2)
        # population standard deviation of {1, 2, 3}
        testing.assert_allclose(std, numpy.sqrt(2 / 3))

    def test_float(self):
        self.summary.add(1.)
        self.summary.add(2.)
        self.summary.add(3.)

        mean = self.summary.compute_mean()
        testing.assert_allclose(mean, 2.)

        mean, std = self.summary.make_statistics()
        testing.assert_allclose(mean, 2.)
        testing.assert_allclose(std, numpy.sqrt(2. / 3.))

    def test_weight(self):
        # Weights may be plain floats, numpy scalars, or Variables.
        self.summary.add(1., 0.5)
        self.summary.add(2., numpy.array(0.4))
        self.summary.add(3., chainer.Variable(numpy.array(0.3)))

        mean = self.summary.compute_mean().array
        val = (1 * 0.5 + 2 * 0.4 + 3 * 0.3) / (0.5 + 0.4 + 0.3)
        testing.assert_allclose(mean, val)

    def test_serialize(self):
        self.summary.add(1.)
        self.summary.add(2.)

        summary = chainer.reporter.Summary()
        testing.save_and_load_npz(self.summary, summary)
        summary.add(3.)

        mean = summary.compute_mean()
        testing.assert_allclose(mean, 2.)

        mean, std = summary.make_statistics()
        testing.assert_allclose(mean, 2.)
        testing.assert_allclose(std, numpy.sqrt(2. / 3.))

    @attr.gpu
    def test_serialize_cupy(self):
        xp = cuda.cupy
        self.summary.add(xp.array(1, 'f'))
        self.summary.add(xp.array(2, 'f'))

        summary = chainer.reporter.Summary()
        testing.save_and_load_npz(self.summary, summary)
        summary.add(xp.array(3, 'f'))

        mean = summary.compute_mean()
        testing.assert_allclose(mean, 2.)

        mean, std = summary.make_statistics()
        testing.assert_allclose(mean, 2.)
        testing.assert_allclose(std, numpy.sqrt(2. / 3.))

    def test_serialize_backward_compat(self):
        with tempfile.NamedTemporaryFile(delete=False) as f:
            # old version does not save anything
            numpy.savez(f, dummy=0)
        with testing.assert_warns(UserWarning):
            chainer.serializers.load_npz(f.name, self.summary)

        self.summary.add(2.)
        self.summary.add(3.)

        mean = self.summary.compute_mean()
        testing.assert_allclose(mean, 2.5)

        mean, std = self.summary.make_statistics()
        testing.assert_allclose(mean, 2.5)
        testing.assert_allclose(std, 0.5)
class TestDictSummary(unittest.TestCase):
    """Tests for chainer.reporter.DictSummary: per-key statistics and serialization."""

    def setUp(self):
        self.summary = chainer.reporter.DictSummary()

    def check(self, summary, data):
        # Verify mean and std of *summary* against raw per-key value tuples.
        mean = summary.compute_mean()
        self.assertEqual(set(mean.keys()), set(data.keys()))
        for name in data.keys():
            m = sum(data[name]) / len(data[name])
            testing.assert_allclose(mean[name], m)

        stats = summary.make_statistics()
        self.assertEqual(
            set(stats.keys()),
            set(data.keys()).union(name + '.std' for name in data.keys()))
        for name in data.keys():
            m = sum(data[name]) / len(data[name])
            # population std: sqrt(E[x^2] - E[x]^2)
            s = numpy.sqrt(
                sum(x * x for x in data[name]) / len(data[name]) - m * m)
            testing.assert_allclose(stats[name], m)
            testing.assert_allclose(stats[name + '.std'], s)

    def test(self):
        self.summary.add({'numpy': numpy.array(3, 'f'), 'int': 1, 'float': 4.})
        self.summary.add({'numpy': numpy.array(1, 'f'), 'int': 5, 'float': 9.})
        self.summary.add({'numpy': numpy.array(2, 'f'), 'int': 6, 'float': 5.})
        self.summary.add({'numpy': numpy.array(3, 'f'), 'int': 5, 'float': 8.})

        self.check(self.summary, {
            'numpy': (3., 1., 2., 3.),
            'int': (1, 5, 6, 5),
            'float': (4., 9., 5., 8.),
        })

    @attr.gpu
    def test_cupy(self):
        xp = cuda.cupy
        self.summary.add({'cupy': xp.array(3, 'f')})
        self.summary.add({'cupy': xp.array(1, 'f')})
        self.summary.add({'cupy': xp.array(2, 'f')})
        self.summary.add({'cupy': xp.array(3, 'f')})

        self.check(self.summary, {'cupy': (3., 1., 2., 3.)})

    def test_sparse(self):
        # Keys need not appear in every observation.
        self.summary.add({'a': 3., 'b': 1.})
        self.summary.add({'a': 1., 'b': 5., 'c': 9.})
        self.summary.add({'b': 6.})
        self.summary.add({'a': 3., 'b': 5., 'c': 8.})

        self.check(self.summary, {
            'a': (3., 1., 3.),
            'b': (1., 5., 6., 5.),
            'c': (9., 8.),
        })

    def test_weight(self):
        # A (value, weight) tuple adds a weighted sample; weights must be scalars.
        self.summary.add({'a': (1., 0.5)})
        self.summary.add({'a': (2., numpy.array(0.4))})
        self.summary.add({'a': (3., chainer.Variable(numpy.array(0.3)))})

        mean = self.summary.compute_mean()
        val = (1 * 0.5 + 2 * 0.4 + 3 * 0.3) / (0.5 + 0.4 + 0.3)
        testing.assert_allclose(mean['a'], val)

        with self.assertRaises(ValueError):
            self.summary.add({'a': (4., numpy.array([0.5]))})
        with self.assertRaises(ValueError):
            self.summary.add({'a': (4., chainer.Variable(numpy.array([0.5])))})

    def test_serialize(self):
        self.summary.add({'numpy': numpy.array(3, 'f'), 'int': 1, 'float': 4.})
        self.summary.add({'numpy': numpy.array(1, 'f'), 'int': 5, 'float': 9.})
        self.summary.add({'numpy': numpy.array(2, 'f'), 'int': 6, 'float': 5.})

        summary = chainer.reporter.DictSummary()
        testing.save_and_load_npz(self.summary, summary)
        summary.add({'numpy': numpy.array(3, 'f'), 'int': 5, 'float': 8.})

        self.check(summary, {
            'numpy': (3., 1., 2., 3.),
            'int': (1, 5, 6, 5),
            'float': (4., 9., 5., 8.),
        })

    @attr.gpu
    def test_serialize_cupy(self):
        xp = cuda.cupy
        self.summary.add({'cupy': xp.array(3, 'f')})
        self.summary.add({'cupy': xp.array(1, 'f')})
        self.summary.add({'cupy': xp.array(2, 'f')})

        summary = chainer.reporter.DictSummary()
        testing.save_and_load_npz(self.summary, summary)
        summary.add({'cupy': xp.array(3, 'f')})

        self.check(summary, {'cupy': (3., 1., 2., 3.)})

    def test_serialize_names_with_slash(self):
        # Slashes in keys must round-trip through serialization.
        self.summary.add({'a/b': 3., '/a/b': 1., 'a/b/': 4.})
        self.summary.add({'a/b': 1., '/a/b': 5., 'a/b/': 9.})
        self.summary.add({'a/b': 2., '/a/b': 6., 'a/b/': 5.})

        summary = chainer.reporter.DictSummary()
        testing.save_and_load_npz(self.summary, summary)
        summary.add({'a/b': 3., '/a/b': 5., 'a/b/': 8.})

        self.check(summary, {
            'a/b': (3., 1., 2., 3.),
            '/a/b': (1., 5., 6., 5.),
            'a/b/': (4., 9., 5., 8.),
        })

    def test_serialize_overwrite_different_names(self):
        # Loading replaces the target's existing (different-keyed) state.
        self.summary.add({'a': 3., 'b': 1.})
        self.summary.add({'a': 1., 'b': 5.})

        summary = chainer.reporter.DictSummary()
        summary.add({'c': 5.})
        testing.save_and_load_npz(self.summary, summary)

        self.check(summary, {
            'a': (3., 1.),
            'b': (1., 5.),
        })

    def test_serialize_overwrite_rollback(self):
        # Loading a snapshot discards additions made after the save.
        self.summary.add({'a': 3., 'b': 1.})
        self.summary.add({'a': 1., 'b': 5.})

        with tempfile.NamedTemporaryFile(delete=False) as f:
            chainer.serializers.save_npz(f.name, self.summary)
            self.summary.add({'a': 2., 'b': 6., 'c': 5.})
            self.summary.add({'a': 3., 'b': 4., 'c': 6.})
            chainer.serializers.load_npz(f.name, self.summary)

        self.summary.add({'a': 3., 'b': 5., 'c': 8.})

        self.check(self.summary, {
            'a': (3., 1., 3.),
            'b': (1., 5., 5.),
            'c': (8.,),
        })

    def test_serialize_backward_compat(self):
        with tempfile.NamedTemporaryFile(delete=False) as f:
            # old version does not save anything
            numpy.savez(f, dummy=0)
        with testing.assert_warns(UserWarning):
            chainer.serializers.load_npz(f.name, self.summary)

    def test_serialize_backward_compat_overwrite(self):
        self.summary.add({'a': 3., 'b': 1., 'c': 4.})
        self.summary.add({'a': 1., 'b': 5., 'c': 9.})

        with tempfile.NamedTemporaryFile(delete=False) as f:
            # old version does not save anything
            numpy.savez(f, dummy=0)
        with testing.assert_warns(UserWarning):
            chainer.serializers.load_npz(f.name, self.summary)

        self.summary.add({'a': 9., 'b': 2.})
        self.summary.add({'a': 6., 'b': 5.})

        self.check(self.summary, {
            'a': (9., 6.),
            'b': (2., 5.),
        })
testing.run_module(__name__, __file__)
| 33.578714 | 79 | 0.573098 |
7957da5fd4b96ccfe2b3dc87c7af58ca04ac649b | 488 | py | Python | infodesk/migrations/0003_auto_20201103_1816.py | richardkefa/myneighbourhood | 2e07dd15a0eb717788f6e6426e06e0dcc5f639b0 | [
"MIT"
] | null | null | null | infodesk/migrations/0003_auto_20201103_1816.py | richardkefa/myneighbourhood | 2e07dd15a0eb717788f6e6426e06e0dcc5f639b0 | [
"MIT"
] | null | null | null | infodesk/migrations/0003_auto_20201103_1816.py | richardkefa/myneighbourhood | 2e07dd15a0eb717788f6e6426e06e0dcc5f639b0 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-11-03 15:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: alters User.hood into a one-to-one link to
    # infodesk.neighbourhoods with CASCADE deletion, defaulting to pk '1'.

    dependencies = [
        ('infodesk', '0002_profiles_user'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='hood',
            field=models.OneToOneField(default='1', on_delete=django.db.models.deletion.CASCADE, to='infodesk.neighbourhoods'),
        ),
    ]
| 24.4 | 127 | 0.641393 |
7957db23bf0389a90dc666573a959676ca69d0b2 | 4,017 | py | Python | spockbot/mcp/mcpacket.py | SpockBotMC/SpockBot | f89911551f18357720034fbaa52837a0d09f66ea | [
"MIT"
] | 171 | 2015-02-04T00:24:15.000Z | 2022-03-06T10:23:47.000Z | spockbot/mcp/mcpacket.py | SpockBotMC/SpockBot | f89911551f18357720034fbaa52837a0d09f66ea | [
"MIT"
] | 142 | 2015-02-04T02:17:51.000Z | 2021-11-07T22:37:27.000Z | spockbot/mcp/mcpacket.py | SpockBotMC/SpockBot | f89911551f18357720034fbaa52837a0d09f66ea | [
"MIT"
] | 60 | 2015-02-06T01:19:29.000Z | 2022-03-18T18:01:42.000Z | try:
basestring
except NameError:
basestring = str
import copy
import logging
import zlib
from time import gmtime, strftime
from spockbot.mcp import datautils, proto
from spockbot.mcp.bbuff import BoundBuffer, BufferUnderflowException
from spockbot.mcp.extensions import hashed_extensions
from spockbot.mcp.proto import MC_VARINT
logger = logging.getLogger('spockbot')
class PacketDecodeFailure(Exception):
    """Raised when a packet cannot be decoded; carries the partially decoded
    packet, the buffer it was read from, and whether the cause was underflow."""

    def __init__(self, packet, pbuff, underflow=False):
        self.packet, self.pbuff, self.underflow = packet, pbuff, underflow
class Packet(object):
    """A single Minecraft protocol packet.

    A packet is identified by an ``ident`` triple ``(state, direction, id)``
    and carries its fields in the ``data`` dict. Instances can decode
    themselves from a BoundBuffer and encode themselves to wire bytes,
    honoring the protocol's optional zlib compression.
    """

    # BUG FIX: the default ident was a mutable list literal. Although it was
    # always copied via list(ident), a shared mutable default is a hazard;
    # an immutable tuple default is equivalent and safe.
    def __init__(self,
                 ident=(proto.HANDSHAKE_STATE, proto.CLIENT_TO_SERVER, 0x00),
                 data=None
                 ):
        # A string ident like "PLAY>Keep Alive" is looked up in the registry.
        if isinstance(ident, basestring):
            ident = proto.packet_str2ident[ident]
        self.__ident = list(ident)
        # Quick hack to fake default ident
        if len(self.__ident) == 2:
            self.__ident.append(0x00)
        self.ident = tuple(self.__ident)
        self.str_ident = proto.packet_ident2str[self.ident]
        self.data = data if data else {}

    def clone(self):
        """Return a deep copy of this packet (ident shared, data deep-copied)."""
        return Packet(self.ident, copy.deepcopy(self.data))

    def new_ident(self, ident):
        """Re-identify this packet in place, keeping its current data."""
        self.__init__(ident, self.data)

    def decode(self, bbuff, proto_comp_state):
        """Decode one packet from *bbuff*, replacing self.data.

        Raises PacketDecodeFailure on trailing bytes or buffer underflow.
        """
        self.data = {}
        packet_length = datautils.unpack(MC_VARINT, bbuff)
        packet_data = bbuff.recv(packet_length)
        pbuff = BoundBuffer(packet_data)
        if proto_comp_state == proto.PROTO_COMP_ON:
            # A nonzero body length means the payload is zlib-compressed.
            body_length = datautils.unpack(MC_VARINT, pbuff)
            if body_length:
                body_data = zlib.decompress(pbuff.flush(), zlib.MAX_WBITS)
                pbuff.write(body_data)
                pbuff.save()
        try:
            # Ident
            self.__ident[2] = datautils.unpack(MC_VARINT, pbuff)
            self.ident = tuple(self.__ident)
            self.str_ident = proto.packet_ident2str[self.ident]
            # Payload
            for dtype, name in proto.hashed_structs[self.ident]:
                self.data[name] = datautils.unpack(dtype, pbuff)
            # Extension
            if self.ident in hashed_extensions:
                hashed_extensions[self.ident].decode_extra(self, pbuff)
            if pbuff:
                # Leftover bytes indicate a mis-parse.
                raise PacketDecodeFailure(self, pbuff)
        except BufferUnderflowException:
            raise PacketDecodeFailure(self, pbuff, True)
        return self

    def encode(self, proto_comp_state, proto_comp_threshold, comp_level=6):
        """Serialize this packet to wire bytes.

        With compression enabled, payloads shorter than
        *proto_comp_threshold* are sent uncompressed (body length 0);
        longer ones are zlib-compressed at *comp_level*. Returns None for
        an unknown compression state.
        """
        # Ident
        o = datautils.pack(MC_VARINT, self.ident[2])
        # Payload
        for dtype, name in proto.hashed_structs[self.ident]:
            o += datautils.pack(dtype, self.data[name])
        # Extension
        if self.ident in hashed_extensions:
            o += hashed_extensions[self.ident].encode_extra(self)
        if proto_comp_state == proto.PROTO_COMP_ON:
            uncompressed_len = len(o)
            if uncompressed_len < proto_comp_threshold:
                # Below threshold: packet length, then 0 meaning "uncompressed".
                header = datautils.pack(MC_VARINT, uncompressed_len + 1)
                header += datautils.pack(MC_VARINT, 0)
            else:
                o = zlib.compress(o, comp_level)
                ulen_varint = datautils.pack(MC_VARINT, uncompressed_len)
                header = datautils.pack(MC_VARINT,
                                        len(o) + len(ulen_varint))
                header += ulen_varint
            return header + o
        elif proto_comp_state == proto.PROTO_COMP_OFF:
            return datautils.pack(MC_VARINT, len(o)) + o
        else:
            return None

    def __repr__(self):
        # direction marker: <<< for server->client, >>> for client->server
        s = ('<<<', '>>>')[self.ident[1]]
        f = "[%s] %s (0x%02X, 0x%02X): %-" + str(
            max([len(i) for i in proto.hashed_names.values()]) + 1) + "s%s"
        return f % (
            strftime("%H:%M:%S", gmtime()), s, self.ident[0], self.ident[2],
            proto.hashed_names[self.ident],
            str(self.data)
        )
| 35.548673 | 77 | 0.59771 |
7957ddd48f597afda38daa1b553440df15930cb1 | 345 | py | Python | .history/my_classes/FunctionParameters/default_values_20210702195552.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/FunctionParameters/default_values_20210702195552.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/FunctionParameters/default_values_20210702195552.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """[Default values]
What happens at run time...
When modules are loaded: All the code is executed immediately.
Module Code
a = 10 the integer object 10 is created and a references it.
def func(a): the function object is created, and func references it.
print(a)
func
""" | 24.642857 | 79 | 0.591304 |
7957de4df1581483516658db0927a49be0f1e6a4 | 25 | py | Python | Week 1: Integers, I-O, simple string operations/06.py | MLunov/Python-programming-basics-HSE | 7df8bba105db84d6b932c454fdc39193a648254e | [
"MIT"
] | null | null | null | Week 1: Integers, I-O, simple string operations/06.py | MLunov/Python-programming-basics-HSE | 7df8bba105db84d6b932c454fdc39193a648254e | [
"MIT"
] | null | null | null | Week 1: Integers, I-O, simple string operations/06.py | MLunov/Python-programming-basics-HSE | 7df8bba105db84d6b932c454fdc39193a648254e | [
"MIT"
] | null | null | null | print(int(input()) % 10)
| 12.5 | 24 | 0.6 |
7957de945d11b8d54044a2e93884cd29db7b2956 | 26,456 | py | Python | corehq/apps/couch_sql_migration/tests/test_casediff.py | tstalka/commcare-hq | 902412b0f97ba0daac173fe284f3adc4c01bcd76 | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/couch_sql_migration/tests/test_casediff.py | tstalka/commcare-hq | 902412b0f97ba0daac173fe284f3adc4c01bcd76 | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/couch_sql_migration/tests/test_casediff.py | tstalka/commcare-hq | 902412b0f97ba0daac173fe284f3adc4c01bcd76 | [
"BSD-3-Clause"
] | null | null | null | import logging
import os
from collections import defaultdict
from contextlib import contextmanager
from copy import deepcopy
from glob import glob
from inspect import signature
from signal import SIGINT
from django.test import SimpleTestCase
import attr
import gevent
from gevent.event import Event
from gevent.queue import Queue
from mock import patch
from testil import Config, tempdir
from corehq.apps.tzmigration.timezonemigration import FormJsonDiff
from corehq.form_processor.parsers.ledgers.helpers import UniqueLedgerReference
from .. import casediff as mod
from ..statedb import StateDB, init_state_db
log = logging.getLogger(__name__)
@patch.object(mod.CaseDiffQueue, "BATCH_SIZE", 2)
@patch.object(mod.BatchProcessor, "MAX_RETRIES", 0)
@patch.object(gevent.get_hub(), "SYSTEM_ERROR", BaseException)
class TestCaseDiffQueue(SimpleTestCase):
    def setUp(self):
        """Patch couch/diff entry points with in-test fakes and reset state."""
        super(TestCaseDiffQueue, self).setUp()
        self.patches = [
            patch.object(mod, "diff_cases", self.diff_cases),
            patch(
                "corehq.form_processor.backends.couch.dbaccessors.CaseAccessorCouch.get_cases",
                self.get_cases,
            ),
            patch.object(mod, "get_stock_forms_by_case_id", self.get_stock_forms),
        ]
        for patcher in self.patches:
            patcher.start()
        self.statedb = StateDB.init(":memory:")  # in-memory db for test speed
        self.cases = {}
        self.processed_forms = defaultdict(set)
        self.stock_forms = defaultdict(set)
        self.diffed = defaultdict(int)
    def tearDown(self):
        """Stop all patches and close the in-memory state db."""
        for patcher in self.patches:
            patcher.stop()
        self.statedb.close()
        super(TestCaseDiffQueue, self).tearDown()
    def test_case_diff(self):
        # Case with one form: diffed once the form is processed.
        self.add_cases("c", "fx")
        with self.queue() as queue:
            queue.update({"c"}, "fx")
        self.assertDiffed("c")
def test_diff_case_without_forms(self):
self.add_cases("cx")
with self.queue() as queue:
queue.update({"cx"}, "fx")
self.assertDiffed("cx")
def test_case_with_unprocessed_form(self):
self.add_cases("c", "f0 f1")
with self.queue() as queue:
queue.update({"c"}, "f0")
self.assertDiffed("c")
def test_case_with_null_form_update(self):
self.add_cases("cx", "fx")
with self.queue() as queue:
queue.update({"cx"}, None)
self.assertDiffed("cx")
def test_diff_batching(self):
self.add_cases("a b c d e", "fx")
batch_size = mod.CaseDiffQueue.BATCH_SIZE
assert batch_size < 3, batch_size
with self.queue() as queue:
queue.update({"a", "b", "c", "d", "e"}, "fx")
self.assertLess(len(queue.pending_cases), batch_size)
self.assertLess(len(queue.cases_to_diff), batch_size)
self.assertDiffed("a b c d e")
def test_get_cases_failure(self):
self.add_cases("a b c", "f1")
self.add_cases("d", "f2")
# simulate a single call to couch failing
with self.queue() as queue, self.get_cases_failure():
queue.update({"a", "b", "c"}, "f1")
queue.flush()
with self.queue() as queue:
queue.update({"d"}, "f2")
self.assertDiffed("a b c d")
def test_resume_after_couch_down(self):
self.add_cases("a b c", "f1")
self.add_cases("d", "f2")
# simulate couch down all the way through queue __exit__
with self.get_cases_failure(), self.queue() as queue:
queue.update({"a", "b", "c"}, "f1")
with self.queue() as queue:
queue.update({"d"}, "f2")
self.assertDiffed("a b c d")
def test_num_diffed_cases_on_resume(self):
self.add_cases("a b c", "f1")
self.add_cases("d", "f2")
# simulate a single call to couch failing
with self.queue() as queue:
queue.update({"a", "b", "c"}, "f1")
self.assertEqual(queue.num_diffed_cases, 3)
with self.queue() as queue:
queue.update({"d"}, "f2")
self.assertEqual(queue.num_diffed_cases, 4)
def test_diff_cases_failure(self):
self.add_cases("a", "f1")
self.add_cases("b", "f2")
with self.queue() as queue, self.diff_cases_failure():
queue.update({"a"}, "f1")
queue.flush()
with self.queue() as queue:
queue.update({"b"}, "f2")
self.assertDiffed("a b")
def test_stop_with_cases_to_diff(self):
self.add_cases("a", "f1")
with self.assertRaises(Error), self.queue() as queue:
# HACK mutate queue internal state
# currently there is no easier way to stop non-empty cases_to_diff
queue.cases_to_diff["a"] = 1
raise Error("do not process_remaining_diffs")
self.assertTrue(queue.cases_to_diff)
with self.queue() as queue:
pass
self.assertDiffed("a")
def test_resume_after_error_in_process_remaining_diffs(self):
self.add_cases("a", "f1")
self.add_cases("b", "f2")
with self.assertRaises(Error), self.queue() as queue:
mock = patch.object(queue, "process_remaining_diffs").start()
mock.side_effect = Error("cannot process remaining diffs")
queue.update({"a"}, "f1")
queue.pool.kill() # simulate end process
with self.queue() as queue:
queue.update({"b"}, "f2")
self.assertDiffed("a b")
def test_defer_diff_until_all_forms_are_processed(self):
self.add_cases("a b", "f0")
self.add_cases("c d", "f1")
self.add_cases("b d e", "f2")
with self.queue() as queue:
queue.update({"a", "b"}, "f0")
queue.update({"c", "d"}, "f1")
queue.flush(complete=False)
self.assertDiffed("a c")
queue.update({"b", "d", "e"}, "f2")
self.assertDiffed("a b c d e")
def test_unexpected_case_update_scenario_1(self):
self.add_cases("a b", "f0")
with self.queue() as queue:
queue.update({"a", "b"}, "f0")
queue.flush(complete=False)
self.assertDiffed("a b")
queue.update({"a"}, "f1") # unexpected update
self.assertDiffed({"a": 2, "b": 1})
def test_unexpected_case_update_scenario_2(self):
self.add_cases("a", "f0")
self.add_cases("b", "f1")
with self.queue() as queue:
queue.update({"a", "b"}, "f0") # unexpected update first time b is seen
queue.flush(complete=False)
self.assertDiffed("a b")
queue.update({"b"}, "f1")
self.assertDiffed({"a": 1, "b": 2})
def test_missing_couch_case(self):
self.add_cases("found", "form")
with self.queue() as queue:
queue.update({"miss", "found"}, "form")
self.assertDiffed("found")
missing = queue.statedb.get_missing_doc_ids("CommCareCase-couch")
self.assertEqual(missing, {"miss"})
def test_case_action_with_null_xform_id(self):
self.add_cases("a", actions=[FakeAction(None), FakeAction("f0")])
self.add_cases("b c", "f1")
with self.queue() as queue:
queue.update({"a"}, "f0")
queue.update(["b", "c"], "f1")
queue.flush(complete=False)
self.assertDiffed("a b c")
def test_case_with_stock_forms(self):
self.add_cases("a", "f0", stock_forms="f1")
self.add_cases("b c", "f2")
with self.queue() as queue:
queue.update({"a"}, "f0")
queue.update(["b", "c"], "f2")
queue.flush(complete=False)
self.assertDiffed("b c")
queue.update({"a"}, "f1")
queue.flush(complete=False)
self.assertDiffed("a b c")
def test_case_with_many_forms(self):
self.add_cases("a", ["f%s" % n for n in range(25)])
self.add_cases("b c d", "f2")
with self.queue() as queue:
queue.update({"a"}, "f0")
queue.update(["b", "c", "d"], "f2")
queue.flush(complete=False)
self.assertDiffed("b c d")
self.assertNotIn("a", queue.cases)
for n in range(1, 25):
queue.update({"a"}, "f%s" % n)
queue.flush(complete=False)
self.assertDiffed("a b c d")
def test_resume_on_case_with_many_forms(self):
self.add_cases("a", ["f%s" % n for n in range(25)])
self.add_cases("b c d", "f2")
with self.assertRaises(Error), self.queue() as queue:
queue.update({"a"}, "f0")
queue.update(["b", "c", "d"], "f2")
queue.flush(complete=False)
mock = patch.object(queue, "process_remaining_diffs").start()
mock.side_effect = Error("cannot process remaining diffs")
self.assertDiffed("b c d")
queue.pool.kill() # simulate end process
with self.queue() as queue:
queue.flush(complete=False)
self.assertNotIn("a", queue.cases)
for n in range(1, 25):
queue.update({"a"}, "f%s" % n)
queue.flush(complete=False)
self.assertDiffed("a b c d")
def test_cases_cache_max_size(self):
self.add_cases("a b c d e f", ["f0", "f1"])
patch_max_cases = patch.object(mod.CaseDiffQueue, "MAX_MEMORIZED_CASES", 4)
with patch_max_cases, self.queue() as queue:
queue.update({"a", "b", "c", "d", "e", "f"}, "f0")
queue.flush(complete=False)
self.assertEqual(len(queue.cases), 4, queue.cases)
self.assertDiffed([])
queue.update({"a", "b", "c", "d", "e", "f"}, "f1")
self.assertDiffed("a b c d e f")
def test_cases_lru_cache(self):
# forms fa-ff update corresponding cases
for case_id in "abcdef":
self.add_cases(case_id, "f%s" % case_id)
# forms f1-f5 update case a
for i in range(1, 6):
self.add_cases("a", "f%s" % i)
self.add_cases("a b c d e f", "fx")
patch_max_cases = patch.object(mod.CaseDiffQueue, "MAX_MEMORIZED_CASES", 4)
with patch_max_cases, self.queue() as queue:
for i, case_id in enumerate("abcdef"):
queue.update(case_id, "f%s" % case_id)
if i > 0:
# keep "a" fresh in the LRU cache
queue.update("a", "f%s" % i)
queue.flush(complete=False)
self.assertIn("a", queue.cases)
self.assertNotIn("b", queue.cases)
self.assertNotIn("c", queue.cases)
self.assertDiffed([])
queue.update({"a", "b", "c", "d", "e", "f"}, "fx")
self.assertDiffed("a b c d e f")
def test_case_with_many_unprocessed_forms(self):
self.add_cases("a", ["f%s" % n for n in range(25)])
self.add_cases("b c d", "f2")
with self.queue() as queue:
queue.update({"a"}, "f0")
queue.update(["b", "c", "d"], "f2")
self.assertDiffed("a b c d")
def test_case_with_new_forms_since_first_seen(self):
self.add_cases("a b", "f0")
self.add_cases("a b c d", "f1")
self.add_cases("e f g h", "f2")
with self.queue() as queue:
queue.update({"a", "b"}, "f0")
queue.flush(complete=False)
self.assertDiffed([])
self.add_cases("b", "fx")
queue.update(["a", "b", "c", "d"], "f1")
flush(queue.pool)
flush(queue.diff_pool)
self.assertDiffed("a c d")
queue.update(["b"], "fx")
queue.update(["e", "f", "g", "h"], "f2")
flush(queue.pool)
flush(queue.diff_pool)
self.assertDiffed("a b c d e")
self.assertDiffed("a b c d e f g h")
def test_status_logger(self):
event = Event()
with patch.object(mod, "log_status") as log_status:
log_status.side_effect = lambda x: event.set()
with mod.CaseDiffQueue(self.statedb, status_interval=0.00001):
self.assertTrue(event.wait(timeout=5), "queue.log_status() not called")
self.assertGreater(log_status.call_count, 0)
@contextmanager
def queue(self):
log.info("init CaseDiffQueue")
with mod.CaseDiffQueue(self.statedb) as queue:
try:
yield queue
except Exception as err:
log.error("%s: %s", type(err).__name__, err)
raise
def add_cases(self, case_ids, xform_ids=(), actions=(), stock_forms=()):
"""Add cases with updating form ids
`case_ids` and `form_ids` can be either a string (space-
delimited ids) or a sequence of strings (ids).
"""
if isinstance(case_ids, str):
case_ids = case_ids.split()
if isinstance(xform_ids, str):
xform_ids = xform_ids.split()
if isinstance(stock_forms, str):
stock_forms = stock_forms.split()
for case_id in case_ids:
if case_id in self.cases:
case = self.cases[case_id]
else:
case = self.cases[case_id] = FakeCase(case_id)
for fid in xform_ids:
assert fid not in case.xform_ids, (fid, case)
case.xform_ids.append(fid)
case.actions.extend(actions)
self.stock_forms[case_id].update(stock_forms)
def get_cases(self, case_ids):
def get(case_id):
try:
case = self.cases[case_id]
log.info("get %s", case)
except KeyError:
case = None
except Exception as err:
log.info("get %s -> %s", case_id, err)
raise
return case
cases = (get(case_id) for case_id in case_ids)
return [c for c in cases if c is not None]
def get_stock_forms(self, case_ids):
return dict(self.stock_forms)
def diff_cases(self, cases, statedb):
log.info("diff cases %s", list(cases))
for case in cases.values():
case_id = case["_id"]
self.diffed[case_id] += 1
def assertDiffed(self, spec):
if not isinstance(spec, dict):
if isinstance(spec, str):
spec = spec.split()
spec = {c: 1 for c in spec}
self.assertEqual(dict(self.diffed), spec)
@contextmanager
def get_cases_failure(self):
"""Raise error on CaseAccessorCouch.get_cases(...)
Assumes `CaseAccessorCouch.get_cases` has been patched with
`self.get_cases`.
"""
with self.expected_error(), patch.object(self, "cases") as couch:
couch.__getitem__.side_effect = Error("COUCH IS DOWN!")
yield
@contextmanager
def diff_cases_failure(self):
"""Raise error on self.diff_cases(...)"""
with self.expected_error(), patch.object(self, "diffed") as diffed:
diffed.__setitem__.side_effect = Error("FAILED TO DIFF!")
yield
@contextmanager
def expected_error(self):
with silence_expected_errors(), self.assertRaises(Error):
yield
@patch.object(gevent.get_hub(), "SYSTEM_ERROR", BaseException)
class TestCaseDiffProcess(SimpleTestCase):
    """Tests for ``casediff.CaseDiffProcess``, which runs a (fake) diff
    queue in a separate OS process and talks to it over a status channel.
    """
    @classmethod
    def setUpClass(cls):
        super(TestCaseDiffProcess, cls).setUpClass()
        # Enter the tempdir context manually; exited in tearDownClass.
        cls.tmp = tempdir()
        cls.state_dir = cls.tmp.__enter__()
    @classmethod
    def tearDownClass(cls):
        cls.tmp.__exit__(None, None, None)
        super(TestCaseDiffProcess, cls).tearDownClass()
    def tearDown(self):
        # Remove per-test state db files and diff-process log files so
        # each test starts from a clean state dir.
        db_paths = glob(os.path.join(self.state_dir, "db", "*"))
        for path in db_paths + self.get_log_files():
            assert os.path.isabs(path), path
            os.remove(path)
        super(TestCaseDiffProcess, self).tearDown()
    def test_process(self):
        # status triples are [pending, loaded, diffed]
        with self.process() as proc:
            self.assertEqual(proc.get_status(), [0, 0, 0])
            proc.update({"case1", "case2"}, "form")
            proc.enqueue("case")
            self.assertEqual(proc.get_status(), [2, 1, 0])
    def test_process_statedb(self):
        # counters persist in the state db across process restarts
        with self.process() as proc1:
            self.assertEqual(proc1.get_status(), [0, 0, 0])
            proc1.enqueue("case")
            self.assertEqual(proc1.get_status(), [0, 1, 0])
        with self.process() as proc2:
            self.assertEqual(proc2.get_status(), [0, 1, 0])
            proc2.enqueue("case")
            self.assertEqual(proc2.get_status(), [0, 2, 0])
    def test_process_not_allowed(self):
        # once a state db has been used with an in-process queue it may
        # not be reused with a separate diff process
        with init_state_db("test", self.state_dir) as statedb:
            with mod.CaseDiffQueue(statedb):
                pass
        with init_state_db("test", self.state_dir) as statedb:
            with self.assertRaises(mod.ProcessNotAllowed):
                mod.CaseDiffProcess(statedb)
    def test_clean_break(self):
        with self.process() as proc:
            self.assertEqual(proc.get_status(), [0, 0, 0])
            # SIGINT should trigger a clean break in the child process
            os.kill(proc.process.pid, SIGINT)
            self.assertEqual(proc.get_status(), [0, 0, 1])
    def test_fake_case_diff_queue_interface(self):
        # FakeCaseDiffQueue's public methods must mirror the real
        # CaseDiffQueue signatures exactly
        tested = set()
        for name in dir(FakeCaseDiffQueue):
            if name.startswith("_"):
                continue
            tested.add(name)
            fake = getattr(FakeCaseDiffQueue, name)
            real = getattr(mod.CaseDiffQueue, name)
            self.assertEqual(signature(fake), signature(real))
        self.assertEqual(tested, {"update", "enqueue", "get_status"})
    @contextmanager
    def process(self):
        """Yield a ``CaseDiffProcess`` wrapping ``FakeCaseDiffQueue``,
        with a ``get_status()`` helper grafted on that requests status
        and waits for the patched ``log_status`` to report it."""
        def log_status(status):
            log.info("status: %s", status)
            cached = status.pop("cached")
            assert cached == "0/0", cached
            keys = ["pending", "loaded", "diffed"]
            assert set(keys) == set(status), status
            status_queue.put([status[k] for k in keys])
        def get_status():
            proc.request_status()
            return status_queue.get(timeout=5)
        status_queue = Queue()
        try:
            with init_state_db("test", self.state_dir) as statedb, \
                    patch.object(mod, "log_status", log_status), \
                    mod.CaseDiffProcess(statedb, FakeCaseDiffQueue) as proc:
                # consume the initial status report before handing over
                status_queue.get(timeout=5)
                assert not hasattr(proc, "get_status"), proc
                proc.get_status = get_status
                yield proc
        finally:
            # always dump child-process logs to aid debugging failures
            print(f"{' diff process logs ':-^40}")
            for log_file in self.get_log_files():
                print("#", log_file)
                with open(log_file, encoding="utf-8") as fh:
                    print(fh.read())
            print(f"{' end diff process logs ':-^40}")
    def get_log_files(self):
        """Return paths of diff-process log files in the state dir."""
        return glob(os.path.join(self.state_dir, "*-casediff.log"))
class FakeCaseDiffQueue(object):
    """Stand-in for ``mod.CaseDiffQueue`` used by the process tests.

    It does no real diffing; it only counts calls in ``self.stats`` and
    persists/restores those counters through the state db's resume
    state, mirroring the real queue's enter/exit protocol. Public method
    signatures must match ``mod.CaseDiffQueue`` exactly (enforced by
    ``test_fake_case_diff_queue_interface``).
    """

    def __init__(self, statedb, status_interval=None):
        self.statedb = statedb
        self.clean_break = False
        self.stats = {"pending": 0, "cached": "0/0", "loaded": 0, "diffed": 0}

    def __enter__(self):
        # Resume counters from a previous (interrupted) run, if any.
        resume_key = type(self).__name__
        with self.statedb.pop_resume_state(resume_key, {}) as state:
            if "stats" in state:
                self.stats = state["stats"]
        return self

    def __exit__(self, *exc_info):
        # Persist counters so a later run can pick up where we left off.
        self.statedb.set_resume_state(type(self).__name__, {"stats": self.stats})

    def update(self, case_ids, form_id):
        """Record that ``case_ids`` were submitted for diffing."""
        self.stats["pending"] += len(case_ids)

    def enqueue(self, case_id, num_forms=None):
        """Record that one case was loaded for diffing."""
        self.stats["loaded"] += 1

    def get_status(self):
        """Return the counters; report one diffed case after a clean break."""
        stats = self.stats
        if self.clean_break:
            stats["diffed"] = 1
        return stats
@patch.object(gevent.get_hub(), "SYSTEM_ERROR", BaseException)
class TestBatchProcessor(SimpleTestCase):
    """Tests for ``casediff.BatchProcessor`` retry behavior."""
    def setUp(self):
        super(TestBatchProcessor, self).setUp()
        self.proc = mod.BatchProcessor(mod.Pool())
    def test_retry_batch(self):
        # fails once, succeeds on retry
        def do(thing):
            tries.append(1)
            if len(tries) < 2:
                raise Error("cannot do thing the first time")
            done.append(thing)
        tries = []
        done = []
        self.proc.spawn(do, "thing")
        flush(self.proc.pool)
        self.assertEqual(len(tries), 2)
        self.assertEqual(done, ["thing"])
    def test_batch_max_retries(self):
        # always fails; gives up after the retry limit
        # (3 total attempts here — presumably MAX_RETRIES defaults to 2;
        # TODO confirm against BatchProcessor)
        def do(thing):
            tries.append(1)
            raise Error("cannot do thing... ever")
        tries = []
        self.proc.spawn(do, "thing")
        with silence_expected_errors(), self.assertRaises(Error):
            flush(self.proc.pool)
        self.assertEqual(len(tries), 3)
class TestDiffCases(SimpleTestCase):
    """Tests for ``mod.diff_cases`` with SQL/couch accessors faked by
    in-memory dicts (``sql_cases``/``couch_cases`` and the ledger
    equivalents)."""
    def setUp(self):
        super(TestDiffCases, self).setUp()
        self.patches = [
            patch(
                "corehq.form_processor.backends.sql.dbaccessors.CaseAccessorSQL.get_cases",
                self.get_sql_cases,
            ),
            patch(
                "corehq.form_processor.backends.sql.dbaccessors"
                ".LedgerAccessorSQL.get_ledger_values_for_cases",
                self.get_sql_ledgers,
            ),
            patch(
                "corehq.apps.commtrack.models.StockState.objects.filter",
                self.get_stock_states,
            ),
            patch(
                "corehq.form_processor.backends.couch.processor"
                ".FormProcessorCouch.hard_rebuild_case",
                self.hard_rebuild_case,
            ),
        ]
        for patcher in self.patches:
            patcher.start()
        self.statedb = StateDB.init(":memory:")
        self.sql_cases = {}
        self.sql_ledgers = {}
        self.couch_cases = {}
        self.couch_ledgers = {}
    def tearDown(self):
        for patcher in self.patches:
            patcher.stop()
        self.statedb.close()
        super(TestDiffCases, self).tearDown()
    def test_clean(self):
        # identical couch/sql cases produce no diffs
        self.add_case("a")
        mod.diff_cases(self.couch_cases, self.statedb)
        self.assert_diffs()
    def test_diff(self):
        couch_json = self.add_case("a", prop=1)
        couch_json["prop"] = 2
        mod.diff_cases(self.couch_cases, self.statedb)
        self.assert_diffs([Diff("a", path=["prop"], old=2, new=1)])
    def test_replace_diff(self):
        # a later clean diff run replaces previously recorded diffs
        self.add_case("a", prop=1)
        different_cases = deepcopy(self.couch_cases)
        different_cases["a"]["prop"] = 2
        mod.diff_cases(different_cases, self.statedb)
        self.assert_diffs([Diff("a", path=["prop"], old=2, new=1)])
        mod.diff_cases(self.couch_cases, self.statedb)
        self.assert_diffs()
    def test_replace_ledger_diff(self):
        self.add_case("a")
        stock = self.add_ledger("a", x=1)
        stock.values["x"] = 2
        mod.diff_cases(self.couch_cases, self.statedb)
        self.assert_diffs([Diff("a/stock/a", "stock state", path=["x"], old=2, new=1)])
        stock.values["x"] = 1
        mod.diff_cases(self.couch_cases, self.statedb)
        self.assert_diffs()
    def assert_diffs(self, expected=None):
        """Assert the state db holds exactly ``expected`` diffs (none by
        default)."""
        actual = [
            Diff(diff.doc_id, diff.kind, *diff.json_diff)
            for diff in self.statedb.get_diffs()
        ]
        self.assertEqual(actual, expected or [])
    def add_case(self, case_id, **props):
        """Register matching SQL and couch versions of a case; return the
        (mutable) couch JSON so tests can introduce discrepancies."""
        assert case_id not in self.sql_cases, self.sql_cases[case_id]
        assert case_id not in self.couch_cases, self.couch_cases[case_id]
        props.setdefault("doc_type", "CommCareCase")
        self.sql_cases[case_id] = Config(
            case_id=case_id,
            props=props,
            to_json=lambda: dict(props, case_id=case_id),
            is_deleted=False,
        )
        self.couch_cases[case_id] = couch_case = dict(props, case_id=case_id)
        return couch_case
    def add_ledger(self, case_id, **values):
        """Register matching SQL and couch ledgers for a case; return the
        couch-side stock object so tests can mutate its values."""
        ref = UniqueLedgerReference(case_id, "stock", case_id)
        self.sql_ledgers[case_id] = Config(
            ledger_reference=ref,
            values=values,
            to_json=lambda: dict(values, ledger_reference=ref.as_id()),
        )
        # copy so couch values can diverge from the SQL ones
        couch_values = dict(values)
        stock = Config(
            ledger_reference=ref,
            values=couch_values,
            to_json=lambda: dict(couch_values, ledger_reference=ref.as_id()),
        )
        self.couch_ledgers[case_id] = stock
        return stock
    def get_sql_cases(self, case_ids):
        """Fake for ``CaseAccessorSQL.get_cases``."""
        return [self.sql_cases[c] for c in case_ids]
    def get_sql_ledgers(self, case_ids):
        """Fake for ``LedgerAccessorSQL.get_ledger_values_for_cases``."""
        ledgers = self.sql_ledgers
        return [ledgers[c] for c in case_ids if c in ledgers]
    def get_stock_states(self, case_id__in):
        """Fake for ``StockState.objects.filter(case_id__in=...)``."""
        ledgers = self.couch_ledgers
        return [ledgers[c] for c in case_id__in if c in ledgers]
    def hard_rebuild_case(self, domain, case_id, *args, **kw):
        """Fake for couch ``hard_rebuild_case``: rebuild is a no-op that
        returns the stored couch JSON."""
        return Config(to_json=lambda: self.couch_cases[case_id])
@attr.s
class Diff(object):
    """Value object used to compare recorded diffs in assertions.

    Field order matters: it defines the attrs-generated positional
    ``__init__`` used as ``Diff(doc_id, kind, *json_diff)``.
    """
    doc_id = attr.ib()                      # id of the diffed document
    kind = attr.ib(default="CommCareCase")  # document kind
    type = attr.ib(default="diff")          # diff type
    path = attr.ib(factory=list)            # JSON path of the difference
    old = attr.ib(default=None)             # couch-side value
    new = attr.ib(default=None)             # sql-side value
@attr.s
class FakeCase(object):
    """Minimal couch-case stand-in with just the attributes the diff
    queue reads: ``_id``, ``xform_ids``, and ``actions``."""
    _id = attr.ib()
    xform_ids = attr.ib(factory=list)
    actions = attr.ib(factory=list)
    @property
    def case_id(self):
        """Alias for ``_id`` matching the real case model's API."""
        return self._id
    def to_json(self):
        """Return a couch-style JSON dict for this case."""
        data = {n: getattr(self, n) for n in ["_id", "xform_ids"]}
        data["actions"] = [a.to_json() for a in self.actions]
        return data
@attr.s
class FakeAction(object):
    """Minimal case-action stand-in holding only the form id (which may
    be ``None``, as exercised by ``test_case_action_with_null_xform_id``)."""
    xform_id = attr.ib()
    def to_json(self):
        return {"xform_id": self.xform_id}
DIFFS = [FormJsonDiff("diff", ["prop"], "old", "new")]
class Error(Exception):
    """Test-local error type, used so handlers can be narrowly targeted."""
    pass
@contextmanager
def silence_expected_errors():
    """Prevent print expected error to stderr

    Temporarily replaces the gevent hub's ``print_exception`` so that
    expected ``Error`` instances raised in greenlets are logged instead
    of printed; everything else is forwarded to the original handler.

    not sure why it is not captured by nose
    """
    def print_unexpected(context, type, value, tb):
        if type == Error:
            log.error(context, exc_info=(type, value, tb))
        else:
            print_exception(context, type, value, tb)
    hub = gevent.get_hub()
    print_exception = hub.print_exception
    with patch.object(hub, "print_exception", print_unexpected):
        yield
def flush(pool):
    """Block until all greenlets in ``pool`` have finished, logging a
    progress message once per second while waiting."""
    while not pool.join(timeout=1):
        log.info('waiting on {} case diff workers'.format(len(pool)))
| 35.13413 | 95 | 0.578168 |
7957df8072452549aa57d86e28163a17abbfbd4e | 1,218 | py | Python | env/lib/python3.6/site-packages/django/core/cache/backends/dummy.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 58 | 2018-10-03T19:41:36.000Z | 2022-03-14T21:24:43.000Z | env/lib/python3.6/site-packages/django/core/cache/backends/dummy.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | env/lib/python3.6/site-packages/django/core/cache/backends/dummy.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z | "Dummy cache backend"
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
class DummyCache(BaseCache):
    """Cache backend that never stores anything.

    Keys are still built and validated exactly as a real backend would,
    so code exercising the cache API surfaces bad keys, but every read
    misses and every write is discarded.
    """

    def __init__(self, host, *args, **kwargs):
        # The ``host`` portion of the backend location is ignored.
        BaseCache.__init__(self, *args, **kwargs)

    def _check_key(self, key, version):
        # Build the versioned key and run the usual validation, then
        # throw the key away — this backend keeps no state.
        self.validate_key(self.make_key(key, version=version))

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        self._check_key(key, version)
        # Pretend the value was added.
        return True

    def get(self, key, default=None, version=None):
        self._check_key(key, version)
        # Every lookup is a miss.
        return default

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        self._check_key(key, version)

    def delete(self, key, version=None):
        self._check_key(key, version)

    def get_many(self, keys, version=None):
        return {}

    def has_key(self, key, version=None):
        self._check_key(key, version)
        return False

    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        # No keys failed to be set (none were attempted).
        return []

    def delete_many(self, keys, version=None):
        pass

    def clear(self):
        pass
7957dfd5618f3270e52b372743b25f721b495a05 | 42 | py | Python | api/__init__.py | derekmerck/DixelKit | 7a3e613deed647a3cef7d3e1fe0d521d827b6ee3 | [
"MIT"
] | null | null | null | api/__init__.py | derekmerck/DixelKit | 7a3e613deed647a3cef7d3e1fe0d521d827b6ee3 | [
"MIT"
] | null | null | null | api/__init__.py | derekmerck/DixelKit | 7a3e613deed647a3cef7d3e1fe0d521d827b6ee3 | [
"MIT"
] | null | null | null | __all__ = ["Montage", "Orthanc", "Splunk"] | 42 | 42 | 0.642857 |
7957e089e31a6ac793238a86fb36207d61d9bb47 | 1,908 | py | Python | tensorflow/python/kernel_tests/strings_ops/string_upper_op_test.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 190,993 | 2015-11-09T13:17:30.000Z | 2022-03-31T23:05:27.000Z | tensorflow/python/kernel_tests/strings_ops/string_upper_op_test.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 48,461 | 2015-11-09T14:21:11.000Z | 2022-03-31T23:17:33.000Z | tensorflow/python/kernel_tests/strings_ops/string_upper_op_test.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 104,981 | 2015-11-09T13:40:17.000Z | 2022-03-31T19:51:54.000Z | # -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_upper_op."""
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringUpperOpTest(test.TestCase):
  """Test cases for tf.strings.upper."""

  def test_string_upper(self):
    # default encoding: byte-wise ASCII uppercasing
    strings = ["Pigs on The Wing", "aNimals"]
    with self.cached_session():
      output = string_ops.string_upper(strings)
      output = self.evaluate(output)
      self.assertAllEqual(output, [b"PIGS ON THE WING", b"ANIMALS"])

  def test_string_upper_2d(self):
    # shape is preserved; whitespace/control characters pass through
    strings = [["pigS on THE wIng", "aniMals"], [" hello ", "\n\tWorld! \r \n"]]
    with self.cached_session():
      output = string_ops.string_upper(strings)
      output = self.evaluate(output)
      self.assertAllEqual(output, [[b"PIGS ON THE WING", b"ANIMALS"],
                                   [b" HELLO ", b"\n\tWORLD! \r \n"]])

  def test_string_upper_unicode(self):
    # with encoding="utf-8", non-ASCII letters are uppercased too
    strings = [["óósschloë"]]
    with self.cached_session():
      output = string_ops.string_upper(strings, encoding="utf-8")
      output = self.evaluate(output)
      # output: "ÓÓSSCHLOË"
      self.assertAllEqual(output, [[b"\xc3\x93\xc3\x93SSCHLO\xc3\x8b"]])
if __name__ == "__main__":
test.main()
| 36 | 80 | 0.658281 |
7957e11196ebc0fd698e754b65e942e2b8b463fe | 680 | bzl | Python | cuda/private/toolchain/cuda_configure.bzl | neilisaac/rules_cuda | 5355abe64c9cdbd9fb2ce9c2b12987a9ade7996b | [
"Apache-2.0"
] | 4 | 2021-08-18T12:18:01.000Z | 2022-03-21T09:44:51.000Z | cuda/private/toolchain/cuda_configure.bzl | neilisaac/rules_cuda | 5355abe64c9cdbd9fb2ce9c2b12987a9ade7996b | [
"Apache-2.0"
] | 1 | 2022-02-22T06:46:32.000Z | 2022-02-22T06:46:33.000Z | cuda/private/toolchain/cuda_configure.bzl | neilisaac/rules_cuda | 5355abe64c9cdbd9fb2ce9c2b12987a9ade7996b | [
"Apache-2.0"
] | 1 | 2022-03-24T09:00:36.000Z | 2022-03-24T09:00:36.000Z | load("//cuda:private/toolchain/cuda_configure_windows.bzl", "configure_windows_toolchain")
load("//cuda:private/toolchain/cuda_configure_linux.bzl", "configure_linux_toolchain")
load("//cuda:private/toolchain/lib_cuda_configure.bzl",
"get_cpu_value",
"auto_configure_fail"
)
def _cuda_configure_impl(repository_ctx):
    """Repository rule implementation: configure a CUDA toolchain for the host.

    Dispatches on the Bazel CPU value: "x64_windows" selects the Windows
    toolchain, "k8" (x86_64 Linux) the Linux toolchain. Any other platform
    is a hard configuration failure.
    """
    cpu = get_cpu_value(repository_ctx)
    if cpu == "x64_windows":
        configure_windows_toolchain(repository_ctx)
    elif cpu == "k8":
        configure_linux_toolchain(repository_ctx)
    else:
        # Name the offending CPU value so the failure is actionable.
        auto_configure_fail("Unsupported platform: {}".format(cpu))
# Repository rule that generates the host-appropriate CUDA toolchain.
cuda_configure = repository_rule(
    implementation = _cuda_configure_impl,
)
| 34 | 91 | 0.738235 |
7957e19767d3f8f570e3fd168c8a6b470699c9eb | 4,539 | py | Python | venv/Lib/site-packages/tklib37/Table.py | GabrielAmare/Darts | 182748d821b8c1838071f3b28724d0d9b095dcf9 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/tklib37/Table.py | GabrielAmare/Darts | 182748d821b8c1838071f3b28724d0d9b095dcf9 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/tklib37/Table.py | GabrielAmare/Darts | 182748d821b8c1838071f3b28724d0d9b095dcf9 | [
"MIT"
] | null | null | null | from tkinter import Frame, NSEW
class Table(Frame):
    """A tkinter Frame laying out widgets on a (row, col) grid.

    Widgets are tracked in ``self.widgets`` keyed by ``(row, col)``;
    ``row_weights``/``col_weights`` remember the grid weights assigned
    to each occupied row/column. Private ``_xxx`` methods implement the
    operations; the public methods are thin documented wrappers.
    """
    def __init__(self, root, **cfg):
        super().__init__(root, **cfg)
        self.widgets = {}  # (row, col) -> widget
        self.row_weights, self.col_weights = {}, {}
    @property
    def n_cols(self):
        # one past the highest occupied column (0 when empty)
        return max((col for row, col in self.widgets.keys()), default=-1) + 1
    @property
    def n_rows(self):
        # one past the highest occupied row (0 when empty)
        return max((row for row, col in self.widgets.keys()), default=-1) + 1
    def _set_widget(self, row: int, col: int, cls, **cfg):
        # instantiate cls with this table as parent and grid it; give
        # newly used rows/columns a default weight of 1
        widget = cls(self, **cfg)
        self._put_widget(row, col, widget)
        if row not in self.row_weights:
            self.row_weights[row] = 1
            self.rowconfigure(row, weight=1)
        if col not in self.col_weights:
            self.col_weights[col] = 1
            self.columnconfigure(col, weight=1)
        return widget
    def _upd_widget(self, row: int, col: int, **cfg):
        widget = self._get_widget(row, col)
        if widget:
            widget.configure(**cfg)
    def _get_widget(self, row: int, col: int):
        return self.widgets.get((row, col), None)
    def _del_widget(self, row: int, col: int):
        widget = self._pop_widget(row, col)
        if widget:
            widget.destroy()
    def _pop_widget(self, row: int, col: int):
        # remove from tracking and ungrid; returns None when the cell is empty
        if (row, col) in self.widgets:
            widget = self.widgets.pop((row, col))
            widget.grid_forget()
            return widget
    def _put_widget(self, row: int, col: int, widget):
        # place an existing widget (None is silently ignored)
        if widget:
            self.widgets[(row, col)] = widget
            widget.grid(row=row, column=col, sticky=NSEW)
    def _invert_widgets(self, old_row: int, old_col: int, new_row: int, new_col: int):
        old_widget = self._pop_widget(old_row, old_col)
        new_widget = self._pop_widget(new_row, new_col)
        self._put_widget(old_row, old_col, new_widget)
        self._put_widget(new_row, new_col, old_widget)
    def _invert_rows(self, old_row: int, new_row: int):
        # swap every cell of the two rows, then swap their grid weights
        for col in range(self.n_cols):
            self._invert_widgets(old_row, col, new_row, col)
        self.row_weights[old_row], self.row_weights[new_row] = self.row_weights.get(new_row, 1), self.row_weights.get(old_row, 1)
        self.rowconfigure(old_row, weight=self.row_weights[old_row])
        self.rowconfigure(new_row, weight=self.row_weights[new_row])
    def _invert_cols(self, old_col: int, new_col: int):
        # swap every cell of the two columns, then swap their grid weights
        for row in range(self.n_rows):
            self._invert_widgets(row, old_col, row, new_col)
        self.col_weights[old_col], self.col_weights[new_col] = self.col_weights.get(new_col, 1), self.col_weights.get(old_col, 1)
        self.columnconfigure(old_col, weight=self.col_weights[old_col])
        self.columnconfigure(new_col, weight=self.col_weights[new_col])
    def _del_row(self, row: int):
        # collect first, then delete, to avoid mutating while iterating
        to_del = [col for row_, col in self.widgets if row_ == row]
        for col in to_del:
            self.del_widget(row, col)
        if row in self.row_weights:
            del self.row_weights[row]
            self.rowconfigure(row, weight=0)
    def _del_col(self, col: int):
        # collect first, then delete, to avoid mutating while iterating
        to_del = [row for row, col_ in self.widgets if col_ == col]
        for row in to_del:
            self.del_widget(row, col)
        if col in self.col_weights:
            del self.col_weights[col]
            self.columnconfigure(col, weight=0)
    def set_widget(self, row, col, cls, **cfg):
        """Create and place a widget of class `cls` at a row & column"""
        return self._set_widget(row, col, cls, **cfg)
    def upd_widget(self, row, col, **cfg):
        """Update the config of the widget at a row & column"""
        return self._upd_widget(row, col, **cfg)
    def get_widget(self, row: int, col: int):
        """Get the widget at a row & column (None if empty)"""
        return self._get_widget(row, col)
    def del_widget(self, row: int, col: int):
        """Delete a widget"""
        return self._del_widget(row, col)
    def invert_widgets(self, old_row: int, old_col: int, new_row: int, new_col: int):
        """Invert two widgets"""
        return self._invert_widgets(old_row, old_col, new_row, new_col)
    def invert_rows(self, old_row: int, new_row: int):
        """Invert two rows"""
        return self._invert_rows(old_row, new_row)
    def invert_cols(self, old_col: int, new_col: int):
        """Invert two columns"""
        return self._invert_cols(old_col, new_col)
    def del_row(self, row: int):
        """Delete a row"""
        self._del_row(row)
    def del_col(self, col: int):
        """Delete a column"""
        self._del_col(col)
7957e1be9b787c120719d95738be37e66741b96b | 4,202 | py | Python | Python/Product/TestAdapter.Executor/PythonFiles/testing_tools/adapter/pytest/_discovery.py | techkey/PTVS | 8355e67eedd8e915ca49bd38a2f36172696fd903 | [
"Apache-2.0"
] | 404 | 2019-05-07T02:21:57.000Z | 2022-03-31T17:03:04.000Z | Python/Product/TestAdapter.Executor/PythonFiles/testing_tools/adapter/pytest/_discovery.py | techkey/PTVS | 8355e67eedd8e915ca49bd38a2f36172696fd903 | [
"Apache-2.0"
] | 1,672 | 2019-05-06T21:09:38.000Z | 2022-03-31T23:16:04.000Z | Python/Product/TestAdapter.Executor/PythonFiles/testing_tools/adapter/pytest/_discovery.py | techkey/PTVS | 8355e67eedd8e915ca49bd38a2f36172696fd903 | [
"Apache-2.0"
] | 186 | 2019-05-13T03:17:37.000Z | 2022-03-31T16:24:05.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from __future__ import absolute_import, print_function
import sys
import pytest
from .. import util, discovery
from ._pytest_item import parse_item
#note: this must match testlauncher.py
def patch_translate_non_printable():
    """Monkeypatch pytest's ``_translate_non_printable`` to also escape
    characters that break VS Test Explorer FQN parsing or pytest test-id
    round-tripping.

    The replacement order is significant: '\\n' must be rewritten before
    '\\\\' so literal backslash+n sequences are handled distinctly.
    Silently does nothing (beyond printing an error) when the pytest
    internal being patched does not exist in the installed version.
    """
    import _pytest.compat
    translate_non_printable = getattr(_pytest.compat, "_translate_non_printable", None)
    if translate_non_printable:
        def _translate_non_printable_patched(s):
            # run pytest's own translation first, then layer ours on top
            s = translate_non_printable(s)
            s = s.replace(':', '/:') # pytest testcase not found error and VS TestExplorer FQN parsing
            s = s.replace('.', '_') # VS TestExplorer FQN parsing
            s = s.replace('\n', '/n') # pytest testcase not found error
            s = s.replace('\\', '/') # pytest testcase not found error, fixes cases (actual backslash followed by n)
            s = s.replace('\r', '/r') # pytest testcase not found error
            return s
        _pytest.compat._translate_non_printable = _translate_non_printable_patched
    else:
        print("ERROR: failed to patch pytest, _pytest.compat._translate_non_printable")
# Apply the patch at import time so discovery below sees patched ids.
patch_translate_non_printable()
def discover(pytestargs=None, hidestdio=False,
             _pytest_main=pytest.main, _plugin=None, **_ignored):
    """Return the results of test discovery.

    Runs pytest in collect-only mode with a ``TestCollector`` plugin and
    returns ``(parents, tests)`` from the collected results. ``hidestdio``
    captures pytest's output and replays it to stderr only on failure.
    ``_pytest_main`` and ``_plugin`` exist for test injection.

    Raises Exception when pytest never reached the collection phase.
    """
    if _plugin is None:
        _plugin = TestCollector()
    pytestargs = _adjust_pytest_args(pytestargs)
    # We use this helper rather than "-pno:terminal" due to possible
    # platform-dependent issues.
    with (util.hide_stdio() if hidestdio else util.noop_cm()) as stdio:
        ec = _pytest_main(pytestargs, [_plugin])
    # See: https://docs.pytest.org/en/latest/usage.html#possible-exit-codes
    if ec == 5:
        # No tests were discovered.
        pass
    elif ec != 0:
        # Nonzero exit: report how to reproduce, replay captured output.
        print(('equivalent command: {} -m pytest {}'
               ).format(sys.executable, util.shlex_unsplit(pytestargs)))
        if hidestdio:
            print(stdio.getvalue(), file=sys.stderr)
            sys.stdout.flush()
        print('pytest discovery failed (exit code {})'.format(ec))
    if not _plugin._started:
        # Collection hooks never fired — treat as a hard failure.
        print(('equivalent command: {} -m pytest {}'
               ).format(sys.executable, util.shlex_unsplit(pytestargs)))
        if hidestdio:
            print(stdio.getvalue(), file=sys.stderr)
            sys.stdout.flush()
        raise Exception('pytest discovery did not start')
    return (
        _plugin._tests.parents,
        list(_plugin._tests),
    )
def _adjust_pytest_args(pytestargs):
    """Return a corrected copy of the given pytest CLI args."""
    # Build a fresh list so the caller's argument list is never mutated;
    # '--collect-only' must lead so discovery never executes tests
    # (duplicate occurrences of the flag are harmless).
    extra = list(pytestargs) if pytestargs else []
    adjusted = ['--collect-only'] + extra
    # TODO: pull in code from:
    #  src/client/testing/pytest/services/discoveryService.ts
    #  src/client/testing/pytest/services/argsService.ts
    return adjusted
class TestCollector(object):
    """This is a pytest plugin that collects the discovered tests.

    Results accumulate in ``self._tests`` (a ``discovery.DiscoveredTests``);
    ``self._started`` records whether pytest ever invoked our hooks, which
    ``discover()`` uses to distinguish "no tests" from "never ran".
    """

    @classmethod
    def parse_item(cls, item):
        # Delegate to the shared parser; returns (test, parents).
        return parse_item(item)

    def __init__(self, tests=None):
        if tests is None:
            tests = discovery.DiscoveredTests()
        self._tests = tests
        self._started = False

    # Relevant plugin hooks:
    #  https://docs.pytest.org/en/latest/reference.html#collection-hooks

    def pytest_collection_modifyitems(self, session, config, items):
        self._started = True
        # Rebuild the collection from scratch on every invocation.
        self._tests.reset()
        for item in items:
            test, parents = self.parse_item(item)
            self._tests.add_test(test, parents)

    # This hook is not specified in the docs, so we also provide
    # the "modifyitems" hook just in case.
    def pytest_collection_finish(self, session):
        self._started = True
        try:
            items = session.items
        except AttributeError:
            # TODO: Is there an alternative?
            return
        self._tests.reset()
        for item in items:
            test, parents = self.parse_item(item)
            self._tests.add_test(test, parents)
| 36.53913 | 117 | 0.649215 |
7957e1f970d03281921a94fec1f0395c7ffb5438 | 32,522 | py | Python | cabot3/cabotapp/views.py | senzil/cabot | a5d609b815d01dfac85f38e3fee6bc65d962e8bc | [
"MIT"
] | 3 | 2021-07-24T05:49:32.000Z | 2021-09-13T13:59:12.000Z | cabot3/cabotapp/views.py | senzil/cabot | a5d609b815d01dfac85f38e3fee6bc65d962e8bc | [
"MIT"
] | 5 | 2021-08-31T00:07:39.000Z | 2021-08-31T01:43:55.000Z | cabot3/cabotapp/views.py | senzil/cabot | a5d609b815d01dfac85f38e3fee6bc65d962e8bc | [
"MIT"
] | null | null | null | import json
import re
from datetime import date, datetime, timedelta
from itertools import dropwhile, groupby, zip_longest
import os
import requests
from .alert import AlertPlugin, AlertPluginUserData
from cabot3.cabotapp import alert
from cabot3.cabotapp.utils import cabot_needs_setup
from dateutil.relativedelta import relativedelta
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.urls import reverse, reverse_lazy
from django.core.validators import URLValidator
from django.db import transaction
from django.db.models.functions import Lower
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import redirect, render
from django.template import RequestContext, loader
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.timezone import utc
from django.views.generic import (CreateView, DeleteView, DetailView, ListView,
TemplateView, UpdateView, View)
from .models import (Instance, Service, Shift, StatusCheck, StatusCheckResult, UserProfile, get_custom_check_plugins,
get_duty_officers)
from django.apps import apps
from rest_framework.views import APIView
from rest_framework.response import Response
from .tasks import run_status_check as _run_status_check
class LoginRequiredMixin(object):
    """CBV mixin requiring an authenticated user for every HTTP method."""

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
@login_required
def subscriptions(request):
    """Render the subscriptions overview: all services and active users."""
    context = {
        'services': Service.objects.all(),
        'users': User.objects.filter(is_active=True),
        'duty_officers': get_duty_officers(),
        'custom_check_types': get_custom_check_plugins(),
    }
    return render(request, 'cabotapp/subscriptions.html', context)
@login_required
def run_status_check(request, pk):
    """Trigger an immediate run of one status check, then show its page."""
    _run_status_check(check_or_id=pk)
    target = reverse('check', kwargs={'pk': pk})
    return HttpResponseRedirect(target)
@login_required  # consistency fix: every other data-mutating view requires auth
def duplicate_instance(request, pk):
    """Clone an Instance and redirect to the edit page of the copy."""
    instance = Instance.objects.get(pk=pk)
    new_instance = instance.duplicate()
    # NOTE(review): this assumes Instance.duplicate() returns the new primary
    # key; if it returns a model object this should be new_instance.pk — confirm.
    return HttpResponseRedirect(reverse('update-instance', kwargs={'pk': new_instance}))
class BaseCommonView(object):
    """Mixin that injects the custom check-type list into every template context."""

    def render_to_response(self, context, *args, **kwargs):
        if context is None:
            context = {}
        # Templates use this to render extra check-type menu entries.
        context['custom_check_types'] = get_custom_check_plugins()
        return super(BaseCommonView, self).render_to_response(context, *args, **kwargs)
# Thin aliases pairing each generic Django CBV with BaseCommonView so all
# Cabot pages get the custom check types in their context.
class CommonCreateView(BaseCommonView, CreateView):
    pass


class CommonUpdateView(BaseCommonView, UpdateView):
    pass


class CommonDeleteView(BaseCommonView, DeleteView):
    pass


class CommonDetailView(BaseCommonView, DetailView):
    pass


class CommonListView(BaseCommonView, ListView):
    pass
class StatusCheckResultDetailView(LoginRequiredMixin, CommonDetailView):
    """Detail page for a single StatusCheckResult."""
    model = StatusCheckResult
    context_object_name = 'result'
class SymmetricalForm(forms.ModelForm):
    """ModelForm base that keeps reverse M2M relations in sync.

    Subclasses list reverse-relation field names in ``symmetrical_fields``
    (e.g. ``'service_set'``); on load the form seeds those fields from the
    instance, and on save it links the instance back to the selected objects.
    """

    symmetrical_fields = () # Iterable of 2-tuples (field, model)

    def __init__(self, *args, **kwargs):
        super(SymmetricalForm, self).__init__(*args, **kwargs)
        if self.instance and self.instance.pk:
            # Pre-populate each symmetrical field with the current relations.
            for field in self.symmetrical_fields:
                self.fields[field].initial = getattr(
                    self.instance, field).all()

    def save(self, commit=True):
        """Save the form; when the instance has a pk, re-link M2M relations.

        NOTE(review): only the FIRST selected service/instance is linked
        (``.first()``), and the bare ``except`` silently swallows every
        failure, including an AttributeError when nothing was selected —
        this looks like deliberate best-effort behaviour, but confirm.
        """
        instance = super(SymmetricalForm, self).save(commit=False)
        if commit:
            instance.save()
        if instance.pk:
            for field in self.symmetrical_fields:
                try:
                    if field == "service_set":
                        n_field = Service.objects.get(name=self.cleaned_data[field].first().name)
                        instance.service_set.add(n_field)
                    if field == "instance_set":
                        n_field = Instance.objects.get(id=self.cleaned_data[field].first().id)
                        instance.instance_set.add(n_field)
                except:
                    pass
            self.save_m2m()
        return instance
# Widget overrides shared by check forms: narrow name box, radio importance.
base_widgets = {
    'name': forms.TextInput(attrs={
        'style': 'width:30%',
    }),
    'importance': forms.RadioSelect(),
}
class StatusCheckForm(SymmetricalForm):
    """Base form for status checks; links checks to services and instances."""

    symmetrical_fields = ('service_set', 'instance_set')

    service_set = forms.ModelMultipleChoiceField(
        queryset=Service.objects.all(),
        required=False,
        help_text='Link to service(s).',
        widget=forms.SelectMultiple(
            attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            },
        )
    )

    instance_set = forms.ModelMultipleChoiceField(
        queryset=Instance.objects.all(),
        required=False,
        help_text='Link to instance(s).',
        widget=forms.SelectMultiple(
            attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            },
        )
    )
class InstanceForm(SymmetricalForm):
    """Create/edit form for an Instance, with chosen-style multiselect widgets."""

    symmetrical_fields = ('service_set',)
    service_set = forms.ModelMultipleChoiceField(
        queryset=Service.objects.all(),
        required=False,
        help_text='Link to service(s).',
        widget=forms.SelectMultiple(
            attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            },
        )
    )

    class Meta:
        model = Instance
        template_name = 'instance_form.html'
        fields = (
            'name',
            'address',
            'users_to_notify',
            'status_checks',
            'service_set',
        )
        widgets = {
            'name': forms.TextInput(attrs={'style': 'width: 70%;'}),
            'address': forms.TextInput(attrs={'style': 'width: 70%;'}),
            'status_checks': forms.SelectMultiple(attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            }),
            'service_set': forms.SelectMultiple(attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            }),
            'alerts': forms.SelectMultiple(attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            }),
            'users_to_notify': forms.SelectMultiple(attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            }),
        }

    def __init__(self, *args, **kwargs):
        # Only active users may be notified; order the picker by name.
        ret = super(InstanceForm, self).__init__(*args, **kwargs)
        self.fields['users_to_notify'].queryset = User.objects.filter(
            is_active=True).order_by('first_name', 'last_name')
        return ret
class ServiceForm(forms.ModelForm):
    """Create/edit form for a Service, including alert and runbook settings."""

    class Meta:
        model = Service
        template_name = 'service_form.html'
        fields = (
            'name',
            'url',
            'users_to_notify',
            'status_checks',
            'instances',
            'alerts',
            'alerts_enabled',
            'hackpad_id',
            'runbook_link',
            'is_public'
        )
        widgets = {
            'name': forms.TextInput(attrs={'style': 'width: 70%;'}),
            'url': forms.TextInput(attrs={'style': 'width: 70%;'}),
            'status_checks': forms.SelectMultiple(attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            }),
            'instances': forms.SelectMultiple(attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            }),
            'alerts': forms.SelectMultiple(attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            }),
            'users_to_notify': forms.SelectMultiple(attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            }),
            'hackpad_id': forms.TextInput(attrs={'style': 'width:70%;'}),
            'runbook_link': forms.TextInput(attrs={'style': 'width:70%;'}),
        }

    def __init__(self, *args, **kwargs):
        # Only active users may be notified; order the picker by name.
        ret = super(ServiceForm, self).__init__(*args, **kwargs)
        self.fields['users_to_notify'].queryset = User.objects.filter(
            is_active=True).order_by('first_name', 'last_name')
        return ret

    def clean_hackpad_id(self):
        """Accept only recovery-snippet links matching the configured whitelist."""
        value = self.cleaned_data['hackpad_id']
        if not value:
            return ''
        for pattern in settings.RECOVERY_SNIPPETS_WHITELIST:
            if re.match(pattern, value):
                return value
        raise ValidationError('Please specify a valid JS snippet link')

    def clean_runbook_link(self):
        """Accept an empty value or any syntactically valid URL."""
        value = self.cleaned_data['runbook_link']
        if not value:
            return ''
        try:
            URLValidator()(value)
            return value
        except ValidationError:
            raise ValidationError('Please specify a valid runbook link')
class StatusCheckReportForm(forms.Form):
    """Form selecting a service, its checks, and a date range for a report."""

    service = forms.ModelChoiceField(
        queryset=Service.objects.all(),
        widget=forms.HiddenInput
    )
    checks = forms.ModelMultipleChoiceField(
        queryset=StatusCheck.objects.all(),
        widget=forms.SelectMultiple(
            attrs={
                'data-rel': 'chosen',
                'style': 'width: 70%',
            },
        )
    )
    date_from = forms.DateField(label='From', widget=forms.DateInput(attrs={'class': 'datepicker'}))
    date_to = forms.DateField(label='To', widget=forms.DateInput(attrs={'class': 'datepicker'}))

    def get_report(self):
        """Annotate each selected check with its outage windows and success rate.

        Sets ``check.problems`` to a list of ``(start, end, duration)`` tuples
        (``end`` is None for an ongoing outage) and ``check.success_rate`` as a
        percentage. Must be called only after ``is_valid()``.
        """
        checks = self.cleaned_data['checks']
        now = timezone.now()
        for check in checks:
            # Group results of the check by status (failed alternating with succeeded),
            # take time of the first one in each group (starting from a failed group),
            # split them into pairs and form the list of problems.
            results = check.statuscheckresult_set.filter(
                time__gte=self.cleaned_data['date_from'],
                time__lt=self.cleaned_data['date_to'] + timedelta(days=1)
            ).order_by('time')
            # dropwhile discards the leading run of successes so the first
            # timestamp always marks the start of a failure window.
            groups = dropwhile(lambda item: item[0], groupby(results, key=lambda r: r.succeeded))
            times = [next(group).time for succeeded, group in groups]
            # Pair (fail-start, recovery); zip_longest pads an open outage with None.
            pairs = zip_longest(*([iter(times)] * 2))
            check.problems = [(start, end, (end or now) - start) for start, end in pairs]
            if results:
                check.success_rate = results.filter(succeeded=True).count() / float(len(results)) * 100
        return checks
class CheckCreateView(LoginRequiredMixin, CommonCreateView):
    """Create a status check, optionally pre-linked to a service/instance
    given via ``?service=``/``?instance=`` query parameters."""

    template_name = 'cabotapp/statuscheck_form.html'

    def form_valid(self, form):
        # Record who created the check.
        form.instance.created_by = self.request.user
        return super(CheckCreateView, self).form_valid(form)

    def get_initial(self):
        if self.initial:
            initial = self.initial
        else:
            initial = {}
        metric = self.request.GET.get('metric')
        if metric:
            initial['metric'] = metric
        service_id = self.request.GET.get('service')
        instance_id = self.request.GET.get('instance')

        # Silently ignore dangling ids so a stale link still shows the form.
        if service_id:
            try:
                service = Service.objects.get(id=service_id)
                initial['service_set'] = [service]
            except Service.DoesNotExist:
                pass

        if instance_id:
            try:
                instance = Instance.objects.get(id=instance_id)
                initial['instance_set'] = [instance]
            except Instance.DoesNotExist:
                pass

        return initial

    def get_success_url(self):
        # Return to whichever object the user came from, else the check list.
        if self.request.GET.get('service'):
            return reverse('service', kwargs={'pk': self.request.GET.get('service')})
        if self.request.GET.get('instance'):
            return reverse('instance', kwargs={'pk': self.request.GET.get('instance')})
        return reverse('checks')
class CheckUpdateView(LoginRequiredMixin, CommonUpdateView):
    """Edit a status check, returning to its detail page on success."""

    template_name = 'cabotapp/statuscheck_form.html'

    def get_success_url(self):
        return reverse('check', kwargs={'pk': self.object.id})
class StatusCheckListView(LoginRequiredMixin, CommonListView):
    """List all status checks with their linked services and instances."""

    model = StatusCheck

    def render_to_response(self, context, *args, **kwargs):
        # NOTE(review): the incoming `context` is discarded and rebuilt from
        # get_context_data(**kwargs) here — looks intentional but unusual.
        context = super(StatusCheckListView, self).get_context_data(**kwargs)
        if context is None:
            context = {}
        context['checks'] = StatusCheck.objects.all().order_by('name').prefetch_related('service_set', 'instance_set')
        return super(StatusCheckListView, self).render_to_response(context, *args, **kwargs)
class StatusCheckDeleteView(LoginRequiredMixin, CommonDeleteView):
    """Confirm-and-delete page for a status check."""
    model = StatusCheck
    success_url = reverse_lazy('checks')
    context_object_name = 'check'
    template_name = 'cabotapp/statuscheck_confirm_delete.html'
class StatusCheckDetailView(LoginRequiredMixin, CommonDetailView):
    """Status-check detail page with paginated recent results (25/page)."""

    model = StatusCheck
    context_object_name = 'check'
    template_name = 'cabotapp/statuscheck_detail.html'

    def render_to_response(self, context, *args, **kwargs):
        if context is None:
            context = {}
        checkresult_list = self.object.statuscheckresult_set.order_by(
            '-time_complete').all()
        paginator = Paginator(checkresult_list, 25)

        page = self.request.GET.get('page')
        try:
            checkresults = paginator.page(page)
        except PageNotAnInteger:
            # If page is not an integer, deliver first page.
            checkresults = paginator.page(1)
        except EmptyPage:
            # If page is out of range, deliver last page of results.
            checkresults = paginator.page(paginator.num_pages)

        context['checkresults'] = checkresults

        return super(StatusCheckDetailView, self).render_to_response(context, *args, **kwargs)
class UserProfileUpdateView(LoginRequiredMixin, View):
    """Redirect bare profile URLs to the 'General' alert-settings tab."""
    model = AlertPluginUserData

    def get(self, *args, **kwargs):
        return HttpResponseRedirect(reverse('update-alert-user-data', args=(self.kwargs['pk'], u'General')))
class UserProfileUpdateAlert(LoginRequiredMixin, View):
    """Edit one tab of a user's alert settings.

    ``alerttype == 'General'`` edits core user fields; any other value edits
    the matching per-plugin AlertPluginUserData record via a generated form.
    """

    template = loader.get_template('cabotapp/alertpluginuserdata_form.html')
    model = AlertPluginUserData

    def get(self, request, pk, alerttype):
        """Render the settings form for the requested tab."""
        try:
            profile = UserProfile.objects.get(user=pk)
        except UserProfile.DoesNotExist:
            # Lazily create a profile the first time the page is opened.
            user = User.objects.get(id=pk)
            profile = UserProfile(user=user)
            profile.save()

        # Presumably ensures per-plugin user-data rows exist — confirm.
        profile.user_data()

        if alerttype == u'General':
            form = GeneralSettingsForm(initial={
                'first_name': profile.user.first_name,
                'last_name': profile.user.last_name,
                'email_address': profile.user.email,
                'enabled': profile.user.is_active,
            })
        else:
            plugin_userdata = self.model.objects.get(title=alerttype, user=profile)
            form_model = get_object_form(type(plugin_userdata))
            form = form_model(instance=plugin_userdata)

        return render(request, self.template.template.name, {
            'form': form,
            'alert_preferences': profile.user_data(),
            'custom_check_types': get_custom_check_plugins(),
        })

    def post(self, request, pk, alerttype):
        """Persist the submitted settings and redirect back with a flash message."""
        profile = UserProfile.objects.get(user=pk)
        success = False
        if alerttype == u'General':
            form = GeneralSettingsForm(request.POST)
            if form.is_valid():
                profile.user.first_name = form.cleaned_data['first_name']
                profile.user.last_name = form.cleaned_data['last_name']
                profile.user.is_active = form.cleaned_data['enabled']
                profile.user.email = form.cleaned_data['email_address']
                profile.user.save()
                success = True
        else:
            plugin_userdata = self.model.objects.get(title=alerttype, user=profile)
            form_model = get_object_form(type(plugin_userdata))
            form = form_model(request.POST, instance=plugin_userdata)
            if form.is_valid():
                form.save()
                success = True

        if success:
            messages.add_message(request, messages.SUCCESS, 'Updated Successfully', extra_tags='success')
        else:
            messages.add_message(request, messages.ERROR, 'Error Updating Profile', extra_tags='danger')

        return HttpResponseRedirect(reverse('update-alert-user-data', args=(self.kwargs['pk'], alerttype)))
class PluginSettingsView(LoginRequiredMixin, View):
    """Edit global or per-plugin alert settings; 'global' is a special tab."""

    template = loader.get_template('cabotapp/plugin_settings_form.html')
    model = AlertPlugin

    def get(self, request, plugin_name):
        """Render the settings form plus an alert-test form for the plugin."""
        if plugin_name == u'global':
            form = CoreSettingsForm()
            alert_test_form = AlertTestForm()
        else:
            plugin = self.model.objects.get(title=plugin_name)
            form_model = get_object_form(type(plugin))
            form = form_model(instance=plugin)
            alert_test_form = AlertTestPluginForm(initial = {
                'alert_plugin': plugin
            })

        return render(request, self.template.template.name, {
            'form': form,
            'plugins': AlertPlugin.objects.all(),
            'plugin_name': plugin_name,
            'alert_test_form': alert_test_form,
            'custom_check_types': get_custom_check_plugins()
        })

    def post(self, request, plugin_name):
        """Save the submitted settings and redirect back with a flash message."""
        if plugin_name == u'global':
            form = CoreSettingsForm(request.POST)
        else:
            plugin = self.model.objects.get(title=plugin_name)
            form_model = get_object_form(type(plugin))
            form = form_model(request.POST, instance=plugin)

        if form.is_valid():
            form.save()
            messages.add_message(request, messages.SUCCESS, 'Updated Successfully', extra_tags='success')
        else:
            messages.add_message(request, messages.ERROR, 'Error Updating Plugin', extra_tags='danger')

        return HttpResponseRedirect(reverse('plugin-settings', args=(plugin_name,)))
def get_object_form(model_type):
    """Build a ModelForm class for *model_type* exposing all fields.

    NOTE(review): ``is_valid`` is overridden to always return True, i.e.
    validation is deliberately bypassed for plugin/user-data forms — any
    field errors are silently ignored on save.
    """
    class AlertPreferencesForm(forms.ModelForm):
        class Meta:
            model = model_type
            fields = '__all__'

        def is_valid(self):
            return True
    return AlertPreferencesForm
class AlertTestForm(forms.Form):
    """Form to fire a test alert for a service with a chosen status transition."""

    action = reverse_lazy('alert-test')
    service = forms.ModelChoiceField(
        queryset=Service.objects.all(),
        widget=forms.Select(attrs={
            'data-rel': 'chosen',
        })
    )

    STATUS_CHOICES = (
        (Service.PASSING_STATUS, 'Passing'),
        (Service.WARNING_STATUS, 'Warning'),
        (Service.ERROR_STATUS, 'Error'),
        (Service.CRITICAL_STATUS, 'Critical'),
    )

    old_status = forms.ChoiceField(
        choices=STATUS_CHOICES,
        initial=Service.PASSING_STATUS,
        widget=forms.Select(attrs={
            'data-rel': 'chosen',
        })
    )

    new_status = forms.ChoiceField(
        choices=STATUS_CHOICES,
        initial=Service.ERROR_STATUS,
        widget=forms.Select(attrs={
            'data-rel': 'chosen',
        })
    )
class AlertTestPluginForm(AlertTestForm):
    """Variant of AlertTestForm targeting a single plugin instead of a service."""

    action = reverse_lazy('alert-test-plugin')
    service = None  # the plugin test creates its own throwaway service
    alert_plugin = forms.ModelChoiceField(
        queryset=AlertPlugin.objects.filter(enabled=True),
        widget=forms.HiddenInput
    )
class AlertTestView(LoginRequiredMixin, View):
    """Fire a test alert to the requesting user, then roll everything back."""

    def trigger_alert_to_user(self, service, user, old_status, new_status):
        """
        Clear out all service users and duty shifts, and disable all fallback users.
        Then add a single shift for this user, and add this user to users-to-notify.
        This should ensure we never alert anyone except the user triggering the alert test.
        """
        with transaction.atomic():
            # All mutations below are temporary: the savepoint is rolled back
            # after service.alert() has been sent, so nothing persists.
            sid = transaction.savepoint()

            service.update_status()
            service.status_checks.update(active=False)
            service.overall_status = new_status
            service.old_overall_status = old_status
            service.last_alert_sent = None
            # Synthetic check/result so the alert templates have data to show.
            check = StatusCheck(name='ALERT_TEST')
            check.save()
            StatusCheckResult.objects.create(
                status_check=check,
                time=timezone.now(),
                time_complete=timezone.now(),
                succeeded=new_status == Service.PASSING_STATUS)
            check.last_run = timezone.now()
            check.save()
            service.status_checks.add(check)
            service.users_to_notify.clear()
            service.users_to_notify.add(user)
            service.unexpired_acknowledgements().delete()
            Shift.objects.update(deleted=True)
            UserProfile.objects.update(fallback_alert_user=False)
            Shift(
                start=timezone.now() - timedelta(days=1),
                end=timezone.now() + timedelta(days=1),
                uid='test-shift',
                last_modified=timezone.now(),
                user=user
            ).save()
            service.alert()
            transaction.savepoint_rollback(sid)

    def post(self, request):
        form = AlertTestForm(request.POST)

        if form.is_valid():
            data = form.clean()
            service = data['service']

            self.trigger_alert_to_user(service, request.user, data['old_status'], data['new_status'])

            return JsonResponse({"result": "ok"})
        return JsonResponse({"result": "error"}, status=400)
class AlertTestPluginView(AlertTestView):
    """Like AlertTestView, but builds a throwaway service for one plugin."""

    def post(self, request):
        form = AlertTestPluginForm(request.POST)

        if form.is_valid():
            data = form.clean()

            with transaction.atomic():
                # Temporary service + alert; rolled back after the test fires.
                sid = transaction.savepoint()
                service = Service.objects.create(
                    name='test-alert-service'
                )
                service.alerts.add(data['alert_plugin'])
                self.trigger_alert_to_user(service, request.user, data['old_status'], data['new_status'])
                transaction.savepoint_rollback(sid)

            return JsonResponse({"result": "ok"})
        return JsonResponse({"result": "error"}, status=400)
class CoreSettingsForm(forms.Form):
    """Placeholder for global settings; currently has no fields."""
    pass
class GeneralSettingsForm(forms.Form):
    """Basic user fields shown on the 'General' profile-settings tab."""

    first_name = forms.CharField(label='First name', max_length=30, required=False)
    last_name = forms.CharField(label='Last name', max_length=30, required=False)
    email_address = forms.CharField(label='Email Address', max_length=75,
                                    required=False)  # We use 75 and not the 254 because Django 1.6.8 only supports
    # 75. See commit message for details.
    enabled = forms.BooleanField(label='Enabled', required=False)
class InstanceListView(LoginRequiredMixin, CommonListView):
    """List all instances ordered by name, with checks prefetched."""
    model = Instance
    context_object_name = 'instances'

    def get_queryset(self):
        return Instance.objects.all().order_by('name').prefetch_related('status_checks')
class ServiceListView(LoginRequiredMixin, CommonListView):
    """List all services ordered by name, with checks prefetched."""
    model = Service
    context_object_name = 'services'

    def get_queryset(self):
        return Service.objects.all().order_by('name').prefetch_related('status_checks')
class ServicePublicListView(TemplateView):
    """Unauthenticated status page listing public services with alerts enabled."""

    model = Service
    context_object_name = 'services'
    template_name = "cabotapp/service_public_list.html"

    def get_context_data(self, **kwargs):
        context = super(ServicePublicListView, self).get_context_data(**kwargs)
        # Case-insensitive name ordering for the public page.
        context[self.context_object_name] = Service.objects\
            .filter(is_public=True, alerts_enabled=True)\
            .order_by(Lower('name')).prefetch_related('status_checks')
        return context
class InstanceDetailView(LoginRequiredMixin, CommonDetailView):
    """Instance detail page with a pre-filled report form for last month-to-date."""

    model = Instance
    context_object_name = 'instance'

    def get_context_data(self, **kwargs):
        context = super(InstanceDetailView, self).get_context_data(**kwargs)
        # First day of the current month.
        date_from = date.today() - relativedelta(day=1)
        context['report_form'] = StatusCheckReportForm(initial={
            'checks': self.object.status_checks.all(),
            'service': self.object,
            'date_from': date_from,
            'date_to': date_from + relativedelta(months=1) - relativedelta(days=1)
        })
        return context
class ServiceDetailView(LoginRequiredMixin, CommonDetailView):
    """Service detail page with a pre-filled report form for last month-to-date."""

    model = Service
    context_object_name = 'service'

    def get_context_data(self, **kwargs):
        context = super(ServiceDetailView, self).get_context_data(**kwargs)
        # First day of the current month.
        date_from = date.today() - relativedelta(day=1)
        context['report_form'] = StatusCheckReportForm(initial={
            # NOTE(review): StatusCheckReportForm declares no 'alerts' field,
            # so this initial value appears to be dead — confirm.
            'alerts': self.object.alerts.all(),
            'checks': self.object.status_checks.all(),
            'service': self.object,
            'date_from': date_from,
            'date_to': date_from + relativedelta(months=1) - relativedelta(days=1)
        })
        return context
class InstanceCreateView(LoginRequiredMixin, CommonCreateView):
    """Create an Instance; adds a default ICMP ping check when the plugin exists."""

    model = Instance
    form_class = InstanceForm

    def form_valid(self, form):
        ret = super(InstanceCreateView, self).form_valid(form)
        try:
            icmp = apps.get_model('icmp', 'ICMPStatusCheck')
        except LookupError:
            # Narrowed from a bare `except:`: apps.get_model raises LookupError
            # when the icmp plugin app/model is not installed.
            icmp = None
        if icmp:
            if self.object.status_checks.filter(polymorphic_ctype__model='icmpstatuscheck').count() == 0:
                self.generate_default_ping_check(self.object)
        return ret

    def generate_default_ping_check(self, obj):
        """Attach a default ping check to *obj* (requires the icmp plugin app)."""
        # Bug fix: the original referenced the undefined name ICMPStatusCheck
        # (never imported), raising NameError whenever this path ran.
        # Resolve the model through the app registry instead.
        ICMPStatusCheck = apps.get_model('icmp', 'ICMPStatusCheck')
        pc = ICMPStatusCheck(
            name="Default Ping Check for %s" % obj.name,
            frequency=5,
            importance=Service.ERROR_STATUS,
            debounce=0,
            created_by=None,
        )
        pc.save()
        obj.status_checks.add(pc)

    def get_success_url(self):
        return reverse('instance', kwargs={'pk': self.object.id})

    def get_initial(self):
        """Pre-link the new instance to a service given via ``?service=``."""
        if self.initial:
            initial = self.initial
        else:
            initial = {}
        service_id = self.request.GET.get('service')

        if service_id:
            try:
                service = Service.objects.get(id=service_id)
                initial['service_set'] = [service]
            except Service.DoesNotExist:
                # Stale link — just show an unlinked form.
                pass

        return initial
@login_required
def acknowledge_alert(request, pk):
    """Record the current user's acknowledgement on a service, then reload it."""
    Service.objects.get(pk=pk).acknowledge_alert(user=request.user)
    return HttpResponseRedirect(reverse('service', kwargs={'pk': pk}))
@login_required
def remove_acknowledgement(request, pk):
    """Remove the current user's acknowledgement from a service, then reload it."""
    Service.objects.get(pk=pk).remove_acknowledgement(user=request.user)
    return HttpResponseRedirect(reverse('service', kwargs={'pk': pk}))
class ServiceCreateView(LoginRequiredMixin, CommonCreateView):
    """Create a new Service and redirect to its detail page."""

    # Cleanup: removed a no-op __init__ override that only called super().
    model = Service
    form_class = ServiceForm

    def get_success_url(self):
        return reverse('service', kwargs={'pk': self.object.id})
class InstanceUpdateView(LoginRequiredMixin, CommonUpdateView):
    """Edit an Instance, returning to its detail page on success."""
    model = Instance
    form_class = InstanceForm

    def get_success_url(self):
        return reverse('instance', kwargs={'pk': self.object.id})
class ServiceUpdateView(LoginRequiredMixin, CommonUpdateView):
    """Edit a Service, returning to its detail page on success."""
    model = Service
    form_class = ServiceForm

    def get_success_url(self):
        return reverse('service', kwargs={'pk': self.object.id})
class ServiceDeleteView(LoginRequiredMixin, CommonDeleteView):
    """Confirm-and-delete page for a Service."""
    model = Service
    success_url = reverse_lazy('services')
    context_object_name = 'service'
    template_name = 'cabotapp/service_confirm_delete.html'
class InstanceDeleteView(LoginRequiredMixin, CommonDeleteView):
    """Confirm-and-delete page for an Instance."""
    model = Instance
    success_url = reverse_lazy('instances')
    context_object_name = 'instance'
    template_name = 'cabotapp/instance_confirm_delete.html'
class ShiftListView(LoginRequiredMixin, CommonListView):
    """List upcoming (non-deleted) duty shifts ordered by start time."""
    model = Shift
    context_object_name = 'shifts'

    def get_queryset(self):
        # Aware UTC "now"; only shifts that have not yet ended.
        return Shift.objects.filter(
            end__gt=datetime.utcnow().replace(tzinfo=utc),
            deleted=False).order_by('start')
class StatusCheckReportView(LoginRequiredMixin, TemplateView):
    """Render an availability report for selected checks of a service."""

    template_name = 'cabotapp/statuscheck_report.html'

    def get_context_data(self, **kwargs):
        form = StatusCheckReportForm(self.request.GET)
        if form.is_valid():
            return {'checks': form.get_report(), 'service': form.cleaned_data['service']}
        # Bug fix: the original implicitly returned None for invalid input,
        # which is not a valid template context; render an empty report instead.
        return {'checks': [], 'service': None}
class SetupForm(forms.Form):
    """First-run form collecting credentials for the initial superuser."""
    username = forms.CharField(label='Username', max_length=100, required=True)
    email = forms.EmailField(label='Email', max_length=200, required=False)
    password = forms.CharField(label='Password', required=True, widget=forms.PasswordInput())
class SetupView(View):
    """First-run setup page: creates the initial superuser, then disables itself.

    Both handlers redirect to the login page once setup has already happened
    (``cabot_needs_setup`` returns False), so this cannot be used twice.
    """

    template = loader.get_template('cabotapp/setup.html')

    def get(self, request):
        if not cabot_needs_setup():
            return redirect('login')

        form = SetupForm(initial={
            'username': 'admin',
        })

        return HttpResponse(self.template.render({'form': form}, request))

    def post(self, request):
        if not cabot_needs_setup():
            return redirect('login')

        form = SetupForm(request.POST)
        if form.is_valid():
            get_user_model().objects.create_superuser(
                username=form.cleaned_data['username'],
                email=form.cleaned_data['email'],
                password=form.cleaned_data['password'],
            )

            return redirect('login')

        return HttpResponse(self.template.render({'form': form}, request), status=400)
class OnCallView(APIView):
    """JSON endpoint listing current duty officers and their alert-plugin data."""

    queryset = User.objects

    def get(self, request):
        payload = []
        for officer in get_duty_officers():
            # Map plugin title -> serialized per-user plugin settings.
            plugin_data = {
                userdata.title: userdata.serialize()
                for userdata in officer.profile.alertpluginuserdata_set.all()
            }
            payload.append({
                "username": officer.username,
                "email": officer.email,
                "mobile_number": officer.profile.mobile_number,
                "plugin_data": plugin_data,
            })
        return Response(payload)
# Misc JSON api and other stuff
def checks_run_recently(request):
    """Health endpoint: report whether any check completed in the last 10 minutes."""
    cutoff = datetime.utcnow().replace(tzinfo=utc) - timedelta(minutes=10)
    ran_recently = StatusCheckResult.objects.filter(time_complete__gte=cutoff).exists()
    if ran_recently:
        return HttpResponse('Checks running')
    return HttpResponse('Checks not running')
def about(request):
    """Render the about page with the running Cabot version."""
    from cabot3 import version
    context = {'cabot_version': version}
    return render(request, 'cabotapp/about.html', context)
def jsonify(d):
    """Serialize *d* as JSON and wrap it in an HttpResponse."""
    body = json.dumps(d)
    return HttpResponse(body, content_type='application/json')
@login_required
def graphite_api_data(request):
    """JSON endpoint backing the metric picker.

    Tries to fetch data for ``?metric=`` (optionally at ``?frequency=`` minute
    resolution); if none is found, falls back to suggesting matching metric
    names. Network failures on the fallback are reported as an error payload.
    """
    # Bug fix: get_data / get_matching_metrics were referenced but never
    # imported at module level, so this view raised NameError when hit.
    # NOTE(review): assumed to live in the sibling graphite module — confirm.
    from .graphite import get_data, get_matching_metrics

    metric = request.GET.get('metric')
    if request.GET.get('frequency'):
        mins_to_check = int(request.GET.get('frequency'))
    else:
        mins_to_check = None
    data = None
    matching_metrics = None
    try:
        data = get_data(metric, mins_to_check)
    except requests.exceptions.RequestException:
        # Best-effort: fall through and offer matching metric names instead.
        pass
    if not data:
        try:
            matching_metrics = get_matching_metrics(metric)
        except requests.exceptions.RequestException as e:
            return jsonify({'status': 'error', 'message': str(e)})
        matching_metrics = {'metrics': matching_metrics}
    return jsonify({'status': 'ok', 'data': data, 'matchingMetrics': matching_metrics})
| 33.458848 | 118 | 0.625392 |
7957e34a33360ba1d6953ee06f37f4d191ff2d53 | 11,929 | py | Python | rasa_nlu_gao/models/nlp_architect/utils/text.py | azuredsky/rasa_nlu_gq | fee512aa34d5b091d6ce988f1e2901df2ee0a4fc | [
"Apache-2.0"
] | 1 | 2019-03-12T09:46:37.000Z | 2019-03-12T09:46:37.000Z | rasa_nlu_gao/models/nlp_architect/utils/text.py | azuredsky/rasa_nlu_gq | fee512aa34d5b091d6ce988f1e2901df2ee0a4fc | [
"Apache-2.0"
] | null | null | null | rasa_nlu_gao/models/nlp_architect/utils/text.py | azuredsky/rasa_nlu_gq | fee512aa34d5b091d6ce988f1e2901df2ee0a4fc | [
"Apache-2.0"
] | null | null | null | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import re
import sys
from os import path
from typing import List, Tuple
import spacy
from nltk import WordNetLemmatizer
from nltk.stem.snowball import EnglishStemmer
from spacy.cli.download import download as spacy_download
from spacy.lang.en import LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES
from spacy.lemmatizer import Lemmatizer
from rasa_nlu_gao.models.nlp_architect.utils.generic import license_prompt
class Vocabulary:
    """
    A vocabulary that maps words to ints (storing a vocabulary)
    """

    def __init__(self, start=0):
        # word -> id and id -> word mirrors; `next` is the next free id.
        self._vocab = {}
        self._rev_vocab = {}
        self.next = start

    def add(self, word):
        """
        Add word to vocabulary

        Args:
            word (str): word to add

        Returns:
            int: id of added word
        """
        # Idiom fix: membership-test the dict directly (not .keys()) and
        # look the id up only once instead of twice.
        wid = self._vocab.get(word)
        if wid is None:
            wid = self.next
            self._vocab[word] = wid
            self._rev_vocab[wid] = word
            self.next += 1
        return wid

    def word_id(self, word):
        """
        Get the word_id of given word

        Args:
            word (str): word from vocabulary

        Returns:
            int: int id of word, or None if the word is unknown
        """
        return self._vocab.get(word, None)

    def __getitem__(self, item):
        """
        Get the word_id of given word (same as `word_id`)
        """
        return self.word_id(item)

    def __len__(self):
        return len(self._vocab)

    def __iter__(self):
        # Iterating a dict yields its keys; no need for .keys().
        yield from self._vocab

    @property
    def max(self):
        # The next id that would be assigned (== highest used id + 1).
        return self.next

    def id_to_word(self, wid):
        """
        Word-id to word (string)

        Args:
            wid (int): word id

        Returns:
            str: string of given word id
        """
        return self._rev_vocab.get(wid)

    @property
    def vocab(self):
        """
        dict: get the dict object of the vocabulary
        """
        return self._vocab

    def add_vocab_offset(self, offset):
        """
        Adds an offset to the ints of the vocabulary

        Args:
            offset (int): an int offset
        """
        self.next += offset
        # Rebuild both mirrors with shifted ids.
        self._vocab = {word: wid + offset for word, wid in self._vocab.items()}
        self._rev_vocab = {wid: word for word, wid in self._vocab.items()}

    def reverse_vocab(self):
        """
        Return the vocabulary as a reversed dict object

        Returns:
            dict: reversed vocabulary object
        """
        return self._rev_vocab
def try_to_load_spacy(model_name):
    """Return True if the given spaCy model can be loaded, False otherwise."""
    try:
        spacy.load(model_name)
        return True
    except OSError:
        # spaCy raises OSError when the model is not installed.
        return False
class SpacyInstance:
    """
    Spacy pipeline wrapper which prompts user for model download authorization.

    Args:
        model (str, optional): spacy model name (default: english small model)
        disable (list of string, optional): pipeline annotators to disable
            (default: [])
        display_prompt (bool, optional): flag to display/skip license prompt
    """

    def __init__(self, model='en', disable=None, display_prompt=True):
        if disable is None:
            disable = []
        try:
            self._parser = spacy.load(model, disable=disable)
        except OSError:
            # Model missing locally: optionally prompt (exit on refusal),
            # then download and retry the load.
            url = 'https://spacy.io/models'
            if display_prompt and license_prompt('Spacy {} model'.format(model), url) is False:
                sys.exit(0)
            spacy_download(model)
            self._parser = spacy.load(model, disable=disable)

    @property
    def parser(self):
        """return Spacy's instance parser"""
        return self._parser

    def tokenize(self, text):
        """
        Tokenize a sentence into tokens

        Args:
            text (str): text to tokenize

        Returns:
            list: a list of str tokens of input
        """
        # pylint: disable=not-callable
        return [t.text for t in self.parser(text)]
# Shared NLP resources, initialized once at import time and reused by the
# normalizer functions below.
stemmer = EnglishStemmer()
lemmatizer = WordNetLemmatizer()
spacy_lemmatizer = Lemmatizer(LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES)
# Token-splitting pattern used by the normalizers: spaces and common
# in-word punctuation (hyphen, comma, semicolon, period, @, &, underscore).
p = re.compile(r'[ \-,;.@&_]')
class Stopwords(object):
    """
    Stop words list class, lazily loaded from resources/stopwords.txt.
    """
    # Cached list of lower-cased stop words; populated on first access.
    stop_words = None
    @staticmethod
    def get_words():
        """Return the stop-word list, loading it from disk on first call."""
        if Stopwords.stop_words is None:
            sw_path = path.join(path.dirname(path.realpath(__file__)),
                                'resources',
                                'stopwords.txt')
            with open(sw_path) as fp:
                Stopwords.stop_words = [line.strip().lower() for line in fp]
        return Stopwords.stop_words
def simple_normalizer(text):
    """
    Simple text normalizer. Runs each token of a phrase thru wordnet
    lemmatizer and a stemmer.

    Single-token, all-uppercase words ending in 'S' (e.g. plural acronyms)
    are returned unchanged.
    """
    skip_normalization = (str(text).isupper()
                          and str(text).endswith('S')
                          and len(text.split()) == 1)
    if not skip_normalization:
        tokens = [tok for tok in p.split(text.strip()) if len(tok) != 0]
        text = ' '.join(stemmer.stem(lemmatizer.lemmatize(tok))
                        for tok in tokens)
    return text
def spacy_normalizer(text, lemma=None):
    """
    Simple text normalizer using the spacy lemmatizer. Runs each token of a
    phrase thru a lemmatizer and a stemmer.

    Single-token, all-uppercase words ending in 'S' (e.g. plural acronyms)
    are returned unchanged.

    Arguments:
        text (string): the text to normalize.
        lemma (string): pre-computed lemma of the given text; when supplied
            only the stemmer runs (on the lemma's tokens).
    """
    skip_normalization = (str(text).isupper()
                          and str(text).endswith('S')
                          and len(text.split()) == 1)
    if not skip_normalization:
        tokens = [tok for tok in p.split(text.strip()) if len(tok) != 0]
        if lemma:
            lemma = lemma.split(' ')
            text = ' '.join(stemmer.stem(part) for part in lemma)
        else:
            text = ' '.join(stemmer.stem(spacy_lemmatizer(tok, u'NOUN')[0])
                            for tok in tokens)
    return text
def read_sequential_tagging_file(file_path, ignore_line_patterns=None):
    """
    Read a tab separated sequential tagging file.

    Sentences are separated by blank lines; each non-blank line is split on
    whitespace into a tuple of (token, tag, ...).

    Args:
        file_path (str): input file path
        ignore_line_patterns (list, optional): list of string patterns; any
            line containing one of them is dropped before parsing

    Returns:
        list of list of tuples: one inner list per sentence
    """
    if ignore_line_patterns:
        assert isinstance(ignore_line_patterns, list), 'ignore_line_patterns must be a list'
    with open(file_path, encoding='utf-8') as fp:
        lines = [line.strip() for line in fp.readlines()]
    if ignore_line_patterns:
        for pattern in ignore_line_patterns:
            lines = [line for line in lines if pattern not in line]
    rows = [tuple(line.split()) for line in lines]
    # split the flat row list into sentences at empty rows
    sentences = []
    current = []
    for row in rows:
        if len(row) == 0:
            sentences.append(current)
            current = []
        else:
            current.append(row)
    if len(current) > 0:
        sentences.append(current)
    return sentences
def word_vector_generator(data, lower=False, start=0):
    """
    Word vector generator util.

    Encodes a list of tokenized sentences as lists of int word ids, building
    the vocabulary on the fly.

    Arguments:
        data (list): list of list of strings
        lower (bool, optional): lower-case tokens before encoding
        start (int, optional): vocabulary index start integer

    Returns:
        tuple: (list of list of int ids, Vocabulary of the detected words)
    """
    vocab = Vocabulary(start)
    data_vec = []
    for sentence in data:
        sentence_vec = []
        for token in sentence:
            word = token.lower() if lower else token
            wid = vocab[word]
            if wid is None:
                wid = vocab.add(word)
            sentence_vec.append(wid)
        data_vec.append(sentence_vec)
    return data_vec, vocab
def character_vector_generator(data, start=0):
    """
    Character word vector generator util.

    Encodes each word of each sentence as a list of int character ids,
    building the character vocabulary on the fly.

    Arguments:
        data (list): list of list of strings
        start (int, optional): vocabulary index start integer

    Returns:
        tuple: (list of list of list of int ids, Vocabulary of characters)
    """
    vocab = Vocabulary(start)
    data_vec = []
    for sentence in data:
        sentence_vec = []
        for word in sentence:
            char_ids = []
            for ch in word:
                cid = vocab[ch]
                if cid is None:
                    cid = vocab.add(ch)
                char_ids.append(cid)
            sentence_vec.append(char_ids)
        data_vec.append(sentence_vec)
    return data_vec, vocab
def extract_nps(annotation_list, text=None):
    """
    Extract Noun Phrases from given phrase annotations (BIO style 'B-NP' /
    'I-NP' tags).

    Args:
        annotation_list (list): a list of annotation tags in str
        text (list, optional): a list of token texts in str

    Returns:
        list of (start, end) index pairs of noun phrases; when ``text`` is
        provided, a list of the noun phrase strings instead
    """
    np_markers = []
    for start, tag in enumerate(annotation_list):
        if tag != 'B-NP':
            continue
        end = start + 1
        while end < len(annotation_list) and annotation_list[end] == 'I-NP':
            end += 1
        np_markers.append((start, end))
    if text:
        assert len(text) == len(annotation_list), 'annotations/text length mismatch'
        return [' '.join(text[s:e]) for s, e in np_markers]
    return np_markers
def bio_to_spans(text: List[str], tags: List[str]) -> List[Tuple[int, int, str]]:
    """
    Convert a BIO tagged list of strings into character spans.

    Character offsets assume the tokens are joined with single spaces, and
    that ``text`` and ``tags`` have matching lengths.

    Args:
        text: list of words
        tags: list of tags (entries may be None; those are skipped)

    Returns:
        list of (start_char, end_char, label) tuples of detected spans
    """
    # character offset of each token under single-space joining
    offsets = []
    cursor = 0
    for token in text:
        offsets.append(cursor)
        cursor += len(token) + 1
    spans = []
    for idx, tag in enumerate(tags):
        if tag is None or not tag.startswith('B-'):
            continue
        # extend the span through the following I- tags
        end_idx = idx
        while end_idx + 1 < len(tags) and tags[end_idx + 1].startswith('I-'):
            end_idx += 1
        start_char = offsets[idx]
        end_char = offsets[end_idx] + len(text[end_idx])
        spans.append((start_char, end_char, tag[2:]))
    return spans
| 30.2 | 99 | 0.559142 |
7957e371bf22b7587c0aa49a6fc0674156883f1e | 2,676 | py | Python | setup.py | sj6077/DeepSpeed | c70b472a68bc9ca387b14a1b35814c582d0ec94b | [
"MIT"
] | 1 | 2020-05-11T03:24:15.000Z | 2020-05-11T03:24:15.000Z | setup.py | sj6077/DeepSpeed | c70b472a68bc9ca387b14a1b35814c582d0ec94b | [
"MIT"
] | null | null | null | setup.py | sj6077/DeepSpeed | c70b472a68bc9ca387b14a1b35814c582d0ec94b | [
"MIT"
] | null | null | null | """
Copyright 2020 The Microsoft DeepSpeed Team
DeepSpeed library
Create a new wheel via the following command: python setup.py bdist_wheel
The wheel will be located at: dist/*.whl
"""
import os
import torch
from deepspeed import __version__ as ds_version
from setuptools import setup, find_packages
from torch.utils.cpp_extension import CUDAExtension, BuildExtension
# Build configuration: register torch's CUDA-aware build_ext.
cmdclass = {}
ext_modules = []
cmdclass['build_ext'] = BuildExtension
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if not torch.cuda.is_available():
    # Fix to allow docker builds, similar to https://github.com/NVIDIA/apex/issues/486
    print(
        "[WARNING] Torch did not find cuda available, if cross-compling or running with cpu only "
        "you can ignore this message. Adding compute capability for Pascal, Volta, and Turing "
        "(compute capabilities 6.0, 6.1, 6.2)")
    if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
        os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
# Fix from apex that might be relevant for us as well, related to https://github.com/NVIDIA/apex/issues/456
# Version-gated preprocessor macros so the C++/CUDA sources can adapt to
# the installed torch API.
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
    version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
    version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
    version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
# Fused LAMB optimizer CUDA extension.
ext_modules.append(
    CUDAExtension(name='fused_lamb_cuda',
                  sources=['csrc/fused_lamb_cuda.cpp',
                           'csrc/fused_lamb_cuda_kernel.cu'],
                  extra_compile_args={
                      'cxx': [
                          '-O3',
                      ] + version_dependent_macros,
                      'nvcc': ['-O3',
                               '--use_fast_math'] + version_dependent_macros
                  }))
setup(name='deepspeed',
      version=ds_version,
      description='DeepSpeed library',
      author='DeepSpeed Team',
      author_email='deepspeed@microsoft.com',
      url='http://aka.ms/deepspeed',
      packages=find_packages(exclude=["docker",
                                      "third_party",
                                      "csrc"]),
      scripts=['bin/deepspeed',
               'bin/deepspeed.pt',
               'bin/ds',
               'bin/ds_ssh'],
      classifiers=['Programming Language :: Python :: 3.6'],
      ext_modules=ext_modules,
      cmdclass=cmdclass)
| 36.657534 | 107 | 0.626682 |
7957e492189fddd46325789ca2dbdc47922b7552 | 1,468 | py | Python | guts/i18n.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | [
"Apache-2.0"
] | null | null | null | guts/i18n.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | [
"Apache-2.0"
] | null | null | null | guts/i18n.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | [
"Apache-2.0"
] | 1 | 2022-03-03T05:41:31.000Z | 2022-03-03T05:41:31.000Z | # Copyright (c) 2015 Aptira Pty Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html .
"""
import oslo_i18n as i18n
# Translation domain used for all guts message catalogs.
DOMAIN = 'guts'
_translators = i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def enable_lazy(enable=True):
    """Enable (or disable) lazy translation; delegates to oslo_i18n."""
    return i18n.enable_lazy(enable)
def translate(value, user_locale=None):
    """Translate *value* into *user_locale*; delegates to oslo_i18n."""
    return i18n.translate(value, user_locale)
def get_available_languages():
    """Return the languages with translations available for this domain."""
    return i18n.get_available_languages(DOMAIN)
| 28.230769 | 78 | 0.746594 |
7957e49539d2b504a28ba959e0d1750f21d0a871 | 687 | py | Python | runscript_weak.py | SudeepDasari/video_prediction-1 | ef0953b514aa1b7a1f5e96fd30aebef01334fb2d | [
"MIT"
] | null | null | null | runscript_weak.py | SudeepDasari/video_prediction-1 | ef0953b514aa1b7a1f5e96fd30aebef01334fb2d | [
"MIT"
] | null | null | null | runscript_weak.py | SudeepDasari/video_prediction-1 | ef0953b514aa1b7a1f5e96fd30aebef01334fb2d | [
"MIT"
] | null | null | null | # strong scaling
import numpy as np
import os
# GPU counts to sweep over; batch size grows with GPU count (16 per GPU),
# i.e. this sweep keeps per-GPU work constant (weak scaling).
ngpus = np.array([1,2,4,8])
bsizes = 16*ngpus
for i, g in enumerate(ngpus):
    # Build the CUDA_VISIBLE_DEVICES list, e.g. "0,1,2,3" for g == 4.
    indexlist = [str(i_gpu) for i_gpu in range(g)]
    gpustr = ','.join(indexlist)
    bsize = bsizes[i]
    # NOTE(review): command built by string interpolation and run through
    # os.system -- fine for a trusted local benchmark, not for untrusted input.
    cmd = 'CUDA_VISIBLE_DEVICES={} python scripts/train.py --input_dir /mnt/pushing_data/cartgripper_updown_sact/train --dataset cartgripper --model savp --model_hparams_dict hparams/bair/ours_deterministic_l1/model_hparams.json --model_hparams tv_weight=0.001,transformation=flow,last_frames=2,generate_scratch_image=false,batch_size={} --summary_freq 10 --timing_file timeb{}_{}.txt'.format(gpustr, bsize, bsize, gpustr)
    print(cmd)
    os.system(cmd)
7957e54dd8866aacb1eb46f69bf327bdcab0f6b9 | 108 | py | Python | frameworks/hdfs/tests/config.py | krisis/dcos-commons | 715d27f3e43a5e25b8ecb4beed97333b136fdd9a | [
"Apache-2.0"
] | 1 | 2021-01-06T21:14:00.000Z | 2021-01-06T21:14:00.000Z | frameworks/hdfs/tests/config.py | krisis/dcos-commons | 715d27f3e43a5e25b8ecb4beed97333b136fdd9a | [
"Apache-2.0"
] | null | null | null | frameworks/hdfs/tests/config.py | krisis/dcos-commons | 715d27f3e43a5e25b8ecb4beed97333b136fdd9a | [
"Apache-2.0"
] | null | null | null | PACKAGE_NAME = 'hdfs'
DEFAULT_TASK_COUNT = 10 # 3 data nodes, 3 journal nodes, 2 name nodes, 2 zkfc nodes
| 27 | 84 | 0.731481 |
7957e54efa843c2addec78cb6665162d571cf950 | 691 | py | Python | brainstorm/all.py | PyCN/brainstorm | 8f1fc886faf268b25085fa5c95bf106b1805d766 | [
"MIT"
] | 1,473 | 2015-10-25T19:12:45.000Z | 2022-03-13T01:00:51.000Z | brainstorm/all.py | PyCN/brainstorm | 8f1fc886faf268b25085fa5c95bf106b1805d766 | [
"MIT"
] | 50 | 2015-10-25T19:14:17.000Z | 2018-10-03T07:48:25.000Z | brainstorm/all.py | PyCN/brainstorm | 8f1fc886faf268b25085fa5c95bf106b1805d766 | [
"MIT"
] | 209 | 2015-10-25T20:22:06.000Z | 2021-07-23T00:00:39.000Z | #!/usr/bin/env python
# coding=utf-8
"""
Convenience namespace containing all relevant brainstorm objects and functions.
"""
from __future__ import division, print_function, unicode_literals
from brainstorm.data_iterators import *
from brainstorm.describable import create_from_description, get_description
from brainstorm.handlers import *
from brainstorm.hooks import *
from brainstorm.initializers import *
from brainstorm.layers import *
from brainstorm.randomness import global_rnd
from brainstorm.structure import Network, generate_architecture
from brainstorm.scorers import *
from brainstorm.tools import *
from brainstorm.training import *
from brainstorm.value_modifiers import *
| 34.55 | 79 | 0.837916 |
7957e59ae2ae56c3acf5d1d1eaec63c0a260530c | 403 | py | Python | integration/data/util.py | MalibuKoKo/longhorn-engine | b31b3f177ebaadb222f733073ca6015ad01ae3ef | [
"Apache-2.0"
] | null | null | null | integration/data/util.py | MalibuKoKo/longhorn-engine | b31b3f177ebaadb222f733073ca6015ad01ae3ef | [
"Apache-2.0"
] | null | null | null | integration/data/util.py | MalibuKoKo/longhorn-engine | b31b3f177ebaadb222f733073ca6015ad01ae3ef | [
"Apache-2.0"
] | null | null | null | import os
import hashlib
def file(f):
    """Resolve *f* relative to the repository root (two levels above this
    module's directory).

    Note: this shadows the Python 2 builtin ``file``.
    """
    relative = '../../{}'.format(f)
    return os.path.join(_base(), relative)
def _base():
    """Return the directory containing this module."""
    return os.path.dirname(__file__)
def read_file(file_path, offset, length):
    """Read *length* characters from *file_path* starting at *offset*.

    Args:
        file_path (str): path of the file to read; must exist
        offset (int): position to seek to before reading
        length (int): number of characters to read

    Returns:
        str: the characters read
    """
    assert os.path.exists(file_path)
    # Use a context manager so the handle is closed even if seek/read raises
    # (the original leaked the handle on an exception between open and close).
    with open(file_path, 'r') as f:
        f.seek(offset)
        return f.read(length)
def checksum_data(data):
    """Return the SHA-512 hex digest of *data* (bytes)."""
    digest = hashlib.sha512()
    digest.update(data)
    return digest.hexdigest()
| 16.791667 | 54 | 0.647643 |
7957e6dee1c7e20c491b7717c253307ccd8c663f | 1,653 | py | Python | util/canvas_api_extension.py | adgj-1/cs221bot | de6867b6eb97bc45148f6c8dd4f71ba553baf1d7 | [
"Unlicense"
] | null | null | null | util/canvas_api_extension.py | adgj-1/cs221bot | de6867b6eb97bc45148f6c8dd4f71ba553baf1d7 | [
"Unlicense"
] | null | null | null | util/canvas_api_extension.py | adgj-1/cs221bot | de6867b6eb97bc45148f6c8dd4f71ba553baf1d7 | [
"Unlicense"
] | null | null | null | from typing import List
from canvasapi.course import Course
from canvasapi.requester import Requester
from canvasapi.util import combine_kwargs, get_institution_url
def get_course_stream(course_id: int, base_url, access_token, **kwargs):
    """Fetch the activity stream of a Canvas course.

    Parameters
    ----------
    course_id : `int`
        Course id
    base_url : `str`
        Base URL of the Canvas instance's API
    access_token : `str`
        API key to authenticate requests with (whitespace is stripped)
    **kwargs
        Extra request parameters forwarded to the Canvas API

    Returns
    -------
    `dict`
        JSON response for the course activity stream
    """
    requester = Requester(get_institution_url(base_url), access_token.strip())
    response = requester.request(
        "GET",
        "courses/{}/activity_stream".format(course_id),
        _kwargs=combine_kwargs(**kwargs)
    )
    return response.json()
def get_course_url(course_id: str, base_url) -> str:
    """Build the web URL of a course page.

    Parameters
    ----------
    course_id : `str`
        Course id
    base_url : `str`
        Base URL of the Canvas instance's API

    Returns
    -------
    `str`
        URL of course page
    """
    return "{}/courses/{}".format(get_institution_url(base_url), course_id)
def get_staff_ids(course: Course) -> List[int]:
    """Collect the user ids of all staff (teachers and TAs) in a course.

    Parameters
    ----------
    course : `Course`
        The course to get staff IDs for

    Returns
    -------
    `List[int]`
        IDs of all professors and TAs in the given course
    """
    staff = course.get_users(enrollment_type=["teacher", "ta"])
    return [member.id for member in staff]
| 22.04 | 72 | 0.62069 |
7957e71e5127b1f2c8d0aaece9157e181c25f1e6 | 1,450 | py | Python | bespin/task_finder.py | delfick/bespin | 4fa21875f0cdc32a70b33cdc90ce5196c0a2cbcd | [
"MIT"
] | 5 | 2017-04-05T00:46:41.000Z | 2017-11-09T01:21:44.000Z | bespin/task_finder.py | delfick/bespin | 4fa21875f0cdc32a70b33cdc90ce5196c0a2cbcd | [
"MIT"
] | 69 | 2016-10-11T04:40:09.000Z | 2022-01-12T23:57:27.000Z | bespin/task_finder.py | delfick/bespin | 4fa21875f0cdc32a70b33cdc90ce5196c0a2cbcd | [
"MIT"
] | 7 | 2016-10-11T04:32:21.000Z | 2017-12-18T05:59:17.000Z | """
Responsible for finding tasks in the configuration and executing them
"""
from bespin.actions import available_actions, default_actions
from bespin.option_spec.task_objs import Task
from bespin.errors import BadTask
class TaskFinder(object):
def __init__(self, collector):
self.tasks = {}
self.collector = collector
def stack_finder(self, task):
return getattr(self.tasks[task], "stack", self.collector.configuration['bespin'].chosen_stack)
def task_runner(self, task, **kwargs):
if task not in self.tasks:
raise BadTask("Unknown task", task=task, available=self.tasks.keys())
return self.tasks[task].run(self.collector, self.stack_finder(task), available_actions, self.tasks, **kwargs)
def default_tasks(self):
"""Return default tasks"""
return dict((name, Task(action=name, label="Bespin")) for name in default_actions)
def find_tasks(self, overrides):
"""Find the custom tasks and record the associated stack with each task"""
tasks = self.default_tasks()
configuration = self.collector.configuration
for stack in list(configuration["stacks"]):
path = configuration.path(["stacks", stack, "tasks"], joined="stacks.{0}.tasks".format(stack))
nxt = configuration.get(path, {})
tasks.update(nxt)
if overrides:
tasks.update(overrides)
self.tasks = tasks
| 35.365854 | 117 | 0.670345 |
7957e776bd689135b16f01ee8b510c69dc04336a | 10,980 | py | Python | dhl_delivery/pickup.py | ivangsm/dhl_delivery | aa7ae5251715350a1ffa3ecbc96c8ac233f5dd21 | [
"MIT"
] | null | null | null | dhl_delivery/pickup.py | ivangsm/dhl_delivery | aa7ae5251715350a1ffa3ecbc96c8ac233f5dd21 | [
"MIT"
] | null | null | null | dhl_delivery/pickup.py | ivangsm/dhl_delivery | aa7ae5251715350a1ffa3ecbc96c8ac233f5dd21 | [
"MIT"
] | null | null | null | import dhl_delivery
import dhl_delivery.config
import dhl_delivery.common
import dhl_delivery.dhl_process
import xmltodict
import datetime
import random
class Pickup:
    """Schedules DHL pickups by building BookPickupRequest XML and sending
    it through the project's DhlProcess wrapper."""
    def __init__(self):
        # Timestamp captured at construction; used as the MessageTime of
        # every request built by this instance.
        self.date_now = datetime.datetime.now()
    def schedule_pickup(self, dict_param):
        """Build the pickup XML from *dict_param*, post it to DHL, and parse
        the response.

        Args:
            dict_param (dict): request payload with 'addresses', 'pieces',
                'package', 'pickup_details' and 'optional_data' keys
                (see ``pickup_xml``).

        Returns:
            dict: {'status': bool, 'message': str,
                   'pickup_confirmation_number': str} plus optional
                   'dhl_response' / 'request_xml_to_dhl' debug keys when the
                   corresponding module flags are set.
        """
        return_status = True
        return_message = ''
        pickup_confirmation_number = ''
        return_dict = {}
        common_obj = dhl_delivery.common.Common()
        # get the prepared XML data as string - this portion also checks if the
        # xml is well formated - ie user sending correct data
        xml_formated_data = self.pickup_xml(dict_param)
        if xml_formated_data['status']:
            xml_formated_data_string = xml_formated_data['data']
        else:
            return_dict.update(
                {
                    'status': False,
                    'message': xml_formated_data['message'],
                }
            )
            return return_dict
        try:
            url_response_data = {}
            dhl_process_obj = dhl_delivery.dhl_process.DhlProcess()
            returned_data = dhl_process_obj.process_pickup(
                xml_formated_data_string)
            if 'return_dhl_api_response' in returned_data.keys():
                return_status = True
                url_response_data = returned_data['return_dhl_api_response']
            else:
                # no response payload -> treat as timeout
                return_status = False
                return_message = dhl_delivery.config.message_max_response_time
                return_dict.update(
                    {
                        'status': return_status,
                        'message': return_message,
                    }
                )
                return return_dict
        except:
            # NOTE(review): bare except -- any failure (network or otherwise)
            # is reported as "DHL url down".
            return_status = False
            return_message = dhl_delivery.config.message_dhl_url_down
            return_dict.update(
                {
                    'status': return_status,
                    'message': return_message,
                }
            )
        else:
            # format the data to dictionary and return
            dict_response = xmltodict.parse(url_response_data)
            # get the action note returned by DHL and check if it is Success - if success then all are fine proceed
            try:
                action_note = common_obj.in_dictionary_all(
                    'ActionNote', dict_response)
                try:
                    if action_note is False:
                        action_note = []
                except Exception as e:
                    action_note = []
            except Exception as e:
                action_note = []
            if 'Success' in action_note:  # action_note is a list type
                # get the confirmation number
                pickup_confirmation_number = common_obj.in_dictionary_all(
                    'ConfirmationNumber', dict_response)
                pickup_confirmation_number = ''.join(
                    map(str, pickup_confirmation_number))
                return_status = True
                return_message = dhl_delivery.config.message_pickup_success
            else:
                # some error by dhl which stopping to proceed
                error_found = common_obj.in_dictionary_all(
                    'ConditionData', dict_response)
                if error_found is not False:
                    # some error by dhl
                    return_status = False
                    return_message = error_found[0]
            if dhl_delivery.dhl_response_flag:
                return_dict.update({'dhl_response': dict_response})
            if dhl_delivery.dhl_xml_flag:
                return_dict.update(
                    {'request_xml_to_dhl': xml_formated_data_string})
        return_dict.update(
            {
                'status': return_status,
                'message': return_message,
                'pickup_confirmation_number': str(pickup_confirmation_number),
            }
        )
        return return_dict
    def pickup_xml(self, dict_param):
        """Assemble the BookPickupRequest XML string for *dict_param*.

        Any exception while reading the payload (e.g. a missing key) is
        reported as a bad-format error rather than raised.

        Returns:
            dict: {'status': bool, 'message': str, 'data': str XML}
        """
        try:
            common_obj = dhl_delivery.common.Common()
            addresses = dict_param['addresses']  # From and To Address
            pieces = dict_param['pieces']  # Measurement Units
            package = dict_param['package']  # delivery package
            # pickup time and schedule
            pickup_details = dict_param['pickup_details']
            optional_data = dict_param['optional_data']  # NOTE(review): read but unused below
            # as per DHL it must be between 28 to 32 char
            message_reference = ''.join(
                random.choice('0123456789') for i in range(28))
            # pickup_date = datetime.datetime.now() # changable
            # piece info
            piece_details_info = '<Pieces>' + str(len(pieces)) + '</Pieces>'
            piece_details_info += '<weight>'
            piece_details_info += '<Weight>' + \
                package['total_weight'] + '</Weight>'
            piece_details_info += '<WeightUnit>' + \
                package['weight_unit'] + '</WeightUnit>'
            piece_details_info += '</weight>'
            # region code based on country
            region_code = common_obj.get_dhl_region_code(
                addresses['from_country'])  # AP|AM|EU
            x_region_code_str = ''
            xsd_region_code_str = ''
            phone_extn_str = ''
            requestor_company_name = ''
            # place_details_str = '<DivisionName>' + addresses['from_state'] + '</DivisionName>' # State of the pickup place - Optional - Max 35 char
            place_details_str = '<CountryCode>' + \
                addresses['from_country'] + '</CountryCode>'
            place_details_str += '<PostalCode>' + \
                addresses['from_zipcode'] + '</PostalCode>'
            # NOTE(review): region_code from get_dhl_region_code is overwritten
            # here by the caller-supplied value -- confirm which is intended.
            region_code = addresses['from_region_code']  # AP|AM|EA
            if region_code == 'AP':
                x_region_code_str = region_code
                xsd_region_code_str = ''
                phone_extn_str = '<PhoneExtension></PhoneExtension>'
            elif region_code == 'AM':
                x_region_code_str = ''
                xsd_region_code_str = ''
                phone_extn_str = '<PhoneExtension></PhoneExtension>'
            elif region_code == 'EA':
                x_region_code_str = region_code
                xsd_region_code_str = '_' + region_code
                phone_extn_str = ''
                piece_details_info = ''  # only for EA
                requestor_company_name = '<CompanyName>' + \
                    addresses['from_company_name'] + '</CompanyName>'
                place_details_str = '<CountryCode>' + \
                    addresses['from_country'] + '</CountryCode>'
                place_details_str += '<PostalCode>' + \
                    addresses['from_zipcode'] + '</PostalCode>'
            # prepare the xml
            xml_str = '<?xml version="1.0" encoding="UTF-8"?>'
            xml_str += '<req:BookPickupRequest' + x_region_code_str + \
                ' xmlns:req="http://www.dhl.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
            xml_str += 'xsi:schemaLocation="http://www.dhl.com ship-val-req' + \
                xsd_region_code_str + '.xsd">'
            # Request Service Header
            xml_str += '<Request>'
            xml_str += '<ServiceHeader>'
            xml_str += '<MessageTime>' + \
                self.date_now.strftime(
                    "%Y-%m-%dT%I:%M:%S+00:00") + '</MessageTime>'
            xml_str += '<MessageReference>' + message_reference + '</MessageReference>'
            xml_str += '<SiteID>' + dhl_delivery.dhl_site_id + '</SiteID>'
            xml_str += '<Password>' + dhl_delivery.dhl_site_password + '</Password>'
            xml_str += '</ServiceHeader>'
            xml_str += '</Request>'
            # Request Service Header ENDS
            # Requestor -- ie the sender - ie from where package will be collected - ie the from address
            xml_str += '<Requestor>'
            # C (Credit Card) | D (DHL account).
            xml_str += '<AccountType>D</AccountType>'
            xml_str += '<AccountNumber>' + dhl_delivery.dhl_account_no + '</AccountNumber>'  # ??
            xml_str += '<RequestorContact>'
            xml_str += '<PersonName>' + \
                addresses['from_name'] + '</PersonName>'
            xml_str += '<Phone>' + addresses['from_phone_no'] + '</Phone>'
            xml_str += phone_extn_str
            xml_str += '</RequestorContact>'
            xml_str += requestor_company_name
            xml_str += '</Requestor>'
            # Place
            xml_str += '<Place>'
            # B (Business), R (Residence) C (Business/Residence)
            xml_str += '<LocationType>' + \
                addresses['from_location_type'] + '</LocationType>'
            xml_str += '<CompanyName>' + \
                addresses['from_company_name'] + '</CompanyName>'
            xml_str += '<Address1>' + \
                addresses['from_address_line_one'] + '</Address1>'
            xml_str += '<Address2>' + \
                addresses['from_address_line_two'] + '</Address2>'
            # Package Location in the pickup place. E.g. Front Desk
            xml_str += '<PackageLocation>' + \
                addresses['from_package_location'] + '</PackageLocation>'
            xml_str += '<City>' + addresses['from_city'] + '</City>'
            xml_str += place_details_str
            xml_str += '</Place>'
            # Billing ENDS
            # Pickup
            xml_str += '<Pickup>'
            xml_str += '<PickupDate>' + \
                pickup_details['pickup_date'] + \
                '</PickupDate>'  # YYYY-MM-DD format
            xml_str += '<ReadyByTime>' + \
                pickup_details['ready_by_time'] + \
                '</ReadyByTime>'  # hh:mm ie 14:35
            xml_str += '<CloseTime>' + \
                pickup_details['close_time'] + '</CloseTime>'  # hh:mm ie 15:35
            xml_str += piece_details_info
            xml_str += '</Pickup>'
            # Pickup ENDS
            # Pickup Contact
            xml_str += '<PickupContact>'
            xml_str += '<PersonName>' + \
                addresses['from_name'] + '</PersonName>'
            xml_str += '<Phone>' + addresses['from_phone_no'] + '</Phone>'
            xml_str += phone_extn_str
            xml_str += '</PickupContact>'
            # Pickup Contact ENDS
            xml_str += '</req:BookPickupRequest' + x_region_code_str + '>'
            return_dict = {
                'status': True,
                'message': '',
                'data': xml_str,
            }
            return return_dict
        except:
            # NOTE(review): bare except -- any failure is mapped to the
            # generic bad-request-data message.
            return_dict = {
                'status': False,
                'message': dhl_delivery.config.message_request_data_bad_format,
                'data': '',
            }
            return return_dict
| 42.068966 | 150 | 0.528506 |
7957e777486bcb995d1c62c383c546ba04ce3643 | 227 | py | Python | feder/letters/logs/apps.py | dzemeuksis/feder | 32ef7793af6256d4ecada61505c7baf334b34419 | [
"MIT"
] | 16 | 2015-08-11T17:20:26.000Z | 2022-02-11T20:15:41.000Z | feder/letters/logs/apps.py | dzemeuksis/feder | 32ef7793af6256d4ecada61505c7baf334b34419 | [
"MIT"
] | 534 | 2015-08-04T00:10:54.000Z | 2022-03-17T10:44:47.000Z | feder/letters/logs/apps.py | dzemeuksis/feder | 32ef7793af6256d4ecada61505c7baf334b34419 | [
"MIT"
] | 10 | 2017-08-30T13:34:32.000Z | 2022-02-18T13:00:35.000Z | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class LogsConfig(AppConfig):
name = "feder.letters.logs"
verbose_name = _("Logs of letter")
def ready(self):
pass
| 20.636364 | 55 | 0.713656 |
7957e84148b391409ec52964b9df28d896ff0582 | 29 | py | Python | Server/app/models/MongoDB/__init__.py | Jaws-bar/Entry3.0-InterviewSystem | 15385f9982c0c4e9aed970263b7ea1e50d6163ca | [
"MIT"
] | null | null | null | Server/app/models/MongoDB/__init__.py | Jaws-bar/Entry3.0-InterviewSystem | 15385f9982c0c4e9aed970263b7ea1e50d6163ca | [
"MIT"
] | null | null | null | Server/app/models/MongoDB/__init__.py | Jaws-bar/Entry3.0-InterviewSystem | 15385f9982c0c4e9aed970263b7ea1e50d6163ca | [
"MIT"
] | null | null | null | from .interview_data import * | 29 | 29 | 0.827586 |
7957e880de5bf42cfa98c523d38828ff2a8f80b0 | 15,338 | py | Python | shap/_explanation.py | regev-lab/shap | 3e822194c83c327320f70e9a836294f768ebf468 | [
"MIT"
] | 1 | 2020-08-31T02:46:13.000Z | 2020-08-31T02:46:13.000Z | shap/_explanation.py | WisePanda007/shap | 13c6141ebe459f34c0b69293135124eca1211fa1 | [
"MIT"
] | null | null | null | shap/_explanation.py | WisePanda007/shap | 13c6141ebe459f34c0b69293135124eca1211fa1 | [
"MIT"
] | 2 | 2020-12-06T22:39:08.000Z | 2021-12-25T01:10:16.000Z |
import pandas as pd
import numpy as np
import scipy as sp
import sys
import warnings
import copy
from slicer import Slicer, Alias
# from ._order import Order
from .utils._general import OpChain
# slicer confuses pylint...
# pylint: disable=no-member
op_chain_root = OpChain("shap.Explanation")
class MetaExplanation(type):
    """ This metaclass exposes the Explanation object's methods for creating template op chains.

    Accessing e.g. ``Explanation.abs`` (on the class, not an instance)
    returns an OpChain rooted at ``op_chain_root`` that records the
    operation for later application.
    """
    def __getitem__(cls, item):
        # Class-level indexing, e.g. Explanation[:, 0], becomes an op chain.
        return op_chain_root.__getitem__(item)
    @property
    def abs(cls):
        return op_chain_root.abs
    @property
    def argsort(cls):
        return op_chain_root.argsort
    @property
    def sum(cls):
        return op_chain_root.sum
    @property
    def max(cls):
        return op_chain_root.max
    @property
    def min(cls):
        return op_chain_root.min
    @property
    def mean(cls):
        return op_chain_root.mean
    @property
    def sample(cls):
        return op_chain_root.sample
    @property
    def hclust(cls):
        return op_chain_root.hclust
class Explanation(object, metaclass=MetaExplanation):
""" This is currently an experimental feature don't depend on this object yet! :)
"""
def __init__(
self,
values,
base_values = None,
data = None,
display_data = None,
instance_names = None,
feature_names = None,
output_names = None,
output_indexes = None,
lower_bounds = None,
upper_bounds = None,
main_effects = None,
hierarchical_values = None,
clustering = None
):
self.transform_history = []
# cloning. TODO: better cloning :)
if issubclass(type(values), Explanation):
e = values
values = e.values
base_values = e.base_values
data = e.data
output_dims = compute_output_dims(values, base_values, data)
if len(_compute_shape(feature_names)) == 1: # TODO: should always be an alias once slicer supports per-row aliases
values_shape = _compute_shape(values)
if len(values_shape) >= 1 and len(feature_names) == values_shape[0]:
feature_names = Alias(feature_names, 0)
elif len(values_shape) >= 2 and len(feature_names) == values_shape[1]:
feature_names = Alias(feature_names, 1)
self._s = Slicer(
values = values,
base_values = base_values,
data = data,
display_data = display_data,
instance_names = None if instance_names is None else Alias(instance_names, 0),
feature_names = feature_names,
output_names = None if output_names is None else Alias(output_names, output_dims),
output_indexes = None if output_indexes is None else (output_dims, output_indexes),
lower_bounds = lower_bounds,
upper_bounds = lower_bounds,
main_effects = main_effects,
hierarchical_values = hierarchical_values, #Obj(hierarchical_values, (0,None)),
clustering = clustering
)
    @property
    def shape(self):
        """Shape of the ``values`` array, computed lazily."""
        return _compute_shape(self._s.values)
    # --- pass-through accessors over the underlying Slicer (self._s) ---
    @property
    def values(self):
        """The SHAP values themselves."""
        return self._s.values
    @values.setter
    def values(self, new_values):
        self._s.values = new_values
    @property
    def base_values(self):
        """The model's expected output (per row / per output)."""
        return self._s.base_values
    @base_values.setter
    def base_values(self, new_base_values):
        self._s.base_values = new_base_values
    @property
    def data(self):
        """The input data the values were computed for."""
        return self._s.data
    @data.setter
    def data(self, new_data):
        self._s.data = new_data
    @property
    def display_data(self):
        """Human-readable version of ``data`` used for plotting."""
        return self._s.display_data
    @display_data.setter
    def display_data(self, new_display_data):
        self._s.display_data = new_display_data
    @property
    def instance_names(self):
        return self._s.instance_names
    @property
    def output_names(self):
        return self._s.output_names
    @property
    def output_indexes(self):
        return self._s.output_indexes
    @property
    def feature_names(self):
        return self._s.feature_names
    @feature_names.setter
    def feature_names(self, new_feature_names):
        self._s.feature_names = new_feature_names
    @property
    def lower_bounds(self):
        return self._s.lower_bounds
    @property
    def upper_bounds(self):
        return self._s.upper_bounds
    @property
    def main_effects(self):
        return self._s.main_effects
    @main_effects.setter
    def main_effects(self, new_main_effects):
        self._s.main_effects = new_main_effects
    @property
    def hierarchical_values(self):
        return self._s.hierarchical_values
    @hierarchical_values.setter
    def hierarchical_values(self, new_hierarchical_values):
        self._s.hierarchical_values = new_hierarchical_values
    @property
    def clustering(self):
        """Feature clustering (e.g. hierarchical) attached to the values."""
        return self._s.clustering
    @clustering.setter
    def clustering(self, new_clustering):
        self._s.clustering = new_clustering
def __repr__(self):
out = ".values =\n"+self.values.__repr__()
if self.base_values is not None:
out += "\n\n.base_values =\n"+self.base_values.__repr__()
if self.data is not None:
out += "\n\n.data =\n"+self.data.__repr__()
return out
    def __getitem__(self, item):
        """ This adds support for magic string indexes like "rank(0)".

        Each element of ``item`` is first normalized (OpChains applied,
        numpy integers/arrays converted to plain ints/lists, strings resolved
        against feature names) and then handed to the slicer for the actual
        indexing work.
        """
        if not isinstance(item, tuple):
            item = (item,)
        # convert any OpChains or magic strings
        for i,t in enumerate(item):
            orig_t = t
            if issubclass(type(t), OpChain):
                # lazily-built op chains are evaluated against this object
                t = t.apply(self)
                if issubclass(type(t), (np.int64, np.int32)): # because slicer does not like numpy indexes
                    t = int(t)
                elif issubclass(type(t), np.ndarray):
                    t = [int(v) for v in t] # slicer wants lists not numpy arrays for indexing
            elif issubclass(type(t), Explanation):
                # indexing by another Explanation uses its values as the index
                t = t.values
            elif type(t) is str:
                if is_1d(self.feature_names):
                    # flat feature names: map the name to its column index
                    ind = np.where(np.array(self.feature_names) == t)[0][0]
                    t = int(ind)
                else:
                    # ragged per-row feature names: collect every value/data
                    # entry matching the name and return a new object directly
                    # (this branch short-circuits the slicer call below).
                    new_values = []
                    new_data = []
                    for i in range(len(self.values)):
                        for s,v,d in zip(self.feature_names[i], self.values[i], self.data[i]):
                            if s == t:
                                new_values.append(v)
                                new_data.append(d)
                    new_self = copy.deepcopy(self)
                    new_self.values = new_values
                    new_self.data = new_data
                    new_self.feature_names = t
                    new_self.clustering = None
                    return new_self
            # final normalization for whatever the branches above produced
            if issubclass(type(t), np.ndarray):
                t = [int(j) for j in t]
            elif issubclass(type(t), (np.int8, np.int16, np.int32, np.int64)):
                t = int(t)
            if t is not orig_t:
                # tuples are immutable, so rebuild item with the converted entry
                tmp = list(item)
                tmp[i] = t
                item = tuple(tmp)
        # call slicer for the real work
        new_self = copy.copy(self)
        new_self.transform_history.append(("__getitem__", (item,)))
        new_self._s = self._s.__getitem__(item)
        return new_self
    def __len__(self):
        # Number of rows, i.e. the first dimension of .values.
        return self.shape[0]
    def __copy__(self):
        # NOTE(review): rebuilds through the constructor, so any state not in
        # the argument list (e.g. transform_history) is presumably reset on
        # the copy -- confirm this is intended.
        return Explanation(
            self.values,
            self.base_values,
            self.data,
            self.display_data,
            self.instance_names,
            self.feature_names,
            self.output_names,
            self.output_indexes,
            self.lower_bounds,
            self.upper_bounds,
            self.main_effects,
            self.hierarchical_values,
            self.clustering
        )
    # NOTE(review): this definition is dead code -- a second ``abs`` property
    # (implemented via ``_numpy_func("abs")``) is defined later in the class
    # body, and later class-body assignments override earlier ones. Kept as-is
    # here; consider deleting one of the two.
    @property
    def abs(self):
        new_self = copy.copy(self)
        new_self.values = np.abs(new_self.values)
        new_self.transform_history.append(("abs", None))
        return new_self
def _numpy_func(self, fname, **kwargs):
new_self = copy.copy(self)
axis = kwargs.get("axis", None)
# collapse the slicer to right shape
if axis == 0:
new_self = new_self[0]
elif axis == 1:
new_self = new_self[1]
elif axis == 2:
new_self = new_self[2]
if self.feature_names is not None and not is_1d(self.feature_names) and axis == 0:
new_values = self._flatten_feature_names()
new_self.feature_names = np.array(list(new_values.keys()))
new_self.values = np.array([getattr(np, fname)(v) for v in new_values.values()])
new_self.clustering = None
else:
new_self.values = getattr(np, fname)(np.array(self.values), **kwargs)
if new_self.data is not None:
try:
new_self.data = getattr(np, fname)(np.array(self.data), **kwargs)
except:
new_self.data = None
if new_self.base_values is not None and issubclass(type(axis), int) and len(self.base_values.shape) > axis:
new_self.base_values = getattr(np, fname)(self.base_values, **kwargs)
elif issubclass(type(axis), int):
new_self.base_values = None
if axis == 0 and self.clustering is not None and len(self.clustering.shape) == 3:
if self.clustering.std(0).sum() < 1e-8:
new_self.clustering = self.clustering[0]
else:
new_self.clustering = None
new_self.transform_history.append((fname, kwargs))
return new_self
    def mean(self, axis):
        """ Numpy-style mean reduction along ``axis`` (see _numpy_func). """
        return self._numpy_func("mean", axis=axis)
    def max(self, axis):
        """ Numpy-style max reduction along ``axis`` (see _numpy_func). """
        return self._numpy_func("max", axis=axis)
    def min(self, axis):
        """ Numpy-style min reduction along ``axis`` (see _numpy_func). """
        return self._numpy_func("min", axis=axis)
    def sum(self, axis):
        """ Numpy-style sum reduction along ``axis`` (see _numpy_func). """
        return self._numpy_func("sum", axis=axis)
    # Elementwise/whole-array numpy transforms exposed as properties.
    # Note: this ``abs`` overrides the earlier hand-written ``abs`` property
    # defined higher up in the class body.
    @property
    def abs(self):
        return self._numpy_func("abs")
    @property
    def argsort(self):
        return self._numpy_func("argsort")
    @property
    def flip(self):
        return self._numpy_func("flip")
def hclust(self, metric="sqeuclidean", axis=0):
""" Computes an optimal leaf ordering sort order using hclustering.
hclust(metric="sqeuclidean")
Parameters
----------
metric : string
A metric supported by scipy clustering.
axis : int
The axis to cluster along.
"""
values = self.values
if len(values.shape) != 2:
raise Exception("The hclust order only supports 2D arrays right now!")
if axis == 1:
values = values.T
# compute a hierarchical clustering and return the optimal leaf ordering
D = sp.spatial.distance.pdist(values, metric)
cluster_matrix = sp.cluster.hierarchy.complete(D)
inds = sp.cluster.hierarchy.leaves_list(sp.cluster.hierarchy.optimal_leaf_ordering(cluster_matrix, D))
return inds
def sample(self, max_samples, replace=False, random_state=0):
""" Randomly samples the instances (rows) of the Explanation object.
Parameters
----------
max_samples : int
The number of rows to sample. Note that if replace=False then less than
fewer than max_samples will be drawn if explanation.shape[0] < max_samples.
replace : bool
Sample with or without replacement.
"""
prev_seed = np.random.seed(random_state)
inds = np.random.choice(self.shape[0], min(max_samples, self.shape[0]), replace=replace)
np.random.seed(prev_seed)
return self[list(inds)]
def _flatten_feature_names(self):
new_values = {}
for i in range(len(self.values)):
for s,v in zip(self.feature_names[i], self.values[i]):
if s not in new_values:
new_values[s] = []
new_values[s].append(v)
return new_values
def _use_data_as_feature_names(self):
new_values = {}
for i in range(len(self.values)):
for s,v in zip(self.data[i], self.values[i]):
if s not in new_values:
new_values[s] = []
new_values[s].append(v)
return new_values
def percentile(self, q, axis=None):
new_self = copy.deepcopy(self)
if self.feature_names is not None and not is_1d(self.feature_names) and axis == 0:
new_values = self._flatten_feature_names()
new_self.feature_names = np.array(list(new_values.keys()))
new_self.values = np.array([np.percentile(v, q) for v in new_values.values()])
new_self.clustering = None
else:
new_self.values = np.percentile(new_self.values, q, axis)
new_self.data = np.percentile(new_self.data, q, axis)
#new_self.data = None
new_self.transform_history.append(("percentile", (axis,)))
return new_self
def compute_output_dims(values, base_values, data):
    """Infer which axes of ``values`` are output dimensions.

    The layout of ``values`` is assumed to be
    ``data dims + interaction dims + output dims`` where the number of output
    dims comes from the shape of ``base_values`` (minus its leading row axis).

    Returns
    -------
    tuple of int
        The indices of the output axes of ``values``.
    """
    values_shape = _compute_shape(values)
    # input shape matches the data shape
    if data is not None:
        data_shape = _compute_shape(data)
    # if we are not given any data we assume it would be the same shape as the given values
    else:
        data_shape = values_shape
    # output shape is known from the base values
    if base_values is not None:
        output_shape = _compute_shape(base_values)[1:]
    else:
        output_shape = tuple()
    interaction_order = len(values_shape) - len(data_shape) - len(output_shape)
    # The output dims are the trailing len(output_shape) axes of values, i.e.
    # up to len(values_shape). The previous stop value ``values_dims[-1]``
    # (== len(values_shape) - 1) dropped the last output axis -- off-by-one.
    output_dims = range(len(data_shape) + interaction_order, len(values_shape))
    return tuple(output_dims)
def is_1d(val):
    """Return True when the first element of ``val`` is not itself a list/array."""
    return not isinstance(val[0], (list, np.ndarray))
class Op():
    """Marker base class for lazy operations applied to explanations."""
    pass
class Percentile(Op):
    """Lazy percentile operation with a printable representation."""

    def __init__(self, percentile):
        # the q value to apply when this op is evaluated
        self.percentile = percentile

    def add_repr(self, s, verbose=False):
        """Wrap the representation string ``s`` in a percentile(...) call."""
        return "percentile(" + s + ", " + str(self.percentile) + ")"
def _compute_shape(x):
    """Recursively compute the shape of a (possibly ragged) nested structure.

    Returns a tuple like numpy's ``.shape``. A dimension is ``None`` when the
    entries disagree in shape (ragged rows) or are strings.
    """
    if not hasattr(x, "__len__"):
        return tuple()  # scalars have no dimensions

    # Dicts must be handled before the x[0] probes below: integer-indexing a
    # dict raises KeyError (the old code only reached the dict branch after
    # probing x[0], so any dict without the key 0 crashed). isinstance also
    # covers dict subclasses such as OrderedDict.
    if isinstance(x, dict):
        if len(x) == 0:
            return (0,)
        return (len(x),) + _compute_shape(x[next(iter(x))])

    if len(x) > 0 and isinstance(x[0], str):
        return (None,)  # string entries have no fixed inner shape

    # 2D arrays we just take their shape as-is
    if len(getattr(x, "shape", tuple())) > 1:
        return x.shape

    # 1D arrays we need to look inside
    if len(x) == 0:
        return (0,)
    elif len(x) == 1:
        return (len(x),) + _compute_shape(x[0])
    else:
        first_shape = _compute_shape(x[0])
        for i in range(1, len(x)):
            if _compute_shape(x[i]) != first_shape:
                return (len(x), None)  # ragged rows
        return (len(x),) + first_shape
7957e95ff85ad978cca910a65d00a3494cc9567b | 1,897 | py | Python | example/example_python_api_extended.py | maxipi/kostra | c3b58518b04a9d5b804cbd17cc67dcedf9970fb1 | [
"MIT"
] | 20 | 2018-11-09T17:35:12.000Z | 2022-03-30T18:27:03.000Z | example/example_python_api_extended.py | maxipi/kostra | c3b58518b04a9d5b804cbd17cc67dcedf9970fb1 | [
"MIT"
] | 3 | 2021-03-07T13:09:00.000Z | 2021-10-15T13:26:48.000Z | example/example_python_api_extended.py | maxipi/kostra | c3b58518b04a9d5b804cbd17cc67dcedf9970fb1 | [
"MIT"
] | 8 | 2018-11-13T07:14:13.000Z | 2021-12-03T06:48:20.000Z | from idf_analysis import IntensityDurationFrequencyAnalyse
from idf_analysis.definitions import *
import pandas as pd
from os import path
# sub-folder for the results
output_directory = path.join('ehyd_112086_idf_data')
# initialize of the analysis class
idf = IntensityDurationFrequencyAnalyse(series_kind=SERIES.PARTIAL, worksheet=METHOD.KOSTRA, extended_durations=True)
# reading the pandas series of the precipitation (data from ehyd.gv.at - ID=112086)
series = pd.read_parquet('ehyd_112086.parquet')['precipitation']
# setting the series for the analysis
idf.set_series(series)
# auto-save the calculated parameter so save time for a later use
idf.auto_save_parameters(path.join(output_directory, 'idf_parameters.yaml'))
# --------
# idf.write_return_periods_frame(path.join(output_directory, 'idf_return_periods_frame.parq'))
# exit()
# idf.auto_save_return_periods_frame(path.join(output_directory, 'idf_return_periods_frame.parq'))
# --------
# events = idf.rain_events
# idf.add_max_return_periods_to_events(events)
# idf.write_rain_events(path.join(output_directory, 'events.csv'), sep=',', decimal='.')
# exit()
# idf.auto_save_rain_events(path.join(output_directory, 'events.csv'), sep=',', decimal='.')
# --------
# idf.event_report(path.join(output_directory, 'idf_event_analysis.pdf'), min_event_rain_sum=60, min_return_period=0.5, durations=None)
# --------
# idf.event_return_period_report(path.join(output_directory, 'idf_event_return_period_analysis.pdf'))
# --------
e = idf.rain_events
e = e[e[COL.LP] > 10]
# idf.add_max_return_periods_to_events(idf.rain_events)
# e = e[e[COL.MAX_PERIOD] > 2]
event = idf.rain_events.loc[125]
fig, caption = idf.event_plot(event, durations=idf.duration_steps[:11])
fig.tight_layout()
fig.show()
fig, caption = idf.event_plot(event)
fig.tight_layout()
fig.show()
rpf = idf.return_periods_frame[event[COL.START]:event[COL.END]] | 33.280702 | 135 | 0.768055 |
7957ea89b762763566139edfbf0a75401dc4e268 | 11,487 | py | Python | mmcv/utils/misc.py | BIGWangYuDong/mmcv | c46deb0576edaff5cd5a7d384c617478c7a73a70 | [
"Apache-2.0"
] | 1 | 2022-03-18T02:41:11.000Z | 2022-03-18T02:41:11.000Z | mmcv/utils/misc.py | BIGWangYuDong/mmcv | c46deb0576edaff5cd5a7d384c617478c7a73a70 | [
"Apache-2.0"
] | 1 | 2022-01-23T13:28:49.000Z | 2022-01-23T13:28:49.000Z | mmcv/utils/misc.py | BIGWangYuDong/mmcv | c46deb0576edaff5cd5a7d384c617478c7a73a70 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import collections.abc
import functools
import itertools
import subprocess
import warnings
from collections import abc
from importlib import import_module
from inspect import getfullargspec
from itertools import repeat
# From PyTorch internals
def _ntuple(n):
    """Return a parser that broadcasts a scalar to an n-tuple.

    Iterables are passed through unchanged; any other value is repeated
    ``n`` times.
    """

    def parse(x):
        return x if isinstance(x, collections.abc.Iterable) else tuple(repeat(x, n))

    return parse
# Commonly used broadcast helpers, e.g. ``to_2tuple(3) == (3, 3)`` while
# ``to_2tuple((1, 2)) == (1, 2)``.
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
def is_str(x):
    """Return True if ``x`` is a :class:`str` instance.

    Note: kept only for backward compatibility; with Python 2 gone this is
    exactly ``isinstance(x, str)``.
    """
    return isinstance(x, str)
def import_modules_from_strings(imports, allow_failed_imports=False):
    """Import modules from the given list of strings.

    Args:
        imports (list | str | None): The given module names to be imported.
        allow_failed_imports (bool): If True, the failed imports will return
            None. Otherwise, an ImportError is raised. Default: False.

    Returns:
        list[module] | module | None: The imported modules.

    Examples:
        >>> osp, sys = import_modules_from_strings(
        ...     ['os.path', 'sys'])
        >>> import os.path as osp_
        >>> import sys as sys_
        >>> assert osp == osp_
        >>> assert sys == sys_
    """
    if not imports:
        return
    single_import = False
    if isinstance(imports, str):
        single_import = True
        imports = [imports]
    if not isinstance(imports, list):
        raise TypeError(
            f'custom_imports must be a list but got type {type(imports)}')
    imported = []
    for imp in imports:
        if not isinstance(imp, str):
            raise TypeError(
                f'{imp} is of type {type(imp)} and cannot be imported.')
        try:
            imported_tmp = import_module(imp)
        except ImportError as e:
            if allow_failed_imports:
                warnings.warn(f'{imp} failed to import and is ignored.',
                              UserWarning)
                imported_tmp = None
            else:
                # Re-raise with the failing module name and chain the original
                # cause; the bare ``raise ImportError`` used here before
                # dropped both.
                raise ImportError(f'Failed to import {imp}') from e
        imported.append(imported_tmp)
    if single_import:
        imported = imported[0]
    return imported
def iter_cast(inputs, dst_type, return_type=None):
    """Cast elements of an iterable object into some type.

    Args:
        inputs (Iterable): The input object.
        dst_type (type): Destination type.
        return_type (type, optional): If specified, the output object will be
            converted to this type, otherwise an iterator.

    Returns:
        iterator or specified type: The converted object.
    """
    if not isinstance(inputs, abc.Iterable):
        raise TypeError('inputs must be an iterable object')
    if not isinstance(dst_type, type):
        raise TypeError('"dst_type" must be a valid type')
    casted = map(dst_type, inputs)
    return casted if return_type is None else return_type(casted)
def list_cast(inputs, dst_type):
    """Cast elements of an iterable object into a list of some type.

    A partial method of :func:`iter_cast`.
    """
    return iter_cast(inputs, dst_type, return_type=list)
def tuple_cast(inputs, dst_type):
    """Cast elements of an iterable object into a tuple of some type.

    A partial method of :func:`iter_cast`.
    """
    return iter_cast(inputs, dst_type, return_type=tuple)
def is_seq_of(seq, expected_type, seq_type=None):
    """Check whether it is a sequence of some type.

    Args:
        seq (Sequence): The sequence to be checked.
        expected_type (type): Expected type of sequence items.
        seq_type (type, optional): Expected sequence type.

    Returns:
        bool: Whether the sequence is valid.
    """
    if seq_type is None:
        expected_seq_type = abc.Sequence
    else:
        assert isinstance(seq_type, type)
        expected_seq_type = seq_type
    if not isinstance(seq, expected_seq_type):
        return False
    return all(isinstance(item, expected_type) for item in seq)
def is_list_of(seq, expected_type):
    """Check whether it is a list of some type.

    A partial method of :func:`is_seq_of`.
    """
    return is_seq_of(seq, expected_type, seq_type=list)
def is_tuple_of(seq, expected_type):
    """Check whether it is a tuple of some type.

    A partial method of :func:`is_seq_of`.
    """
    return is_seq_of(seq, expected_type, seq_type=tuple)
def slice_list(in_list, lens):
    """Slice a list into several sub lists by a list of given length.

    Args:
        in_list (list): The list to be sliced.
        lens(int or list): The expected length of each out list.

    Returns:
        list: A list of sliced list.
    """
    if isinstance(lens, int):
        assert len(in_list) % lens == 0
        lens = [lens] * int(len(in_list) / lens)
    if not isinstance(lens, list):
        raise TypeError('"indices" must be an integer or a list of integers')
    elif sum(lens) != len(in_list):
        raise ValueError('sum of lens and list length does not '
                         f'match: {sum(lens)} != {len(in_list)}')
    out_list = []
    start = 0
    for length in lens:
        out_list.append(in_list[start:start + length])
        start += length
    return out_list
def concat_list(in_list):
    """Concatenate a list of list into a single list.

    Args:
        in_list (list): The list of list to be merged.

    Returns:
        list: The concatenated flat list.
    """
    return list(itertools.chain.from_iterable(in_list))
def check_prerequisites(
        prerequisites,
        checker,
        msg_tmpl='Prerequisites "{}" are required in method "{}" but not '
        'found, please install them first.'):  # yapf: disable
    """A decorator factory to check if prerequisites are satisfied.

    Args:
        prerequisites (str of list[str]): Prerequisites to be checked.
        checker (callable): The checker method that returns True if a
            prerequisite is meet, False otherwise.
        msg_tmpl (str): The message template with two variables.

    Returns:
        decorator: A specific decorator.
    """

    def wrap(func):

        @functools.wraps(func)
        def wrapped_func(*args, **kwargs):
            requirements = ([prerequisites]
                            if isinstance(prerequisites, str) else
                            prerequisites)
            # collect every prerequisite the checker rejects
            missing = [item for item in requirements if not checker(item)]
            if missing:
                print(msg_tmpl.format(', '.join(missing), func.__name__))
                raise RuntimeError('Prerequisites not meet.')
            return func(*args, **kwargs)

        return wrapped_func

    return wrap
def _check_py_package(package):
    """Return True if ``package`` can be imported, False otherwise."""
    try:
        import_module(package)
    except ImportError:
        return False
    return True
def _check_executable(cmd):
    """Return True if ``cmd`` is found on PATH (checked via ``which``)."""
    return subprocess.call(f'which {cmd}', shell=True) == 0
def requires_package(prerequisites):
    """A decorator to check if some python packages are installed.

    Example:
        >>> @requires_package('numpy')
        >>> func(arg1, args):
        >>>     return numpy.zeros(1)
        array([0.])
        >>> @requires_package(['numpy', 'non_package'])
        >>> func(arg1, args):
        >>>     return numpy.zeros(1)
        ImportError
    """
    return check_prerequisites(prerequisites, checker=_check_py_package)
def requires_executable(prerequisites):
    """A decorator to check if some executable files are installed.

    Example:
        >>> @requires_executable('ffmpeg')
        >>> func(arg1, args):
        >>>     print(1)
        1
    """
    return check_prerequisites(prerequisites, checker=_check_executable)
def deprecated_api_warning(name_dict, cls_name=None):
    """A decorator to check if some arguments are deprecate and try to replace
    deprecate src_arg_name to dst_arg_name.

    Args:
        name_dict(dict):
            key (str): Deprecate argument names.
            val (str): Expected argument names.

    Returns:
        func: New function.
    """
    def api_warning_wrapper(old_func):
        @functools.wraps(old_func)
        def new_func(*args, **kwargs):
            # get the arg spec of the decorated method
            args_info = getfullargspec(old_func)
            # get name of the function
            func_name = old_func.__name__
            if cls_name is not None:
                func_name = f'{cls_name}.{func_name}'
            if args:
                # Positional arguments bind by position, so only a warning is
                # needed; the rename below updates a local name list and does
                # not change the actual call.
                arg_names = args_info.args[:len(args)]
                for src_arg_name, dst_arg_name in name_dict.items():
                    if src_arg_name in arg_names:
                        warnings.warn(
                            f'"{src_arg_name}" is deprecated in '
                            f'`{func_name}`, please use "{dst_arg_name}" '
                            'instead', DeprecationWarning)
                        arg_names[arg_names.index(src_arg_name)] = dst_arg_name
            if kwargs:
                # Keyword arguments must actually be renamed before the call.
                for src_arg_name, dst_arg_name in name_dict.items():
                    if src_arg_name in kwargs:
                        # Passing both the old and the new name is ambiguous.
                        assert dst_arg_name not in kwargs, (
                            f'The expected behavior is to replace '
                            f'the deprecated key `{src_arg_name}` to '
                            f'new key `{dst_arg_name}`, but got them '
                            f'in the arguments at the same time, which '
                            f'is confusing. `{src_arg_name} will be '
                            f'deprecated in the future, please '
                            f'use `{dst_arg_name}` instead.')
                        warnings.warn(
                            f'"{src_arg_name}" is deprecated in '
                            f'`{func_name}`, please use "{dst_arg_name}" '
                            'instead', DeprecationWarning)
                        kwargs[dst_arg_name] = kwargs.pop(src_arg_name)
            # apply converted arguments to the decorated method
            output = old_func(*args, **kwargs)
            return output
        return new_func
    return api_warning_wrapper
def is_method_overridden(method, base_class, derived_class):
    """Check if a method of base class is overridden in derived class.

    Args:
        method (str): the method name to check.
        base_class (type): the class of the base class.
        derived_class (type | Any): the class or instance of the derived class.
    """
    assert isinstance(base_class, type), \
        "base_class doesn't accept instance, Please pass class instead."
    # accept an instance by falling back to its class
    if not isinstance(derived_class, type):
        derived_class = derived_class.__class__
    return getattr(derived_class, method) != getattr(base_class, method)
def has_method(obj: object, method: str) -> bool:
    """Check whether the object has a method.

    Args:
        method (str): The method name to check.
        obj (object): The object to check.

    Returns:
        bool: True if the object has the method else False.
    """
    # getattr with a None default mirrors hasattr + getattr in one lookup;
    # callable(None) is False, so a missing attribute yields False.
    candidate = getattr(obj, method, None)
    return callable(candidate)
7957eb4f7134f636027b9ff69b04071e4507f9dc | 612 | py | Python | scripts/inspect_docker_network.py | giavac/docker-scripts | a8ec3e3bce63593f3393bb34610940c73f9c6386 | [
"MIT"
] | 3 | 2016-02-06T12:45:21.000Z | 2016-08-24T16:01:55.000Z | scripts/inspect_docker_network.py | giavac/docker-scripts | a8ec3e3bce63593f3393bb34610940c73f9c6386 | [
"MIT"
] | null | null | null | scripts/inspect_docker_network.py | giavac/docker-scripts | a8ec3e3bce63593f3393bb34610940c73f9c6386 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Get the output of 'docker network inspect' for a given network
# and return containers names and their IP address
import json
import subprocess
import sys
network_name = sys.argv[1]
network_json = subprocess.check_output(["docker", "network", "inspect", network_name])
network = json.loads(network_json)
containers = network[0]['Containers']
for container_id in containers:
container_name = subprocess.check_output(["docker", "inspect", "--format", "'{{ .Name }}'", container_id])
print container_id + " " + container_name.strip() + ": " + containers[container_id]['IPv4Address']
| 34 | 110 | 0.730392 |
7957ec2e9a2f4dbb69131b0e7e49c4edfaf32c42 | 855 | py | Python | cog_translator/scripts/cli.py | vincentsarago/cog-translator | 46499244b81bb5df044e90c47cceb4a5daeeecaa | [
"BSD-2-Clause"
] | 5 | 2018-11-04T14:18:01.000Z | 2019-11-14T08:05:10.000Z | cog_translator/scripts/cli.py | vincentsarago/cog-translator | 46499244b81bb5df044e90c47cceb4a5daeeecaa | [
"BSD-2-Clause"
] | 1 | 2018-11-02T15:22:11.000Z | 2018-11-02T15:22:11.000Z | cog_translator/scripts/cli.py | vincentsarago/cog-translator | 46499244b81bb5df044e90c47cceb4a5daeeecaa | [
"BSD-2-Clause"
] | 1 | 2021-01-30T14:40:21.000Z | 2021-01-30T14:40:21.000Z | """Worker."""
import os
import json
import sys
import logging
from cog_translator import process
logger = logging.getLogger("cog_translator")
logger.setLevel(logging.INFO)
def main():
    """Load message and start process."""
    try:
        # NOTE(review): the payload looks like an SNS-style envelope -- the
        # outer JSON carries a "Message" key whose value is itself a JSON
        # string; confirm against the publisher.
        message = json.loads(os.environ["Message"])
        logger.info(message["Message"])
        message = json.loads(message["Message"])
        url = message["url"]
        bucket = message["bucket"]
        key = message["key"]
        # optional translation options are forwarded only when present
        options = {}
        if message.get("profile"):
            options["profile"] = message["profile"]
        if message.get("bidx"):
            options["bidx"] = message["bidx"]
        process(url, bucket, key, **options)
    except Exception as err:
        # top-level boundary: log the failure and exit non-zero (3) so the
        # supervising scheduler can detect/retry it
        logger.error(err)
        sys.exit(3)
    else:
        sys.exit(0)
# script entry point
if __name__ == "__main__":
    main()
| 19.883721 | 51 | 0.583626 |
7957ecb5bc5fbcd7568be172a943dccb826568e9 | 3,696 | py | Python | Validation/EcalRecHits/test/Pion_Pt60GeV_all_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Validation/EcalRecHits/test/Pion_Pt60GeV_all_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Validation/EcalRecHits/test/Pion_Pt60GeV_all_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
process = cms.Process("EcalHitsValid")
# initialize MessageLogger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
# initialize magnetic field
process.load("Configuration.StandardSequences.MagneticField_cff")
# geometry (Only Ecal)
process.load("Geometry.EcalCommonData.EcalOnly_cfi")
process.load("Geometry.CaloEventSetup.CaloGeometry_cff")
process.load("Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi")
process.load("Geometry.EcalMapping.EcalMapping_cfi")
process.load("Geometry.EcalMapping.EcalMappingRecord_cfi")
# DQM services
process.load("DQMServices.Core.DQM_cfg")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
# run simulation, with EcalHits Validation specific watcher
process.load("SimG4Core.Application.g4SimHits_cfi")
# ECAL hits validation sequence
process.load("Validation.EcalHits.ecalSimHitsValidationSequence_cff")
# Mixing Module
process.load("SimGeneral.MixingModule.mixNoPU_cfi")
process.load("CalibCalorimetry.Configuration.Ecal_FakeConditions_cff")
# ECAL digitization sequence
process.load("SimCalorimetry.Configuration.ecalDigiSequence_cff")
# ECAL digis validation sequence
process.load("Validation.EcalDigis.ecalDigisValidationSequence_cff")
# ECAL LocalReco sequence
process.load("RecoLocalCalo.EcalRecProducers.ecalLocalRecoSequence_cff")
# ECAL rechits validation sequence
process.load("Validation.EcalRecHits.ecalRecHitsValidationSequence_cff")
# End of process
process.load("Configuration.StandardSequences.EndOfProcess_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2000)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:Pion_Pt60GeV_all.root')
)
process.USER = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring('keep *',
'drop *_simEcalUnsuppressedDigis_*_*',
'drop *_simEcalDigis_*_*',
'drop *_simEcalPreshowerDigis_*_*',
'drop *_ecalRecHit_*_*',
'drop *_ecalPreshowerRecHit_*_*'),
fileName = cms.untracked.string('Pion_Pt60GeV_all_EcalValidation.root')
)
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
moduleSeeds = cms.PSet(
g4SimHits = cms.untracked.uint32(9876),
simEcalUnsuppressedDigis = cms.untracked.uint32(12345)
)
)
process.randomEngineStateProducer = cms.EDProducer("RandomEngineStateProducer")
process.Timing = cms.Service("Timing")
process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck")
process.simhits = cms.Sequence(process.g4SimHits*process.ecalSimHitsValidationSequence)
process.digis = cms.Sequence(process.mix*process.ecalDigiSequence*process.ecalDigisValidationSequence)
process.rechits = cms.Sequence(process.ecalLocalRecoSequence*process.ecalRecHitsValidationSequence)
process.p1 = cms.Path(process.simhits)
process.p2 = cms.Path(process.digis)
process.p3 = cms.Path(process.rechits)
process.p4 = cms.Path(process.randomEngineStateProducer)
process.p5 = cms.Path(process.endOfProcess)
process.outpath = cms.EndPath(process.USER)
process.schedule = cms.Schedule(process.p1,process.p2,process.p3,process.p4,process.p5,process.outpath)
process.DQM.collectorHost = ''
process.g4SimHits.Generator.HepMCProductLabel = 'source'
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
instanceLabel = cms.untracked.string('EcalValidInfo'),
type = cms.string('EcalSimHitsValidProducer'),
verbose = cms.untracked.bool(False)
))
process.ecalUncalibRecHit.EBdigiCollection = 'simEcalDigis:ebDigis'
process.ecalUncalibRecHit.EEdigiCollection = 'simEcalDigis:eeDigis'
process.ecalPreshowerRecHit.ESdigiCollection = 'simEcalPreshowerDigis'
| 35.538462 | 103 | 0.802219 |
7957edf68cc8a1461fccfc2de93ad5250dc9fdb5 | 21,833 | py | Python | tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | 24 | 2018-02-01T15:49:22.000Z | 2021-01-11T16:31:18.000Z | tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | 2 | 2018-09-09T07:29:07.000Z | 2019-03-11T07:14:45.000Z | tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | 7 | 2019-05-31T02:57:03.000Z | 2020-08-09T20:15:25.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.rnn.python.kernel_tests import benchmarking
from tensorflow.contrib.rnn.python.ops import lstm_ops
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Module-level alias for the private block-LSTM op under test.
block_lstm = lstm_ops._block_lstm  # pylint: disable=protected-access
def blocks_match(sess, use_peephole):
  """Run the same LSTM three ways (LSTMCell, block_lstm, LSTMBlockFusedCell).

  All three implementations share the same variables (via the magic variable
  names below), so their states, outputs and gradients can be compared by the
  caller. Returns a tuple of (basic_state, fused_state, basic_outputs,
  block_outputs, fused_outputs, basic_grads, block_grads, fused_grads,
  basic_wgrads, block_wgrads, fused_wgrads).

  NOTE(review): the inputs come from np.random.randn without a fixed seed, so
  every call uses different data -- the comparison is still valid because all
  three implementations consume the same tensors within one call.
  """
  batch_size = 2
  input_size = 3
  cell_size = 4
  sequence_length = 4
  inputs = []
  for _ in range(sequence_length):
    inp = ops.convert_to_tensor(
        np.random.randn(batch_size, input_size), dtype=dtypes.float32)
    inputs.append(inp)
  stacked_inputs = array_ops.stack(inputs)
  initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=19890212)
  with variable_scope.variable_scope("test", initializer=initializer):
    # magic naming so that the cells pick up these variables and resuse them
    if use_peephole:
      wci = variable_scope.get_variable(
          "rnn/lstm_cell/w_i_diag", shape=[cell_size], dtype=dtypes.float32)
      wcf = variable_scope.get_variable(
          "rnn/lstm_cell/w_f_diag", shape=[cell_size], dtype=dtypes.float32)
      wco = variable_scope.get_variable(
          "rnn/lstm_cell/w_o_diag", shape=[cell_size], dtype=dtypes.float32)
    w = variable_scope.get_variable(
        "rnn/lstm_cell/kernel",
        shape=[input_size + cell_size, cell_size * 4],
        dtype=dtypes.float32)
    b = variable_scope.get_variable(
        "rnn/lstm_cell/bias",
        shape=[cell_size * 4],
        dtype=dtypes.float32,
        initializer=init_ops.zeros_initializer())
    # reference implementation: plain LSTMCell via static_rnn
    basic_cell = rnn_cell.LSTMCell(
        cell_size, use_peepholes=use_peephole, state_is_tuple=True, reuse=True)
    basic_outputs_op, basic_state_op = rnn.static_rnn(
        basic_cell, inputs, dtype=dtypes.float32)
    # block implementation: only the last return value (outputs) is compared
    if use_peephole:
      _, _, _, _, _, _, block_outputs_op = block_lstm(
          ops.convert_to_tensor(sequence_length, dtype=dtypes.int64),
          inputs,
          w,
          b,
          wci=wci,
          wcf=wcf,
          wco=wco,
          cell_clip=0,
          use_peephole=True)
    else:
      _, _, _, _, _, _, block_outputs_op = block_lstm(
          ops.convert_to_tensor(sequence_length, dtype=dtypes.int64),
          inputs,
          w,
          b,
          cell_clip=0)
    # fused implementation reusing the same variables by scope name
    fused_cell = lstm_ops.LSTMBlockFusedCell(
        cell_size, cell_clip=0, use_peephole=use_peephole, reuse=True,
        name="rnn/lstm_cell")
    fused_outputs_op, fused_state_op = fused_cell(
        stacked_inputs, dtype=dtypes.float32)
    sess.run([variables.global_variables_initializer()])
    basic_outputs, basic_state = sess.run([basic_outputs_op, basic_state_op[0]])
    basic_grads = sess.run(gradients_impl.gradients(basic_outputs_op, inputs))
    xs = [w, b]
    if use_peephole:
      xs += [wci, wcf, wco]
    basic_wgrads = sess.run(gradients_impl.gradients(basic_outputs_op, xs))
    block_outputs = sess.run(block_outputs_op)
    block_grads = sess.run(gradients_impl.gradients(block_outputs_op, inputs))
    block_wgrads = sess.run(gradients_impl.gradients(block_outputs_op, xs))
    xs = [w, b]
    if use_peephole:
      xs += [wci, wcf, wco]
    fused_outputs, fused_state = sess.run([fused_outputs_op, fused_state_op[0]])
    fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs))
    fused_wgrads = sess.run(gradients_impl.gradients(fused_outputs_op, xs))
    return (basic_state, fused_state, basic_outputs, block_outputs,
            fused_outputs, basic_grads, block_grads, fused_grads, basic_wgrads,
            block_wgrads, fused_wgrads)
class LSTMBlockCellTest(test.TestCase):
  def testNoneDimsWithDynamicRNN(self):
    """Smoke test: LSTMBlockCell under dynamic_rnn accepts fully dynamic
    (None, None, input_dim) input shapes and runs without error."""
    with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
      batch_size = 4
      num_steps = 5
      input_dim = 6
      cell_size = 7
      cell = lstm_ops.LSTMBlockCell(cell_size)
      # time and batch dimensions are left unknown on purpose
      x = array_ops.placeholder(dtypes.float32, shape=(None, None, input_dim))
      output, _ = rnn.dynamic_rnn(
          cell, x, time_major=True, dtype=dtypes.float32)
      sess.run(variables.global_variables_initializer())
      feed = {}
      feed[x] = np.random.randn(num_steps, batch_size, input_dim)
      # only checks that the run succeeds; no value assertions
      sess.run(output, feed)
def testLSTMBlockCell(self):
with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[lstm_ops.LSTMBlockCell(2)
for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 2]),
m1.name: 0.1 * np.ones([1, 2]),
m2.name: 0.1 * np.ones([1, 2]),
m3.name: 0.1 * np.ones([1, 2])
})
self.assertEqual(len(res), 5)
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
# These numbers are from testBasicLSTMCell and only test c/h.
self.assertAllClose(res[1], [[0.68967271, 0.68967271]])
self.assertAllClose(res[2], [[0.44848421, 0.44848421]])
self.assertAllClose(res[3], [[0.39897051, 0.39897051]])
self.assertAllClose(res[4], [[0.24024698, 0.24024698]])
def testCompatibleNames(self):
with self.test_session(use_gpu=True, graph=ops.Graph()):
cell = rnn_cell.LSTMCell(10)
pcell = rnn_cell.LSTMCell(10, use_peepholes=True)
inputs = [array_ops.zeros([4, 5])] * 6
rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope="basic")
rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope="peephole")
basic_names = {
v.name: v.get_shape()
for v in variables.trainable_variables()
}
with self.test_session(use_gpu=True, graph=ops.Graph()):
cell = lstm_ops.LSTMBlockCell(10)
pcell = lstm_ops.LSTMBlockCell(10, use_peephole=True)
inputs = [array_ops.zeros([4, 5])] * 6
rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope="basic")
rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope="peephole")
block_names = {
v.name: v.get_shape()
for v in variables.trainable_variables()
}
with self.test_session(use_gpu=True, graph=ops.Graph()):
cell = lstm_ops.LSTMBlockFusedCell(10)
pcell = lstm_ops.LSTMBlockFusedCell(10, use_peephole=True)
inputs = array_ops.stack([array_ops.zeros([4, 5])] * 6)
cell(inputs, dtype=dtypes.float32, scope="basic/lstm_cell")
pcell(inputs, dtype=dtypes.float32, scope="peephole/lstm_cell")
fused_names = {
v.name: v.get_shape()
for v in variables.trainable_variables()
}
self.assertEqual(basic_names, block_names)
self.assertEqual(basic_names, fused_names)
def testLSTMBasicToBlockCell(self):
with self.test_session(use_gpu=True) as sess:
x = array_ops.zeros([1, 2])
x_values = np.random.randn(1, 2)
m0_val = 0.1 * np.ones([1, 2])
m1_val = -0.1 * np.ones([1, 2])
m2_val = -0.2 * np.ones([1, 2])
m3_val = 0.2 * np.ones([1, 2])
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890212)
with variable_scope.variable_scope("basic", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[rnn_cell.BasicLSTMCell(2, state_is_tuple=True) for _ in range(2)],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
with variable_scope.variable_scope("block", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[lstm_ops.LSTMBlockCell(2)
for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
self.assertEqual(len(basic_res), len(block_res))
for basic, block in zip(basic_res, block_res):
self.assertAllClose(basic, block)
def testLSTMBasicToBlockCellPeeping(self):
with self.test_session(use_gpu=True) as sess:
x = array_ops.zeros([1, 2])
x_values = np.random.randn(1, 2)
m0_val = 0.1 * np.ones([1, 2])
m1_val = -0.1 * np.ones([1, 2])
m2_val = -0.2 * np.ones([1, 2])
m3_val = 0.2 * np.ones([1, 2])
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890212)
with variable_scope.variable_scope("basic", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[
rnn_cell.LSTMCell(2, use_peepholes=True, state_is_tuple=True)
for _ in range(2)
],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
with variable_scope.variable_scope("block", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[lstm_ops.LSTMBlockCell(2, use_peephole=True) for _ in range(2)],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
self.assertEqual(len(basic_res), len(block_res))
for basic, block in zip(basic_res, block_res):
self.assertAllClose(basic, block)
def testLSTMBasicToBlock(self):
with self.test_session(use_gpu=True) as sess:
(basic_state, fused_state, basic_outputs, block_outputs, fused_outputs,
basic_grads, block_grads, fused_grads, basic_wgrads, block_wgrads,
fused_wgrads) = blocks_match(
sess, use_peephole=False)
self.assertAllClose(basic_outputs, block_outputs)
self.assertAllClose(basic_grads, block_grads)
for basic, block in zip(basic_wgrads, block_wgrads):
self.assertAllClose(basic, block, rtol=1e-6, atol=1e-6)
self.assertAllClose(basic_outputs, fused_outputs)
self.assertAllClose(basic_state, fused_state)
self.assertAllClose(basic_grads, fused_grads)
for basic, fused in zip(block_wgrads, fused_wgrads):
self.assertAllClose(basic, fused, rtol=1e-6, atol=1e-6)
def testLSTMBasicToBlockPeeping(self):
with self.test_session(use_gpu=True) as sess:
(basic_state, fused_state, basic_outputs, block_outputs, fused_outputs,
basic_grads, block_grads, fused_grads, basic_wgrads, block_wgrads,
fused_wgrads) = blocks_match(
sess, use_peephole=True)
self.assertAllClose(basic_outputs, block_outputs)
self.assertAllClose(basic_grads, block_grads)
for basic, block in zip(basic_wgrads, block_wgrads):
self.assertAllClose(basic, block, rtol=1e-6, atol=1e-6)
self.assertAllClose(basic_outputs, fused_outputs)
self.assertAllClose(basic_state, fused_state)
self.assertAllClose(basic_grads, fused_grads)
for basic, fused in zip(block_wgrads, fused_wgrads):
self.assertAllClose(basic, fused, rtol=1e-6, atol=1e-6)
def testLSTMFusedSequenceLengths(self):
"""Verify proper support for sequence lengths in LSTMBlockFusedCell."""
with self.test_session(use_gpu=True) as sess:
batch_size = 3
input_size = 4
cell_size = 5
max_sequence_length = 6
inputs = []
for _ in range(max_sequence_length):
inp = ops.convert_to_tensor(
np.random.randn(batch_size, input_size), dtype=dtypes.float32)
inputs.append(inp)
seq_lengths = constant_op.constant([3, 4, 5])
cell_inputs = array_ops.stack(inputs)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890213)
with variable_scope.variable_scope("lstm_cell", initializer=initializer):
# magic naming so that the cells pick up these variables and reuse them
variable_scope.get_variable(
"kernel",
shape=[input_size + cell_size, cell_size * 4],
dtype=dtypes.float32)
variable_scope.get_variable(
"bias",
shape=[cell_size * 4],
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer())
cell = lstm_ops.LSTMBlockFusedCell(
cell_size, cell_clip=0, use_peephole=False, reuse=True,
name="lstm_cell")
fused_outputs_op, fused_state_op = cell(
cell_inputs, dtype=dtypes.float32, sequence_length=seq_lengths)
cell_vars = [
v for v in variables.trainable_variables()
if v.name.endswith("kernel") or v.name.endswith("bias")
]
# Verify that state propagation works if we turn our sequence into
# tiny (single-time) subsequences, i.e. unfuse the cell
unfused_outputs_op = []
state = None
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=True):
for i, inp in enumerate(inputs):
lengths = [int(i < l) for l in seq_lengths.eval()]
output, state = cell(
array_ops.expand_dims(inp, 0),
initial_state=state,
dtype=dtypes.float32,
sequence_length=lengths)
unfused_outputs_op.append(output[0])
unfused_outputs_op = array_ops.stack(unfused_outputs_op)
sess.run([variables.global_variables_initializer()])
unfused_outputs, unfused_state = sess.run([unfused_outputs_op, state[0]])
unfused_grads = sess.run(
gradients_impl.gradients(unfused_outputs_op, inputs))
unfused_wgrads = sess.run(
gradients_impl.gradients(unfused_outputs_op, cell_vars))
fused_outputs, fused_state = sess.run(
[fused_outputs_op, fused_state_op[0]])
fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs))
fused_wgrads = sess.run(
gradients_impl.gradients(fused_outputs_op, cell_vars))
self.assertAllClose(fused_outputs, unfused_outputs)
self.assertAllClose(fused_state, unfused_state)
self.assertAllClose(fused_grads, unfused_grads)
for fused, unfused in zip(fused_wgrads, unfused_wgrads):
self.assertAllClose(fused, unfused, rtol=1e-6, atol=1e-6)
#### Benchmarking.
class BenchmarkLSTMBlock(test.Benchmark):
  """Forward/backward wall-time benchmarks for LSTMBlockCell via dynamic_rnn."""

  def benchmarkLSTMBlockCellFpropWithDynamicRNN(self):
    """Benchmark forward propagation across a grid of batch/cell sizes."""
    print("BlockLSTMCell forward propagation via dynamic_rnn().")
    print("--------------------------------------------------------------")
    print("LSTMBlockCell Seconds per inference.")
    print("batch_size,cell_size,input_size,time_steps,use_gpu,wall_time")
    iters = 10
    for config in benchmarking.dict_product({
        "batch_size": [1, 8, 13, 32, 67, 128],
        "cell_size": [128, 250, 512, 650, 1024, 1350],
        "time_steps": [40],
        "use_gpu": [True, False]
    }):
      with ops.Graph().as_default():
        with benchmarking.device(use_gpu=config["use_gpu"]):
          # Input size equals cell size in this benchmark grid.
          inputs = variable_scope.get_variable(
              "x",
              [config["time_steps"], config["batch_size"], config["cell_size"]])
          cell = lstm_ops.LSTMBlockCell(config["cell_size"])
          outputs = rnn.dynamic_rnn(
              cell, inputs, time_major=True, dtype=dtypes.float32)
          init_op = variables.global_variables_initializer()
        with session.Session() as sess:
          sess.run(init_op)
          wall_time = benchmarking.seconds_per_run(outputs, sess, iters)
        # Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable
        # is set, this will produce a copy-paste-able CSV file.
        print(",".join(
            map(str, [
                config["batch_size"], config["cell_size"], config["cell_size"],
                config["time_steps"], config["use_gpu"], wall_time
            ])))
        benchmark_name_template = "_".join([
            "LSTMBlockCell_fprop", "BS%(batch_size)i", "CS%(cell_size)i",
            "IS%(cell_size)i", "TS%(time_steps)i", "gpu_%(use_gpu)s"
        ])
        self.report_benchmark(
            name=benchmark_name_template % config,
            iters=iters,
            wall_time=wall_time,
            extras=config)

  def benchmarkLSTMBlockCellBpropWithDynamicRNN(self):
    """Benchmark backward propagation (gradients) across the same grid."""
    print("BlockLSTMCell backward propagation via dynamic_rnn().")
    print("--------------------------------------------------------------")
    print("LSTMBlockCell Seconds per inference.")
    print("batch_size,cell_size,input_size,time_steps,use_gpu,wall_time")
    iters = 10
    for config in benchmarking.dict_product({
        "batch_size": [1, 8, 13, 32, 67, 128],
        "cell_size": [128, 250, 512, 650, 1024, 1350],
        "time_steps": [40],
        "use_gpu": [True, False]
    }):
      with ops.Graph().as_default():
        with benchmarking.device(use_gpu=config["use_gpu"]):
          time_steps = config["time_steps"]
          batch_size = config["batch_size"]
          cell_size = input_size = config["cell_size"]
          inputs = variable_scope.get_variable(
              "x", [time_steps, batch_size, cell_size],
              trainable=False,
              dtype=dtypes.float32)
          # Pre-create the cell variables so gradients can target w and b.
          with variable_scope.variable_scope(
              "rnn", reuse=variable_scope.AUTO_REUSE):
            w = variable_scope.get_variable(
                "rnn/lstm_cell/kernel",
                shape=[input_size + cell_size, cell_size * 4],
                dtype=dtypes.float32)
            b = variable_scope.get_variable(
                "rnn/lstm_cell/bias",
                shape=[cell_size * 4],
                dtype=dtypes.float32,
                initializer=init_ops.zeros_initializer())
            cell = lstm_ops.LSTMBlockCell(cell_size)
            outputs = rnn.dynamic_rnn(
                cell, inputs, time_major=True, dtype=dtypes.float32)
          grads = gradients_impl.gradients(outputs, [inputs, w, b])
          init_op = variables.global_variables_initializer()
        with session.Session() as sess:
          sess.run(init_op)
          wall_time = benchmarking.seconds_per_run(grads, sess, iters)
        # Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable
        # is set, this will produce a copy-paste-able CSV file.
        print(",".join(
            map(str, [
                batch_size, cell_size, cell_size, time_steps, config["use_gpu"],
                wall_time
            ])))
        benchmark_name_template = "_".join([
            "LSTMBlockCell_bprop", "BS%(batch_size)i", "CS%(cell_size)i",
            "IS%(cell_size)i", "TS%(time_steps)i", "gpu_%(use_gpu)s"
        ])
        self.report_benchmark(
            name=benchmark_name_template % config,
            iters=iters,
            wall_time=wall_time,
            extras=config)
if __name__ == "__main__":
  # Run all tests and benchmarks via the TensorFlow test runner.
  test.main()
| 40.134191 | 80 | 0.637292 |
7957ee9c298fe3d0525e4f5ee664e0469349cd20 | 4,591 | py | Python | contrib/testgen/gen_base58_test_vectors.py | ecryptoproject/bitcoinreco | 82ba8f9b62b5398ff82a244e81fe5639933e8567 | [
"MIT"
] | null | null | null | contrib/testgen/gen_base58_test_vectors.py | ecryptoproject/bitcoinreco | 82ba8f9b62b5398ff82a244e81fe5639933e8567 | [
"MIT"
] | null | null | null | contrib/testgen/gen_base58_test_vectors.py | ecryptoproject/bitcoinreco | 82ba8f9b62b5398ff82a244e81fe5639933e8567 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2012-2017 The Bitcoinreco Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
# key types
PUBKEY_ADDRESS = 0
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PRIVKEY = 128
PRIVKEY_TEST = 239
metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)),
((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)),
((PRIVKEY,), 32, (), (True, False, None, False)),
((PRIVKEY,), 32, (1,), (True, False, None, True)),
((PRIVKEY_TEST,), 32, (), (True, True, None, False)),
((PRIVKEY_TEST,), 32, (1,), (True, True, None, True))
]
def is_valid(v):
    """Return True when *v* base58-decodes (checksum OK) and fits a template."""
    decoded = b58decode_chk(v)
    if decoded is None:
        # Bad checksum or malformed base58.
        return False
    # A vector is valid when it matches any template's prefix, suffix and
    # payload length exactly.
    return any(
        decoded.startswith(bytearray(head)) and
        decoded.endswith(bytearray(tail)) and
        len(decoded) - len(head) - len(tail) == payload_len
        for head, payload_len, tail, _meta in templates
    )
def gen_valid_vectors():
    """Yield an endless stream of valid (base58, payload_hex, metadata) triples."""
    while True:
        for head, payload_len, tail, meta in templates:
            payload = bytearray(os.urandom(payload_len))
            encoded = b58encode_chk(bytearray(head) + payload + bytearray(tail))
            # Sanity check: everything we emit must round-trip as valid.
            assert is_valid(encoded)
            metadata = {k: v for k, v in zip(metadata_keys, meta) if v is not None}
            hexrepr = b2a_hex(payload)
            if isinstance(hexrepr, bytes):
                hexrepr = hexrepr.decode('utf8')
            yield (encoded, hexrepr, metadata)
def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
    """Build one candidate vector from *template*, corrupting the requested parts.

    Any of prefix, payload length, or suffix may be randomized; the result is
    only *possibly* invalid and must be re-checked by the caller.
    """
    prefix = os.urandom(1) if corrupt_prefix else bytearray(template[0])
    if randomize_payload_size:
        # Exponentially distributed size, at least 50 bytes.
        size = max(int(random.expovariate(0.5)), 50)
    else:
        size = template[1]
    payload = os.urandom(size)
    suffix = os.urandom(len(template[2])) if corrupt_suffix else bytearray(template[2])
    return b58encode_chk(prefix + payload + suffix)
def randbool(p=0.5):
    """Bernoulli draw: return True with probability *p* (default 0.5)."""
    draw = random.random()
    return draw < p
def gen_invalid_vectors():
    '''Generate invalid test vectors'''
    # start with some manual edge-cases
    # NOTE: each yield produces a 1-tuple, e.g. ("",), matching the JSON rows.
    yield "",
    yield "x",
    while True:
        # kinds of invalid vectors:
        # invalid prefix
        # invalid payload length
        # invalid (randomized) suffix (add random data)
        # corrupt checksum
        for template in templates:
            val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2))
            if random.randint(0,10)<1: # line corruption
                if randbool(): # add random character to end
                    val += random.choice(b58chars)
                else: # replace random character in the middle
                    # NOTE(review): randint is inclusive, so n == len(val) appends
                    # instead of replacing -- presumably harmless for fuzzing.
                    n = random.randint(0, len(val))
                    val = val[0:n] + random.choice(b58chars) + val[n+1:]
            # Only emit candidates that really fail validation.
            if not is_valid(val):
                yield val,
if __name__ == '__main__':
    import sys
    import json
    # Map the CLI mode name to its generator; default is 'valid'.
    iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
    try:
        # NOTE(review): an unrecognized mode raises KeyError here (only a
        # *missing* argument is caught) -- presumably intentional so typos fail
        # loudly rather than silently generating valid vectors.
        uiter = iters[sys.argv[1]]
    except IndexError:
        uiter = gen_valid_vectors
    try:
        count = int(sys.argv[2])
    except IndexError:
        # Without a count argument, emit an empty list.
        count = 0
    # Take exactly `count` vectors from the (infinite) generator.
    data = list(islice(uiter(), count))
    json.dump(data, sys.stdout, sort_keys=True, indent=4)
    sys.stdout.write('\n')
| 34.518797 | 91 | 0.614027 |
7957ef32dd29a6497a901b44a59be90800ea46cd | 820 | py | Python | ou_noise.py | TitovaEkaterina/Snake_RL | 883cd560c008f7f7351d5c5e87f7ea5732a8fbfa | [
"MIT"
] | null | null | null | ou_noise.py | TitovaEkaterina/Snake_RL | 883cd560c008f7f7351d5c5e87f7ea5732a8fbfa | [
"MIT"
] | null | null | null | ou_noise.py | TitovaEkaterina/Snake_RL | 883cd560c008f7f7351d5c5e87f7ea5732a8fbfa | [
"MIT"
] | null | null | null | import numpy as np
import numpy.random as nr
class OUNoise:
    """Ornstein-Uhlenbeck process noise generator.

    Produces temporally correlated exploration noise, commonly used with
    continuous-action RL agents (e.g. DDPG). Each call to ``noise()``
    advances the process one step:

        dx = theta * (mu - x) + sigma * N(0, 1)

    Attributes:
        action_dimension: length of the noise vector.
        mu: long-run mean the process reverts to.
        theta: mean-reversion rate.
        sigma: scale of the Gaussian perturbation.
        state: current process state (numpy array of shape
            ``(action_dimension,)``).
    """

    def __init__(self, action_dimension, mu=0, theta=0.3, sigma=0.2):
        """Initialize parameters and start the process at the mean ``mu``."""
        self.action_dimension = action_dimension
        self.mu = mu
        self.theta = theta
        self.sigma = sigma
        # reset() performs the initial state assignment; the previous version
        # redundantly assigned self.state twice.
        self.reset()

    def reset(self):
        """Reset the process state back to the mean value ``mu``."""
        self.state = np.ones(self.action_dimension) * self.mu

    def noise(self):
        """Advance the process by one step and return the new state vector."""
        x = self.state
        dx = self.theta * (self.mu - x) + self.sigma * nr.randn(len(x))
        self.state = x + dx
        return self.state
if __name__ == '__main__':
    # Demo: sample 1000 steps of 3-dimensional OU noise and plot the traces.
    ou = OUNoise(3)
    states = []
    for i in range(1000):
        states.append(ou.noise())
    import matplotlib.pyplot as plt
    plt.plot(states)
    plt.show()
| 25.625 | 71 | 0.6 |
7957efae53cc1d4551244cca2c8efd8c5f01c911 | 14,639 | py | Python | history/management/commands/triggers.py | lbrant1/django-history-triggers | d955be0ffaf28e006e0fe5f4bc078364b336ccd8 | [
"BSD-2-Clause"
] | null | null | null | history/management/commands/triggers.py | lbrant1/django-history-triggers | d955be0ffaf28e006e0fe5f4bc078364b336ccd8 | [
"BSD-2-Clause"
] | null | null | null | history/management/commands/triggers.py | lbrant1/django-history-triggers | d955be0ffaf28e006e0fe5f4bc078364b336ccd8 | [
"BSD-2-Clause"
] | null | null | null | from django.core.management.base import BaseCommand
from django.db import connections, transaction
from six import string_types
from django.utils.encoding import force_bytes
from history import conf
import hashlib
def truncate_long_name(name):
    """Shorten *name* to fit PostgreSQL's 63-character identifier limit.

    Mirrors Django's ``truncate_name`` helper: names over the limit become
    the first 57 characters plus "_" plus a 5-char md5 prefix (63 total),
    keeping truncated names distinguishable from one another.
    """
    if len(name) <= 63:
        return name
    digest = hashlib.md5(force_bytes(name)).hexdigest()[:5]
    return '%s_%s' % (name[:57], digest)
def maybe_quote(value):
    """SQL-quote the HISTORY_DEFAULT_USER value when it is a string.

    ``None`` becomes the SQL literal NULL, strings are single-quote escaped
    and wrapped, and any other value is returned unchanged.
    """
    if value is None:
        return 'NULL'
    if not isinstance(value, string_types):
        return value
    escaped = value.replace("'", "''")
    return "'%s'" % escaped
class Command (BaseCommand):
    """Management command that creates (or drops) history triggers.

    For every eligible base table it ensures a matching history table exists
    under the history schema and installs insert/update/delete triggers; with
    --drop it removes the triggers instead, and --clear also drops the schema.
    """

    def add_arguments(self, parser):
        """Register the --drop and --clear command-line flags."""
        parser.add_argument(
            '--drop',
            action='store_true',
            dest='drop',
            default=False,
            help='Drop triggers instead of creating them'
        )
        parser.add_argument(
            '--clear',
            action='store_true',
            dest='clear',
            default=False,
            help='Drop the history schema in addition to triggers'
        )
    @transaction.atomic
    def handle(self, *args, **options):
        """Create or drop triggers for every base table, in one transaction."""
        cursor = connections['default'].cursor()
        dropping = options.get('drop', False)
        if not dropping:
            # Schema must exist before history tables can be created in it.
            create_history_schema(cursor)
        table_names = get_base_tables(cursor)
        action_verb = 'Dropping' if dropping else 'Creating'
        for table_name in sorted(table_names):
            pk_name, pk_type = table_names[table_name]
            if not dropping and create_history_table(cursor, table_name, pk_name, pk_type):
                print('Created history table for %s (pk=%s)' % (table_name, pk_name))
            print('%s triggers for %s' % (action_verb, table_name))
            for trigger_type in ('insert', 'update', 'delete'):
                if dropping:
                    drop_trigger(cursor, trigger_type, table_name)
                else:
                    create_trigger(cursor, trigger_type, table_name, pk_name)
        print('%s triggers is complete. No errors were raised.' % action_verb)
        if options['clear']:
            print('Dropping schema "%s"' % conf.SCHEMA_NAME)
            cursor.execute("DROP SCHEMA IF EXISTS %s CASCADE" % conf.SCHEMA_NAME)
def schema_exists(cursor, schema_name):
    """Return True when a schema named *schema_name* exists in the database."""
    query = """
        SELECT EXISTS (SELECT 1 FROM information_schema.schemata WHERE schema_name = %s)
    """
    cursor.execute(query, (schema_name,))
    (found,) = cursor.fetchone()
    return found
def table_exists(cursor, table_name, schema_name='public'):
    """Return True when *table_name* exists in *schema_name* (default 'public')."""
    query = """
        SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = %s AND table_schema = %s)
    """
    cursor.execute(query, (table_name, schema_name))
    (found,) = cursor.fetchone()
    return found
def trigger_exists(cursor, trigger_name, schema_name='public'):
    """Return True when a trigger named *trigger_name* exists in *schema_name*."""
    query = """
        SELECT EXISTS (SELECT 1 FROM information_schema.triggers WHERE trigger_schema = %s AND trigger_name = %s)
    """
    cursor.execute(query, (schema_name, trigger_name))
    (found,) = cursor.fetchone()
    return found
def get_base_tables(cursor, schema_name='public'):
    """Map each history-eligible table in *schema_name* to (pk_name, pk_type).

    Skips tables in the configured ignore lists, tables whose name starts
    with an ignored prefix, and tables without a PRIMARY KEY. For character
    varying primary keys the max length is appended, e.g. "character
    varying(32)".
    """
    cursor.execute("""
        SELECT t.table_name, COALESCE(kcu.column_name, ''), c.data_type, c.character_maximum_length FROM information_schema.tables t
        LEFT OUTER JOIN information_schema.table_constraints tc ON
            tc.table_schema = t.table_schema AND tc.table_name = t.table_name AND tc.constraint_type = 'PRIMARY KEY'
        LEFT OUTER JOIN information_schema.key_column_usage kcu ON
            kcu.table_schema = tc.table_schema AND kcu.table_name = tc.table_name AND kcu.constraint_name = tc.constraint_name
        LEFT OUTER JOIN information_schema.columns c ON
            c.table_schema = kcu.table_schema AND c.table_name = kcu.table_name AND c.column_name = kcu.column_name
        WHERE t.table_schema = %s AND t.table_type != 'VIEW'
    """, (schema_name,))
    tables = {}
    for table, pk_name, data_type, max_len in cursor.fetchall():
        normalized = table.strip().lower()
        if normalized in conf.IGNORED_TABLES:
            continue
        if any(normalized.startswith(p) for p in conf.IGNORED_PREFIXES):
            continue
        if not pk_name.strip():
            # No PRIMARY KEY defined -- history rows could not be linked back.
            continue
        pk_type = data_type
        if max_len:
            # Append maximum length for character varying primary keys.
            pk_type += '(%s)' % (max_len)
        tables[table] = (pk_name, pk_type)
    return tables
def get_table_columns(cursor, table_name, schema_name='public'):
    """Yield (column_name, data_type) pairs for *table_name*.

    Columns with an ignored data type and columns listed in the ignore set
    (either bare or qualified as "table.column") are skipped; those are the
    columns for which no history is recorded.
    """
    params = {
        'schema': schema_name,
        'table': table_name,
    }
    cursor.execute("""
        SELECT
            column_name,
            CASE WHEN data_type = 'USER-DEFINED' THEN udt_name ELSE data_type END
        FROM information_schema.columns
        WHERE table_schema = '%(schema)s' AND table_name = '%(table)s'
        ORDER BY column_name
    """ % params)
    for column, col_type in cursor.fetchall():
        lowered = column.lower()
        skipped = (
            col_type.lower() in conf.IGNORED_TYPES or
            lowered in conf.IGNORED_COLUMNS or
            '%s.%s' % (table_name, lowered) in conf.IGNORED_COLUMNS
        )
        if not skipped:
            yield column, col_type
def create_history_schema(cursor):
    """Create the history schema owned by the configured role (idempotent)."""
    if schema_exists(cursor, conf.SCHEMA_NAME):
        # Nothing to do -- the schema is already in place.
        return
    params = {
        'name': conf.SCHEMA_NAME,
        'role': conf.DB_ROLE,
    }
    cursor.execute("""
            CREATE SCHEMA %(name)s AUTHORIZATION %(role)s;
            GRANT ALL ON SCHEMA %(name)s TO %(role)s;
            REVOKE ALL ON SCHEMA %(name)s FROM public;
        """ % params)
def create_history_table(cursor, base_table, pk_name, pk_type):
    """Create <base_table>_history under the history schema if missing.

    Returns True when the table was created, False when it already existed.
    The table layout depends on conf.USE_JSON: JSON mode stores whole-row
    snapshots, otherwise one row per changed field is recorded.
    """
    history_table = truncate_long_name(base_table + '_history')
    if table_exists(cursor, history_table, conf.SCHEMA_NAME):
        return False
    params = {
        'schema': conf.SCHEMA_NAME,
        'table': history_table,
        'role': conf.DB_ROLE,
        'timestamp_type': 'timestamp with time zone' if conf.USE_TIMEZONES else 'timestamp',
        'pk_name': pk_name,
        'pk_type': pk_type,
        'user_field': conf.USER_FIELD,
        'user_type': conf.USER_TYPE,
        'field_column': '' if conf.USE_JSON else 'field_name varchar(64) not null,',
        'value_type': 'jsonb' if conf.USE_JSON else 'text',
    }
    cursor.execute("""
            CREATE TABLE %(schema)s.%(table)s (
                %(pk_name)s %(pk_type)s not null,
                %(field_column)s
                old_value %(value_type)s,
                new_value %(value_type)s,
                date_modified %(timestamp_type)s not null,
                %(user_field)s %(user_type)s,
                transaction_type char(1) not null
            );
            ALTER TABLE %(schema)s.%(table)s OWNER TO %(role)s;
            REVOKE ALL ON TABLE %(schema)s.%(table)s FROM %(role)s;
            GRANT INSERT, SELECT ON TABLE %(schema)s.%(table)s TO %(role)s;
        """ % params)
    return True
def get_field_history_sql(trigger_type, table_name, field_name, field_type, pk_name):
    """Render the per-field history INSERT fragment for one trigger type.

    *trigger_type* must be 'insert', 'update' or 'delete'; any other value
    raises ValueError. *field_type* is accepted for signature symmetry but
    does not affect the generated SQL.
    """
    history_table_name = truncate_long_name(table_name + "_history")
    params = {
        'field': field_name,
        'history_table': '%s.%s' % (conf.SCHEMA_NAME, history_table_name),
        'pk_name': pk_name,
        'user_field': conf.USER_FIELD,
    }
    # Transaction type markers: '+' insert, '-' delete, '~' update.
    fragments = {
        'insert': """
            -- %(field)s
            INSERT INTO %(history_table)s (%(pk_name)s, field_name, old_value, new_value, date_modified, %(user_field)s, transaction_type)
                VALUES (NEW.%(pk_name)s, '%(field)s', NULL, NEW."%(field)s", _dlm, _user_id, '+');
        """,
        'delete': """
            -- %(field)s
            INSERT INTO %(history_table)s (%(pk_name)s, field_name, old_value, new_value, date_modified, %(user_field)s, transaction_type)
                VALUES (OLD.%(pk_name)s, '%(field)s', OLD."%(field)s", NULL, _dlm, _user_id, '-');
        """,
        'update': """
            -- %(field)s
            IF (OLD."%(field)s" IS DISTINCT FROM NEW."%(field)s") THEN
                INSERT INTO %(history_table)s (%(pk_name)s, field_name, old_value, new_value, date_modified, %(user_field)s, transaction_type)
                    VALUES (OLD.%(pk_name)s, '%(field)s', OLD."%(field)s", NEW."%(field)s", _dlm, _user_id, '~');
            END IF;
        """,
    }
    if trigger_type not in fragments:
        raise ValueError('Invalid trigger type: "%s"' % trigger_type)
    return fragments[trigger_type] % params
def get_json_history_sql(trigger_type, table_name, pk_name):
    """Render the whole-row (JSON) history INSERT for one trigger type.

    Used when conf.USE_JSON is enabled: the entire OLD/NEW row is stored as
    JSON instead of one history row per changed field. Raises ValueError for
    an unknown *trigger_type*.
    """
    history_table_name = truncate_long_name(table_name + "_history")
    params = {
        'history_table': '%s.%s' % (conf.SCHEMA_NAME, history_table_name),
        'pk_name': pk_name,
        'user_field': conf.USER_FIELD,
    }
    # Transaction type markers: '+' insert, '-' delete, '~' update.
    fragments = {
        'insert': """
            INSERT INTO %(history_table)s (%(pk_name)s, old_value, new_value, date_modified, %(user_field)s, transaction_type)
                VALUES (NEW.%(pk_name)s, NULL, row_to_json(NEW), _dlm, _user_id, '+');
        """,
        'delete': """
            INSERT INTO %(history_table)s (%(pk_name)s, old_value, new_value, date_modified, %(user_field)s, transaction_type)
                VALUES (OLD.%(pk_name)s, row_to_json(OLD), NULL, _dlm, _user_id, '-');
        """,
        'update': """
            INSERT INTO %(history_table)s (%(pk_name)s, old_value, new_value, date_modified, %(user_field)s, transaction_type)
                VALUES (OLD.%(pk_name)s, row_to_json(OLD), row_to_json(NEW), _dlm, _user_id, '~');
        """,
    }
    if trigger_type not in fragments:
        raise ValueError('Invalid trigger type: "%s"' % trigger_type)
    return fragments[trigger_type] % params
def create_trigger(cursor, trigger_type, table_name, pk_name, table_schema='public'):
    """
    Creates a history trigger of the specified type (update, insert, or delete) on the specified table.

    Installs two objects: a plpgsql trigger function that writes history rows
    (per-field or whole-row JSON depending on conf.USE_JSON), and the trigger
    itself that invokes it. The function reads the acting user from the
    session-scoped temp table named by conf.USER_TEMP_TABLE; when that table
    is absent it falls back to conf.DEFAULT_USER or raises, per
    conf.DEFAULT_USER_ERROR.
    """
    assert trigger_type in ('insert', 'update', 'delete')
    if not table_exists(cursor, table_name, table_schema):
        return
    # First, create the function that the trigger will call for each row.
    body_sql = []
    if conf.USE_JSON:
        body_sql.append(get_json_history_sql(trigger_type, table_name, pk_name))
    else:
        # One INSERT fragment per tracked column.
        for field_name, field_type in get_table_columns(cursor, table_name, table_schema):
            body_sql.append(get_field_history_sql(trigger_type, table_name, field_name, field_type, pk_name))
    trigger_name = "trig_%s_%s" % (table_name, trigger_type)
    fx_name = truncate_long_name(trigger_name)
    params = {
        'fx_name': fx_name,
        'body': ''.join(body_sql),
        'history_user_table': conf.USER_TEMP_TABLE,
        'history_user_field': conf.USER_FIELD,
        # DELETE triggers must return OLD; INSERT/UPDATE return NEW.
        'return': 'OLD' if trigger_type == 'delete' else 'NEW',
        'role': conf.DB_ROLE,
        'timestamp_type': 'timestamp with time zone' if conf.USE_TIMEZONES else 'timestamp',
        'user_type': conf.USER_TYPE,
        'default_user': maybe_quote(conf.DEFAULT_USER),
        'default_user_error': 'true' if conf.DEFAULT_USER_ERROR else 'false',
    }
    cursor.execute("""
        CREATE OR REPLACE FUNCTION %(fx_name)s() RETURNS trigger AS $BODY$
            DECLARE
                _dlm %(timestamp_type)s := now();
                _user_id %(user_type)s := %(default_user)s;
                _exists boolean;
            BEGIN
                EXECUTE 'select exists (select 1 from information_schema.tables where table_name = ''%(history_user_table)s'')' INTO _exists;
                IF _exists THEN
                    EXECUTE 'select %(history_user_field)s from %(history_user_table)s' INTO _user_id;
                ELSIF %(default_user_error)s THEN
                    RAISE EXCEPTION '%(history_user_table)s does not exist.';
                END IF;
                %(body)s
                RETURN %(return)s;
            END;$BODY$
        LANGUAGE 'plpgsql' VOLATILE;
        ALTER FUNCTION %(fx_name)s() OWNER TO %(role)s;
        GRANT EXECUTE ON FUNCTION %(fx_name)s() TO %(role)s;
        REVOKE ALL ON FUNCTION %(fx_name)s() FROM public;
    """ % params)
    # Now create the actual trigger.
    calling_fx_long = 'tr_%s_%s' % (table_name, trigger_type)
    calling_fx = truncate_long_name(calling_fx_long)
    params = {
        'calling_fx': calling_fx,
        # Fire AFTER for deletes so the row is still visible, BEFORE otherwise.
        'when': 'AFTER' if trigger_type == 'delete' else 'BEFORE',
        'trans_type': trigger_type.upper(),
        'table': table_name,
        'fx_name': fx_name,
    }
    cursor.execute("""
        DROP TRIGGER IF EXISTS %(calling_fx)s ON "%(table)s";
        CREATE TRIGGER %(calling_fx)s
            %(when)s %(trans_type)s ON "%(table)s"
            FOR EACH ROW EXECUTE PROCEDURE %(fx_name)s();
    """ % params)
def drop_trigger(cursor, trigger_type, table_name, table_schema='public'):
    """Drop the history trigger and its backing plpgsql function.

    Both statements use IF EXISTS, so repeated calls are safe. *table_schema*
    is accepted for symmetry with create_trigger but is not used here.
    """
    trigger = truncate_long_name('tr_%s_%s' % (table_name, trigger_type))
    cursor.execute('DROP TRIGGER IF EXISTS %(calling_fx)s ON "%(table)s";' % {
        'calling_fx': trigger,
        'table': table_name,
    })
    function = truncate_long_name('trig_%s_%s' % (table_name, trigger_type))
    cursor.execute('DROP FUNCTION IF EXISTS %(fx_name)s();' % {
        'fx_name': function,
    })
| 41.353107 | 146 | 0.614045 |
7957efbac9aebc86f25f6565c80d21f727899e79 | 43,249 | py | Python | vrotas_tsp_opensource.py | victorccaldas/vrotas_tsp_pcv | 0d68e54aa3edf9b07e553f5f76c206c2588acf1b | [
"MIT"
] | null | null | null | vrotas_tsp_opensource.py | victorccaldas/vrotas_tsp_pcv | 0d68e54aa3edf9b07e553f5f76c206c2588acf1b | [
"MIT"
] | null | null | null | vrotas_tsp_opensource.py | victorccaldas/vrotas_tsp_pcv | 0d68e54aa3edf9b07e553f5f76c206c2588acf1b | [
"MIT"
] | null | null | null | from tkinter.scrolledtext import ScrolledText
from geopy.geocoders import Nominatim
from datetime import timedelta
from datetime import datetime
from tkinter import *
import webbrowser
import googlemaps
import requests
import polyline
import folium
import psutil
import time
import sys
import os
#chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
# Path template handed to webbrowser.get() for opening the generated maps
# (Microsoft Edge on Windows; '%s' is filled with the URL).
browser_path = 'C:/Program Files (x86)/Microsoft/Edge/Application/msedge.exe %s'
# Shared OSM/Nominatim geocoding client (free, rate-limited service).
geolocator = Nominatim(user_agent="enderecos_routing")
# Google Maps API key; empty until the user supplies one through the GUI.
google_key = ''
# Default background colour for tkinter labels.
cor_dos_labels = 'white'
# Iniciando container
def iniciarDocker():
    """Ensure Docker Desktop and the local OSRM container are running.

    Loops until an HTTP request to http://localhost:5000 (the OSRM server)
    succeeds: starts the container when Docker is up, otherwise kills a
    stuck Docker Desktop process and/or launches it and retries.
    """
    print("Verificando se docker está aberto...")
    docker_parado = True
    while docker_parado:
        print('Tentando abrir o Docker..')
        # 'docker start' returns 0 when Docker is up and the container starts
        # (or Docker is closed entirely); 1 when Docker is open but the
        # container cannot be started.
        nome_processo = 'Docker Desktop.exe'
        nome_container = 'osrm_container'
        # Check whether the Docker Desktop process is running.
        if nome_processo in (p.name() for p in psutil.process_iter()):
            # If so, try to start the OSRM container.
            start_osrm = os.system('docker start '+nome_container)
            if start_osrm == 0:
                print("OSRM iniciado.")
            elif start_osrm == 1:
                # Docker looks stuck: kill its process so it can be relaunched.
                for proc in psutil.process_iter():
                    # check whether the process name matches
                    if proc.name() == nome_processo:
                        print("Deletando processo")
                        proc.kill()
        else:
            # Docker is not running: launch Docker Desktop...
            os.startfile('C:\Program Files\Docker\Docker\Docker Desktop.exe')
            print("Abrindo processo")
            # ...and keep issuing 'docker start' until it succeeds.
            start_osrm = os.system('docker start '+nome_container)
            while start_osrm:
                start_osrm = os.system('docker start '+nome_container)
                time.sleep(1)
        try:
            # Probe the OSRM HTTP endpoint; a reachable server ends the loop.
            print("Tentando enviar request...")
            requests.get('http://localhost:5000')
            docker_parado = False
            print("Request enviado com sucesso!")
        except:
            print("não conseguiu Requestar localhost:5000.")
            time.sleep(1)
def aplicarRotas():
    """GUI entry point: route the client address list with the chosen service.

    Reads the address list from the text widget, clears stale result widgets,
    then dispatches to G_routing() (Google) or OSRM_routing() (local OSRM),
    displaying distances, durations, the optimized order and map buttons.
    """
    global lista_enderecos
    global obs_google
    iniciarDocker()
    # One address per line; drop empty lines left by the text widget.
    lista_enderecos = end_clientes.get(1.0, END).split("\n")
    #lista_enderecos.remove
    while '' in lista_enderecos:
        lista_enderecos.remove('')
    # Remove leftover widgets from a previous run, if any.
    try:
        canvas.delete(vermapa1_button)
    except:
        pass
    try:
        canvas.delete(vermapa2_button)
    except:
        pass
    try:
        canvas.delete(obs_google)
    except:
        pass
    if servico_escolhido.get() == "Google":
        servicoRotas_label.config(bg='#d4d0cf', text=servico_escolhido.get())
        obs_google = canvas.create_window(460, 440, window=Label(root_ends_routing, bg='#d4d0cf',text="Obs: o Google otimiza\n no máximo 23 endereços\ne desenha no máx.\n10 caminhos", font=('helvetica', 8)))
        try:
            G_routing()
        except:
            # On any Google routing failure, clear the result labels.
            print("Deu errado")
            canvas.delete(distance_notoptimal_label)
            canvas.delete(duration_notoptimal_label)
            canvas.delete(distance_optimal_label)
            canvas.delete(duration_optimal_label)
            #erro_label.delete('1.0',END)
            #erro_label.insert(END,sys.exc_info())
            #erro_label.configure(bg='#bf8e88')
    elif servico_escolhido.get() == "OSRM":
        try: canvas.delete(obs_google)
        except: pass
        servicoRotas_label.config(bg='#d4d0cf', text=servico_escolhido.get())
        otimização = OSRM_routing('otimizar',lista_enderecos, endereco_inicio.get(), endereco_fim.get())
        # OSRM_routing returns a list when some addresses failed to geocode.
        if type(otimização) == list:
            resultado_otimizado.config(bg='#d39994') # light red
            resultado_otimizado.delete(1.0,END)
            resultado_otimizado.insert(END,'Endereços inválidos:\n\n'+'\n'.join(otimização))
            return
        duracao = otimização['duration']/60
        distancia = otimização['distance']/1000
        # Re-map the optimized visit order back to the typed addresses.
        enderecos_otimizados = [lista_enderecos[int(x)] for x in otimização['best_order']]
        não_otimização = OSRM_routing('nao_otimizar',lista_enderecos, endereco_inicio.get(), endereco_fim.get())
        duracao_ñotima = não_otimização['duration']/60
        distancia_ñotima = não_otimização['distance']/1000
        distance_optimal_label = canvas.create_window(600, 430, window=Label(root_ends_routing, bg='#d3e6d3', text=("Distância: "+str("%.1f" % distancia)+" km"), anchor="w", font=('helvetica', 10)))
        duration_optimal_label = canvas.create_window(600, 455, window=Label(root_ends_routing, bg='#d3e6d3', text=("Duração: "+str("%.1f" % duracao)+" min"), anchor="w", font=('helvetica', 10)))
        distance_notoptimal_label = canvas.create_window(90, 430, window=Label(root_ends_routing, bg='#e8d8b7', text=("Distância: "+str("%.1f" % distancia_ñotima)+" km"), anchor='w', font=('helvetica', 10)))
        duration_notoptimal_label = canvas.create_window(90, 455, window=Label(root_ends_routing, bg='#e8d8b7', text=("Duração: "+str(("%.1f" % duracao_ñotima)+" min")), anchor='w', font=('helvetica', 10)))
        # Show the optimized address order in the results text box.
        resultado_otimizado.delete(1.0,END)
        resultado_otimizado.insert(END,'\n'.join(enderecos_otimizados))
        resultado_otimizado.config(bg='white')
        # Buttons to open the before/after route maps (mapa1.html / mapa2.html).
        vermapa1_button = canvas.create_window(260, 440, window=Button(text="Ver rota antes", width=15,command=lambda:verRotaNoMapa('mapa1'), bg='#579482', fg='white', font=('helvetica', 13, 'bold')))
        vermapa2_button = canvas.create_window(770, 440, window=Button(text="Ver rota depois", width=15,command=lambda:verRotaNoMapa('mapa2'), bg='#579482', fg='white', font=('helvetica', 13, 'bold')))
def Waze_routing():
    """Compute route info between the GUI start/end fields via Waze.

    NOTE(review): ``WazeRouteCalculator`` is never imported in this file,
    so calling this function raises NameError — confirm whether the import
    was removed intentionally (the function appears to be unused/legacy).
    """
    start_address = endereco_inicio.get()
    end_address = endereco_fim.get()
    # Waze region code for South America.
    region = 'SA'
    route = WazeRouteCalculator.WazeRouteCalculator(start_address, end_address, region)
    route.calc_route_info()
def get_map_folium(melhor_rota, coords_clientes, nome_mapa):
    """Render a route as a folium map and save it to ``<nome_mapa>.html``.

    ``melhor_rota`` must carry 'route' (decoded polyline points),
    'start_point' and 'end_point'; ``coords_clientes`` is a list of
    ``"lat,lng"`` strings plotted as numbered client markers.
    """
    inicio = melhor_rota['start_point']
    fim = melhor_rota['end_point']
    # Centre the map midway between start and end.
    centro = [(inicio[0] + fim[0]) / 2, (inicio[1] + fim[1]) / 2]
    mapa = folium.Map(location=centro, zoom_start=12)
    # Draw the driving path.
    folium.PolyLine(melhor_rota['route'], weight=8, color='blue', opacity=0.5).add_to(mapa)
    # Start and end markers.
    folium.Marker(location=inicio, tooltip="Início", icon=folium.Icon(icon='play', color='green')).add_to(mapa)
    folium.Marker(location=fim, tooltip="Final", icon=folium.Icon(icon='stop', color='red')).add_to(mapa)
    # One numbered marker per client coordinate.
    for numero, coordenada in enumerate(coords_clientes, start=1):
        lat = coordenada.split(',')[0]
        lng = coordenada.split(',')[1]
        folium.Marker(location=(lat, lng), popup="Este é o "+str(numero)+"º waypoint", tooltip=numero).add_to(mapa)
    mapa.save(nome_mapa+".html")
def osrm_obter_trajeto_nao_otimo(lista_waypoints, coordenada_inicio, coordenada_final):
    """Query the local OSRM /route service for the path in the given order.

    Coordinates arrive as ``"lat,lng"`` strings; OSRM expects ``lng,lat``,
    hence the swapping below.  Also renders the result to mapa1.html.
    Returns a dict with polyline, legs, endpoints, duration (s) and
    distance (m).
    """
    # Swap start/end coordinates into OSRM's "lng,lat" order.
    coord_inicial_lng_lat = (coordenada_inicio.split(",")[1]+','+coordenada_inicio.split(",")[0]).replace(' ','')
    coord_final_lng_lat = (coordenada_final.split(",")[1]+','+coordenada_final.split(",")[0]).replace(' ','')
    wps_lng_lat = [(i.split(',')[1]+","+i.split(',')[0]).replace(" ","") for i in lista_waypoints]
    # Join all waypoints into a single ';'-separated string, start..end.
    wp_string = coord_inicial_lng_lat+';'
    for i in wps_lng_lat:
        wp_string += i+';'
    wp_string += coord_final_lng_lat
    # ROUTE results (visit order preserved as given).
    route_url = 'http://localhost:5000/route/v1/driving/{}?steps=true&annotations=true&continue_straight=false'.format(wp_string)
    print(route_url)
    route_results = requests.get(url=route_url)
    route_data = route_results.json()
    polylines_list = polyline.decode(route_data['routes'][0]['geometry'])
    # Collect the pieces callers/plotters need into one dict.
    rota = {'polyline':route_data['routes'][0]['geometry'],
            'route': polylines_list,
            'legs':route_data['routes'][0]['legs'],
            'start_point': [route_data['waypoints'][0]['location'][1], route_data['waypoints'][0]['location'][0]],
            'end_point': [route_data['waypoints'][-1]['location'][1], route_data['waypoints'][-1]['location'][0]],
            'duration':route_data['routes'][0]['duration'],
            'distance':route_data['routes'][0]['distance']}
    get_map_folium(rota, lista_waypoints, 'mapa1')
    return rota
def osrm_obter_trajeto(lista_waypoints,coordenada_inicio, coordenada_final):
    """Query the local OSRM /trip service for the optimized visit order.

    Coordinates arrive as ``"lat,lng"`` strings (OSRM wants ``lng,lat``).
    Renders the optimized route to mapa2.html and returns a dict that,
    besides geometry/duration/distance, carries 'best_order': the indices
    of ``lista_waypoints`` in optimal visiting order.
    """
    # Swap start/end coordinates into OSRM's "lng,lat" order.
    coord_inicial_lng_lat = (coordenada_inicio.split(",")[1]+','+coordenada_inicio.split(",")[0]).replace(' ','')
    coord_final_lng_lat = (coordenada_final.split(",")[1]+','+coordenada_final.split(",")[0]).replace(' ','')
    # eliminando duplicatas e invertendo ordem dos waypoints
    #waypoints_sem_duplicatadas = list(dict.fromkeys(lista_waypoints))
    #wps_lng_lat = [(i.split(',')[1]+","+i.split(',')[0]).replace(" ","") for i in waypoints_sem_duplicatadas]
    wps_lng_lat = [(i.split(',')[1]+","+i.split(',')[0]).replace(" ","") for i in lista_waypoints]
    # Join all waypoints into one ';'-separated string, start..end.
    wp_string = coord_inicial_lng_lat+';'
    for i in wps_lng_lat:
        wp_string += i+';'
    wp_string += coord_final_lng_lat
    # TRIP results (OSRM solves the TSP with fixed source/destination).
    trip_url = 'http://localhost:5000/trip/v1/driving/{}?roundtrip=false&source=first&destination=last&steps=true&annotations=true&geometries=polyline&overview=simplified'.format(wp_string)
    #print(trip_url)
    trip_results = requests.get(url=trip_url)
    trip_data = trip_results.json()
    polylines_list = polyline.decode(trip_data['trips'][0]['geometry'])
    #print(trip_data['waypoints'])
    #waypoints_optimal_order = [(i['waypoint_index'], i['location']) for i in trip_data['waypoints']]
    # OSRM's waypoint_index differs from Google's waypoint_order:
    # OSRM gives, per input waypoint, the position at which it is visited,
    # whereas we need the list of input indices in visiting order.  The
    # permutation below inverts OSRM's representation into Google's.
    # (Intermediate waypoints only; slot 0 is the start, slot -1 the end,
    # hence the [1:-1] slice and the -1 offset.)
    recommended_order = [i['waypoint_index']-1 for i in trip_data['waypoints'][1:-1]]
    lista_reordenando = [(x,recommended_order.index(x)) for x in recommended_order]
    lista_reordenando.sort()
    waypoints_optimal_order = [x[1] for x in lista_reordenando]
    # Collect the pieces callers/plotters need into one dict.
    melhor_rota = {'polyline':trip_data['trips'][0]['geometry'],
                   'route': polylines_list,
                   'legs':trip_data['trips'][0]['legs'],
                   'start_point': [trip_data['waypoints'][0]['location'][1], trip_data['waypoints'][0]['location'][0]],
                   'end_point': [trip_data['waypoints'][-1]['location'][1], trip_data['waypoints'][-1]['location'][0]],
                   'best_order': waypoints_optimal_order,
                   'duration':trip_data['trips'][0]['duration'],
                   'distance':trip_data['trips'][0]['distance']}
    coords_otimizadas = [lista_waypoints[int(x)] for x in melhor_rota['best_order']]
    get_map_folium(melhor_rota, coords_otimizadas, 'mapa2')
    return melhor_rota
def achar_coordenada(endereco):
    """Geocode *endereco* to a ``"lat,lng"`` string, Nominatim then Google.

    Returns 'Não encontrada' when neither service resolves the address or
    the result falls outside the hard-coded Brasília bounding box
    (-17 < lat < -14, -49 < lng < -46).  Each service is retried up to
    ~6 times with a 1 s pause.  Sets the module globals ``resultado`` and
    ``achar_coordenada_servico_utilizado`` as side effects.

    NOTE(review): the Google path reads the module-level ``gmaps_client``,
    which only exists after an API key was validated — confirm callers
    guarantee that.
    """
    import time
    global resultado
    global achar_coordenada_servico_utilizado
    if str(endereco) == 'nan' or str(endereco) == '':
        return 'Não encontrada'
    # 1) First try Nominatim (OSM).
    def meu_geolocator(client, endereco):
        return client.geocode(endereco)
    retrys = 0
    while True:
        try:
            resultado = meu_geolocator(geolocator,endereco)
            break
        except Exception as erro:
            print("Tentativas: "+str(retrys)+"\nErro: \n"+str(erro)+'\n')
            retrys += 1
            time.sleep(1)
            pass
        if retrys > 5:
            resultado = None
            break
    if resultado != None:
        resultado = (resultado.latitude,resultado.longitude)
        #resultado = str(geolocator.geocode(end).latitude)+','+str(geolocator.geocode(end).longitude)
        achar_coordenada_servico_utilizado = "Nominatim"
        LAT = resultado[0]
        LNG = resultado[1]
        # Accept only coordinates inside the Brasília bounding box.
        if -17 < LAT < -14 and -49 < LNG < -46:
            resultado = str(LAT)+","+str(LNG) #str(resultado).replace(')','').replace('(','')
            return resultado
        else:
            resultado = "Inválida: "+str(resultado)
    # 2) Not found or outside Brasília: fall back to Google geocoding.
    if type(resultado) == str or resultado == None:
        while True:
            try:
                resultado = meu_geolocator(gmaps_client,endereco)
                break
            except Exception as erro:
                print("Tentativas: "+str(retrys)+"\nErro: \n"+str(erro)+'\n')
                retrys += 1
                time.sleep(1)
                pass
            if retrys > 5:
                resultado = []
                break
        if resultado != []:
            resultado = (resultado[0]['geometry']['location']['lat'], resultado[0]['geometry']['location']['lng'])
            achar_coordenada_servico_utilizado = "Google"
            LAT = resultado[0]
            LNG = resultado[1]
            if -17 < LAT < -14 and -49 < LNG < -46:
                resultado = str(LAT)+","+str(LNG)
                return resultado
            else:
                resultado = "Inválida: "+str(resultado)
    # Final classification: anything still string/empty/None means failure;
    # the else branch is defensive (should be unreachable).
    if type(resultado) == str or resultado == [] or resultado == None:
        resultado = 'Não encontrada'
        achar_coordenada_servico_utilizado = 'nenhum'
    else:
        resultado = "Bugou em algum lugar..."
    return resultado
def geocode_osm(endereços):
    """Geocode a list of addresses with Nominatim (OSM).

    Returns ``(coords, invalidos)``: *coords* is a list of ``"lat,lng"``
    strings for the addresses that resolved, *invalidos* the addresses that
    could not be geocoded (any lookup failure counts as invalid, matching
    the original behaviour).
    """
    coords_intermediarias = []
    ends_invalidos = []
    for end in endereços:
        try:
            # Geocode each address once: the previous version issued two
            # identical network requests per address (one for latitude, one
            # for longitude), doubling load and risking inconsistent results.
            local = geolocator.geocode(end)
            coord_end = str(local.latitude)+','+str(local.longitude)
            coords_intermediarias.append(coord_end)
        except:
            ends_invalidos.append(end)
    return coords_intermediarias, ends_invalidos
def geocode_osm_google(endereços):
    """Geocode addresses via achar_coordenada (Nominatim with Google fallback).

    Returns ``(coords, invalidos)``: coordinates for resolved addresses and
    the list of addresses that failed or came back as 'Não encontrada'.
    """
    coordenadas_validas = []
    enderecos_invalidos = []
    for endereco in endereços:
        try:
            coordenada = achar_coordenada(endereco)
        except:
            enderecos_invalidos.append(endereco)
            continue
        if coordenada != 'Não encontrada':
            coordenadas_validas.append(coordenada)
        else:
            enderecos_invalidos.append(endereco)
    return coordenadas_validas, enderecos_invalidos
def OSRM_routing(otimizar,intermediarios, inicio, fim):
    """Geocode all addresses and route them through the local OSRM server.

    Parameters
    ----------
    otimizar : str
        'otimizar' to solve the TSP via OSRM /trip, anything else for the
        plain /route in the given order.
    intermediarios : list[str]
        Intermediate stop addresses.
    inicio, fim : str
        Start and end addresses.

    Returns the route dict from osrm_obter_trajeto(_nao_otimo), or the list
    of invalid addresses when any intermediate address fails to geocode.
    Raises AttributeError if the start/end address cannot be geocoded
    (unchanged from the original behaviour).
    """
    # Geocode the start/end addresses once each: the previous version called
    # geolocator.geocode() twice per address (latitude and longitude
    # separately), doubling the Nominatim requests.
    local_inicio = geolocator.geocode(inicio)
    coord_inicial = str(local_inicio.latitude)+','+str(local_inicio.longitude)
    local_fim = geolocator.geocode(fim)
    coord_final = str(local_fim.latitude)+','+str(local_fim.longitude)
    # Intermediate stops: use the Google-backed geocoder only when a key is set.
    if google_key == '':
        coords_intermediarias, ends_invalidos = geocode_osm(intermediarios)
    else:
        coords_intermediarias, ends_invalidos = geocode_osm_google(intermediarios)
    if len(ends_invalidos) > 0:
        print("Endereços inválidos:\n")
        for end in ends_invalidos:
            print(end)
        return ends_invalidos
    if otimizar == 'otimizar':
        return osrm_obter_trajeto(coords_intermediarias, coord_inicial, coord_final)
    return osrm_obter_trajeto_nao_otimo(coords_intermediarias, coord_inicial, coord_final)
def G_routing():
    """Build, optimize and plot a Google Maps route for the GUI address list.

    Reads the start/end addresses, departure time and client list from the
    tkinter widgets, asks the Google Directions API for both the plain and
    the waypoint-optimized route, draws them as HTML maps (mapa1.html /
    mapa2.html), splits the optimized route into sub-routes capped by
    duration (dict_rotas) and fills the result widgets.

    Returns 'stop' on invalid date input; calls sys.exit() when an address
    fails to geocode.

    NOTE(review): relies on module-level names ``gmplot`` and ``g_apikey``
    that are not defined/imported in this file — confirm their origin,
    otherwise this function raises NameError at the plotting step.
    """
    global coords_otimizado
    global nova_rota
    global distance_notoptimal_label
    global duration_notoptimal_label
    global distance_optimal_label
    global duration_optimal_label
    global enderecoslidos
    global start_address
    global vermapa1_button
    global vermapa2_button
    # Parse and validate the departure time typed by the user.
    try:
        horario_string = horario_inicio.get()
        horario_datetime = datetime.strptime(horario_string, '%d/%m/%y %H:%M')
    except:
        print("A data está no formato errado.")
        enderecoerrado_label.delete('1.0',END)
        enderecoerrado_label.insert(END,horario_string)
        enderecoerrado_label.configure(bg='#bf8e88')
        return 'stop'
    if (datetime.now() - horario_datetime).days > 0:
        print("Erro: A data está no passado.")
        enderecoerrado_label.delete('1.0',END)
        enderecoerrado_label.insert(END,horario_string)
        enderecoerrado_label.configure(bg='#bf8e88')
        return 'stop'
    #location = gmaps_client.geocode(verificacao)
    lista_coords = []
    # Reference coordinates for centring the plotted maps.
    bsb_geocode = gmaps_client.geocode("Brasilia,DF")
    bsb_coords = (bsb_geocode[0]['geometry']['location']['lat'], bsb_geocode[0]['geometry']['location']['lng'])
    start_address = endereco_inicio.get()
    start_geocode = gmaps_client.geocode(start_address)
    end_address = endereco_fim.get()
    end_geocode = gmaps_client.geocode(end_address)
    # Geocode start address; on failure show it in the error label and abort.
    try:
        start_coords = (start_geocode[0]['geometry']['location']['lat'],start_geocode[0]['geometry']['location']['lng'])
        enderecoerrado_label.delete('1.0',END)
        enderecoerrado_label.insert(END,'<Debug>')
        enderecoerrado_label.configure(bg='white')
    except:
        enderecoerrado_label.delete('1.0',END)
        enderecoerrado_label.insert(END,start_address)
        enderecoerrado_label.configure(bg='#bf8e88')
        sys.exit()
    # Same validation for the end address.
    try:
        end_coords = (end_geocode[0]['geometry']['location']['lat'],end_geocode[0]['geometry']['location']['lng'])
        enderecoerrado_label.configure(bg='white')
        enderecoerrado_label.delete('1.0',END)
        enderecoerrado_label.insert(END,'<Debug>')
    except:
        enderecoerrado_label.configure(bg='#bf8e88')
        enderecoerrado_label.delete('1.0',END)
        enderecoerrado_label.insert(END,end_address)
        sys.exit()
    # NOTE(review): redundant re-assignment — end_coords was already set above.
    end_coords = (end_geocode[0]['geometry']['location']['lat'],end_geocode[0]['geometry']['location']['lng'])
    # Geocode every client address; on the first failure, report the address
    # that was not yet read and abort.
    enderecos_lidos = []
    try:
        for endereco in lista_enderecos:
            location = gmaps_client.geocode(endereco)
            #print(location.address)
            #print((location.latitude, location.longitude))
            lista_coords.append((location[0]['geometry']['location']['lat'], location[0]['geometry']['location']['lng']))
            enderecos_lidos.append(endereco)
    except:
        for end in lista_enderecos:
            if end in enderecos_lidos:
                pass
            else:
                print(end)
                enderecoerrado_label.configure(bg='#bf8e88')
                enderecoerrado_label.delete('1.0',END)
                enderecoerrado_label.insert(END,end)
                sys.exit()
    # Non-optimized route: total time and distance in the given order.
    global directions_NaoOtimizado
    directions_NaoOtimizado = gmaps_client.directions(start_coords, end_coords, waypoints=lista_coords, departure_time=horario_datetime)
    distance_notoptimal = 0
    duration_notoptimal = 0
    legs = directions_NaoOtimizado[0]['legs']
    for leg in legs:
        distance_notoptimal = distance_notoptimal + leg['distance']['value']
        duration_notoptimal = duration_notoptimal + leg['duration']['value']
    try:
        canvas.delete(distance_notoptimal_label)
        canvas.delete(duration_notoptimal_label)
    except:
        pass
    distance_notoptimal_label = canvas.create_window(90, 430, window=Label(root_ends_routing, bg='#e8d8b7', text=("Distância: "+str("%.1f" % (distance_notoptimal/1000))+" km"), anchor='w', font=('helvetica', 10)))
    duration_notoptimal_label = canvas.create_window(90, 455, window=Label(root_ends_routing, bg='#e8d8b7', text=("Duração: "+str(("%.1f" % (duration_notoptimal/60))+" min")), anchor='w', font=('helvetica', 10)))
    directions_result = gmaps_client.directions(start_coords, end_coords, waypoints=lista_coords, optimize_waypoints = True, traffic_model = 'best_guess', departure_time=horario_datetime)
    # Sum distance and duration of every leg of the optimized route.
    # NOTE(review): the loop below sums legs[0] repeatedly instead of the
    # iterated leg — looks like a bug; confirm before relying on these totals.
    distance = 0
    duration = 0
    legs = directions_result[0]['legs']
    for leg in legs:
        #distance = distance + leg.get("distance").get("value") # ambas as formas dão no mesmo
        distance = distance + legs[0]['distance']['value']
        duration = duration + legs[0]['duration']['value']
    # Log distance, time and the optimized waypoint order.
    print(str("%.1f" % (distance/1000))+" km")
    print(str(duration/60)+" min")
    print(directions_result[0]['waypoint_order'])
    # Re-order addresses and coordinates into the optimized visiting order.
    nova_rota = []
    coords_otimizado = []
    for wp_num in directions_result[0]['waypoint_order']:
        nova_rota.append(lista_enderecos[wp_num])
        coords_otimizado.append(lista_coords[wp_num])
    # Join the optimized addresses into one string for the results box.
    enderecos_otimizados = '\n'.join(nova_rota)
    #n = 0
    #while n < len(nova_rota):
    #    enderecos_otimizados = enderecos_otimizados + nova_rota[n]+"\n"
    #    n += 1
    # Plot the non-optimized route and its markers (mapa1.html).
    plotmap = gmplot.GoogleMapPlotter(bsb_coords[0],bsb_coords[1], 13, apikey=g_apikey)
    plotmap.directions(start_coords, end_coords, waypoints=lista_coords, optimize_waypoints = True, traffic_model = 'best_guess',departure_time=horario_datetime)
    # Markers in the original (non-optimized) order.
    marker_label = 1
    plotmap.marker(start_coords[0],start_coords[1],precision=10, label=str(marker_label),title=str(marker_label))
    marker_label += 1
    for i in lista_coords:
        plotmap.marker(i[0],i[1],precision=10, label=str(marker_label),title=str(marker_label))
        marker_label += 1
    plotmap.marker(end_coords[0],end_coords[1],precision=10, label=str(marker_label),title=str(marker_label))
    plotmap.draw('mapa1.html')
    global duracao_otima
    global duracao_rota
    global dict_rotas
    # Plot the optimized route leg by leg (mapa2.html), accumulating totals
    # and splitting the trip into duration-capped sub-routes (dict_rotas).
    distancia_otima = 0
    duracao_otima = 0
    marker_label = 1
    anterior = start_coords
    enderecos_rotalidos = []
    dict_rotas = {}
    duracao_rota = 0
    duracao_total_da_rota = 0
    todosends = []
    plotmap = gmplot.GoogleMapPlotter(bsb_coords[0],bsb_coords[1], 13, apikey=g_apikey)
    plotmap.marker(start_coords[0],start_coords[1],precision=10, label=str(marker_label),title=start_address, color='indianred',draggable=True)
    marker_label += 1
    # TODO: find a way for datetime.now() to be computed correctly, since every
    # point of the route uses the departure time from the 'start time' input.
    for i in coords_otimizado:
        plotmap.directions(anterior, i, traffic_model ='best_guess', departure_time=horario_datetime)
        plotmap.marker(i[0],i[1],precision=10, label=str(marker_label),title=nova_rota[(marker_label-2)], color='indianred',draggable=True)
        caminho_1po1 = gmaps_client.directions(anterior, i, traffic_model ='best_guess', departure_time=horario_datetime)
        distancia_otima = distancia_otima + caminho_1po1[0]['legs'][0]['distance']['value']
        duracao_otima = duracao_otima + caminho_1po1[0]['legs'][0]['duration']['value']
        marker_label += 1
        anterior = i
        duracao_rota = duracao_rota + caminho_1po1[0]['legs'][0]['duration']['value']
        enderecos_rotalidos.append(i)
        # Sub-route duration = 600 s (10 min) per visited client + driving time.
        duracao_total_da_rota = (len(enderecos_rotalidos)*600) + duracao_rota # qtd of client addresses read multiplied by 600 (10 min) + leg driving time
        #print("total: "+str(duracao_total_da_rota))
        #print("otima: "+str(duracao_otima))
        #print("rota: "+str(duracao_rota))
        if duracao_total_da_rota > 6000: # 6000s = 100min (1h40); 50 min slack from the end. max = 140min
            # Close the current sub-route and start accumulating a new one.
            dict_rotas[(duracao_total_da_rota)] = enderecos_rotalidos
            #todosends = todosends + enderecos_rotalidos
            enderecos_rotalidos = []
            #enderecos_rotalidos.append(i)
            duracao_rota = 0
    #ctypes.windll.user32.MessageBoxW(0, dict_rotas, 'Rotas otimizadas:', 1)
    # Final leg from the last client back to the end address.
    plotmap.directions(anterior, end_coords, traffic_model ='best_guess', departure_time=horario_datetime)
    plotmap.marker(end_coords[0],end_coords[1],precision=10, label=str(marker_label),title=end_address, color='indianred',draggable=True)
    # The final-leg time is excluded from duracao_rota since it does not
    # affect client arrival times.
    caminho_1po1 = gmaps_client.directions(anterior, end_coords, traffic_model ='best_guess', departure_time=horario_datetime)
    ultimoend_dur = caminho_1po1[0]['legs'][0]['duration']['value']
    distancia_otima = distancia_otima + caminho_1po1[0]['legs'][0]['distance']['value']
    duracao_otima = duracao_otima + caminho_1po1[0]['legs'][0]['duration']['value']
    #duracao_rota = duracao_rota + caminho_1po1[0]['legs'][0]['duration']['value']
    #enderecos_rotalidos.append(end_coords)
    #print("ultimo: "+str(ultimoend_dur/60))
    duracao_total_da_rota = (len(enderecos_rotalidos)*600) + duracao_rota # 600 = 10 min
    if (duracao_total_da_rota) > 0:
        dict_rotas[(duracao_total_da_rota)] = enderecos_rotalidos
    #enderecos_clienteslidos = []
    #duracao_casaclientes = 0
    #duracao_otima = 0
    #print("otima: "+str(duracao_otima))
    #print("rota: "+str(duracao_rota))
    # Sanity-check print: total sub-route time vs overall optimized time.
    somadict = 0
    for i in dict_rotas:
        somadict = somadict +i
    #print("otima: "+str(duracao_otima/60))
    #print("soma dict: "+str(somadict/60))
    print("Resultado = "+str(len(coords_otimizado)*10)+" + "+str((duracao_otima/60)-(ultimoend_dur/60))+" == "+str(somadict/60))
    # (duracao_otima/60)-(ultimoend_dur/60) must equal the dict sum minus (addresses*10)
    print(dict_rotas)
    #print("Distancia do mapa2: "+str("%.1f" % (distancia_otima/1000))+" km")
    #print("Duração do mapa2: "+str(duracao_otima/60)+" min")
    try:
        canvas.delete(distance_optimal_label)
        canvas.delete(duration_optimal_label)
    except:
        pass
    distance_optimal_label = canvas.create_window(600, 430, window=Label(root_ends_routing, bg='#d3e6d3', text=("Distância: "+str("%.1f" % (distancia_otima/1000))+" km"), anchor="w", font=('helvetica', 10)))
    duration_optimal_label = canvas.create_window(600, 455, window=Label(root_ends_routing, bg='#d3e6d3', text=("Duração: "+str("%.1f" % (duracao_otima/60))+" min"), anchor="w", font=('helvetica', 10)))
    plotmap.draw('mapa2.html') # Optimized route (known to be a bit glitchy)
    # Strip gmplot's default A/B markers by patching the generated HTML.
    arquivo_mapa = open(os.path.dirname(__file__)+'/img/mapa2.html').read()
    if 'suppress' not in arquivo_mapa:
        replaced = arquivo_mapa.replace('map: map', 'map: map, suppressMarkers: true')
        writer = open(os.path.dirname(__file__)+'\img\mapa2.html','w')
        writer.write(replaced)
        writer.close()
    # Show the optimized address order in the results text box.
    resultado_otimizado.delete(1.0,END)
    resultado_otimizado.insert(END,enderecos_otimizados)
    resultado_otimizado.config(bg='white')
    # Buttons to open the before/after route maps (mapa1.html / mapa2.html).
    vermapa1_button = canvas.create_window(260, 440, window=Button(text="Ver rota antes", width=15,command=lambda:verRotaNoMapa('mapa1'), bg='#229399', fg='white', font=('helvetica', 13, 'bold')))
    vermapa2_button = canvas.create_window(770, 440, window=Button(text="Ver rota depois", width=15,command=lambda:verRotaNoMapa('mapa2'), bg='#229399', fg='white', font=('helvetica', 13, 'bold')))
def verEnderecoNoMapa(tup_lat_lng, endereço):
    """Open a single-marker folium map for one coordinate in the browser."""
    mapa = folium.Map(location=tup_lat_lng, zoom_start=12)
    rotulo = "Coordenada "+str(tup_lat_lng)+"\n\n"+str(endereço)
    folium.Marker(location=tup_lat_lng, popup=rotulo, tooltip=endereço).add_to(mapa)
    mapa.save("mapas\map_verloc_individual_folium.html")
    navegador = webbrowser.get(browser_path)
    navegador.open(os.path.dirname(__file__)+"\mapas\map_verloc_individual_folium.html")
def verRotaNoMapa(mapanum):
    """Open a previously generated route map ('mapa1'/'mapa2') in the browser."""
    caminho_html = os.path.dirname(__file__)+"\mapas\{}.html".format(mapanum)
    webbrowser.get(browser_path).open(caminho_html)
def aplicarVerificacao(tupla_solicitada):
    """Dispatch an address check to the service(s) selected by checkboxes.

    ``tupla_solicitada`` is (osm_checkbox, google_checkbox) as 0/1 ints.
    Lazily creates the result Entry widget on first use, then calls the
    matching verificarEndereco_* function.
    """
    global verif_result_label
    def criarEntryResultados():
        # Create the result Entry lazily the first time a check runs.
        global verif_result_label
        global canvas
        verif_result_label = Entry(root_ends_routing, width=25, text=verif_result_var.get(), font=('helvetica', 9))
        canvas.create_window(355, 80, window=verif_result_label)
    # If the widget does not exist yet, .get() raises and we create it.
    try: verif_result_label.get()
    except:
        criarEntryResultados()
    if tupla_solicitada.count(1) == 0:
        # Nothing selected: prompt the user.
        checkboxes_label.config(bg='#d4d0cf', relief="solid",text="Selecione um serviço")
    elif tupla_solicitada.count(1) > 1:
        # Both selected: OSM first with Google fallback.
        #checkboxes_label.config(bg='#d4d0cf', relief="solid",text="Marque apenas uma opção")
        checkboxes_label.config(bg='#d4d0cf', relief="solid",text="OSRM e Google")
        verificarEndereco_ambos()
    elif tupla_solicitada[0] == 1:
        checkboxes_label.config(bg='#d4d0cf', relief="solid",text="Open Street Map")
        verificarEndereco_openstreetmap()
    else:
        checkboxes_label.config(bg='#d4d0cf', relief="solid",text="Google Maps")
        verificarEndereco_google()
def verificarEndereco_ambos():
    """Check the address with OSM first; fall back to Google if OSM fails."""
    global verificar_coords
    global vernomapa_button
    global gmaps_client
    if google_key == "":
        # No API key configured: the Google fallback is impossible.
        verif_result_label.delete(0,END)
        verif_result_label.insert(0, 'Serviço Google indisponível. Insira uma chave de API')
        return
    verificarEndereco_openstreetmap()
    osm_falhou = verif_result_label.get() == 'Endereço não encontrado.'
    if osm_falhou:
        verificarEndereco_google()
def verificarEndereco_openstreetmap():
    """Geocode the GUI address field with Nominatim and show the result.

    On success paints the result label green, stores the coordinates in the
    global ``verificar_coords`` and adds a "view on map" button; on any
    failure paints it red with 'Endereço não encontrado.'.
    """
    global vernomapa_button
    global verificar_coords
    endereco_digitado = verificar_end.get()
    # Drop a stale "view on map" button left by a previous lookup, if any.
    try:
        canvas.delete(vernomapa_button)
    except:
        pass
    try:
        local = geolocator.geocode(endereco_digitado)
        verificar_coords = (local.latitude, local.longitude)
        verif_result_label.config(bg='#d2e8ae', width=45, relief="solid")
        verif_result_label.delete(0,END)
        verif_result_label.insert(0, str('Local encontrado: '+str(verificar_coords)+' '))
        #verif_result_var.set(str('Endereço existe! '+str(verificar_coords)+' '))
        botao_mapa = Button(text="Ver no mapa", width=15,
                            command=lambda:verEnderecoNoMapa(verificar_coords, endereco_digitado),
                            bg='#229399', fg='white', font=('helvetica', 12, 'bold'))
        vernomapa_button = canvas.create_window(545, 45, window=botao_mapa)
    except:
        verif_result_label.config(bg='#c98f8b', width=25, relief="solid")
        verif_result_label.delete(0,END)
        verif_result_label.insert(0, 'Endereço não encontrado.')
        print(sys.exc_info())
def verificarEndereco_google():
    """Geocode the GUI address field with the Google Maps API and show it.

    Requires a configured API key; otherwise asks the user to supply one.
    On success paints the result label green, stores the coordinates in the
    global ``verificar_coords`` and adds a "view on map" button; on failure
    paints it red with 'Endereço não encontrado.'.
    """
    global verificar_coords
    global vernomapa_button
    if google_key == "":
        verif_result_label.delete(0,END)
        verif_result_label.insert(0, 'Serviço Google indisponível. Insira uma chave de API')
        return
    gmaps_client = googlemaps.Client(key=google_key)
    endereco_digitado = verificar_end.get()
    # Drop a stale "view on map" button left by a previous lookup, if any.
    try:
        canvas.delete(vernomapa_button)
    except:
        pass
    try:
        resposta = gmaps_client.geocode(endereco_digitado)
        posicao = resposta[0]['geometry']['location']
        verificar_coords = (posicao['lat'], posicao['lng'])
        verif_result_label.config(bg='#d2e8ae', width=45, relief="solid")
        verif_result_label.delete(0,END)
        verif_result_label.insert(0, str('Local encontrado: '+str(verificar_coords)+' '))
        botao_mapa = Button(text="Ver no mapa", width=15,
                            command=lambda:verEnderecoNoMapa(verificar_coords, endereco_digitado),
                            bg='#229399', fg='white', font=('helvetica', 12, 'bold'))
        vernomapa_button = canvas.create_window(555, 45, window=botao_mapa)
    except:
        verif_result_label.config(bg='#c98f8b', width=25, relief="solid")
        verif_result_label.delete(0,END)
        verif_result_label.insert(0, 'Endereço não encontrado.')
        print(sys.exc_info())
def inserirApiGoogle():
    """Open a small window to set, validate and clear the Google API key.

    The 'Alterar' button validates the typed key with a test geocode before
    activating it (enabling the Google checkbox); 'Desativar' clears the
    key and disables the checkbox.  Mutates the module globals
    ``google_key``, ``gmaps_client`` and the checkbox state.
    """
    global api_label
    root_api_google = Toplevel()
    root_api_google.title('Inserir API do Google')
    canvas2 = Canvas(root_api_google, height = 150, width = 500, bg = '#bad5e8', relief = 'raised')
    canvas2.pack()
    api_atual_label = canvas2.create_window(70, 25, window=Label(root_api_google, text='API inserido:', bg = '#bad5e8', justify=LEFT, font=('helvetica', 8,'bold')))
    # Show the currently active key, or a placeholder when none is set.
    if google_key != '':
        api_label = canvas2.create_window(250, 25, window=Label(root_api_google, bg=cor_dos_labels, justify=LEFT, text=google_key, font=('helvetica', 8,'bold')))
    else:
        api_label = canvas2.create_window(250, 25, window=Label(root_api_google, bg=cor_dos_labels, justify=LEFT, text='Nenhuma chave encontrada.', font=('helvetica', 8,'bold')))
    key_input = Entry(root_api_google,width=70, bg='#f5f0f0')
    key_input.insert(END,google_key)
    canvas2.create_window(250, 60, window=key_input)
    def alterar():
        # Validate and activate the key typed in key_input.
        global link
        global api_label
        global google_key
        global servicoGoogle_ativo
        global gmaps_client
        try: canvas2.delete(api_label)
        except: pass
        try:
            # Run a test geocode with the key before assigning it to google_key.
            gmaps_client = googlemaps.Client(key=key_input.get())
            gmaps_client.geocode('Unb, Brasília, DF')
            google_key = key_input.get()
            api_label = canvas2.create_window(250, 25, window=Label(root_api_google, bg='#dcf4e4', justify=LEFT, text=google_key, font=('helvetica', 8,'bold')))
            servicoGoogle_ativo = canvas.create_window(460, 420, window=Label(root_ends_routing, bg='#dcf4e4',text='Geocode Google ativo', font=('helvetica', 9)))
            google_checkbox_var.set(1)
            google_checkbox.config(state=NORMAL)
        except googlemaps.exceptions.ApiError:
            # Key exists but is not authorized for the Geocoding API.
            api_label = canvas2.create_window(250, 25, window=Label(root_api_google, bg='#c98d79', justify=LEFT, text="Chave API não tem autorização para ser acessada", font=('helvetica', 8,'bold')))
        except:
            api_label = canvas2.create_window(250, 25, window=Label(root_api_google, bg='#c98d79', justify=LEFT, text="Chave API não encontrada", font=('helvetica', 8,'bold')))
    def desativar():
        # Clear the active key and disable the Google checkbox.
        global api_label
        global google_key
        global servicoGoogle_ativo
        google_key = ''
        try: canvas2.delete(api_label); canvas.delete(servicoGoogle_ativo)
        except: pass
        api_label = canvas2.create_window(250, 25, window=Label(root_api_google, bg='white', justify=LEFT, text='Nenhuma chave ativa.', font=('helvetica', 8,'bold')))
        google_checkbox_var.set(0)
        google_checkbox.config(state=DISABLED)
    alterar_api_button = Button(root_api_google,text="Alterar API", height=1, width=12,command=lambda:alterar(),wraplength=90, bg='#9ebaa7', font=('helvetica', 10, 'bold'))
    canvas2.create_window(250, 95, window=alterar_api_button)
    desativar_api_button = Button(root_api_google,text="Desativar API", height=1, width=12,command=lambda:desativar(),wraplength=90, bg='#9ebaa7', font=('helvetica', 10, 'bold'))
    canvas2.create_window(250, 130, window=desativar_api_button)
# Main application window: builds the vRotas TSP/PCV solver GUI.
# Widget callbacks (aplicarVerificacao, aplicarRotas, inserirApiGoogle) and
# cor_dos_labels are defined elsewhere in this file.
root_ends_routing = Tk()
root_ends_routing.title('vRotas - TSP / PCV Solver')
#root_ends_routing.iconbitmap('C:/Users/Victor/Desktop/market ocr/icone ricardinho.gif')
#Main window
canvas = Canvas(root_ends_routing, height = 500, width = 1000, bg = "white", relief = 'raised')
canvas.pack()
# vRotas logo
# NOTE(review): "\img\..." relies on "\i" and "\l" not being recognized string
# escapes (DeprecationWarning) and on Windows path separators -- consider
# os.path.join with a raw or forward-slash path.
logo_vrotas = PhotoImage(file = os.path.dirname(__file__)+"\img\logo vRotas 9.0 mini.png")
logo_v_label = Label(root_ends_routing, image=logo_vrotas, bg='white')
logo_v_label.place(x=850, y=-5)
# Window icon (same "\img" caveat as above)
root_ends_routing.iconphoto(True, PhotoImage(file=os.path.dirname(__file__)+'\img\icone vRotas 9.0 mini.png'))
# done
# Geocoding-service checkboxes (OSM enabled by default; Google requires a key)
servico_busca_selecionado = 'nenhum'
osm_checkbox_var = IntVar(value=1)
osm_checkbox = Checkbutton(root_ends_routing, text="Open Street Map", variable=osm_checkbox_var)
canvas.create_window(340, 10, window=osm_checkbox)
google_checkbox_var = IntVar(value=0)
google_checkbox = Checkbutton(root_ends_routing, text="Google", variable=google_checkbox_var, state=DISABLED)
canvas.create_window(440, 10, window=google_checkbox)
chaveapi_google = Button(root_ends_routing,text="Inserir chave API Google",width=20,command=lambda:inserirApiGoogle(), bg='#bad5e8', font=('helvetica', 9, 'bold'))
canvas.create_window(580, 10, window=chaveapi_google)
# Checkbox status label (updated elsewhere)
checkboxes_label_var = StringVar(root_ends_routing)
checkboxes_label_var.set('')
checkboxes_label = Label(root_ends_routing, text=checkboxes_label_var.get(), font=('helvetica', 9))
canvas.create_window(140, 70, window=checkboxes_label)
# Address verification (geocoding lookup) widgets
verificar_label = Label(root_ends_routing, bg=cor_dos_labels, text='Checar coordenadas de endereços:', font=('helvetica', 11, 'bold'))
canvas.create_window(140, 10, window=verificar_label)
verificar_end = Entry(root_ends_routing,width=40, bg='#e8e6e6')
verificar_end.insert(END,"Escreva o endereço aqui")
canvas.create_window(140, 45, window=verificar_end)
verificarButton = Button(root_ends_routing,text="Verificar endereço",width=15,command=lambda:aplicarVerificacao((osm_checkbox_var.get(), google_checkbox_var.get())), bg='#579482', fg='white', font=('helvetica', 13, 'bold'))
canvas.create_window(355, 45, window=verificarButton)
verif_result_var = StringVar(root_ends_routing)
verif_result_var.set('')
# Start address
end_inicial_label = Label(root_ends_routing, bg=cor_dos_labels, text='Endereço inicial:', font=('helvetica', 13,'bold'))
canvas.create_window(95, 150, window=end_inicial_label)
endereco_inicio = Entry(root_ends_routing,width=25, bg='#f5f0f0')
endereco_inicio.insert(END,"UnB, Brasília, DF")
canvas.create_window(110, 180, window=endereco_inicio)
# End address
end_final_label = Label(root_ends_routing, bg=cor_dos_labels, text='Endereço final:', font=('helvetica', 13,'bold'))
canvas.create_window(290, 150, window=end_final_label)
endereco_fim = Entry(root_ends_routing,width=25, bg='#f5f0f0')
endereco_fim.insert(END,"Aeroporto Internacional de Brasilia, DF")
canvas.create_window(310, 180, window=endereco_fim)
# Departure date/time widgets (currently disabled)
#hr_partida_label = Label(root_ends_routing, bg=cor_dos_labels, text='Data e horário de partida (00h - 23h59)', font=('helvetica', 13,'bold'))
#canvas.create_window(720, 150, window=hr_partida_label)
#horario_inicio = Entry(root_ends_routing,width=40, bg='#f5f0f0')
#horario_inicio.insert(END,str(datetime.strftime((datetime.now() + timedelta(days=1)), '%d/%m/%y %H:%M')))
#canvas.create_window(700, 180, window=horario_inicio)
# Intermediate addresses (one per line) with horizontal scrollbar
ends_rota_label = Label(root_ends_routing, bg=cor_dos_labels, text='Endereços intermediários (um por linha):', font=('helvetica', 13,'bold'))
canvas.create_window(195, 215, window=ends_rota_label)
#end_clientes = ScrolledText(root_ends_routing, xscrollcommand = scrollbar.set)
end_clientes = ScrolledText(root_ends_routing, wrap="none")
scrollbar_endclientes = Scrollbar(root_ends_routing, orient=HORIZONTAL, command=end_clientes.xview)
#end_clientes['xscrollcommand'] = scrollbar.set
end_clientes.configure(xscrollcommand=scrollbar_endclientes.set)
scrollbar_endclientes.place(x=30,y=400,width=350)
end_clientes.place(x=30, y=235, width=350, height=170)
end_clientes.insert(END,"Asa Sul, Brasília, DF\nAsa Norte, Brasilia, DF")
# "Optimize route" button
canvas.create_window(460, 350, window=Label(root_ends_routing, text='→', font=('helvetica', 54))) # arrow
otimizarButton = Button(root_ends_routing,text="Otimizar rota", height=2, width=12,command=lambda:aplicarRotas(), bg='#229399', fg='white', font=('helvetica', 13, 'bold'))
canvas.create_window(460, 305, window=otimizarButton)
# Optimized-addresses output pane (read-only result display)
canvas.create_window(670, 215, window=Label(root_ends_routing, bg=cor_dos_labels, text='Resultado / Endereços otimizados:', font=('helvetica', 13, 'bold')))
resultado_otimizado = ScrolledText(root_ends_routing, wrap="none")
scrollbar_resultado = Scrollbar(root_ends_routing, orient=HORIZONTAL, command=resultado_otimizado.xview)
resultado_otimizado.configure(xscrollcommand=scrollbar_resultado.set)
scrollbar_resultado.place(x=540,y=400,width=350)
resultado_otimizado.place(x=540, y=235, width=350, height=170)
resultado_otimizado.config(bg='#e6e6e6')
# Routing-service drop-down (only OSRM is currently offered)
#servico_dropdown = OptionMenu(master, variable, "one", "two", "three")
servico_escolhido = StringVar(root_ends_routing)
servico_escolhido.set("OSRM")
canvas.create_window(460, 255, window=OptionMenu(root_ends_routing, servico_escolhido, "OSRM")) # ,"Google"))
servicoRotas_label = Label(root_ends_routing, bg='#d4d0cf',text=servico_escolhido.get(), font=('helvetica', 9))
canvas.create_window(460, 390, window=servicoRotas_label)
root_ends_routing.mainloop()
| 46.206197 | 224 | 0.664085 |
7957f02e6de61c5c0b9a34d31923a99bf138fd55 | 2,657 | py | Python | backend/products/tests/test_models.py | waterunder/dashboard | 3790537ce29f83d631e0c14c12fff77515759e5b | [
"MIT"
] | null | null | null | backend/products/tests/test_models.py | waterunder/dashboard | 3790537ce29f83d631e0c14c12fff77515759e5b | [
"MIT"
] | 75 | 2020-08-17T15:22:42.000Z | 2021-12-09T09:11:37.000Z | backend/products/tests/test_models.py | waterunder/dashboard | 3790537ce29f83d631e0c14c12fff77515759e5b | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils import timezone
from products.factories import ProductFactory
from products.models import Product
from sellers.factories import SellerFactory, UserFactory
class ProductModelTests(TestCase):
    """Ownership, permission and ordering behaviour of the Product model."""

    def setUp(self):
        # One superuser plus two regular users, each owning one seller with
        # a single product.
        self.superuser = get_user_model().objects.create_superuser(
            username='superuser',
            email='superuser@email.com',
            password='superpass123',
        )
        self.user_1 = UserFactory()
        self.user_2 = UserFactory()
        self.seller_1 = SellerFactory(owner=self.user_1)
        self.seller_2 = SellerFactory(owner=self.user_2)
        self.product_1 = ProductFactory(seller=self.seller_1)
        self.product_2 = ProductFactory(seller=self.seller_2)

    def _ownership_triples(self):
        """(product, its owner, the other user) for both fixtures."""
        return (
            (self.product_1, self.user_1, self.user_2),
            (self.product_2, self.user_2, self.user_1),
        )

    def test_product_model_listing(self):
        self.assertEqual(Product.objects.count(), 2)
        for product, seller, owner in (
            (self.product_1, self.seller_1, self.user_1),
            (self.product_2, self.seller_2, self.user_2),
        ):
            self.assertEqual(product.seller, seller)
            self.assertEqual(product.seller.owner, owner)

    def test_only_user_who_created_the_product_can_update_it(self):
        for product, owner, stranger in self._ownership_triples():
            self.assertTrue(product.can_update(owner))
            self.assertFalse(product.can_update(stranger))

    def test_only_user_who_created_the_product_can_delete_it(self):
        for product, owner, stranger in self._ownership_triples():
            self.assertTrue(product.can_delete(owner))
            self.assertFalse(product.can_delete(stranger))

    def test_superuser_can_update_any_product(self):
        for product in (self.product_1, self.product_2):
            self.assertTrue(product.can_update(self.superuser))

    def test_superuser_can_delete_any_product(self):
        for product in (self.product_1, self.product_2):
            self.assertTrue(product.can_delete(self.superuser))

    def test_most_recent_products_are_listed_first(self):
        # Backdate product_1 further than product_2, then expect product_2
        # (the more recently updated one) to lead the default ordering.
        now = timezone.now()
        self.product_1.date_updated = now - timezone.timedelta(days=7)
        self.product_2.date_updated = now - timezone.timedelta(days=1)
        self.product_1.save()
        self.product_2.save()
        self.assertEqual(Product.objects.all()[0], self.product_2)
| 39.073529 | 67 | 0.728641 |
7957f0783e40964525b6ca1ce55159c5aed7de0a | 1,629 | py | Python | libioc/Config/Host/__init__.py | himrock922/libioc | 83111de2320c96946234eec852c00de72482ea0f | [
"BSD-2-Clause"
] | 37 | 2016-09-15T12:23:39.000Z | 2018-11-16T15:43:00.000Z | libioc/Config/Host/__init__.py | himrock922/libioc | 83111de2320c96946234eec852c00de72482ea0f | [
"BSD-2-Clause"
] | 351 | 2017-08-19T16:45:44.000Z | 2018-12-30T15:21:17.000Z | libioc/Config/Host/__init__.py | himrock922/libioc | 83111de2320c96946234eec852c00de72482ea0f | [
"BSD-2-Clause"
] | 10 | 2016-10-10T02:10:59.000Z | 2018-11-15T13:06:45.000Z | # Copyright (c) 2017-2019, Stefan Grönke
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Host Configuration."""
import typing
import libioc.Config.Jail.File.RCConf
class RCConf(libioc.Config.Jail.File.RCConf.RCConf):
    """Host RCConf file /etc/rc.conf."""
    # Read-only specialization: the jail RCConf base can persist changes,
    # but the host's /etc/rc.conf must never be rewritten by libioc.
    def save(self) -> bool:
        """Prevent saving the host /etc/rc.conf file."""
        raise NotImplementedError("Host rc.conf cannot be saved.")
# Module-level singleton giving read access to the host's rc.conf.
rc_conf = RCConf()
| 41.769231 | 73 | 0.7655 |
7957f0abbebee332784c717aaa05fb6cc799657b | 1,146 | py | Python | HttpRunnerManager/urls.py | zhouhuiyin/HttpRunnerNewManager | 5cbee907cf1bbe358e106f61b4aae0ee3f2ec04e | [
"MIT"
] | 5 | 2020-04-29T06:17:08.000Z | 2020-07-07T11:52:47.000Z | HttpRunnerManager/urls.py | zhouhuiyin/HttpRunnerNewManager | 5cbee907cf1bbe358e106f61b4aae0ee3f2ec04e | [
"MIT"
] | 3 | 2020-04-29T06:57:36.000Z | 2020-05-19T00:58:30.000Z | HttpRunnerManager/urls.py | zhouhuiyin/HttpRunnerNewManager | 5cbee907cf1bbe358e106f61b4aae0ee3f2ec04e | [
"MIT"
] | 1 | 2020-07-07T11:53:21.000Z | 2020-07-07T11:53:21.000Z | """HttpRunnerManager URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.views.generic import RedirectView
from ApiManager import views
from HttpRunnerManager.activator import process
# URL routing table. All patterns are raw strings: '\w' is not a valid
# Python string escape (DeprecationWarning today, SyntaxWarning/SyntaxError
# on newer interpreters), so the regex literals must be raw even though the
# runtime value is unchanged.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Serve the favicon via a static redirect.
    url(r'^favicon\.ico$', RedirectView.as_view(url='/static/assets/img/favicon.ico')),
    # Generic dispatch: /<app>/<function>/ and /<app>/<function>/<id>/ are
    # both routed through the activator's process() view.
    url(r'^(?P<app>(\w+))/(?P<function>(\w+))/$', process),
    url(r'^(?P<app>(\w+))/(?P<function>(\w+))/(?P<id>(\w+))/$', process),
    # Fall-through root: the landing page.
    url(r'^$', views.index),
]
| 39.517241 | 87 | 0.684991 |
7957f0d384986c7c176fde9208f13d792e96282d | 3,860 | py | Python | scripts/utils/check-kube-config.py | acmenezes/community-operators | 9e33ce298168d4f0e319b95c8c05b2c3191b1b52 | [
"Apache-2.0"
] | null | null | null | scripts/utils/check-kube-config.py | acmenezes/community-operators | 9e33ce298168d4f0e319b95c8c05b2c3191b1b52 | [
"Apache-2.0"
] | 5 | 2019-05-07T00:09:05.000Z | 2019-11-04T18:51:35.000Z | scripts/utils/check-kube-config.py | acmenezes/community-operators | 9e33ce298168d4f0e319b95c8c05b2c3191b1b52 | [
"Apache-2.0"
] | 2 | 2020-03-04T11:13:17.000Z | 2020-04-06T08:54:17.000Z | import yaml
import re
from sys import exit
from lib import pick
from os import path, environ, system, WEXITSTATUS
from pathlib import Path
class bcolors:
    # ANSI escape sequences used to colour the status output in a terminal.
    OK = "\033[0;32m"    # green: check passed
    WARN = "\033[0;33m"  # yellow: non-fatal problem
    ERR = "\033[0;31m"   # red: fatal problem
    NC = "\033[0m"       # reset / no colour
class messages:
    # %-format templates for the status lines.  The placeholders are
    # (colour-code, value, colour-reset); the embedded tab is expanded with
    # .expandtabs(49) by callers so the status column lines up.
    CONFIG = 'Find kube config \t [%s %s %s]'
    CLUSTER = 'Find kube cluster \t [%s %s %s]'
    CONTEXT = 'Find kube context \t [%s %s %s]'
    MASTER = 'Try kube master \t [%s %s %s]'
def get_kube_config(config_path):
    """Load and parse the kubeconfig YAML file at ``config_path``.

    Prints a coloured status line either way.  Raises ``Exception('Not
    found')`` when the file is missing or cannot be parsed, matching the
    other checks in this script so that ``main`` falls back to minikube.
    """
    if path.isfile(config_path):
        print((messages.CONFIG % (bcolors.OK, config_path, bcolors.NC)).expandtabs(49))
        with open(config_path, 'r') as stream:
            try:
                return yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                # Report the parse error, then fall through to the raise.
                print(exc)
        raise Exception('Not found')
    print((messages.CONFIG % (bcolors.WARN, 'Not found', bcolors.NC)).expandtabs(49))
    # Previously this path implicitly returned None, which made callers
    # crash with AttributeError instead of the intended exception flow.
    raise Exception('Not found')
def parse_current_context(kube_config):
    """Resolve which kube context (and its cluster) to use.

    With a single context the kubeconfig's ``current-context`` is used;
    with several, the user picks one interactively.  Returns a
    ``(context_name, cluster_name)`` tuple or raises ``Exception('Not
    found')`` after printing a red status line.
    """
    current_context = kube_config.get('current-context')
    contexts = kube_config.get('contexts')
    options = []
    selected = 0
    cluster = ''
    # Build the menu options and locate the currently selected context.
    for i in range(len(contexts)):
        context = contexts[i]
        options.append(context.get('name'))
        if context.get('name') == current_context:
            selected = i
            cluster = context.get('context').get('cluster')
    if len(contexts) > 1:
        # Interactive selection (arrow-key menu); defaults to the current
        # context found above.
        title = 'Please choose your context for testing: '
        option, index = pick(options, title, indicator='=>', default_index=selected)
        current_context = option
        # Re-resolve the cluster for the (possibly different) chosen context.
        for i in range(len(contexts)):
            context = contexts[i]
            if context.get('name') == current_context:
                cluster = context.get('context').get('cluster')
    if cluster == '':
        print((messages.CLUSTER % (bcolors.ERR, 'Not found', bcolors.NC)).expandtabs(49))
        raise Exception('Not found')
    if current_context:
        print((messages.CONTEXT % (bcolors.OK, current_context, bcolors.NC)).expandtabs(49))
        return current_context, cluster
    print((messages.CONTEXT % (bcolors.WARN, 'Not found', bcolors.NC)).expandtabs(49))
    raise Exception('Not found')
def write_context_to_config_file(config_path, kube_context, kube_config):
    """Persist ``kube_context`` as ``current-context`` in the kubeconfig.

    No-op when the context is already current.  Uses a context manager so
    the file handle is closed even if serialization or the write fails
    (the original manual open/close leaked the handle on error, and the
    "w+" read-write mode was unnecessary for a pure overwrite).
    """
    if kube_context != kube_config.get('current-context'):
        kube_config['current-context'] = kube_context
        with open(config_path, "w") as f:
            f.write(yaml.safe_dump(kube_config))
def check_availability_of_cluster(cluster_name, config):
    """Probe the API server of ``cluster_name`` with netcat.

    Looks up the cluster's server URL in the kubeconfig dict, then runs a
    TCP connect test.  Raises ``Exception('Not found')`` when the cluster
    is missing from the config or its master does not respond.
    """
    clusters = config.get('clusters')
    server = ''
    for cluster in clusters:
        if cluster.get('name') == cluster_name:
            server = cluster.get('cluster').get('server')
    if server == '':
        # Previously an unknown cluster fell through and crashed on the
        # re.split()[1] below with an IndexError.
        raise Exception('Not found')
    # Strip the scheme and split "host:port".
    server = re.split('^.+://', server)[1]
    server = server.split(':')
    print((messages.MASTER % (bcolors.WARN, '%s:%s' % (server[0], server[1]), bcolors.NC)).expandtabs(49))
    # NOTE(review): "-G 3" is the macOS/BSD netcat connect-timeout flag --
    # confirm target platforms before relying on it.
    command = 'nc -zvw3 -G 3 %s %s 2> /dev/null' % (server[0], server[1])
    exit_code = system(command)
    # os.system returns a raw wait status on POSIX; decode it with
    # WEXITSTATUS (imported at the top of this file, previously unused).
    if WEXITSTATUS(exit_code) > 0:
        print((messages.MASTER % (bcolors.ERR, 'Not responding on %s:%s' % (server[0], server[1]), bcolors.NC)).expandtabs(49))
        raise Exception('Not found')
def main():
    """Run all kubeconfig checks; fall back to minikube on any failure.

    Resolves the config path from $KUBECONFIG (or ~/.kube/config), loads
    it, lets the user pick a context, probes the cluster's master, and
    persists the chosen context.  Any exception along the way triggers
    ``make minikube.start`` instead.
    """
    try:
        env_kube_config = environ.get('KUBECONFIG', '')
        config_path = env_kube_config if env_kube_config != '' else path.join(Path.home(), '.kube/config')
        kube_config = get_kube_config(config_path)
        kube_context, cluster_name = parse_current_context(kube_config)
        check_availability_of_cluster(cluster_name, kube_config)
        write_context_to_config_file(config_path, kube_context, kube_config)
    except Exception:
        # Deliberately broad: any missing config/context/cluster means we
        # spin up a local minikube instead.  (The previous ``as e`` binding
        # was never used.)
        system('make minikube.start')
        exit(0)
if __name__ == "__main__":
    main()
7957f16952fec8e0a077def861ec87ef3e6fbbac | 18,827 | py | Python | venv/Lib/site-packages/sklearn/linear_model/_passive_aggressive.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/sklearn/linear_model/_passive_aggressive.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/sklearn/linear_model/_passive_aggressive.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from ._stochastic_gradient import BaseSGDClassifier
from ._stochastic_gradient import BaseSGDRegressor
from ._stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
    """Passive Aggressive Classifier.
    Read more in the :ref:`User Guide <passive_aggressive>`.
    Parameters
    ----------
    C : float, default=1.0
        Maximum step size (regularization). Defaults to 1.0.
    fit_intercept : bool, default=True
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered.
    max_iter : int, default=1000
        The maximum number of passes over the training data (aka epochs).
        It only impacts the behavior in the ``fit`` method, and not the
        :meth:`partial_fit` method.
        .. versionadded:: 0.19
    tol : float or None, default=1e-3
        The stopping criterion. If it is not None, the iterations will stop
        when (loss > previous_loss - tol).
        .. versionadded:: 0.19
    early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation.
        score is not improving. If set to True, it will automatically set aside
        a stratified fraction of training data as validation and terminate
        training when validation score is not improving by at least tol for
        n_iter_no_change consecutive epochs.
        .. versionadded:: 0.20
    validation_fraction : float, default=0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True.
        .. versionadded:: 0.20
    n_iter_no_change : int, default=5
        Number of iterations with no improvement to wait before early stopping.
        .. versionadded:: 0.20
    shuffle : bool, default=True
        Whether or not the training data should be shuffled after each epoch.
    verbose : int, default=0
        The verbosity level.
    loss : str, default="hinge"
        The loss function to be used:
        hinge: equivalent to PA-I in the reference paper.
        squared_hinge: equivalent to PA-II in the reference paper.
    n_jobs : int or None, default=None
        The number of CPUs to use to do the OVA (One Versus All, for
        multi-class problems) computation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    random_state : int, RandomState instance, default=None
        Used to shuffle the training data, when ``shuffle`` is set to
        ``True``. Pass an int for reproducible output across multiple
        function calls.
        See :term:`Glossary <random_state>`.
    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.
        Repeatedly calling fit or partial_fit when warm_start is True can
        result in a different solution than when calling fit a single time
        because of the way the data is shuffled.
    class_weight : dict, {class_label: weight} or "balanced" or None, \
            default=None
        Preset for the class_weight fit parameter.
        Weights associated with classes. If not given, all classes
        are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.
        .. versionadded:: 0.17
           parameter *class_weight* to automatically weight samples.
    average : bool or int, default=False
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So average=10 will begin averaging after seeing 10 samples.
        .. versionadded:: 0.19
           parameter *average* to use weights averaging in SGD.
    Attributes
    ----------
    coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
            (n_classes, n_features)
        Weights assigned to the features.
    intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
        Constants in decision function.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.
        For multiclass fits, it is the maximum over every binary fit.
    classes_ : ndarray of shape (n_classes,)
        The unique classes labels.
    t_ : int
        Number of weight updates performed during training.
        Same as ``(n_iter_ * n_samples)``.
    loss_function_ : callable
        Loss function used by the algorithm.
    See Also
    --------
    SGDClassifier : Incrementally trained logistic regression.
    Perceptron : Linear perceptron classifier.
    References
    ----------
    Online Passive-Aggressive Algorithms
    <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
    Examples
    --------
    >>> from sklearn.linear_model import PassiveAggressiveClassifier
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_features=4, random_state=0)
    >>> clf = PassiveAggressiveClassifier(max_iter=1000, random_state=0,
    ... tol=1e-3)
    >>> clf.fit(X, y)
    PassiveAggressiveClassifier(random_state=0)
    >>> print(clf.coef_)
    [[0.26642044 0.45070924 0.67251877 0.64185414]]
    >>> print(clf.intercept_)
    [1.84127814]
    >>> print(clf.predict([[0, 0, 0, 0]]))
    [1]
    """
    def __init__(
        self,
        *,
        C=1.0,
        fit_intercept=True,
        max_iter=1000,
        tol=1e-3,
        early_stopping=False,
        validation_fraction=0.1,
        n_iter_no_change=5,
        shuffle=True,
        verbose=0,
        loss="hinge",
        n_jobs=None,
        random_state=None,
        warm_start=False,
        class_weight=None,
        average=False,
    ):
        # PA has no explicit penalty term (penalty=None): regularization
        # comes solely from the aggressiveness parameter C.  eta0 is fixed
        # at 1.0 because the PA update computes its own step size.
        super().__init__(
            penalty=None,
            fit_intercept=fit_intercept,
            max_iter=max_iter,
            tol=tol,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change,
            shuffle=shuffle,
            verbose=verbose,
            random_state=random_state,
            eta0=1.0,
            warm_start=warm_start,
            class_weight=class_weight,
            average=average,
            n_jobs=n_jobs,
        )
        self.C = C
        self.loss = loss
    def partial_fit(self, X, y, classes=None):
        """Fit linear model with Passive Aggressive algorithm.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Subset of the training data.
        y : array-like of shape (n_samples,)
            Subset of the target values.
        classes : ndarray of shape (n_classes,)
            Classes across all calls to partial_fit.
            Can be obtained by via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.
        Returns
        -------
        self : object
            Fitted estimator.
        """
        self._validate_params(for_partial_fit=True)
        # "balanced" needs the full target distribution, which is unknown in
        # the online (partial_fit) setting, hence it is rejected here.
        if self.class_weight == "balanced":
            raise ValueError(
                "class_weight 'balanced' is not supported for "
                "partial_fit. For 'balanced' weights, use "
                "`sklearn.utils.compute_class_weight` with "
                "`class_weight='balanced'`. In place of y you "
                "can use a large enough subset of the full "
                "training set target to properly estimate the "
                "class frequency distributions. Pass the "
                "resulting weights as the class_weight "
                "parameter."
            )
        # PA-I ("hinge") maps to the "pa1" learning-rate schedule, PA-II
        # ("squared_hinge") to "pa2"; the SGD core always sees a hinge loss.
        lr = "pa1" if self.loss == "hinge" else "pa2"
        return self._partial_fit(
            X,
            y,
            alpha=1.0,
            C=self.C,
            loss="hinge",
            learning_rate=lr,
            max_iter=1,
            classes=classes,
            sample_weight=None,
            coef_init=None,
            intercept_init=None,
        )
    def fit(self, X, y, coef_init=None, intercept_init=None):
        """Fit linear model with Passive Aggressive algorithm.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.
        y : array-like of shape (n_samples,)
            Target values.
        coef_init : ndarray of shape (n_classes, n_features)
            The initial coefficients to warm-start the optimization.
        intercept_init : ndarray of shape (n_classes,)
            The initial intercept to warm-start the optimization.
        Returns
        -------
        self : object
            Fitted estimator.
        """
        self._validate_params()
        # Same PA-I/PA-II mapping as in partial_fit.
        lr = "pa1" if self.loss == "hinge" else "pa2"
        return self._fit(
            X,
            y,
            alpha=1.0,
            C=self.C,
            loss="hinge",
            learning_rate=lr,
            coef_init=coef_init,
            intercept_init=intercept_init,
        )
class PassiveAggressiveRegressor(BaseSGDRegressor):
    """Passive Aggressive Regressor.
    Read more in the :ref:`User Guide <passive_aggressive>`.
    Parameters
    ----------
    C : float, default=1.0
        Maximum step size (regularization). Defaults to 1.0.
    fit_intercept : bool, default=True
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.
    max_iter : int, default=1000
        The maximum number of passes over the training data (aka epochs).
        It only impacts the behavior in the ``fit`` method, and not the
        :meth:`partial_fit` method.
        .. versionadded:: 0.19
    tol : float or None, default=1e-3
        The stopping criterion. If it is not None, the iterations will stop
        when (loss > previous_loss - tol).
        .. versionadded:: 0.19
    early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation.
        score is not improving. If set to True, it will automatically set aside
        a fraction of training data as validation and terminate
        training when validation score is not improving by at least tol for
        n_iter_no_change consecutive epochs.
        .. versionadded:: 0.20
    validation_fraction : float, default=0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True.
        .. versionadded:: 0.20
    n_iter_no_change : int, default=5
        Number of iterations with no improvement to wait before early stopping.
        .. versionadded:: 0.20
    shuffle : bool, default=True
        Whether or not the training data should be shuffled after each epoch.
    verbose : int, default=0
        The verbosity level.
    loss : str, default="epsilon_insensitive"
        The loss function to be used:
        epsilon_insensitive: equivalent to PA-I in the reference paper.
        squared_epsilon_insensitive: equivalent to PA-II in the reference
        paper.
    epsilon : float, default=0.1
        If the difference between the current prediction and the correct label
        is below this threshold, the model is not updated.
    random_state : int, RandomState instance, default=None
        Used to shuffle the training data, when ``shuffle`` is set to
        ``True``. Pass an int for reproducible output across multiple
        function calls.
        See :term:`Glossary <random_state>`.
    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.
        Repeatedly calling fit or partial_fit when warm_start is True can
        result in a different solution than when calling fit a single time
        because of the way the data is shuffled.
    average : bool or int, default=False
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So average=10 will begin averaging after seeing 10 samples.
        .. versionadded:: 0.19
           parameter *average* to use weights averaging in SGD.
    Attributes
    ----------
    coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
            n_features]
        Weights assigned to the features.
    intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
        Constants in decision function.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.
    t_ : int
        Number of weight updates performed during training.
        Same as ``(n_iter_ * n_samples)``.
    See Also
    --------
    SGDRegressor : Linear model fitted by minimizing a regularized
        empirical loss with SGD.
    References
    ----------
    Online Passive-Aggressive Algorithms
    <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006).
    Examples
    --------
    >>> from sklearn.linear_model import PassiveAggressiveRegressor
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=4, random_state=0)
    >>> regr = PassiveAggressiveRegressor(max_iter=100, random_state=0,
    ... tol=1e-3)
    >>> regr.fit(X, y)
    PassiveAggressiveRegressor(max_iter=100, random_state=0)
    >>> print(regr.coef_)
    [20.48736655 34.18818427 67.59122734 87.94731329]
    >>> print(regr.intercept_)
    [-0.02306214]
    >>> print(regr.predict([[0, 0, 0, 0]]))
    [-0.02306214]
    """
    def __init__(
        self,
        *,
        C=1.0,
        fit_intercept=True,
        max_iter=1000,
        tol=1e-3,
        early_stopping=False,
        validation_fraction=0.1,
        n_iter_no_change=5,
        shuffle=True,
        verbose=0,
        loss="epsilon_insensitive",
        epsilon=DEFAULT_EPSILON,
        random_state=None,
        warm_start=False,
        average=False,
    ):
        # As in the classifier: no penalty term (regularization is C only)
        # and eta0=1.0 because the PA update computes its own step size.
        # l1_ratio=0 is irrelevant with penalty=None but required by the
        # base-class signature.
        super().__init__(
            penalty=None,
            l1_ratio=0,
            epsilon=epsilon,
            eta0=1.0,
            fit_intercept=fit_intercept,
            max_iter=max_iter,
            tol=tol,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change,
            shuffle=shuffle,
            verbose=verbose,
            random_state=random_state,
            warm_start=warm_start,
            average=average,
        )
        self.C = C
        self.loss = loss
    def partial_fit(self, X, y):
        """Fit linear model with Passive Aggressive algorithm.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Subset of training data.
        y : numpy array of shape [n_samples]
            Subset of target values.
        Returns
        -------
        self : object
            Fitted estimator.
        """
        self._validate_params(for_partial_fit=True)
        # PA-I ("epsilon_insensitive") -> "pa1" schedule, PA-II
        # ("squared_epsilon_insensitive") -> "pa2"; the SGD core always
        # sees the epsilon-insensitive loss.
        lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
        return self._partial_fit(
            X,
            y,
            alpha=1.0,
            C=self.C,
            loss="epsilon_insensitive",
            learning_rate=lr,
            max_iter=1,
            sample_weight=None,
            coef_init=None,
            intercept_init=None,
        )
    def fit(self, X, y, coef_init=None, intercept_init=None):
        """Fit linear model with Passive Aggressive algorithm.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.
        y : numpy array of shape [n_samples]
            Target values.
        coef_init : array, shape = [n_features]
            The initial coefficients to warm-start the optimization.
        intercept_init : array, shape = [1]
            The initial intercept to warm-start the optimization.
        Returns
        -------
        self : object
            Fitted estimator.
        """
        self._validate_params()
        # Same PA-I/PA-II mapping as in partial_fit.
        lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
        return self._fit(
            X,
            y,
            alpha=1.0,
            C=self.C,
            loss="epsilon_insensitive",
            learning_rate=lr,
            coef_init=coef_init,
            intercept_init=intercept_init,
        )
| 34.168784 | 81 | 0.597759 |
7957f256cb6812f2483d1008e33e8ffdb1c20da5 | 6,153 | py | Python | dragonfly/exd/unittest_domains.py | anonymous-submission000/mobo | 090f774d742c7155c5e5ba01c10e7db7b93b6a0a | [
"MIT"
] | 1 | 2022-02-17T08:50:47.000Z | 2022-02-17T08:50:47.000Z | dragonfly/exd/unittest_domains.py | anonymous-submission000/mobo | 090f774d742c7155c5e5ba01c10e7db7b93b6a0a | [
"MIT"
] | null | null | null | dragonfly/exd/unittest_domains.py | anonymous-submission000/mobo | 090f774d742c7155c5e5ba01c10e7db7b93b6a0a | [
"MIT"
] | null | null | null | """
Unit tests for domains.py
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
import numpy as np
# Local imports
from . import domains
from ..utils.base_test_class import BaseTestClass, execute_tests
from ..utils.general_utils import map_to_bounds
class DomainBaseTestCase(object):
  """ Shared unit tests inherited by every concrete Domain test class. """
  # pylint: disable=no-member

  def setUp(self):
    """ Initialises the common attributes, then defers to the child class. """
    self.domain_obj = None
    self.points = None
    self.non_points = None
    self._child_set_up()

  def _child_set_up(self):
    """ Child set up. """
    raise NotImplementedError('Implement in a child class.')

  def test_instantiation(self):
    """ Tests instantiation of an object. """
    self.report('Testing instantiation of %s class.' % (type(self.domain_obj)))
    self.report('Created object: %s' % (self.domain_obj), 'test_result')

  def test_membership(self):
    """ Checks that every sample point is a member and every counter-example
        is not. """
    self.report('Testing membership of %s class.' % (type(self.domain_obj)))
    for pt in self.points:
      assert self.domain_obj.is_a_member(pt)
    for pt in self.non_points:
      assert not self.domain_obj.is_a_member(pt)
class EuclideanDomainTestCase(DomainBaseTestCase, BaseTestClass):
  """ Test class for Euclidean Objects. """

  def _child_set_up(self):
    """ Creates a 3-D Euclidean domain plus random points inside and outside
        its bounds. """
    self.domain_obj = domains.EuclideanDomain([[0, 2.3], [3.4, 8.9], [0.12, 1.0]])
    dim = self.domain_obj.dim
    # Members: random points mapped into the domain's own bounds.
    self.points = []
    for _ in range(5):
      self.points.append(
          map_to_bounds(np.random.random((dim,)), self.domain_obj.bounds))
    # Non-members: random points mapped into disjoint/partially-outside bounds.
    outside_bounds = np.array([[3.5, 9.8], [-1.0, 1.1], [2.3, 4.5]])
    self.non_points = []
    for _ in range(5):
      self.non_points.append(
          map_to_bounds(np.random.random((dim,)), outside_bounds))
class IntegralDomainTestCase(DomainBaseTestCase, BaseTestClass):
  """ Test class for IntegralDomain Objects. """

  def _child_set_up(self):
    """ Creates an integral domain plus member and non-member points. """
    domain_bounds = [[0, 10], [-10, 100], [45, 78.4]]
    self.domain_obj = domains.IntegralDomain(domain_bounds)
    # Integer-valued points within all three bounds.
    self.points = [[9, 56, 78], [5, 0, 68], [0, -1, 70]]
    # Out-of-bounds, non-integral, or wrong-dimension points.
    self.non_points = [[11, 0, 67], [5.6, 11, 58], [4, 3.0, 70], [9, 56, 67, 9]]
class DiscreteDomainTestCase(DomainBaseTestCase, BaseTestClass):
  """ Discrete Domain. """

  def _child_set_up(self):
    """ Creates a discrete domain of mixed-type elements plus member and
        non-member values. """
    elements = ['abc', 5, 6.5, int, 'k']
    self.domain_obj = domains.DiscreteDomain(elements)
    # type(4) is int, which is one of the listed elements.
    self.points = ['k', type(4), 6.5]
    self.non_points = ['ab', 75.8, 'qwerty', None]
class DiscreteNumericDomainTestCase(DomainBaseTestCase, BaseTestClass):
  """ Discrete Numeric Domain. """

  def _child_set_up(self):
    """ Creates a numeric discrete domain plus member and non-member values. """
    elements = [4, 5, 207.2, -2.3]
    self.domain_obj = domains.DiscreteNumericDomain(elements)
    self.points = [207.2, 5, -2.3]
    self.non_points = [5.4, -1.1, 'kky', None]

  def test_non_numeric_discrete_domain(self):
    """ The constructor must reject non-numeric elements with a ValueError. """
    self.report('Testing if exception is raised for non numeric elements in a ' +
                'discrete domain.')
    try:
      domains.DiscreteNumericDomain(['abc', 5, 6.5, int, 'k'])
    except ValueError:
      return
    # Reaching this point means no ValueError was raised -- fail the test.
    assert False
class ProdDiscreteDomainTestCase(DomainBaseTestCase, BaseTestClass):
  """ ProdDiscreteDomain Domain. """

  def _child_set_up(self):
    """ Creates a product of two discrete sets plus member and non-member
        tuples. """
    per_dim_elements = [['abc', 5, 6.5], [None, float]]
    self.domain_obj = domains.ProdDiscreteDomain(per_dim_elements)
    # Both lists and tuples should be accepted as members.
    self.points = [('abc', float), [6.5, None], [5, None]]
    # Wrong length, wrong elements, or wrong container entirely.
    self.non_points = [['abc', float, float], [5, 7], 'qwerty', [99], 6]
class ProdDiscreteNumericDomain(DomainBaseTestCase, BaseTestClass):
  """ Unit tests for ProdDiscreteNumericDomain (product of numeric sets). """

  def _child_set_up(self):
    """ Creates a product of three numeric sets plus member and non-member
        points. """
    self.domain_obj = domains.ProdDiscreteNumericDomain(
        [[4.3, 2.1, 9.8, 10],
         [11.2, -23.1, 19.8],
         [1123, 213, 1980, 1.1]])
    self.points = [[2.1, -23.1, 1980], (10, 11.2, 1.1), [9.8, 19.8, 1123]]
    # Bug fix: the first non-point was written as [2.1 - 13.1, 1980] -- a
    # missing comma turned two coordinates into the subtraction -11.0,
    # yielding a 2-element point. The intended 3-element point is restored;
    # it is still a non-member because -13.1 is not in the second set.
    self.non_points = [[2.1, -13.1, 1980], ('kky', 11.2, 1.1),
                       [9.8, 19.8, 1123, 21]]

  def test_non_numeric_prod_discrete_domain(self):
    """ The constructor must reject non-numeric elements with a ValueError. """
    self.report('Testing if exception is raised non numeric product discrete domain.')
    exception_raised = False
    try:
      domains.ProdDiscreteNumericDomain(
          [[2.1, 9.8, 10],
           [11.2, -23.1, 19.8],
           [1123, 213, 'squirrel', 1.1]])
    except ValueError:
      exception_raised = True
    assert exception_raised
class CartesianProductDomainTestCase(DomainBaseTestCase, BaseTestClass):
  """ Unit tests for CartesianProductDomain. """

  def _child_set_up(self):
    """ Builds a Cartesian product of four heterogeneous domains plus member
        and non-member points. """
    euclidean = domains.EuclideanDomain([[0, 2.3], [3.4, 8.9], [0.12, 1.0]])
    integral = domains.IntegralDomain([[0, 10], [-10, 100], [45, 78.4]])
    prod_discrete = domains.ProdDiscreteDomain([['abc', 5, 6.5], [None, float]])
    discrete = domains.DiscreteDomain(['abc', 5, 6.5, int, 'k'])
    self.domain_obj = domains.CartesianProductDomain(
        [euclidean, integral, prod_discrete, discrete])
    # Each point is a 4-tuple of one member per component domain.
    self.points = [
      [[2.2, 3.6, 0.99], [5, 0, 68], ('abc', float), int],
      [[1.2, 6.4, 0.2], [0, -1, 70], [6.5, None], 'k'],
      [[0.2, 7.9, 0.5], [0, -1, 69], ['abc', None], 5],
    ]
    # Each non-point violates at least one component domain (or has the
    # wrong number of components).
    self.non_points = [
      [[2.2, 3.6, 0.99], [5, 0, 68], ('abc', float), float],
      [[1.2, 6.4, 2.2], [0, -1, 70], [6.5, None], 'k'],
      [[0.2, 7.9, 0.5], [0, -1, 69], ['abc', None, float], 5],
      [[3.2, 7.9, 0.5], [-20, -1, 69], ['abc', None, float], 5],
      [[2.2, 3.6, 0.99], [5, 0, 68], ('abc', float), int, (3.2, 8.5)],
    ]
# Run all the test classes above when this module is executed directly.
if __name__ == '__main__':
  execute_tests()
| 36.625 | 90 | 0.561515 |
7957f28c7459bb90c718ea2ebc120bf9c36783cb | 2,165 | py | Python | hazelcast/protocol/codec/custom/sql_error_codec.py | tonytheonlypony/hazelcast-python-client | 3aafeaf2ebc05aee4f2386c62c079db496a7c81f | [
"Apache-2.0"
] | null | null | null | hazelcast/protocol/codec/custom/sql_error_codec.py | tonytheonlypony/hazelcast-python-client | 3aafeaf2ebc05aee4f2386c62c079db496a7c81f | [
"Apache-2.0"
] | null | null | null | hazelcast/protocol/codec/custom/sql_error_codec.py | tonytheonlypony/hazelcast-python-client | 3aafeaf2ebc05aee4f2386c62c079db496a7c81f | [
"Apache-2.0"
] | null | null | null | from hazelcast.protocol.builtin import FixSizedTypesCodec, CodecUtil
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import END_FRAME_BUF, END_FINAL_FRAME_BUF, SIZE_OF_FRAME_LENGTH_AND_FLAGS, create_initial_buffer_custom
from hazelcast.sql import _SqlError
from hazelcast.protocol.builtin import StringCodec
# Byte offsets of the fixed-size fields within the initial frame. On encode,
# the buffer includes the frame length + flags header, so fields start after
# it; on decode, the header has already been consumed, so the code sits at 0.
_CODE_ENCODE_OFFSET = 2 * SIZE_OF_FRAME_LENGTH_AND_FLAGS
_CODE_DECODE_OFFSET = 0
# The originating member UUID immediately follows the int error code.
_ORIGINATING_MEMBER_ID_ENCODE_OFFSET = _CODE_ENCODE_OFFSET + INT_SIZE_IN_BYTES
_ORIGINATING_MEMBER_ID_DECODE_OFFSET = _CODE_DECODE_OFFSET + INT_SIZE_IN_BYTES
# Payload size of the initial frame (code + UUID), excluding the frame header.
_INITIAL_FRAME_SIZE = _ORIGINATING_MEMBER_ID_ENCODE_OFFSET + UUID_SIZE_IN_BYTES - SIZE_OF_FRAME_LENGTH_AND_FLAGS
class SqlErrorCodec(object):
    """Encodes and decodes ``_SqlError`` values for the client protocol."""

    @staticmethod
    def encode(buf, sql_error, is_final=False):
        """Appends the wire representation of ``sql_error`` to ``buf``."""
        header = create_initial_buffer_custom(_INITIAL_FRAME_SIZE)
        FixSizedTypesCodec.encode_int(header, _CODE_ENCODE_OFFSET, sql_error.code)
        FixSizedTypesCodec.encode_uuid(
            header, _ORIGINATING_MEMBER_ID_ENCODE_OFFSET,
            sql_error.originating_member_id)
        buf.extend(header)
        # Variable-size, nullable fields follow the fixed-size initial frame.
        CodecUtil.encode_nullable(buf, sql_error.message, StringCodec.encode)
        CodecUtil.encode_nullable(buf, sql_error.suggestion, StringCodec.encode)
        buf.extend(END_FINAL_FRAME_BUF if is_final else END_FRAME_BUF)

    @staticmethod
    def decode(msg):
        """Reads one ``_SqlError`` from ``msg`` and returns it."""
        msg.next_frame()  # Skip the begin frame of the custom type.
        header = msg.next_frame()
        error_code = FixSizedTypesCodec.decode_int(header.buf, _CODE_DECODE_OFFSET)
        member_id = FixSizedTypesCodec.decode_uuid(
            header.buf, _ORIGINATING_MEMBER_ID_DECODE_OFFSET)
        message = CodecUtil.decode_nullable(msg, StringCodec.decode)
        # A suggestion field may be absent; only read it when the next frame
        # is not already the end frame.
        suggestion = None
        has_suggestion = not msg.peek_next_frame().is_end_frame()
        if has_suggestion:
            suggestion = CodecUtil.decode_nullable(msg, StringCodec.decode)
        CodecUtil.fast_forward_to_end_frame(msg)
        return _SqlError(error_code, message, member_id, has_suggestion, suggestion)
| 51.547619 | 142 | 0.787991 |
7957f441c17b9fd1e9decb076fdf49b6a82b349e | 1,426 | py | Python | truffe2/truffe/management/commands/export_users.py | JonathanCollaud/truffe2 | 5cbb055ac1acf7e7dc697340618fcb56c67fbd91 | [
"BSD-2-Clause"
] | 9 | 2016-09-14T02:19:19.000Z | 2020-10-18T14:52:14.000Z | truffe2/truffe/management/commands/export_users.py | JonathanCollaud/truffe2 | 5cbb055ac1acf7e7dc697340618fcb56c67fbd91 | [
"BSD-2-Clause"
] | 19 | 2016-11-09T21:28:51.000Z | 2021-02-10T22:37:31.000Z | truffe2/truffe/management/commands/export_users.py | JonathanCollaud/truffe2 | 5cbb055ac1acf7e7dc697340618fcb56c67fbd91 | [
"BSD-2-Clause"
] | 13 | 2016-12-31T14:22:09.000Z | 2020-12-27T19:43:19.000Z | from django.core.management.base import BaseCommand, CommandError
from users.models import TruffeUser, UserPrivacy
import json
class Command(BaseCommand):
    # Django management command that dumps every TruffeUser as a single JSON
    # document on stdout. NOTE: this file uses Python 2 syntax (the `print`
    # statement at the bottom), so it only runs under Python 2 / legacy Django.
    help = 'Dump users'
    def handle(self, *args, **options):
        # Maps Truffe-internal privacy level names to the labels expected by
        # the consumer of this export; unmapped levels become None via .get().
        LEVEL_MAPPING = {
            'prive': 'private',
            'groupe': 'com',
            'member': 'agep',
            'public': 'all'
        }
        retour = []
        for user in TruffeUser.objects.all():
            data = {
                'username': user.username,
                'emailEpfl': user.email,
                'prenom': user.first_name,
                'nom': user.last_name,
                'mobile': user.mobile,
                'adresse': user.adresse,
                'banque': user.nom_banque,
                'ibanOrCcp': user.iban_ou_ccp,
                'emailPerso': user.email_perso,
                # NOTE(review): exports user.password verbatim -- presumably
                # the Django password hash, but confirm this export is not
                # exposing sensitive data before use.
                'password': user.password,
                # Per-field visibility level, translated via LEVEL_MAPPING.
                'mobileVisibility': LEVEL_MAPPING.get(UserPrivacy.get_privacy_for_field(user, 'mobile')),
                'adresseVisibility': LEVEL_MAPPING.get(UserPrivacy.get_privacy_for_field(user, 'adresse')),
                'banqueVisibility': LEVEL_MAPPING.get(UserPrivacy.get_privacy_for_field(user, 'nom_banque')),
                'ibanOrCcpVisibility': LEVEL_MAPPING.get(UserPrivacy.get_privacy_for_field(user, 'iban_ou_ccp')),
            }
            retour.append(data)
        # Python 2 print statement: emits the whole export as one JSON object.
        print json.dumps({'data': retour})
| 32.409091 | 113 | 0.56662 |
7957f46d33d06e1035bb35e0662a475e8b8dddeb | 6,275 | py | Python | marshmallow_oneofschema/one_of_schema.py | sjhewitt/marshmallow-oneofschema | 40af0fa74bbf41a6a1fe0a288f5062b527f1acfd | [
"MIT"
] | null | null | null | marshmallow_oneofschema/one_of_schema.py | sjhewitt/marshmallow-oneofschema | 40af0fa74bbf41a6a1fe0a288f5062b527f1acfd | [
"MIT"
] | null | null | null | marshmallow_oneofschema/one_of_schema.py | sjhewitt/marshmallow-oneofschema | 40af0fa74bbf41a6a1fe0a288f5062b527f1acfd | [
"MIT"
] | null | null | null | from marshmallow import Schema, ValidationError
class OneOfSchema(Schema):
    """
    This is a special kind of schema that actually multiplexes other schemas
    based on object type. When serializing values, it uses get_obj_type() method
    to get object type name. Then it uses `type_schemas` name-to-Schema mapping
    to get schema for that particular object type, serializes object using that
    schema and adds an extra "type" field with name of object type.
    Deserialization is reverse.

    Example:

        class Foo(object):
            def __init__(self, foo):
                self.foo = foo

        class Bar(object):
            def __init__(self, bar):
                self.bar = bar

        class FooSchema(marshmallow.Schema):
            foo = marshmallow.fields.String(required=True)

            @marshmallow.post_load
            def make_foo(self, data, **kwargs):
                return Foo(**data)

        class BarSchema(marshmallow.Schema):
            bar = marshmallow.fields.Integer(required=True)

            @marshmallow.post_load
            def make_bar(self, data, **kwargs):
                return Bar(**data)

        class MyUberSchema(marshmallow.OneOfSchema):
            type_schemas = {
                'foo': FooSchema,
                'bar': BarSchema,
            }

            def get_obj_type(self, obj):
                if isinstance(obj, Foo):
                    return 'foo'
                elif isinstance(obj, Bar):
                    return 'bar'
                else:
                    raise Exception('Unknown object type: %s' % repr(obj))

        MyUberSchema().dump([Foo(foo='hello'), Bar(bar=123)], many=True)
        # => [{'type': 'foo', 'foo': 'hello'}, {'type': 'bar', 'bar': 123}]

    You can control type field name added to serialized object representation by
    setting `type_field` class property.
    """

    # Name of the discriminator field added on dump / consumed on load.
    type_field = "type"
    # When True, the discriminator is stripped before delegating to the
    # concrete schema's load().
    type_field_remove = True
    # Mapping of type name -> Schema subclass (or Schema instance).
    type_schemas = {}

    def get_obj_type(self, obj):
        """Returns name of object schema"""
        return obj.__class__.__name__

    def dump(self, obj, *, many=None, **kwargs):
        """Serialize ``obj`` (or each item of it when ``many``) through the
        schema registered for its type.

        Raises ValidationError aggregating per-index errors when ``many``;
        for a single object, errors from ``_dump`` propagate directly.
        """
        errors = {}
        result_data = []
        result_errors = {}
        many = self.many if many is None else bool(many)
        if not many:
            result = result_data = self._dump(obj, **kwargs)
        else:
            for idx, o in enumerate(obj):
                try:
                    result = self._dump(o, **kwargs)
                    result_data.append(result)
                except ValidationError as error:
                    result_errors[idx] = error.normalized_messages()
                    result_data.append(error.valid_data)
            result = result_data
            errors = result_errors
        if not errors:
            return result
        else:
            exc = ValidationError(errors, data=obj, valid_data=result)
            raise exc

    def _dump(self, obj, *, update_fields=True, **kwargs):
        """Serialize a single object and tag the result with its type name.

        Raises ValidationError when the object's type cannot be determined
        or no schema is registered for it.
        """
        obj_type = self.get_obj_type(obj)
        if not obj_type:
            # Bug fix: previously returned a (None, errors) tuple -- a
            # leftover of the marshmallow 2 API. Callers treat the result as
            # a dict (and index it below), so raise instead.
            raise ValidationError(
                {"_schema": "Unknown object class: %s" % obj.__class__.__name__}
            )
        type_schema = self.type_schemas.get(obj_type)
        if not type_schema:
            # Bug fix: same marshmallow-2-style tuple return replaced by an
            # exception, consistent with the load() error handling.
            raise ValidationError(
                {"_schema": "Unsupported object type: %s" % obj_type}
            )
        schema = type_schema if isinstance(type_schema, Schema) else type_schema()
        schema.context.update(getattr(self, "context", {}))
        result = schema.dump(obj, many=False, **kwargs)
        if result is not None:
            result[self.type_field] = obj_type
        return result

    def load(self, data, *, many=None, partial=None, unknown=None, **kwargs):
        """Deserialize ``data`` by dispatching on its type field.

        Raises ValidationError with normalized messages on failure; for
        ``many`` input, messages are keyed by item index.
        """
        errors = {}
        result_data = []
        result_errors = {}
        many = self.many if many is None else bool(many)
        if partial is None:
            partial = self.partial
        if not many:
            try:
                result = self._load(data, partial=partial, unknown=unknown, **kwargs)
            except ValidationError as error:
                # Bug fix: previously `result` was left unbound and `errors`
                # stayed empty here, so a failed single-object load crashed
                # with UnboundLocalError instead of raising ValidationError.
                errors = error.normalized_messages()
                result = error.valid_data
        else:
            for idx, item in enumerate(data):
                try:
                    # Bug fix: `unknown` was silently dropped on the many
                    # path; forward it like the single-object path does.
                    result = self._load(
                        item, partial=partial, unknown=unknown, **kwargs
                    )
                    result_data.append(result)
                except ValidationError as error:
                    result_errors[idx] = error.normalized_messages()
                    result_data.append(error.valid_data)
            result = result_data
            errors = result_errors
        if not errors:
            return result
        else:
            exc = ValidationError(errors, data=data, valid_data=result)
            raise exc

    def _load(self, data, *, partial=None, unknown=None, **kwargs):
        """Deserialize a single mapping through the schema named by its
        type field.

        Raises ValidationError when the type field is missing, unhashable,
        or not registered in ``type_schemas``.
        """
        if not isinstance(data, dict):
            raise ValidationError({"_schema": "Invalid data type: %s" % data})
        data = dict(data)
        unknown = unknown or self.unknown
        data_type = data.get(self.type_field)
        if self.type_field in data and self.type_field_remove:
            data.pop(self.type_field)
        if not data_type:
            raise ValidationError(
                {self.type_field: ["Missing data for required field."]}
            )
        try:
            type_schema = self.type_schemas.get(data_type)
        except TypeError:
            # data_type could be unhashable
            raise ValidationError({self.type_field: ["Invalid value: %s" % data_type]})
        if not type_schema:
            raise ValidationError(
                {self.type_field: ["Unsupported value: %s" % data_type]}
            )
        schema = type_schema if isinstance(type_schema, Schema) else type_schema()
        schema.context.update(getattr(self, "context", {}))
        return schema.load(data, many=False, partial=partial, unknown=unknown, **kwargs)

    def validate(self, data, *, many=None, partial=None):
        """Returns the validation error messages for ``data`` (empty dict
        when the data is valid) without raising."""
        try:
            self.load(data, many=many, partial=partial)
        except ValidationError as ve:
            return ve.messages
        return {}
7957f5b70c449781911e8a7cbd5e8c9bd1c80ec7 | 12,012 | py | Python | tests/custom_object_test.py | mbmccoy/jax | 74346f464bc8369d81964305fcf05f95f43fb2d3 | [
"Apache-2.0"
] | 2 | 2021-09-27T06:33:20.000Z | 2022-01-08T10:03:33.000Z | tests/custom_object_test.py | mbmccoy/jax | 74346f464bc8369d81964305fcf05f95f43fb2d3 | [
"Apache-2.0"
] | null | null | null | tests/custom_object_test.py | mbmccoy/jax | 74346f464bc8369d81964305fcf05f95f43fb2d3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest, parameterized
import numpy as np
from jax._src import test_util as jtu
import jax.numpy as jnp
from jax import core, jit, lax, make_jaxpr
from jax._src import device_array
from jax._src import dispatch
from jax._src import dtypes
from jax.interpreters import mlir
from jax.interpreters import xla
from jax._src.lib.mlir import ir
from jax._src.lib import xla_bridge, xla_client
xc = xla_client
xb = xla_bridge
from jax.config import config
config.parse_flags_with_absl()
# TODO(jakevdp): use a setup/teardown method to populate and unpopulate all the
# dictionaries associated with the following objects.
# Define a sparse array data structure. The important feature here is that
# it is a jaxpr object that is backed by two device buffers.
class SparseArray:
"""Simple sparse COO array data structure."""
def __init__(self, aval, data, indices):
self.aval = aval
self.shape = aval.shape
self.data = data
self.indices = indices
@property
def index_dtype(self):
return self.indices.dtype
@property
def dtype(self):
return self.data.dtype
@property
def nnz(self):
return self.data.shape[0]
def __repr__(self):
return repr(list((tuple(ind), d) for ind, d in zip(self.indices, self.data)))
class AbstractSparseArray(core.ShapedArray):
__slots__ = ['index_dtype', 'nnz', 'data_aval', 'indices_aval']
def __init__(self, shape, dtype, index_dtype, nnz, weak_type=False,
named_shape=None):
super().__init__(shape, dtypes.canonicalize_dtype(dtype))
named_shape = {} if named_shape is None else named_shape
self.index_dtype = index_dtype
self.nnz = nnz
self.data_aval = core.ShapedArray((nnz,), dtypes.canonicalize_dtype(dtype),
weak_type, named_shape)
self.indices_aval = core.ShapedArray(
(nnz, len(shape)), dtypes.canonicalize_dtype(index_dtype),
named_shape=named_shape)
def update(self, shape=None, dtype=None, index_dtype=None, nnz=None,
weak_type=None, named_shape=None):
if shape is None:
shape = self.shape
if dtype is None:
dtype = self.dtype
if index_dtype is None:
index_dtype = self.dtype
if nnz is None:
nnz = self.nnz
if weak_type is None:
weak_type = self.weak_type
if named_shape is None:
named_shape = self.named_shape
return AbstractSparseArray(
shape, dtype, index_dtype, nnz, weak_type, named_shape)
def strip_weak_type(self):
return self
@core.aval_property
def data(self):
return sp_data_p.bind(self)
@core.aval_property
def indices(self):
return sp_indices_p.bind(self)
class ConcreteSparseArray(AbstractSparseArray):
pass
def sparse_array_result_handler(device, aval):
def build_sparse_array(data_buf, indices_buf):
data = device_array.make_device_array(aval.data_aval, device, data_buf)
indices = device_array.make_device_array(aval.indices_aval, device, indices_buf)
return SparseArray(aval, data, indices)
return build_sparse_array
def sparse_array_shape_handler(a):
return (
xc.Shape.array_shape(a.data_aval.dtype, a.data_aval.shape),
xc.Shape.array_shape(a.indices_aval.dtype, a.indices_aval.shape),
)
def sparse_array_device_put_handler(a, device):
return (
xb.get_device_backend(device).buffer_from_pyval(a.data, device),
xb.get_device_backend(device).buffer_from_pyval(a.indices, device)
)
core.pytype_aval_mappings[SparseArray] = lambda x: x.aval
core.raise_to_shaped_mappings[AbstractSparseArray] = lambda aval, _: aval
xla.pytype_aval_mappings[SparseArray] = lambda x: x.aval
xla.canonicalize_dtype_handlers[SparseArray] = lambda x: x
dispatch.device_put_handlers[SparseArray] = sparse_array_device_put_handler
dispatch.result_handlers[AbstractSparseArray] = sparse_array_result_handler
dispatch.num_buffers_handlers[AbstractSparseArray] = lambda _: 2
xla.xla_shape_handlers[AbstractSparseArray] = sparse_array_shape_handler
def sparse_array_mlir_type_handler(a):
return (
ir.RankedTensorType.get(
a.data_aval.shape, mlir.dtype_to_ir_type(a.data_aval.dtype)),
ir.RankedTensorType.get(
a.indices_aval.shape, mlir.dtype_to_ir_type(a.indices_aval.dtype)),
)
mlir.ir_type_handlers[AbstractSparseArray] = sparse_array_mlir_type_handler
sp_indices_p = core.Primitive('sp_indices')
@sp_indices_p.def_impl
def _sp_indices_impl(mat):
return mat.indices
@sp_indices_p.def_abstract_eval
def _sp_indices_abstract_eval(mat):
return mat.indices_aval
# Note: cannot use lower_fun to define attribute access primitives
# because it leads to infinite recursion.
def _sp_indices_mhlo_lowering(ctx, data_and_indices):
return [data_and_indices[1]]
mlir.register_lowering(sp_indices_p, _sp_indices_mhlo_lowering)
sp_data_p = core.Primitive('sp_data')
@sp_data_p.def_impl
def _sp_data_impl(mat):
return mat.data
@sp_data_p.def_abstract_eval
def _sp_data_abstract_eval(mat):
return mat.data_aval
# Note: cannot use lower_fun to define attribute access primitives
# because it leads to infinite recursion.
def _sp_data_mhlo_lowering(ctx, data_and_indices):
return [data_and_indices[0]]
mlir.register_lowering(sp_data_p, _sp_data_mhlo_lowering)
def identity(x):
return identity_p.bind(x)
identity_p = core.Primitive('identity')
@identity_p.def_impl
def _identity_impl(mat):
return mat
@identity_p.def_abstract_eval
def _identity_abstract_eval(mat):
return AbstractSparseArray(mat.shape, mat.dtype, mat.index_dtype, mat.nnz)
mlir.register_lowering(
identity_p, mlir.lower_fun(_identity_impl, multiple_results=False))
def split(x):
return split_p.bind(x)
split_p = core.Primitive('split')
split_p.multiple_results = True
@split_p.def_impl
def _split_impl(mat):
return mat, mat
@split_p.def_abstract_eval
def _split_abstract_eval(mat):
m = AbstractSparseArray(mat.shape, mat.dtype, mat.index_dtype, mat.nnz)
return m, m
mlir.register_lowering(
split_p, mlir.lower_fun(_split_impl, multiple_results=True))
def make_sparse_array(rng, shape, dtype, nnz=0.2):
mat = rng(shape, dtype)
size = int(np.prod(shape))
if 0 < nnz < 1:
nnz = nnz * size
nnz = int(nnz)
if nnz == 0:
mat = np.zeros_like(mat)
elif nnz < size:
# TODO(jakevdp): do we care about duplicates?
cutoff = np.sort(mat.ravel())[nnz]
mat[mat >= cutoff] = 0
nz = (mat != 0)
data = jnp.array(mat[nz])
indices = jnp.array(np.where(nz)).T
aval = AbstractSparseArray(shape, data.dtype, indices.dtype, len(indices))
return SparseArray(aval, data, indices)
def matvec(mat, v):
v = jnp.asarray(v)
assert v.ndim == 1
assert len(mat.shape) == 2
assert v.shape[0] == mat.shape[1]
rows = mat.indices[:, 0]
cols = mat.indices[:, 1]
dv = mat.data * v[cols]
return jnp.zeros(mat.shape[0], dtype=dv.dtype).at[rows].add(dv)
class Empty:
def __init__(self, aval):
self.aval = aval
class AbstractEmpty(core.AbstractValue):
def join(self, other):
assert isinstance(other, self.__class__), other
return self
def __hash__(self):
return hash(())
def __eq__(self, other):
return isinstance(other, AbstractEmpty)
class ConcreteEmpty(AbstractEmpty):
pass
core.pytype_aval_mappings[Empty] = lambda x: ConcreteEmpty()
core.raise_to_shaped_mappings[AbstractEmpty] = lambda aval, _: aval
xla.pytype_aval_mappings[Empty] = lambda x: AbstractEmpty()
xla.canonicalize_dtype_handlers[Empty] = lambda x: x
dispatch.device_put_handlers[Empty] = lambda _, __: ()
dispatch.result_handlers[AbstractEmpty] = lambda _, __: lambda: Empty(AbstractEmpty())
dispatch.num_buffers_handlers[AbstractEmpty] = lambda _: 0
xla.xla_shape_handlers[AbstractEmpty] = lambda _: ()
class CustomObjectTest(jtu.JaxTestCase):
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_compile={}_primitive={}".format(compile, primitive),
"compile": compile, "primitive": primitive}
for primitive in [True, False]
for compile in [True, False]))
def testSparseIdentity(self, compile, primitive):
f = identity if primitive else (lambda x: x)
f = jit(f) if compile else f
rng = jtu.rand_default(self.rng())
M = make_sparse_array(rng, (10,), jnp.float32)
M2 = f(M)
jaxpr = make_jaxpr(f)(M).jaxpr
core.check_jaxpr(jaxpr)
self.assertEqual(M.dtype, M2.dtype)
self.assertEqual(M.index_dtype, M2.index_dtype)
self.assertAllClose(M.data, M2.data)
self.assertAllClose(M.indices, M2.indices)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_compile={}".format(compile),
"compile": compile}
for compile in [True, False]))
def testSparseSplit(self, compile):
f = jit(split) if compile else split
rng = jtu.rand_default(self.rng())
M = make_sparse_array(rng, (10,), jnp.float32)
M2, M3 = f(M)
jaxpr = make_jaxpr(f)(M).jaxpr
core.check_jaxpr(jaxpr)
for MM in M2, M3:
self.assertEqual(M.dtype, MM.dtype)
self.assertEqual(M.index_dtype, MM.index_dtype)
self.assertArraysEqual(M.data, MM.data)
self.assertArraysEqual(M.indices, MM.indices)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_compile={}_primitive={}".format(compile, primitive),
"compile": compile, "primitive": primitive}
for primitive in [True, False]
for compile in [True, False]))
def testSparseLaxLoop(self, compile, primitive):
rng = jtu.rand_default(self.rng())
f = identity if primitive else (lambda x: x)
f = jit(f) if compile else f
body_fun = lambda _, A: f(A)
M = make_sparse_array(rng, (10,), jnp.float32)
lax.fori_loop(0, 10, body_fun, M)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_attr={}".format(attr), "attr": attr}
for attr in ["data", "indices"]))
def testSparseAttrAccess(self, attr):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [make_sparse_array(rng, (10,), jnp.float32)]
f = lambda x: getattr(x, attr)
self._CompileAndCheck(f, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(3, 3), (2, 6), (6, 2)]
for dtype in jtu.dtypes.floating))
def testSparseMatvec(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [make_sparse_array(rng, shape, dtype), rng(shape[-1:], dtype)]
self._CompileAndCheck(matvec, args_maker)
def testLowerToNothing(self):
empty = Empty(AbstractEmpty())
jaxpr = make_jaxpr(jit(lambda e: e))(empty).jaxpr
core.check_jaxpr(jaxpr)
# cannot return a unit, because CompileAndCheck assumes array output.
testfunc = lambda e: None
args_maker = lambda: [empty]
self._CompileAndCheck(testfunc, args_maker)
def testConstantHandler(self):
def make_const_array():
data = np.arange(3.0)
indices = np.arange(3)[:, None]
shape = (5,)
aval = AbstractSparseArray(shape, data.dtype, indices.dtype, len(indices))
return SparseArray(aval, data, indices)
out1 = make_const_array()
out2 = jit(make_const_array)()
self.assertArraysEqual(out1.data, out2.data)
self.assertArraysEqual(out1.indices, out2.indices)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| 31.946809 | 87 | 0.724609 |
7957f5e03f21eb1e5224521fe823be8e36583956 | 842 | py | Python | sa/migrations/0171_lowercase_stencil_filenames.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | sa/migrations/0171_lowercase_stencil_filenames.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | sa/migrations/0171_lowercase_stencil_filenames.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# lowercase stencil filenames
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
def migrate(self):
self.db.execute(
"""update sa_managedobjectprofile
set shape=upper(substring(shape from 1 for 1))||lower(substring(shape from 2 for length(shape)))"""
)
self.db.execute(
"""update sa_managedobject
set shape=upper(substring(shape from 1 for 1))||lower(substring(shape from 2 for length(shape)))"""
)
| 36.608696 | 111 | 0.480998 |
7957f5f52a93c5c82fdd6cf48296325945a16c5c | 821 | py | Python | app/model/spec.py | melphi/kafkaform | 391c4c70b6aff9fd5ad703dcf4b6e863e92d53b6 | [
"MIT"
] | null | null | null | app/model/spec.py | melphi/kafkaform | 391c4c70b6aff9fd5ad703dcf4b6e863e92d53b6 | [
"MIT"
] | null | null | null | app/model/spec.py | melphi/kafkaform | 391c4c70b6aff9fd5ad703dcf4b6e863e92d53b6 | [
"MIT"
] | null | null | null | import enum
from dataclasses import dataclass
from typing import List, Optional
class FieldType(enum.Enum):
"""Supported field types"""
BIGINT = "BIGINT"
BOOLEAN = "BOOLEAN"
DECIMAL = "DECIMAL"
DOUBLE = "DOUBLE"
DYNAMIC = "DYNAMIC" # Dynamic type leads to different outcomes depending on the input.
INT = "INT"
STRING = "STRING"
TIMESTAMP = "TIMESTAMP"
VARCHAR = "VARCHAR"
@dataclass
class SpecItem:
"""Specification item"""
name: str
resource_type: str
params: dict
schema_name: Optional[str]
def full_name(self):
"""Returns the fully qualified name of the resource"""
return f"{self.resource_type}:{self.name}"
@dataclass
class Spec:
"""Complete specification"""
specs: List[SpecItem]
EMPTY_SPEC = Spec(specs=list())
| 19.093023 | 91 | 0.658952 |
7957f7452cb30a9f0b4e1171ceeb81f34e22e768 | 2,514 | py | Python | AI 이노베이션 스퀘어 언어지능 과정/assignments/daumnewstesting.py | donddog/AI_Innovation_Square_Codes | a04d50db011d25e00d8486146c24124c50242aa7 | [
"MIT"
] | 1 | 2021-02-11T16:45:21.000Z | 2021-02-11T16:45:21.000Z | AI 이노베이션 스퀘어 언어지능 과정/assignments/daumnewstesting.py | donddog/AI_Innovation_Square_Codes | a04d50db011d25e00d8486146c24124c50242aa7 | [
"MIT"
] | null | null | null | AI 이노베이션 스퀘어 언어지능 과정/assignments/daumnewstesting.py | donddog/AI_Innovation_Square_Codes | a04d50db011d25e00d8486146c24124c50242aa7 | [
"MIT"
] | null | null | null | <<<<<<< HEAD
from newspaper import Article
from bs4 import BeautifulSoup
import requests
def download(url, params={}, retries=3):
resp = None
header = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36"}
try:
resp = requests.get(url, params=params, headers=header)
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
if 500 <= e.response.status_code < 600 and retries > 0:
print(retries)
resp = download(url, params, retries - 1)
else:
print(e.response.status_code)
print(e.response.reason)
print(e.request.headers)
return resp
html = download("https://media.daum.net/breakingnews/digital")
daumnews = BeautifulSoup(html.text, "lxml")
daumnewstitellists = daumnews.select("div > strong > a")
k = []
t = 17
for links in daumnewstitellists:
l = links.get('href')
k.append(l)
for i in range(0,20):
url = k[i]
a = Article(url, language='ko')
a.download()
a.parse()
with open("%d.txt" % i, "w", encoding="utf-8") as f:
f.write(a.title)
f.write(a.text)
f.close()
=======
from newspaper import Article
from bs4 import BeautifulSoup
import requests
def download(url, params={}, retries=3):
resp = None
header = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36"}
try:
resp = requests.get(url, params=params, headers=header)
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
if 500 <= e.response.status_code < 600 and retries > 0:
print(retries)
resp = download(url, params, retries - 1)
else:
print(e.response.status_code)
print(e.response.reason)
print(e.request.headers)
return resp
html = download("https://media.daum.net/breakingnews/digital")
daumnews = BeautifulSoup(html.text, "lxml")
daumnewstitellists = daumnews.select("div > strong > a")
k = []
t = 17
for links in daumnewstitellists:
l = links.get('href')
k.append(l)
for i in range(0,20):
url = k[i]
a = Article(url, language='ko')
a.download()
a.parse()
with open("%d.txt" % i, "w", encoding="utf-8") as f:
f.write(a.title)
f.write(a.text)
f.close()
>>>>>>> 125e15a4c5fcf711dd279c9b18e149867466699e
| 26.744681 | 140 | 0.616547 |
7957f8b48da54a2de7c3540987a5e8e3a4570504 | 978 | py | Python | c2_1_data/s4_barriers.py | zhou-zhenyi/sicp | ee6475d79a486a3bdc458378c55d0721195ea7d6 | [
"MIT"
] | null | null | null | c2_1_data/s4_barriers.py | zhou-zhenyi/sicp | ee6475d79a486a3bdc458378c55d0721195ea7d6 | [
"MIT"
] | null | null | null | c2_1_data/s4_barriers.py | zhou-zhenyi/sicp | ee6475d79a486a3bdc458378c55d0721195ea7d6 | [
"MIT"
] | null | null | null | from util_pair import cons, car, cdr
from util import gcd
def make_rat(n, d):
    # Represent the rational n/d as a (numerator, denominator) pair.
    # The pair is stored unreduced; numer()/denom() reduce on access.
    return cons(n, d)
def numer(x):
    """Return the numerator of rational x, reduced to lowest terms."""
    g = gcd(car(x), cdr(x))
    # Floor division keeps the arithmetic in exact integers; the original
    # int(car(x) / g) round-trips through a float and loses precision for
    # large numerators.
    return car(x) // g
def denom(x):
    """Return the denominator of rational x, reduced to lowest terms."""
    g = gcd(car(x), cdr(x))
    # Floor division keeps the arithmetic in exact integers; the original
    # int(cdr(x) / g) round-trips through a float and loses precision for
    # large denominators.
    return cdr(x) // g
def add_rat(x, y):
    # a/b + c/d = (a*d + c*b) / (b*d)
    return make_rat(numer(x) * denom(y) + numer(y) * denom(x), denom(x) * denom(y))
def sub_rat(x, y):
    # a/b - c/d = (a*d - c*b) / (b*d)
    return make_rat(numer(x) * denom(y) - numer(y) * denom(x), denom(x) * denom(y))
def mul_rat(x, y):
    # (a/b) * (c/d) = (a*c) / (b*d)
    return make_rat(numer(x) * numer(y), denom(x) * denom(y))
def div_rat(x, y):
    # (a/b) / (c/d) = (a*d) / (b*c)
    return make_rat(numer(x) * denom(y), denom(x) * numer(y))
def equal_rat(x, y):
    # Cross-multiplication test: a/b == c/d iff a*d == c*b.
    return numer(x) * denom(y) == numer(y) * denom(x)
def print_rat(x):
    # Display the rational as "numerator/denominator".
    print(str(numer(x)) + "/" + str(denom(x)))
# Demo: build a few rationals and exercise the arithmetic defined above.
one_half = make_rat(1, 2)
print_rat(one_half)
one_third = make_rat(1, 3)
print_rat(one_third)
print_rat(add_rat(one_half, one_third))
print_rat(mul_rat(one_half, one_third))
print_rat(add_rat(one_third, one_third)) | 22.744186 | 83 | 0.630879 |
7957f91db718714b598b0acdaf25f3ca0c66c91e | 5,176 | py | Python | libcloud/test/loadbalancer/test_brightbox.py | pquerna/libcloud | 85cccf53235e081a5ed737f12f0c2c70908a3332 | [
"Apache-2.0"
] | 1 | 2015-11-05T15:37:04.000Z | 2015-11-05T15:37:04.000Z | libcloud/test/loadbalancer/test_brightbox.py | pquerna/libcloud | 85cccf53235e081a5ed737f12f0c2c70908a3332 | [
"Apache-2.0"
] | null | null | null | libcloud/test/loadbalancer/test_brightbox.py | pquerna/libcloud | 85cccf53235e081a5ed737f12f0c2c70908a3332 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.loadbalancer.base import Member, Algorithm
from libcloud.loadbalancer.drivers.brightbox import BrightboxLBDriver
from libcloud.loadbalancer.types import State
from libcloud.test import MockHttpTestCase
from libcloud.test.secrets import LB_BRIGHTBOX_PARAMS
from libcloud.test.file_fixtures import LoadBalancerFileFixtures
class BrightboxLBTests(unittest.TestCase):
    """Tests for :class:`BrightboxLBDriver`, backed by mocked HTTP fixtures.

    The deprecated ``assertEquals`` alias (removed in Python 3.12) has been
    replaced with ``assertEqual`` throughout, matching the one call that
    already used the canonical name.
    """
    def setUp(self):
        # Route every connection through the mock HTTP class so that no
        # real network traffic occurs during the tests.
        BrightboxLBDriver.connectionCls.conn_classes = (None,
                                                        BrightboxLBMockHttp)
        BrightboxLBMockHttp.type = None
        self.driver = BrightboxLBDriver(*LB_BRIGHTBOX_PARAMS)
    def test_list_protocols(self):
        # The driver supports exactly the tcp and http protocols.
        protocols = self.driver.list_protocols()
        self.assertEqual(len(protocols), 2)
        self.assertTrue('tcp' in protocols)
        self.assertTrue('http' in protocols)
    def test_list_balancers(self):
        balancers = self.driver.list_balancers()
        self.assertEqual(len(balancers), 1)
        self.assertEqual(balancers[0].id, 'lba-1235f')
        self.assertEqual(balancers[0].name, 'lb1')
    def test_get_balancer(self):
        balancer = self.driver.get_balancer(balancer_id='lba-1235f')
        self.assertEqual(balancer.id, 'lba-1235f')
        self.assertEqual(balancer.name, 'lb1')
        self.assertEqual(balancer.state, State.RUNNING)
    def test_destroy_balancer(self):
        balancer = self.driver.get_balancer(balancer_id='lba-1235f')
        self.assertTrue(self.driver.destroy_balancer(balancer))
    def test_create_balancer(self):
        members = [Member('srv-lv426', None, None)]
        balancer = self.driver.create_balancer(name='lb2', port=80,
            protocol='http', algorithm=Algorithm.ROUND_ROBIN, members=members)
        self.assertEqual(balancer.name, 'lb2')
        self.assertEqual(balancer.port, 80)
        # Creation returns ACCEPTED, so the balancer starts out PENDING.
        self.assertEqual(balancer.state, State.PENDING)
    def test_balancer_list_members(self):
        balancer = self.driver.get_balancer(balancer_id='lba-1235f')
        members = balancer.list_members()
        self.assertEqual(len(members), 1)
        self.assertEqual(members[0].balancer, balancer)
        self.assertEqual('srv-lv426', members[0].id)
    def test_balancer_attach_member(self):
        balancer = self.driver.get_balancer(balancer_id='lba-1235f')
        member = balancer.attach_member(Member('srv-kg983', ip=None,
                                               port=None))
        self.assertEqual(member.id, 'srv-kg983')
    def test_balancer_detach_member(self):
        balancer = self.driver.get_balancer(balancer_id='lba-1235f')
        member = Member('srv-lv426', None, None)
        self.assertTrue(balancer.detach_member(member))
class BrightboxLBMockHttp(MockHttpTestCase):
    # Mock HTTP server: each method name maps to a URL path (slashes and
    # dots become underscores) and returns a canned fixture response.
    fixtures = LoadBalancerFileFixtures('brightbox')
    def _token(self, method, url, body, headers):
        # OAuth token endpoint used during driver connection setup.
        if method == 'POST':
            return self.response(httplib.OK, self.fixtures.load('token.json'))
    def _1_0_load_balancers(self, method, url, body, headers):
        # GET lists balancers; POST creates one (returns 202 ACCEPTED).
        if method == 'GET':
            return self.response(httplib.OK,
                                 self.fixtures.load('load_balancers.json'))
        elif method == 'POST':
            body = self.fixtures.load('load_balancers_post.json')
            return self.response(httplib.ACCEPTED, body)
    def _1_0_load_balancers_lba_1235f(self, method, url, body, headers):
        # GET fetches a single balancer; DELETE destroys it.
        if method == 'GET':
            body = self.fixtures.load('load_balancers_lba_1235f.json')
            return self.response(httplib.OK, body)
        elif method == 'DELETE':
            return self.response(httplib.ACCEPTED, '')
    def _1_0_load_balancers_lba_1235f_add_nodes(self, method, url, body,
                                                headers):
        if method == 'POST':
            return self.response(httplib.ACCEPTED, '')
    def _1_0_load_balancers_lba_1235f_remove_nodes(self, method, url, body,
                                                   headers):
        if method == 'POST':
            return self.response(httplib.ACCEPTED, '')
    def response(self, status, body):
        # Helper assembling the (status, body, headers, reason) tuple the
        # mock connection expects; all fixtures are JSON.
        return (status, body, {'content-type': 'application/json'},
                httplib.responses[status])
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    sys.exit(unittest.main())
| 38.917293 | 78 | 0.672141 |
7957f94e2ec99a61f261e70c0ae4305ee268f90c | 1,803 | py | Python | GUI/Generic/GRadioGroups.py | gcewing/PyGUI | 58c6c38ccb8e66acdf98dea6b24bef1d9a03147c | [
"MIT"
] | 9 | 2019-07-15T19:03:27.000Z | 2021-11-24T19:50:02.000Z | GUI/Generic/GRadioGroups.py | mnabeelp/PyGUI | 58c6c38ccb8e66acdf98dea6b24bef1d9a03147c | [
"MIT"
] | 3 | 2019-09-11T13:22:10.000Z | 2020-08-19T20:13:00.000Z | GUI/Generic/GRadioGroups.py | mnabeelp/PyGUI | 58c6c38ccb8e66acdf98dea6b24bef1d9a03147c | [
"MIT"
] | 4 | 2020-02-23T16:50:06.000Z | 2022-02-10T07:15:35.000Z | #
# Python GUI - Radio groups - Generic
#
from GUI.Properties import Properties, overridable_property
from GUI.Actions import Action
class RadioGroup(Properties, Action):
    """A RadioGroup coordinates a group of RadioButtons.
    It has a 'value' property which is equal to the value
    of the currently selected RadioButton. It may be given
    an action procedure to execute when its value changes.
    Operations:
        iter(group)
            Returns an iterator over the items of the group.
    """
    value = overridable_property('value', """The value of the currently
        selected radio button.""")
    _items = None
    _value = None
    def __init__(self, items = (), **kwds):
        # An immutable default avoids Python's shared-mutable-default
        # pitfall; 'items' is only iterated, so any iterable is accepted.
        Properties.__init__(self, **kwds)
        self._items = []
        self.add_items(items)
    #
    #   Operations
    #
    def __iter__(self):
        return iter(self._items)
    #
    #   Properties
    #
    def get_value(self):
        return self._value
    def set_value(self, x):
        # '!=' replaces the Python-2-only '<>' operator; the behaviour is
        # identical and the module now also parses under Python 3.
        if self._value != x:
            self._value = x
            self._value_changed()
            self.do_action()
    #
    #   Adding and removing items
    #
    def add_items(self, items):
        "Add a sequence of RadioButtons to this group."
        for item in items:
            self.add_item(item)
    def add_item(self, item):
        "Add a RadioButton to this group."
        item.group = self
    def remove_items(self, items):
        "Remove a sequence of RadioButtons from this group."
        for item in items:
            item.group = None
    def remove_item(self, item):
        "Remove a RadioButton from this group."
        item.group = None
    def _add_item(self, item):
        # Internal hook; presumably invoked by the RadioButton 'group'
        # property machinery -- confirm in the RadioButton implementation.
        self._items.append(item)
        self._item_added(item)
    def _remove_item(self, item):
        self._items.remove(item)
        self._item_removed(item)
    def _item_added(self, item):
        # Implemented by platform-specific subclasses.
        raise NotImplementedError
    def _item_removed(self, item):
        raise NotImplementedError
    def _value_changed(self):
        raise NotImplementedError
| 20.488636 | 68 | 0.712701 |
7957f9a135c8ca7f49b9050e1320196ddac684e5 | 8,178 | py | Python | app/main/routes.py | Bobw147/Microblog2 | 03b8b5784aed41e4da4c7f218dc973703ccf7166 | [
"MIT"
] | null | null | null | app/main/routes.py | Bobw147/Microblog2 | 03b8b5784aed41e4da4c7f218dc973703ccf7166 | [
"MIT"
] | 3 | 2020-03-19T22:08:50.000Z | 2021-06-02T00:55:10.000Z | app/main/routes.py | Bobw147/Microblog2 | 03b8b5784aed41e4da4c7f218dc973703ccf7166 | [
"MIT"
] | null | null | null | from datetime import datetime
from flask import render_template, flash, redirect, url_for, request, g, \
jsonify, current_app
from flask_login import current_user, login_required
from flask_babel import _, get_locale
from guess_language import guess_language
from app import db
from app.main.forms import EditProfileForm, PostForm, SearchForm
from app.models import User, Post, Message, Notification
from app.translate import translate
from app.main import bp
from app.main.forms import MessageForm
# Stop pylint from complaining about dynamically created members not
# being present when the file is checked
# pylint: disable=E1101
@bp.before_app_request
def before_request():
    """Run before every request: track user activity and prepare globals."""
    if current_user.is_authenticated:
        # Record the time of the user's latest request.
        current_user.last_seen = datetime.utcnow()
        db.session.commit()
    # Expose the search form and locale to all templates via ``g``.
    g.search_form = SearchForm()
    g.locale = str(get_locale())
@bp.route('/', methods=['GET', 'POST'])
@bp.route('/index', methods=['GET', 'POST'])
@login_required
def index():
    """Home page: submit a new post and show the followed-users feed."""
    form = PostForm()
    if form.validate_on_submit():
        # Detect the post's language for later translation; guess_language
        # may return 'UNKNOWN' or an over-long code, which we blank out.
        language = guess_language(form.post.data)
        if language == 'UNKNOWN' or len(language) > 5:
            language = ''
        post = Post(body=form.post.data, author=current_user,
                    language=language)
        db.session.add(post)
        db.session.commit()
        flash(_('Your post is now live!'))
        # Post/Redirect/Get: prevents duplicate posts on browser refresh.
        return redirect(url_for('main.index'))
    page = request.args.get('page', 1, type=int)
    posts = current_user.followed_posts().paginate(
        page, current_app.config['POSTS_PER_PAGE'], False)
    # next/prev links are None at the ends so the template can hide them.
    next_url = url_for('main.index', page=posts.next_num) \
        if posts.has_next else None
    prev_url = url_for('main.index', page=posts.prev_num) \
        if posts.has_prev else None
    return render_template('index.html', title=_('Home'), form=form,
                           posts=posts.items, next_url=next_url,
                           prev_url=prev_url)
@bp.route('/explore')
@login_required
def explore():
    """Show a paginated, global feed of all posts, newest first."""
    page_number = request.args.get('page', 1, type=int)
    per_page = current_app.config['POSTS_PER_PAGE']
    posts = Post.query.order_by(Post.timestamp.desc()).paginate(
        page_number, per_page, False)
    next_url = None
    if posts.has_next:
        next_url = url_for('main.explore', page=posts.next_num)
    prev_url = None
    if posts.has_prev:
        prev_url = url_for('main.explore', page=posts.prev_num)
    return render_template('index.html', title=_('Explore'),
                           posts=posts.items, next_url=next_url,
                           prev_url=prev_url)
@bp.route('/user/<username>')
@login_required
def user(username):
    """Profile page for *username* with their posts (404 if unknown)."""
    user = User.query.filter_by(username=username).first_or_404()
    page = request.args.get('page', 1, type=int)
    posts = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, current_app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('main.user', username=user.username,
                       page=posts.next_num) if posts.has_next else None
    prev_url = url_for('main.user', username=user.username,
                       page=posts.prev_num) if posts.has_prev else None
    return render_template('user.html', user=user, posts=posts.items,
                           next_url=next_url, prev_url=prev_url)
@bp.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """Let the signed-in user change their username and "about me" text."""
    form = EditProfileForm(current_user.username)
    if form.validate_on_submit():
        current_user.username = form.username.data
        current_user.about_me = form.about_me.data
        db.session.commit()
        flash(_('Your changes have been saved.'))
        return redirect(url_for('main.edit_profile'))
    elif request.method == 'GET':
        # Initial GET: pre-populate the form with the current values.
        form.username.data = current_user.username
        form.about_me.data = current_user.about_me
    return render_template('edit_profile.html', title=_('Edit Profile'),
                           form=form)
@bp.route('/follow/<username>')
@login_required
def follow(username):
    """Make the signed-in user follow *username*.

    Flashes a message and redirects back when the target does not exist
    or is the current user themselves.
    """
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash(_('User %(username)s not found.', username=username))
        return redirect(url_for('main.index'))
    if user == current_user:
        flash(_('You cannot follow yourself!'))
        return redirect(url_for('main.user', username=username))
    current_user.follow(user)
    db.session.commit()
    flash(_('You are following %(username)s!', username=username))
    return redirect(url_for('main.user', username=username))
@bp.route('/unfollow/<username>')
@login_required
def unfollow(username):
    """Stop the signed-in user following *username*.

    Guard clauses handle an unknown target and self-unfollow attempts.
    """
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash(_('User %(username)s not found.', username=username))
        return redirect(url_for('main.index'))
    if target == current_user:
        flash(_('You cannot unfollow yourself!'))
        return redirect(url_for('main.user', username=username))
    current_user.unfollow(target)
    db.session.commit()
    flash(_('You are not following %(username)s.', username=username))
    return redirect(url_for('main.user', username=username))
@bp.route('/translate', methods=['POST'])
@login_required
def translate_text():
    """Translate posted text between the two languages given in the form."""
    source_text = request.form['text']
    source_language = request.form['source_language']
    dest_language = request.form['dest_language']
    translated = translate(source_text, source_language, dest_language)
    return jsonify({'text': translated})
@bp.route('/search')
@login_required
def search():
    """Full-text search over posts via ``Post.search``."""
    if not g.search_form.validate():
        # Empty or invalid query: fall back to the explore feed.
        return redirect(url_for('main.explore'))
    page = request.args.get('page', 1, type=int)
    posts, total = Post.search(g.search_form.q.data, page,
                               current_app.config['POSTS_PER_PAGE'])
    # Unlike the other views, pagination is computed from the total hit
    # count rather than a paginate() object.
    next_url = url_for('main.search', q=g.search_form.q.data, page=page + 1) \
        if total > page * current_app.config['POSTS_PER_PAGE'] else None
    prev_url = url_for('main.search', q=g.search_form.q.data, page=page - 1) \
        if page > 1 else None
    return render_template('search.html', title=_('Search'), posts=posts,
                           next_url=next_url, prev_url=prev_url)
@bp.route('/user/<username>/popup')
@login_required
def user_popup(username):
    """Render the hover-card popup for *username* (404 if unknown)."""
    profile = User.query.filter_by(username=username).first_or_404()
    return render_template('user_popup.html', user=profile)
@bp.route('/send_message/<recipient>', methods=['GET', 'POST'])
@login_required
def send_message(recipient):
    """Send a private message to *recipient* (404 if the user is unknown)."""
    user = User.query.filter_by(username=recipient).first_or_404()
    form = MessageForm()
    if form.validate_on_submit():
        msg = Message(author=current_user, recipient=user,
                      body=form.message.data)
        db.session.add(msg)
        # Refresh the recipient's unread-message badge.
        user.add_notification('unread_message_count', user.new_messages())
        db.session.commit()
        flash(_('Your message has been sent.'))
        return redirect(url_for('main.user', username=recipient))
    return render_template('send_message.html', title=_('Send Message'),
                           form=form, recipient=recipient)
@bp.route('/messages')
@login_required
def messages():
    """Show the signed-in user's received private messages, paginated.

    Viewing the page marks everything as read: it advances
    ``last_message_read_time`` and resets the unread-count badge.
    """
    current_user.last_message_read_time = datetime.utcnow()
    current_user.add_notification('unread_message_count', 0)
    db.session.commit()
    page = request.args.get('page', 1, type=int)
    # Local renamed from ``messages`` to avoid shadowing this view function.
    received = current_user.messages_received.order_by(
        Message.timestamp.desc()).paginate(
            page, current_app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('main.messages', page=received.next_num) \
        if received.has_next else None
    prev_url = url_for('main.messages', page=received.prev_num) \
        if received.has_prev else None
    return render_template('messages.html', messages=received.items,
                           next_url=next_url, prev_url=prev_url)
@bp.route('/notifications')
@login_required
def notifications():
    """Return the user's notifications newer than ``since`` as JSON.

    ``since`` is a float timestamp; clients poll with the timestamp of the
    newest notification they have already seen.
    """
    since = request.args.get('since', 0.0, type=float)
    # Local renamed from ``notifications`` to avoid shadowing this view
    # function.
    pending = current_user.notifications.filter(
        Notification.timestamp > since).order_by(Notification.timestamp.asc())
    return jsonify([{
        'name': n.name,
        'data': n.get_data(),
        'timestamp': n.timestamp
    } for n in pending])
7957faf05c2a3d1bc2d5f71dff6759c0cf771c81 | 38,386 | py | Python | Scripts/sims4communitylib/utils/common_log_registry.py | ColonolNutty/Sims4CommunityLibrary | 684f28dc3c7deb4d9fd520e21e63942b65a91d31 | [
"CC-BY-4.0"
] | 118 | 2019-08-31T04:33:18.000Z | 2022-03-28T21:12:14.000Z | Scripts/sims4communitylib/utils/common_log_registry.py | ColonolNutty/Sims4CommunityLibrary | 684f28dc3c7deb4d9fd520e21e63942b65a91d31 | [
"CC-BY-4.0"
] | 15 | 2019-12-05T01:29:46.000Z | 2022-02-18T17:13:46.000Z | Scripts/sims4communitylib/utils/common_log_registry.py | ColonolNutty/Sims4CommunityLibrary | 684f28dc3c7deb4d9fd520e21e63942b65a91d31 | [
"CC-BY-4.0"
] | 28 | 2019-09-07T04:11:05.000Z | 2022-02-07T18:31:40.000Z | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
import os
import sims4.commands
from typing import List, Dict, Any, Union, Tuple
from pprint import pformat
from sims4communitylib.enums.enumtypes.common_int import CommonInt
from sims4communitylib.exceptions.common_stacktrace_utils import CommonStacktraceUtil
from sims4communitylib.mod_support.mod_identity import CommonModIdentity
from sims4communitylib.services.common_service import CommonService
from sims4communitylib.utils.common_io_utils import CommonIOUtils
from sims4communitylib.utils.common_log_utils import CommonLogUtils
class CommonMessageType(CommonInt):
    """Message types for use when logging.
    """
    # Sentinel for "no message type"; note its value 0 is falsy.
    INVALID: 'CommonMessageType' = 0
    ERROR: 'CommonMessageType' = 1
    WARN: 'CommonMessageType' = 2
    DEBUG: 'CommonMessageType' = 3
    INFO: 'CommonMessageType' = 4
class CommonLog:
"""CommonLog(mod_identifier, log_name, custom_file_path=None)
A class used to log messages.
:param mod_identifier: The name or identity of the Mod that owns the log.
:type mod_identifier: Union[str, CommonModIdentity]
:param log_name: The name of the log, used when enabling/disabling logs via commands
:type log_name: str
:param custom_file_path: A custom file path relative to The Sims 4 folder. Example: Value is 'fake_path/to/directory', the final path would be 'The Sims 4/fake_path/to_directory'. Default is None.
:type custom_file_path: str, optional
"""
    def __init__(self, mod_identifier: Union[str, CommonModIdentity], log_name: str, custom_file_path: str=None):
        # Identifier used when enabling/disabling this log via commands.
        self._log_name = log_name
        # Reduce a CommonModIdentity down to its plain mod name.
        self._mod_name = mod_identifier.name if isinstance(mod_identifier, CommonModIdentity) else mod_identifier
        self._custom_file_path = custom_file_path
        # No message types are enabled by default, so all logging starts off.
        self._enabled_message_types = tuple()
        self._should_log_extra_sim_details = False
    def debug(self, message: str):
        """debug(message)
        Log a message with message type DEBUG.
        :param message: The message to log.
        :type message: str
        """
        # Checked first so disabled logs never touch the file system.
        if self.is_enabled(CommonMessageType.DEBUG):
            self._log_message(CommonMessageType.DEBUG, message)
    def info(self, message: str):
        """info(message)
        Log a message with message type INFO.
        :param message: The message to log.
        :type message: str
        """
        if self.is_enabled(CommonMessageType.INFO):
            self._log_message(CommonMessageType.INFO, message)
    def format_info(self, *args: Any, update_tokens: bool=True, **kwargs: Any):
        """format_info(*args, update_tokens=True, **kwargs)
        Log a non-descriptive message containing pformatted arguments and keyword arguments with message type INFO.
        :param update_tokens: If set to True, when an arg or kwarg value is a Sim or SimInfo, it will be converted to their name before format occurs. Default is True.
        :type update_tokens: bool, optional
        :param args: Arguments to format into the message.
        :type args: Any
        :param kwargs: Keyword Arguments to format into the message.
        :type kwargs: Any
        """
        # Convenience wrapper: delegate to format() at INFO severity.
        self.format(*args, message_type=CommonMessageType.INFO, update_tokens=update_tokens, **kwargs)
    def format_info_with_message(self, message: str, *args, update_tokens: bool=True, **kwargs):
        """format_info_with_message(message, *args, update_tokens=True, **kwargs)
        Log a message containing pformatted arguments and keyword arguments with message type INFO.
        :param message: The message to log.
        :type message: str
        :param update_tokens: If set to True, when an arg or kwarg value is a Sim or SimInfo, it will be converted to their name before format occurs. Default is True.
        :type update_tokens: bool, optional
        :param args: Arguments to format into the message.
        :type args: Any
        :param kwargs: Keyword Arguments to format into the message.
        :type kwargs: Any
        """
        # Convenience wrapper: delegate to format_with_message() at INFO severity.
        self.format_with_message(message, *args, message_type=CommonMessageType.INFO, update_tokens=update_tokens, **kwargs)
    def format(self, *args, message_type: CommonMessageType=CommonMessageType.DEBUG, update_tokens: bool=True, **kwargs):
        """format(*args, message_type=CommonMessageType.DEBUG, update_tokens=True, **kwargs)
        Log a non-descriptive message containing pformatted arguments and keyword arguments with the specified message type.
        :param message_type: The MessageType of the logged message. Default is CommonMessageType.DEBUG.
        :type message_type: CommonMessageType, optional
        :param update_tokens: If set to True, when an arg or kwarg value is a Sim or SimInfo, it will be converted to their name before format occurs. Default is True.
        :type update_tokens: bool, optional
        :param args: Arguments to format into the message.
        :type args: Any
        :param kwargs: Keyword Arguments to format into the message.
        :type kwargs: Any
        """
        if self.is_enabled(message_type):
            if update_tokens:
                # _update_args/_update_kwargs presumably convert Sim-like
                # values to readable tokens -- defined elsewhere in this
                # class; confirm there.
                args = self._update_args(*args)
                kwargs = self._update_kwargs(**kwargs)
            # Log whichever of args/kwargs were supplied; with neither, the
            # 'else' branch still logs the pformat of empty kwargs ('{}').
            if args and kwargs:
                self._log_message(message_type, '{}, {}'.format(pformat(args), pformat(kwargs)))
            elif args:
                self._log_message(message_type, '{}'.format(pformat(args)))
            else:
                self._log_message(message_type, '{}'.format(pformat(kwargs)))
    def format_with_message(self, message: str, *args, message_type: CommonMessageType=CommonMessageType.DEBUG, update_tokens: bool=True, **kwargs):
        """format_with_message(message, *args, message_type=CommonMessageType.DEBUG, update_tokens=True, **kwargs)
        Log a message containing pformatted arguments and keyword arguments with the specified message type.
        :param message: The message to log.
        :type message: str
        :param message_type: The type of message being logged. Default is CommonMessageType.DEBUG.
        :type message_type: CommonMessageType, optional
        :param update_tokens: If set to True, when an arg or kwarg value is a Sim or SimInfo, it will be converted to their name before format occurs. Default is True.
        :type update_tokens: bool, optional
        :param args: Arguments to format into the message.
        :type args: Any
        :param kwargs: Keyword Arguments to format into the message.
        :type kwargs: Any
        """
        if self.is_enabled(message_type):
            if update_tokens:
                args = self._update_args(*args)
                kwargs = self._update_kwargs(**kwargs)
            if args and kwargs:
                self._log_message(message_type, '{} {}, {}'.format(message, pformat(args), pformat(kwargs)))
            elif args:
                self._log_message(message_type, '{} {}'.format(message, pformat(args)))
            elif kwargs:
                self._log_message(message_type, '{} {}'.format(message, pformat(kwargs)))
            else:
                # With no args or kwargs, log the plain message only.
                self._log_message(message_type, message)
    def warn(self, message: str):
        """warn(message)
        Log a message with message type WARN.
        :param message: The message to log.
        :type message: str
        """
        if self.is_enabled(CommonMessageType.WARN):
            self._log_message(CommonMessageType.WARN, message)
    def format_warn(self, *args: Any, update_tokens: bool=True, **kwargs: Any):
        """format_warn(*args, update_tokens=True, **kwargs)
        Log a non-descriptive message containing pformatted arguments and keyword arguments with message type WARN.
        :param update_tokens: If set to True, when an arg or kwarg value is a Sim or SimInfo, it will be converted to their name before format occurs. Default is True.
        :type update_tokens: bool, optional
        :param args: Arguments to format into the message.
        :type args: Any
        :param kwargs: Keyword Arguments to format into the message.
        :type kwargs: Any
        """
        # Convenience wrapper: delegate to format() at WARN severity.
        self.format(*args, message_type=CommonMessageType.WARN, update_tokens=update_tokens, **kwargs)
    def format_warn_with_message(self, message: str, *args, update_tokens: bool=True, **kwargs):
        """format_warn_with_message(message, *args, update_tokens=True, **kwargs)
        Log a message containing pformatted arguments and keyword arguments with message type WARN.
        :param message: The message to log.
        :type message: str
        :param update_tokens: If set to True, when an arg or kwarg value is a Sim or SimInfo, it will be converted to their name before format occurs. Default is True.
        :type update_tokens: bool, optional
        :param args: Arguments to format into the message.
        :type args: Any
        :param kwargs: Keyword Arguments to format into the message.
        :type kwargs: Any
        """
        # Convenience wrapper: delegate to format_with_message() at WARN severity.
        self.format_with_message(message, *args, message_type=CommonMessageType.WARN, update_tokens=update_tokens, **kwargs)
    def error(self, message: str, message_type: CommonMessageType=CommonMessageType.ERROR, exception: Exception=None, throw: bool=True, stack_trace: List[str]=None):
        """error(message, message_type=CommonMessageType.ERROR, exception=None, throw=True, stack_trace=None)
        Log an error message with the specified message type
        :param message: The message to log.
        :type message: str
        :param message_type: The message type of the error message. Default is CommonMessageType.ERROR.
        :type message_type: CommonMessageType, optional
        :param exception: The exception that occurred. Default is None.
        :type exception: Exception, optional
        :param stack_trace: The stack trace leading to the exception, if not supplied, a stack trace will be gathered for you. Default is None.
        :type stack_trace: List[str], optional
        :param throw: If set to True, the error is additionally written to the exceptions file together with a full stack trace. NOTE: despite its name, this method never re-raises the exception.
        :type throw: bool, optional
        """
        if throw:
            # Capture the current call stack when one was not supplied.
            stack_trace = stack_trace or CommonStacktraceUtil.get_full_stack_trace()
            self._log_error(message, exception=exception, stack_trace=stack_trace)
        # Unlike debug/info/warn, errors go to the message log even when the
        # message type is not enabled.
        self._log_message(message_type, message)
        if exception is not None:
            self._log_message(message_type, pformat(exception))
    def format_error(self, *args, exception: Exception=None, throw: bool=True, update_tokens: bool=True, stack_trace: List[str]=None, **kwargs):
        """format_error(*args, exception=None, throw=True, update_tokens=True, stack_trace=None, **kwargs)
        Log a non-descriptive error message containing pformatted arguments and keyword arguments.
        :param exception: The exception that occurred.
        :type exception: Exception, optional
        :param throw: If set to True, the error is also recorded in the exceptions file with a full stack trace; the exception itself is not re-raised.
        :type throw: bool, optional
        :param update_tokens: If set to True, when an arg or kwarg value is a Sim or SimInfo, it will be converted to their name before format occurs. Default is True.
        :type update_tokens: bool, optional
        :param stack_trace: The stack trace leading to the exception, if not supplied, a stack trace will be gathered for you. Default is None.
        :type stack_trace: List[str], optional
        :param args: Arguments to format into the message.
        :type args: Any
        :param kwargs: Keyword Arguments to format into the message.
        :type kwargs: Any
        """
        if update_tokens:
            args = self._update_args(*args)
            kwargs = self._update_kwargs(**kwargs)
        # Capture the stack here so it reflects the caller of this method.
        stack_trace = stack_trace or CommonStacktraceUtil.get_full_stack_trace()
        if args and kwargs:
            self.error('{}, {}'.format(pformat(args), pformat(kwargs)), exception=exception, throw=throw, stack_trace=stack_trace)
        elif args:
            self.error('{}'.format(pformat(args)), exception=exception, throw=throw, stack_trace=stack_trace)
        else:
            self.error('{}'.format(pformat(kwargs)), exception=exception, throw=throw, stack_trace=stack_trace)
    def format_error_with_message(self, message: str, *args, exception: Exception=None, throw: bool=True, update_tokens: bool=True, stack_trace: List[str]=None, **kwargs):
        """format_error_with_message(message, *args, exception=None, throw=True, update_tokens=True, stack_trace=None, **kwargs)
        Log an error message containing pformatted arguments and keyword arguments.
        :param message: The message to log.
        :type message: str
        :param exception: The exception that occurred. Default is None.
        :type exception: Exception, None
        :param throw: If set to True, the error is also recorded in the exceptions file with a full stack trace; the exception itself is not re-raised. Default is True.
        :type throw: bool, optional
        :param update_tokens: If set to True, when an arg or kwarg value is a Sim or SimInfo, it will be converted to their name before format occurs. Default is True.
        :type update_tokens: bool, optional
        :param stack_trace: The stack trace leading to the exception, if not supplied, a stack trace will be gathered for you. Default is None.
        :type stack_trace: List[str], optional
        :param args: Arguments to format into the message.
        :type args: Any
        :param kwargs: Keyword Arguments to format into the message.
        :type kwargs: Any
        """
        if update_tokens:
            args = self._update_args(*args)
            kwargs = self._update_kwargs(**kwargs)
        stack_trace = stack_trace or CommonStacktraceUtil.get_full_stack_trace()
        if args and kwargs:
            self.error('{} {}, {}'.format(message, pformat(args), pformat(kwargs)), exception=exception, throw=throw, stack_trace=stack_trace)
        elif args:
            self.error('{} {}'.format(message, pformat(args)), exception=exception, throw=throw, stack_trace=stack_trace)
        elif kwargs:
            self.error('{} {}'.format(message, pformat(kwargs)), exception=exception, throw=throw, stack_trace=stack_trace)
        else:
            self.error(message, exception=exception, throw=throw, stack_trace=stack_trace)
def log_stack(self) -> None:
"""log_stack()
Log the current stack trace and the calling frames
.. note:: The best use for this is to find the path of invocation to the location this function is called at.
"""
if not self.is_enabled(CommonMessageType.DEBUG):
return
import inspect
current_frame = inspect.currentframe()
calling_frame = inspect.getouterframes(current_frame, 2)
self.format(calling_frame)
    def enable(self, message_types: Tuple[CommonMessageType]=(CommonMessageType.WARN, CommonMessageType.DEBUG, CommonMessageType.INFO), enable_logging_extra_sim_details: bool=False) -> None:
        """enable(message_types=(CommonMessageType.WARN, CommonMessageType.DEBUG, CommonMessageType.INFO), enable_extra_sim_details=False)
        Enable the log or specific types of logs.
        :param message_types: The types of messages to enable for logging. Default message types are Info, Debug, and Warn.
        :rtype message_types: Tuple[CommonMessageTypes], optional
        :param enable_logging_extra_sim_details: If True, when a Sim is being logged, extra Sim details, such as Sim Type and Current Sim Type, will be logged in addition to their name and id. If False, only their name and id will be logged. Default is False.
        :type enable_logging_extra_sim_details: bool, optional
        """
        # 'or tuple()' guards against callers passing None explicitly.
        self._enabled_message_types = message_types or tuple()
        if enable_logging_extra_sim_details:
            self.enable_logging_extra_sim_details()
    def enable_logging_extra_sim_details(self) -> None:
        """enable_logging_extra_sim_details()
        Enable the logging of extra Sim details, when logging a Sim, such as Sim Type and Sim Current Type.
        """
        self._should_log_extra_sim_details = True
    def disable_logging_extra_sim_details(self) -> None:
        """disable_logging_extra_sim_details()
        Disable the logging of extra Sim details when logging a Sim, such as Sim Type and Sim Current Type.
        """
        self._should_log_extra_sim_details = False
    def disable(self) -> None:
        """disable()
        Disable the log
        """
        # Clearing the enabled types also turns off extra Sim detail logging.
        self._enabled_message_types = tuple()
        self.disable_logging_extra_sim_details()
@property
def enabled(self) -> bool:
"""Determine whether the log is enabled or not.
.. note:: All logs are disabled by default.
:return: True, if the log is enabled. False, if the log is disabled.
:rtype: bool
"""
return any(self._enabled_message_types)
@property
def name(self) -> str:
"""The identifier of this log.
:return: A string identifier.
:rtype: str
"""
return self._log_name
@property
def mod_name(self) -> str:
"""The name of the mod that owns the log.
:return: The name of the mod that owns the log
:rtype: str
"""
return self._mod_name
@property
def messages_file_path(self) -> str:
"""The file path messages are logged to.
:return: The file path messages are logged to.
:rtype: str
"""
return CommonLogUtils.get_message_file_path(self.mod_name, custom_file_path=self._custom_file_path)
@property
def exceptions_file_path(self) -> str:
"""The file path exceptions are logged to.
:return: The file path exceptions are logged to.
:rtype: str
"""
return CommonLogUtils.get_exceptions_file_path(self.mod_name, custom_file_path=self._custom_file_path)
def is_enabled(self, message_type: 'CommonMessageType') -> bool:
    """is_enabled(message_type)

    Check whether messages of the given type are currently being logged.

    :param message_type: The type of messages to check for allowance.
    :type message_type: CommonMessageType
    :return: True, if the specified message type is enabled for logging. False, if not.
    :rtype: bool
    """
    enabled_types = self._enabled_message_types
    return message_type in enabled_types
def _log_message(self, message_type: CommonMessageType, message: str) -> None:
    """Write a single timestamped message to the messages file for this log.

    :param message_type: The severity/category the message is logged under; its ``.name`` appears in the output line.
    :param message: The text to write.
    """
    # Imported inside the function, presumably to avoid circular imports at module load time — TODO confirm.
    from sims4communitylib.utils.common_date_utils import CommonRealDateUtils
    from sims4communitylib.exceptions.common_exceptions_handler import CommonExceptionHandler
    current_date_time = CommonRealDateUtils.get_current_date_string()
    # Output line format: "<date> <TYPE>: [<log name>]: <message>\n".
    new_message = '{} {}: [{}]: {}\n'.format(current_date_time, message_type.name, self.name, message)
    try:
        from sims4communitylib.utils.common_io_utils import CommonIOUtils
        file_path = self.messages_file_path
        # Ensure the destination directory exists before writing.
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        CommonIOUtils.write_to_file(file_path, new_message, ignore_errors=True)
    except Exception as ex:
        # If the message itself cannot be written, record the failure through the exception handler instead.
        CommonExceptionHandler.log_exception(self.mod_name, 'Error occurred while attempting to log message: {}'.format(pformat(message)), exception=ex, custom_file_path=self._custom_file_path)
def _log_error(self, message: str, exception: Exception=None, stack_trace: List[str]=None) -> None:
    """Write an error entry (message plus stack trace) to the exceptions file for this log.

    :param message: The error description to write.
    :param exception: The exception that triggered the error, if any; its type and text are appended to the entry.
    :param stack_trace: A pre-captured stack trace to use; when None, the current full stack trace is captured.
    """
    from sims4communitylib.utils.common_date_utils import CommonRealDateUtils
    from sims4communitylib.exceptions.common_exceptions_handler import CommonExceptionHandler
    try:
        # Use the caller-supplied trace when given, otherwise capture the current one.
        exceptions = stack_trace or CommonStacktraceUtil.get_full_stack_trace()
        if exception is not None:
            stack_trace_message = '{}{} -> {}: {}\n'.format(''.join(exceptions), message, type(exception).__name__, exception)
        else:
            stack_trace_message = '{}{}\n'.format(''.join(exceptions), message)
        file_path = self.exceptions_file_path
        # Ensure the destination directory exists before writing.
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        exception_traceback_text = '[{}] {} {}\n'.format(self.mod_name, CommonRealDateUtils.get_current_date_string(), stack_trace_message)
        result = CommonIOUtils.write_to_file(file_path, exception_traceback_text, ignore_errors=True)
        if result:
            # Only notify (in-game) about the exception once it was successfully written to the file.
            CommonExceptionHandler._notify_exception_occurred(file_path, mod_identifier=self.mod_name)
    except Exception as ex:
        # Last-resort fallback: route the failure through the exception handler itself.
        CommonExceptionHandler.log_exception(self.mod_name, 'Error occurred while attempting to log message: {}'.format(pformat(message)), exception=ex, custom_file_path=self._custom_file_path)
def _update_args(self, *args: Any) -> Tuple[Any]:
    """Replace every Sim-like positional argument with a readable "Name (id) [acronym]" string.

    Non-Sim arguments are passed through unchanged.

    :param args: The positional arguments about to be logged.
    :return: The same arguments, with Sim-like values replaced by descriptive strings.
    """
    if not args:
        return args
    from sims4communitylib.utils.common_type_utils import CommonTypeUtils
    from sims4communitylib.utils.sims.common_sim_name_utils import CommonSimNameUtils
    from sims4communitylib.utils.sims.common_sim_utils import CommonSimUtils
    new_args: List[Any] = list()
    for arg in args:
        if CommonTypeUtils.is_sim_or_sim_info(arg) or CommonTypeUtils.is_sim_info_base_wrapper(arg):
            # Acronym marks which Sim-like type was logged; set by the matching type check below.
            # NOTE(review): _update_kwargs uses 'UnknownType' for this same fallback — confirm which spelling is intended.
            obj_type_acronym = 'Unknown'
            if CommonTypeUtils.is_sim_info(arg):
                obj_type_acronym = 'SI'
            elif CommonTypeUtils.is_sim_instance(arg):
                obj_type_acronym = 'S'
            elif CommonTypeUtils.is_sim_info_base_wrapper(arg):
                obj_type_acronym = 'SIBW'
            if self._should_log_extra_sim_details:
                # Extra details requested: include all Sim types plus the current (occult-aware) type.
                from sims4communitylib.utils.sims.common_sim_type_utils import CommonSimTypeUtils
                sim_info = CommonSimUtils.get_sim_info(arg)
                sim_types = tuple(CommonSimTypeUtils.get_all_sim_types_gen(sim_info, combine_teen_young_adult_and_elder_age=False, combine_child_dog_types=False))
                current_sim_type = CommonSimTypeUtils.determine_sim_type(sim_info, combine_teen_young_adult_and_elder_age=False, combine_child_dog_types=False, use_current_occult_type=True)
                new_args.append('{} ({}, ({}), C:{}) [{}]'.format(CommonSimNameUtils.get_full_name(arg), str(CommonSimUtils.get_sim_id(arg)), ', '.join([sim_type.name for sim_type in sim_types]), current_sim_type.name, obj_type_acronym))
            else:
                new_args.append('{} ({}) [{}]'.format(CommonSimNameUtils.get_full_name(arg), str(CommonSimUtils.get_sim_id(arg)), obj_type_acronym))
        else:
            # Not Sim-like; log the value as-is.
            new_args.append(arg)
    return tuple(new_args)
def _update_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
    """Replace every Sim-like keyword-argument value with a readable "Name (id) [acronym]" string.

    Non-Sim values are passed through unchanged; keys are never modified.

    :param kwargs: The keyword arguments about to be logged.
    :return: The same mapping, with Sim-like values replaced by descriptive strings.
    """
    if not kwargs:
        return kwargs
    new_kwargs: Dict[str, Any] = dict()
    from sims4communitylib.utils.common_type_utils import CommonTypeUtils
    from sims4communitylib.utils.sims.common_sim_name_utils import CommonSimNameUtils
    from sims4communitylib.utils.sims.common_sim_utils import CommonSimUtils
    for (key, val) in kwargs.items():
        if CommonTypeUtils.is_sim_or_sim_info(val) or CommonTypeUtils.is_sim_info_base_wrapper(val):
            # NOTE(review): _update_args uses 'Unknown' for this same fallback — confirm which spelling is intended.
            obj_type_acronym = 'UnknownType'
            if CommonTypeUtils.is_sim_info(val):
                obj_type_acronym = 'SI'
            elif CommonTypeUtils.is_sim_instance(val):
                obj_type_acronym = 'S'
            elif CommonTypeUtils.is_sim_info_base_wrapper(val):
                obj_type_acronym = 'SIBW'
            if self._should_log_extra_sim_details:
                # Extra details requested: include all Sim types plus the current (occult-aware) type.
                from sims4communitylib.utils.sims.common_sim_type_utils import CommonSimTypeUtils
                sim_info = CommonSimUtils.get_sim_info(val)
                sim_types = tuple(CommonSimTypeUtils.get_all_sim_types_gen(sim_info, combine_teen_young_adult_and_elder_age=False, combine_child_dog_types=False))
                current_sim_type = CommonSimTypeUtils.determine_sim_type(sim_info, combine_teen_young_adult_and_elder_age=False, combine_child_dog_types=False, use_current_occult_type=True)
                new_kwargs[key] = '{} ({}, ({}), C:{}) [{}]'.format(CommonSimNameUtils.get_full_name(val), str(CommonSimUtils.get_sim_id(val)), ', '.join([sim_type.name for sim_type in sim_types]), current_sim_type.name, obj_type_acronym)
            else:
                new_kwargs[key] = '{} ({}) [{}]'.format(CommonSimNameUtils.get_full_name(val), str(CommonSimUtils.get_sim_id(val)), obj_type_acronym)
        else:
            # Not Sim-like; log the value as-is.
            new_kwargs[key] = val
    return new_kwargs
class CommonLogRegistry(CommonService):
    """CommonLogRegistry()

    Used to register logs.

    .. note:: To register your own logs, please use :func:`~get` to create a CommonLogRegistry (CommonLogRegistry.get()).

    :Example Usage:

    .. highlight:: python
    .. code-block:: python

        # Register the log, Logs will appear in a file titled "MOD_NAME_Messages.txt" and messages logged using this log will be prefixed with "s4cl_log_name"
        log = CommonLogRegistry.get().register_log('MOD_NAME', 's4cl_log_name')
        # Enable the log, if not enabled, messages will not be logged.
        log.enable()
        # Log a message
        log.debug('Printing a message to the log.')
        # Disable the log
        log.disable()
        # The MOD_NAME_Messages.txt file will contain the "Printing a message to the log." message.

    .. note::

        Available Commands:

        - `s4clib.enable_log` or `s4clib.enablelog`
        - `s4clib.disable_log` or `s4clib.disablelog`
        - `s4clib.disable_all_logs` or `s4clib.disablealllogs`
        - `s4clib.logs`

    """
    def __init__(self) -> None:
        # Mapping of lower-cased mod name -> (log name -> CommonLog).
        self._registered_logs: Dict[str, Dict[str, CommonLog]] = dict()
        self._delete_old_log_files()

    def get_registered_log_names(self, mod_identifier: Union[str, CommonModIdentity]=None) -> List[str]:
        """get_registered_log_names()

        Retrieve the names of all registered logs.

        :param mod_identifier: The name or identifier of the mod the log is registered for. Default is None.
        :type mod_identifier: Union[str, CommonModIdentity], optional
        :return: A collection of registered logs.
        :rtype: List[str]
        """
        if self._registered_logs is None:
            return list()
        if mod_identifier is None:
            # No mod filter: flatten the log names across every registered mod.
            return [log_name
                    for mod_logs in self._registered_logs.values()
                    for log_name in mod_logs.keys()]
        mod_name = CommonModIdentity._get_mod_name(mod_identifier).lower()
        if mod_name not in self._registered_logs:
            return list()
        return list(self._registered_logs[mod_name].keys())

    def register_log(self, mod_identifier: Union[str, CommonModIdentity], log_name: str, custom_file_path: str=None) -> CommonLog:
        """register_log(mod_identifier, log_name, custom_file_path=None)

        Create and register a log with the specified name.

        .. note:: If `log_name` matches the name of a Log already registered, that log will be returned rather than creating a new Log.

        :param mod_identifier: The name or identifier of the mod the log is registered for.
        :type mod_identifier: Union[str, CommonModIdentity]
        :param log_name: The name of the log.
        :type log_name: str
        :param custom_file_path: A custom file path relative to The Sims 4 folder. Example: Value is 'fake_path/to/directory', the final path would be 'The Sims 4/fake_path/to_directory'. Default is None.
        :type custom_file_path: str, optional
        :return: An object of type CommonLog
        :rtype: CommonLog
        """
        if self._registered_logs is None:
            self._registered_logs = dict()
        mod_name = CommonModIdentity._get_mod_name(mod_identifier).lower()
        mod_logs = self._registered_logs.setdefault(mod_name, dict())
        if log_name in mod_logs:
            # Already registered for this mod; hand back the existing log.
            return mod_logs[log_name]
        log = CommonLog(mod_identifier, log_name, custom_file_path=custom_file_path)
        from sims4communitylib.s4cl_configuration import S4CLConfiguration
        if log_name in S4CLConfiguration().enable_logs:
            # Auto-enable logs the user pre-enabled through the S4CL configuration.
            log.enable(message_types=S4CLConfiguration().enable_logs[log_name])
        mod_logs[log_name] = log
        return log

    def _delete_old_log_files(self) -> None:
        # Remove stale log output from previous game sessions.
        from sims4communitylib.utils.common_io_utils import CommonIOUtils
        files_to_delete = (
            os.path.join(CommonLogUtils.get_sims_documents_location_path(), 'mod_logs'),
        )
        for file_to_delete in files_to_delete:
            # noinspection PyBroadException
            try:
                if os.path.isfile(file_to_delete):
                    CommonIOUtils.delete_file(file_to_delete, ignore_errors=True)
                else:
                    CommonIOUtils.delete_directory(file_to_delete, ignore_errors=True)
            except:
                # Best-effort cleanup; a failure to delete must never block startup.
                continue

    # noinspection PyUnusedLocal
    def log_exists(self, log_name: str, mod_identifier: Union[str, CommonModIdentity]=None) -> bool:
        """log_exists(log_name, mod_identifier=None)

        Determine if logs exist with the specified name.

        :param log_name: The name of the log to locate.
        :type log_name: str
        :param mod_identifier: The name or identity of the mod the log belongs to. Default is None.
        :type mod_identifier: Union[str, CommonModIdentity], optional
        :return: True, if a handler exists with the specified name.
        :rtype: bool
        """
        if self._registered_logs is None:
            return False
        if mod_identifier is None:
            # Bug fix: previously this loop could fall through without returning,
            # yielding None instead of False when no mod had a log with this name.
            return any(log_name in mod_logs for mod_logs in self._registered_logs.values())
        mod_name = CommonModIdentity._get_mod_name(mod_identifier).lower()
        return mod_name in self._registered_logs and log_name in self._registered_logs[mod_name]

    # noinspection PyUnusedLocal
    def enable_logs(self, log_name: str, mod_identifier: Union[str, CommonModIdentity]=None) -> bool:
        """enable_logs(log_name, mod_identifier=None)

        Enable all logs with the specified name.

        :param log_name: The name of the logs to enable.
        :type log_name: str
        :param mod_identifier: The name or identity of the mod the log belongs to. Default is None.
        :type mod_identifier: Union[str, CommonModIdentity], optional
        :return: True, if successful. False, if not.
        :rtype: bool
        """
        if self._registered_logs is None:
            self._registered_logs = dict()
        if mod_identifier is None:
            # Enable the log in every mod that registered it.
            for mod_logs in self._registered_logs.values():
                if log_name in mod_logs:
                    mod_logs[log_name].enable()
            return True
        mod_name = CommonModIdentity._get_mod_name(mod_identifier).lower()
        # Bug fix: guard against a mod name that was never registered (previously a KeyError).
        if log_name not in self._registered_logs.get(mod_name, dict()):
            # Not registered yet for this mod; register it on the fly, then enable it.
            log = self.register_log(mod_name, log_name)
            if log is None:
                return False
            log.enable()
            return True
        self._registered_logs[mod_name][log_name].enable()
        return True

    # noinspection PyUnusedLocal
    def disable_logs(self, log_name: str, mod_identifier: Union[str, CommonModIdentity]=None) -> bool:
        """disable_logs(log_name, mod_identifier=None)

        Disable all logs with the specified name.

        :param log_name: The name of the logs to disable.
        :type log_name: str
        :param mod_identifier: The name or identity of the mod to disable logs for. Default is None.
        :type mod_identifier: Union[str, CommonModIdentity], optional
        :return: True, if successful. False, if not.
        :rtype: bool
        """
        if self._registered_logs is None:
            self._registered_logs = dict()
        if mod_identifier is None:
            # Disable the log in every mod that registered it.
            for mod_logs in self._registered_logs.values():
                if log_name in mod_logs:
                    mod_logs[log_name].disable()
            return True
        mod_name = CommonModIdentity._get_mod_name(mod_identifier).lower()
        # Bug fix: guard against a mod name that was never registered (previously a KeyError).
        if log_name not in self._registered_logs.get(mod_name, dict()):
            return False
        self._registered_logs[mod_name][log_name].disable()
        return True

    # noinspection PyUnusedLocal
    def disable_all_logs(self, mod_identifier: Union[str, CommonModIdentity]=None) -> bool:
        """disable_all_logs(mod_identifier=None)

        Disable all logs from logging

        :param mod_identifier: The name or identity of the mod to disable logs for. Default is None.
        :type mod_identifier: Union[str, CommonModIdentity], optional
        :return: True, if successful. False, if not.
        :rtype: bool
        """
        if self._registered_logs is None:
            self._registered_logs = dict()
        if mod_identifier is None:
            # No mod filter: disable everything.
            for mod_logs in self._registered_logs.values():
                for log in mod_logs.values():
                    log.disable()
        else:
            mod_name = CommonModIdentity._get_mod_name(mod_identifier).lower()
            for log in self._registered_logs.get(mod_name, dict()).values():
                log.disable()
        return True
@sims4.commands.Command('s4clib.enable_log', 's4clib.enablelog', command_type=sims4.commands.CommandType.Live)
def _common_command_enable_log(log_name: str=None, mod_name: str=None, _connection: int=None):
    # Console command: enable a registered log (registering it on demand when a mod name is given).
    output = sims4.commands.CheatOutput(_connection)
    try:
        if log_name is None:
            output('specify a log name (See all logs via "s4clib.logs" command)')
            return
        output('Attempting to enable log with name \'{}\''.format(log_name))
        if CommonLogRegistry.get().log_exists(log_name, mod_identifier=mod_name) or mod_name is not None:
            if CommonLogRegistry.get().enable_logs(log_name, mod_identifier=mod_name):
                output('Log enabled: {}'.format(log_name))
            elif mod_name is None:
                output('Failed to enable log with name \'{}\', did you forget to specify a mod name?'.format(log_name))
            else:
                output('Failed to enable log with name \'{}\' for mod \'{}\''.format(log_name, mod_name))
        elif mod_name is None:
            output('No log found with name \'{}\''.format(log_name))
        else:
            output('No log found with name \'{}\' for mod \'{}\''.format(log_name, mod_name))
    except Exception as ex:
        output('Failed to enable log: {}'.format(pformat(ex)))
@sims4.commands.Command('s4clib.disable_log', 's4clib.disablelog', command_type=sims4.commands.CommandType.Live)
def _common_command_disable_log(log_name: str=None, mod_name: str=None, _connection: int=None):
    # Console command: disable a registered log.
    output = sims4.commands.CheatOutput(_connection)
    try:
        if log_name is None:
            output('specify a log name (See all logs via "s4clib.logs" command)')
            return
        output('Attempting to disable log with name \'{}\''.format(log_name))
        if CommonLogRegistry.get().log_exists(log_name, mod_identifier=mod_name):
            if CommonLogRegistry.get().disable_logs(log_name, mod_identifier=mod_name):
                output('Log disabled: {}'.format(log_name))
            elif mod_name is None:
                output('Failed to disable log with name \'{}\', did you forget to specify a mod name?'.format(log_name))
            else:
                output('Failed to disable log with name \'{}\' for mod \'{}\''.format(log_name, mod_name))
        elif mod_name is None:
            output('No log found with name \'{}\''.format(log_name))
        else:
            output('No log found with name \'{}\' for mod \'{}\''.format(log_name, mod_name))
    except Exception as ex:
        output('Failed to disable log: {}'.format(pformat(ex)))
@sims4.commands.Command('s4clib.disable_all_logs', 's4clib.disablealllogs', command_type=sims4.commands.CommandType.Live)
def _common_command_disable_all_logs(mod_name: str=None, _connection: int=None):
    # Console command: disable every registered log (optionally only for one mod).
    output = sims4.commands.CheatOutput(_connection)
    output('Disabling all logs')
    try:
        CommonLogRegistry.get().disable_all_logs(mod_identifier=mod_name)
    except Exception as ex:
        output('Failed to disable all logs: {}'.format(pformat(ex)))
    # NOTE(review): this success message is printed even when the try block failed above.
    output('All logs disabled')
@sims4.commands.Command('s4clib.logs', command_type=sims4.commands.CommandType.Live)
def _common_command_show_all_logs(mod_identifier: str=None, _connection: int=None):
    # Console command: print the names of all registered logs.
    output = sims4.commands.CheatOutput(_connection)
    try:
        log_names = CommonLogRegistry.get().get_registered_log_names(mod_identifier=mod_identifier)
        if log_names is None or output is None:
            return
        if not log_names:
            output('No registered logs found')
            return
        for log_name in log_names:
            output('' + str(log_name))
    except Exception as ex:
        output('Failed to show logs: {}'.format(pformat(ex)))
| 49.149808 | 259 | 0.669749 |
7957fbe3e5149f9aaed3fac4bc134bbdc43833b6 | 5,433 | py | Python | model/seg/nets/deeplabv3.py | jessemelpolio/torchcv | 09bd5c80a2b4b869f4e23881cbe5b8b3bb462f7a | [
"Apache-2.0"
] | 1 | 2020-10-08T00:51:46.000Z | 2020-10-08T00:51:46.000Z | model/seg/nets/deeplabv3.py | jessemelpolio/torchcv | 09bd5c80a2b4b869f4e23881cbe5b8b3bb462f7a | [
"Apache-2.0"
] | null | null | null | model/seg/nets/deeplabv3.py | jessemelpolio/torchcv | 09bd5c80a2b4b869f4e23881cbe5b8b3bb462f7a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Donny You(youansheng@gmail.com)
# deeplabv3 res101 (synchronized BN version)
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.backbones.backbone_selector import BackboneSelector
from model.tools.module_helper import ModuleHelper
from model.seg.loss.loss import BASE_LOSS_DICT
class ASPPModule(nn.Module):
    """
    Atrous Spatial Pyramid Pooling: five parallel branches (global pooling, a 1x1 conv,
    and three 3x3 atrous convs at increasing dilation rates) concatenated and fused by a
    1x1 bottleneck conv.

    Reference:
        Chen, Liang-Chieh, et al. *"Rethinking Atrous Convolution for Semantic Image Segmentation."*
    """
    def __init__(self, features, inner_features=512, out_features=512, dilations=(12, 24, 36), norm_type=None):
        super(ASPPModule, self).__init__()

        # Branch 1: image-level (global average pooled) features, 1x1-projected.
        self.conv1 = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
                                   nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1,
                                             bias=False),
                                   ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        # Branch 2: plain 1x1 projection.
        self.conv2 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        # Branches 3-5: 3x3 atrous convs; padding == dilation keeps the spatial size unchanged.
        self.conv3 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[0], dilation=dilations[0], bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        self.conv4 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[1], dilation=dilations[1], bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        self.conv5 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[2], dilation=dilations[2], bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))

        # Fuse the 5 concatenated branches back down to out_features channels.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(inner_features * 5, out_features, kernel_size=1, padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(out_features, norm_type=norm_type),
            nn.Dropout2d(0.1)
        )

    def forward(self, x):
        _, _, h, w = x.size()

        # Upsample the pooled branch back to the input's spatial size before concatenation.
        feat1 = F.interpolate(self.conv1(x), size=(h, w), mode='bilinear', align_corners=True)

        feat2 = self.conv2(x)
        feat3 = self.conv3(x)
        feat4 = self.conv4(x)
        feat5 = self.conv5(x)
        # Concatenate along the channel dimension, then fuse.
        out = torch.cat((feat1, feat2, feat3, feat4, feat5), 1)

        bottle = self.bottleneck(out)
        return bottle
class DeepLabV3(nn.Module):
    """DeepLabV3 semantic segmentation network: backbone + ASPP head.

    A deep-supervision (DSN) auxiliary head is attached to the second-to-last
    backbone stage and trained alongside the main ASPP head.
    """
    def __init__(self, configer):
        super(DeepLabV3, self).__init__()
        self.inplanes = 128
        self.configer = configer
        self.num_classes = self.configer.get('data', 'num_classes')
        self.backbone = BackboneSelector(configer).get_backbone()

        # Main head: ASPP over the last backbone stage, then a 1x1 classifier.
        self.head = nn.Sequential(ASPPModule(self.backbone.get_num_features(),
                                             norm_type=self.configer.get('network', 'norm_type')),
                                  nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True))
        # Auxiliary (deep supervision) head on the second-to-last backbone stage.
        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(512, norm_type=self.configer.get('network', 'norm_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)
        )
        self.valid_loss_dict = configer.get('loss', 'loss_weights', configer.get('loss.loss_type'))

    def forward(self, data_dict):
        """Run the network; in training mode also build the per-loss parameter dict.

        :param data_dict: dict with 'img' (input batch) and, during training, 'labelmap'.
        :return: out_dict in test phase; (out_dict, loss_dict) otherwise.
        """
        x = self.backbone(data_dict['img'])
        x_dsn = self.dsn(x[-2])
        x = self.head(x[-1])
        # Upsample both predictions back to the input resolution.
        x_dsn = F.interpolate(x_dsn, size=(data_dict['img'].size(2), data_dict['img'].size(3)),
                              mode="bilinear", align_corners=True)
        x = F.interpolate(x, size=(data_dict['img'].size(2), data_dict['img'].size(3)),
                          mode="bilinear", align_corners=True)
        out_dict = dict(dsn_out=x_dsn, out=x)
        if self.configer.get('phase') == 'test':
            return out_dict

        loss_dict = dict()
        if 'dsn_ce_loss' in self.valid_loss_dict:
            # Bug fix: the deep-supervision loss must be computed on the DSN output
            # (x_dsn), not the main output; otherwise the dsn branch receives no gradient.
            loss_dict['dsn_ce_loss'] = dict(
                params=[x_dsn, data_dict['labelmap']],
                type=torch.cuda.LongTensor([BASE_LOSS_DICT['ce_loss']]),
                weight=torch.cuda.FloatTensor([self.valid_loss_dict['dsn_ce_loss']])
            )

        if 'ce_loss' in self.valid_loss_dict:
            loss_dict['ce_loss'] = dict(
                params=[x, data_dict['labelmap']],
                type=torch.cuda.LongTensor([BASE_LOSS_DICT['ce_loss']]),
                weight=torch.cuda.FloatTensor([self.valid_loss_dict['ce_loss']])
            )

        if 'ohem_ce_loss' in self.valid_loss_dict:
            loss_dict['ohem_ce_loss'] = dict(
                params=[x, data_dict['labelmap']],
                type=torch.cuda.LongTensor([BASE_LOSS_DICT['ohem_ce_loss']]),
                weight=torch.cuda.FloatTensor([self.valid_loss_dict['ohem_ce_loss']])
            )

        return out_dict, loss_dict
if __name__ == '__main__':
    # NOTE(review): this demo block looks stale and will not run as written:
    #  - DeepLabV3.__init__ in this file takes a `configer`, not `(num_classes, multi_grid=...)`.
    #  - `freeze_bn()` and `resnet_features` are not defined on DeepLabV3 in this file.
    #  - `volatile=True` is a removed PyTorch 0.3-era Variable flag; modern code uses `torch.no_grad()`.
    model = DeepLabV3(20, multi_grid=[1, 2, 1])
    model.freeze_bn()
    model.eval()
    image = torch.autograd.Variable(torch.randn(1, 3, 512, 512), volatile=True)
    print(type(model.resnet_features))
    print (model(image).size())
| 43.464 | 120 | 0.614578 |
7957fc1c523f65009a271e510270cda2d5298d0c | 879 | py | Python | tests/syntax/GeneratorReturn_2.py | sthagen/Nuitka-Nuitka | 023dc76eeafd9c53ee2a51931474ddd98a3ba083 | [
"Apache-2.0"
] | null | null | null | tests/syntax/GeneratorReturn_2.py | sthagen/Nuitka-Nuitka | 023dc76eeafd9c53ee2a51931474ddd98a3ba083 | [
"Apache-2.0"
] | null | null | null | tests/syntax/GeneratorReturn_2.py | sthagen/Nuitka-Nuitka | 023dc76eeafd9c53ee2a51931474ddd98a3ba083 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def g():
    """Yield 0, 1, 2, then return a value from inside the generator.

    This file is a syntax test: a ``return`` with a value inside a generator is a
    SyntaxError on Python 2 but legal on Python 3, where the returned value becomes
    ``StopIteration.value`` and is simply discarded by ``list()``.
    """
    for a in range(3):
        yield a

    return 7

# Prints the yielded values only; the generator's return value is not included.
print("Yielder with return value", list(g()))
| 33.807692 | 79 | 0.696246 |
7957fcaa847a4955bb34c1b99f0242f177f125a8 | 6,852 | py | Python | nnlib/data/iterator.py | huzecong/nnlib | fd68abc51028444ce7c789642e2a7b8ed1853255 | [
"MIT"
] | 1 | 2019-01-08T03:55:23.000Z | 2019-01-08T03:55:23.000Z | nnlib/data/iterator.py | huzecong/nnlib | fd68abc51028444ce7c789642e2a7b8ed1853255 | [
"MIT"
] | null | null | null | nnlib/data/iterator.py | huzecong/nnlib | fd68abc51028444ce7c789642e2a7b8ed1853255 | [
"MIT"
] | null | null | null | import math
import random
from collections import defaultdict
from typing import Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, TypeVar
# Public API of this module.
__all__ = ['bucketed_bitext_iterator', 'sorted_bitext_iterator', 'bitext_index_iterator', 'bitext_iterator',
           'multi_dataset_iterator']

Token = TypeVar('Token', str, int)  # the token could either be a literal string or a vocabulary index
Sentence = Sequence[Token]  # a sentence is a sequence of tokens
LangPair = Tuple[str, str]  # a pair of language specifiers
def bucketed_bitext_iterator(src_sents: 'Sequence[Sentence]', tgt_sents: 'Sequence[Sentence]', batch_size: int,
                             max_length: Optional[int] = None) \
        -> Iterator[List[int]]:
    r"""
    Return an iterator generating batches over bi-text bucketed by length. Each batch only contains examples with
    source sentences of the same length.

    :param src_sents: List of source sentences.
    :param tgt_sents: List of target sentences, paired with source sentences.
    :param batch_size: Number of data examples to include in the same batch.
    :param max_length: Maximum length of both source and target sentences. If specified, examples containing sentences
        longer than this limit will be discarded.
    :return: Iterator yielding a list of indices to data examples. Each list is one batch containing no more than
        `batch_size` examples.
    """
    # Group example indices by source sentence length.
    buckets: Dict[int, List[int]] = defaultdict(list)
    for idx, (src_sent, tgt_sent) in enumerate(zip(src_sents, tgt_sents)):
        if max_length is not None and (len(src_sent) > max_length or len(tgt_sent) > max_length):
            continue
        if len(src_sent) == 0 or len(tgt_sent) == 0:  # skip empty sentences
            continue
        buckets[len(src_sent)].append(idx)

    batches = []
    src_lens = list(buckets.keys())
    random.shuffle(src_lens)
    for src_len in src_lens:
        bucket = buckets[src_len]
        random.shuffle(bucket)
        # Exact integer ceiling division; avoids float rounding that int(math.ceil(n * 1.0 / b))
        # could suffer for very large buckets.
        num_batches = -(-len(bucket) // batch_size)
        for i in range(num_batches):
            batch = bucket[(i * batch_size):((i + 1) * batch_size)]
            batches.append(batch)
    random.shuffle(batches)
    yield from batches
def sorted_bitext_iterator(src_sents: 'Sequence[Sentence]', tgt_sents: 'Sequence[Sentence]', batch_size: int,
                           max_length: Optional[int] = None, bins: Optional[Sequence[int]] = None) \
        -> Iterator[List[int]]:
    r"""
    Return an iterator generating batches over bi-text. Examples in a batch are of similar length, and are sorted in
    descending order of source sentence length.

    In implementation, examples are first clustered into bins (as in `np.histogram`).

    :param src_sents: List of source sentences.
    :param tgt_sents: List of target sentences, paired with source sentences.
    :param batch_size: Number of data examples to include in the same batch.
    :param max_length: Maximum length of both source and target sentences. If specified, examples containing sentences
        longer than this limit will be discarded.
    :param bins: Thresholds for bins.
    :return: Iterator yielding a list of indices to data examples. Each list is one batch containing no more than
        `batch_size` examples.
    """
    bins = bins or [20, 30, 40, 50, 60, 75]
    # One group per bin, plus a separate list for examples longer than the last threshold.
    groups: List[List[int]] = [[] for _ in bins]
    outliers = []
    for idx, (src_sent, tgt_sent) in enumerate(zip(src_sents, tgt_sents)):
        if max_length is not None and (len(src_sent) > max_length or len(tgt_sent) > max_length):
            continue
        if len(src_sent) == 0 or len(tgt_sent) == 0:  # skip empty sentences
            continue
        placed = False
        for bucket_id, threshold in enumerate(bins):
            if len(src_sent) <= threshold:
                groups[bucket_id].append(idx)
                placed = True
                break
        if not placed:
            outliers.append(idx)
    if len(outliers) > 0:
        groups.append(outliers)

    batches = []
    for group in groups:
        random.shuffle(group)
        num_batches = int(math.ceil(len(group) * 1.0 / batch_size))
        for batch_idx in range(num_batches):
            batch = group[(batch_idx * batch_size):((batch_idx + 1) * batch_size)]
            # Within a batch, longest source sentences come first.
            batch.sort(key=lambda example_idx: len(src_sents[example_idx]), reverse=True)
            batches.append(batch)
    random.shuffle(batches)
    yield from batches
def bitext_index_iterator(src_sents: 'Sequence[Sentence]', tgt_sents: 'Sequence[Sentence]', batch_size: int,
                          max_length: Optional[int] = None, sort: bool = False) \
        -> Iterator[List[int]]:
    r"""
    A convenient interface that calls other bi-text iterator functions. Returns an iterator over example indices.

    :param src_sents: List of source sentences.
    :param tgt_sents: List of target sentences, paired with source sentences.
    :param batch_size: Number of data examples to include in the same batch.
    :param max_length: Maximum length of both source and target sentences. If specified, examples containing sentences
        longer than this limit will be discarded.
    :param sort: If true, the examples in a batch are sorted in descending order of source sentence length.
    """
    # Dispatch to the sorted or the bucketed strategy.
    make_iterator = sorted_bitext_iterator if sort else bucketed_bitext_iterator
    yield from make_iterator(src_sents, tgt_sents, batch_size, max_length=max_length)
def bitext_iterator(src_sents: 'Sequence[Sentence]', tgt_sents: 'Sequence[Sentence]', batch_size: int,
                    max_length: Optional[int] = None, sort: bool = False) \
        -> 'Iterator[Tuple[List[Sentence], List[Sentence]]]':
    r"""
    A wrapper over :func:`bitext_index_iterator` that returns the actual data examples instead of indices.
    """
    for index_batch in bitext_index_iterator(src_sents, tgt_sents, batch_size, max_length=max_length, sort=sort):
        # Materialize the paired source/target sentences for this batch of indices.
        yield [src_sents[i] for i in index_batch], [tgt_sents[i] for i in index_batch]
Key = TypeVar('Key')
BatchValue = TypeVar('BatchValue')


def multi_dataset_iterator(iterators: 'Mapping[Key, Iterator[BatchValue]]') \
        -> 'Iterator[Tuple[Key, BatchValue]]':
    r"""
    An iterator that first chooses a dataset at random, then returns a batch from that dataset.

    :param iterators: A dictionary mapping keys to iterators.
    :return: Iterator yielding the key of the chosen dataset and the batch.
    """
    active = list(iterators.items())  # indexable snapshot of the remaining datasets
    while active:
        pick = random.randrange(len(active))
        key, source = active[pick]
        try:
            value = next(source)
        except StopIteration:
            # This dataset is exhausted; drop it and try again.
            del active[pick]
        else:
            yield key, value
| 44.493506 | 118 | 0.676299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.