hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
13df30d351901d02b18ff47f7fcc1cdf57cf3412 | 198 | py | Python | bootstrap_modal_forms/forms.py | uhuntu/django-bootstrap-modal-forms | e725643ac6233c04763f886b67127c7df06e5d71 | [
"MIT"
] | null | null | null | bootstrap_modal_forms/forms.py | uhuntu/django-bootstrap-modal-forms | e725643ac6233c04763f886b67127c7df06e5d71 | [
"MIT"
] | null | null | null | bootstrap_modal_forms/forms.py | uhuntu/django-bootstrap-modal-forms | e725643ac6233c04763f886b67127c7df06e5d71 | [
"MIT"
] | null | null | null | from django import forms
from bootstrap_modal_forms.mixins import PopRequestMixin, CreateUpdateAjaxMixin
class BSModalForm(PopRequestMixin, CreateUpdateAjaxMixin, forms.ModelForm):
    # Convenience base form that combines the two project mixins with
    # Django's ModelForm; it adds no behavior of its own (see the mixins
    # for the request-popping and AJAX create/update logic).
    pass
| 28.285714 | 80 | 0.828283 |
de502f3c799701e9dc8774a83e9e6ac2d430b20a | 2,324 | py | Python | pai/pouw/verification/verifier_pb2_grpc.py | projectpai/pouw-main-iteration | e2505f63e11bbf80648c8cbe56b6d6f3e3a8546e | [
"MIT"
] | 11 | 2020-06-22T05:31:18.000Z | 2022-03-29T16:50:21.000Z | pai/pouw/verification/verifier_pb2_grpc.py | AIIJJII/pouw-main-iteration | e2505f63e11bbf80648c8cbe56b6d6f3e3a8546e | [
"MIT"
] | 3 | 2020-06-23T18:20:09.000Z | 2021-07-06T23:28:24.000Z | pai/pouw/verification/verifier_pb2_grpc.py | AIIJJII/pouw-main-iteration | e2505f63e11bbf80648c8cbe56b6d6f3e3a8546e | [
"MIT"
] | 3 | 2020-09-02T11:03:16.000Z | 2022-03-29T16:50:00.000Z | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import verifier_pb2 as verifier__pb2
class VerifierStub(object):
    """definition of verifier service
    """
    # NOTE: generated by the gRPC Python protocol compiler -- do not hand-edit;
    # regenerate from the .proto definition instead.
    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary (one request, one response) RPC bound to the Verify
        # method of the pai.pouw.verification.Verifier service.
        self.Verify = channel.unary_unary(
                '/pai.pouw.verification.Verifier/Verify',
                request_serializer=verifier__pb2.Request.SerializeToString,
                response_deserializer=verifier__pb2.Response.FromString,
                )
class VerifierServicer(object):
    """definition of verifier service
    """
    # Generated server-side base class; subclass and override Verify to
    # implement the service.
    def Verify(self, request, context):
        """Missing associated documentation comment in .proto file."""
        # Default generated implementation: report UNIMPLEMENTED to the client.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_VerifierServicer_to_server(servicer, server):
    # Register a VerifierServicer implementation with a grpc.Server under the
    # fully-qualified service name (generated code -- do not hand-edit).
    rpc_method_handlers = {
            'Verify': grpc.unary_unary_rpc_method_handler(
                    servicer.Verify,
                    request_deserializer=verifier__pb2.Request.FromString,
                    response_serializer=verifier__pb2.Response.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'pai.pouw.verification.Verifier', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
 # This class is part of an EXPERIMENTAL API.
class Verifier(object):
    """definition of verifier service
    """
    @staticmethod
    def Verify(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Convenience one-shot invocation of Verify that builds the channel
        # internally (experimental gRPC API, generated code).
        return grpc.experimental.unary_unary(request, target, '/pai.pouw.verification.Verifier/Verify',
            verifier__pb2.Request.SerializeToString,
            verifier__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 33.2 | 103 | 0.655336 |
fc4900986b8d537635259a1bfe79132c14ac863a | 3,283 | py | Python | captum/attr/_utils/class_summarizer.py | caraya10/captum | 258928905875c18e85a2413b3bb97def1bfb730a | [
"BSD-3-Clause"
] | 1 | 2022-01-05T14:17:48.000Z | 2022-01-05T14:17:48.000Z | captum/attr/_utils/class_summarizer.py | caraya10/captum | 258928905875c18e85a2413b3bb97def1bfb730a | [
"BSD-3-Clause"
] | null | null | null | captum/attr/_utils/class_summarizer.py | caraya10/captum | 258928905875c18e85a2413b3bb97def1bfb730a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
from collections import defaultdict
from typing import Any, Dict, List, Union, Optional
from torch import Tensor
from captum._utils.common import _format_tensor_into_tuples
from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.stat import Stat
from captum.attr._utils.summarizer import Summarizer
from captum.log import log_usage
class ClassSummarizer(Summarizer):
    r"""
    Used to keep track of summaries for associated classes. The
    classes/labels can be of any type that are supported by `dict`.
    This also keeps track of an aggregate of all class summaries.
    """
    @log_usage()
    def __init__(self, stats: List[Stat]):
        # Call the wrapped parent __init__ directly so the log_usage
        # decorator on the parent constructor is not triggered again.
        Summarizer.__init__.__wrapped__(self, stats)
        # One Summarizer per label, created lazily on first update.
        self.summaries: Dict[Any, Summarizer] = defaultdict(
            lambda: Summarizer(stats=stats)
        )
    def update(  # type: ignore
        self, x: TensorOrTupleOfTensorsGeneric, labels: TargetType = None,
    ):
        r"""
        Updates the stats of the summarizer, optionally associated to classes.
        This accepts either a single tensor to summarise or a tuple of tensors.
        Args:
            x (Tensor or Tuple[Tensor, ...]):
                The input tensor to be summarised. The first
                dimension of this input must be associated to
                the batch size of the inputs.
            labels (int, tuple, tensor or list, optional):
                The associated labels for `x`. If Any, we
                assume `labels` represents the label for all inputs in `x`.
                If this is None we simply aggregate the total summary.
        """
        if labels is None:
            # No labels: only update the aggregate (parent) summary.
            super().update(x)
            return
        x = _format_tensor_into_tuples(x)
        num_labels = 1
        labels_typed: Union[List[Any], Tensor]
        if isinstance(labels, list) or isinstance(labels, Tensor):
            labels_typed = labels
            num_labels = len(labels)  # = labels.size(0) if tensor
        else:
            labels_typed = [labels]
        # mypy doesn't realise I have made the int a list
        if len(labels_typed) > 1:
            for x_i in x:
                assert x_i.size(0) == num_labels, (
                    "batch size does not equal amount of labels; "
                    "please ensure length of labels is equal to 1 "
                    "or to the `batch_size` corresponding to the "
                    "number of examples in the input(s)"
                )
        batch_size = x[0].size(0)
        for i in range(batch_size):
            tensors_to_summarize = tuple(tensor[i] for tensor in x)
            # The aggregate summary gets cloned slices, presumably so it does
            # not share storage with the per-class summarizer's inputs.
            tensors_to_summarize_copy = tuple(tensor[i].clone() for tensor in x)
            label = labels_typed[0] if len(labels_typed) == 1 else labels_typed[i]
            self.summaries[label].update(tensors_to_summarize)
            super().update(tensors_to_summarize_copy)
    @property
    def class_summaries(
        self,
    ) -> Dict[
        Any, Union[None, Dict[str, Optional[Tensor]], List[Dict[str, Optional[Tensor]]]]
    ]:
        r"""
        Returns:
            The summaries for each class.
        """
        return {key: value.summary for key, value in self.summaries.items()}
| 34.925532 | 88 | 0.613768 |
8e384e762b8b98d591c63a726af3969628e0536c | 2,748 | py | Python | flask_oss.py | SailerNote/flask-oss | 5526beb2d5cd4fbb81e5648e1435192cf5a4634c | [
"Apache-2.0"
] | null | null | null | flask_oss.py | SailerNote/flask-oss | 5526beb2d5cd4fbb81e5648e1435192cf5a4634c | [
"Apache-2.0"
] | null | null | null | flask_oss.py | SailerNote/flask-oss | 5526beb2d5cd4fbb81e5648e1435192cf5a4634c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import oss2
import oss2.exceptions
logger = logging.getLogger("flask_oss")
class FlaskOSS(object):
    """Flask extension wrapping an Aliyun OSS bucket.

    ``init_app`` reads the OSS_* settings from the Flask config and binds a
    bucket; the remaining methods are thin put/get/exists/delete helpers.
    """

    def __init__(self, app=None):
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Configure auth and the bucket from the app's OSS_* settings."""
        _access_key = app.config.get("OSS_ACCESS_KEY_ID")
        _secret = app.config.get("OSS_SECRET_ACCESS_KEY")
        _endpoint = app.config.get("OSS_ENDPOINT")
        _bucket_name = app.config.get("OSS_BUCKET_NAME")
        self.auth = oss2.Auth(_access_key, _secret)
        self.bucket = oss2.Bucket(self.auth, _endpoint, _bucket_name)

    def put_file(self, filename=None, raw_contents=None):
        """Upload raw contents under ``filename``; return the name on success."""
        assert filename is not None
        result = self.bucket.put_object(filename, raw_contents)
        if result.status == 200:
            return filename
        # bug fix: the original format string had no placeholder, so the
        # filename never appeared in the log message
        logger.error("FAILURE writing file {filename}".format(filename=filename))

    def put_file_by_path(self, filename=None, filepath=None):
        """
        :param filename: filename-in-oss.txt
        :param filepath: /tmp/folder/filename-in-local.txt
        :return: filename
        """
        assert filename is not None
        assert filepath is not None
        result = self.bucket.put_object_from_file(filename, filepath)
        if result.status == 200:
            return filename
        # bug fix: placeholder added (see put_file)
        logger.error("FAILURE writing file {filename}".format(filename=filename))

    def get_file(self, filename=None):
        """Return the object's contents, or None (with a log) if missing."""
        assert filename is not None
        try:
            result = self.bucket.get_object(filename)
            return result.read()
        except oss2.exceptions.NoSuchKey as e:
            logger.error(
                "{0} not found: http_status={1}, request_id={2}".format(
                    filename, e.status, e.request_id
                )
            )

    def exists_file(self, filename=None):
        """Return True if the object exists, False otherwise."""
        assert filename is not None
        # bug fix: the original returned True or (implicitly) None; returning
        # the boolean directly is truth-equivalent and clearer
        return bool(self.bucket.object_exists(filename))

    def del_file(self, filename=None):
        """Delete the object; return True on success, False if it was missing."""
        is_delete = False
        try:
            self.bucket.delete_object(filename)
            is_delete = True
        except oss2.exceptions.NoSuchKey as e:
            logger.error(
                "{0} not found: http_status={1}, request_id={2}".format(
                    filename, e.status, e.request_id
                )
            )
        # bug fix: the original used `finally: return is_delete`, which
        # silently swallowed any exception other than NoSuchKey; unexpected
        # errors now propagate to the caller
        return is_delete
| 30.533333 | 85 | 0.593886 |
ec4b6c4e2a2d1b00d27dc3bb5623131bffd82cc9 | 2,163 | py | Python | tests/example/models.py | elpatiostudio/wagtail_cache_block | 88bfe456668903321810c48e00a8bfb9d7abe848 | [
"BSD-3-Clause"
] | 4 | 2020-03-21T19:54:29.000Z | 2020-12-01T11:24:29.000Z | tests/example/models.py | elpatiostudio/wagtail_cache_block | 88bfe456668903321810c48e00a8bfb9d7abe848 | [
"BSD-3-Clause"
] | 1 | 2021-03-08T18:43:23.000Z | 2021-03-08T18:43:23.000Z | tests/example/models.py | elpatiostudio/wagtail_cache_block | 88bfe456668903321810c48e00a8bfb9d7abe848 | [
"BSD-3-Clause"
] | 4 | 2020-03-19T13:31:43.000Z | 2021-12-24T03:05:31.000Z | from wagtail.admin.edit_handlers import (FieldPanel, InlinePanel,
MultiFieldPanel, PageChooserPanel,
StreamFieldPanel)
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Orderable, Page
from taggit.models import TaggedItemBase
from wagtail.core.blocks import (CharBlock, ChoiceBlock, ListBlock,
PageChooserBlock, RawHTMLBlock, RichTextBlock,
StreamBlock, StructBlock, StructValue)
class ColumnStructBlock(StructBlock):
    # Fixed trio of sub-blocks (heading, paragraph, page link) rendered by a
    # dedicated column template.
    heading = CharBlock(classname="full title")
    paragraph = RichTextBlock()
    reference_page = PageChooserBlock()
    class Meta:
        template = 'example/blocks/column_struct_block.html'
class ColumnStreamBlock(StreamBlock):
    # Stream of ColumnStructBlock entries, rendered by its own template.
    sub_struct_data = ColumnStructBlock()
    class Meta:
        template = 'example/blocks/column_stream_block.html'
class ArticlePage(Page):
    # Demo page whose body exercises several StreamField block flavours:
    # plain blocks, an inline StructBlock, a nested StreamBlock, and the
    # template-backed Column* blocks.
    body = StreamField([
        ('heading', CharBlock(classname="full title")),
        ('paragraph', RichTextBlock()),
        ('reference_page', PageChooserBlock()),
        # this is single StructBlock
        (
            'struct_data',
            StructBlock([
                ('heading', CharBlock(classname="full title")),
                ('paragraph', RichTextBlock()),
                ('reference_page', PageChooserBlock()),
            ])
        ),
        # this is StreamBlock
        (
            'stream_data',
            StreamBlock([
                (
                    'sub_struct_data',
                    StructBlock([
                        ('heading', CharBlock(classname="full title")),
                        ('paragraph', RichTextBlock()),
                        ('reference_page', PageChooserBlock()),
                    ])
                ),
            ])
        ),
        ('column_struct_data', ColumnStructBlock()),
        ('column_stream_data', ColumnStreamBlock())
    ], null=True, blank=True)
    # Editor panels configuration
    content_panels = Page.content_panels + [
        StreamFieldPanel('body'),
    ]
| 30.464789 | 79 | 0.562644 |
f2f3bb590615bc5b943c67814f0f09aa2fcb8e8d | 121 | py | Python | projecture/tests/test_projecture.py | diszgaurav/projecture | afbdaa55e9476fa0fd0b914969087ed2a7ca694b | [
"MIT"
] | 1 | 2016-05-04T17:26:59.000Z | 2016-05-04T17:26:59.000Z | projecture/tests/test_projecture.py | diszgaurav/projecture | afbdaa55e9476fa0fd0b914969087ed2a7ca694b | [
"MIT"
] | null | null | null | projecture/tests/test_projecture.py | diszgaurav/projecture | afbdaa55e9476fa0fd0b914969087ed2a7ca694b | [
"MIT"
] | null | null | null | import projecture
def test_list_projects():
    """projecture.list_projects() should return a list."""
    projects = projecture.list_projects()
    # assert the boolean directly instead of comparing against True
    assert isinstance(projects, list)
| 20.166667 | 39 | 0.727273 |
02b3a3628b1e61d9e4fa47bdfdbbf9fdad1451f1 | 8,972 | py | Python | lib/synchronizer.py | PrettyBoyHelios/electrum | 00cc7332f8547e21785e31f76786e4887322403e | [
"MIT"
] | null | null | null | lib/synchronizer.py | PrettyBoyHelios/electrum | 00cc7332f8547e21785e31f76786e4887322403e | [
"MIT"
] | null | null | null | lib/synchronizer.py | PrettyBoyHelios/electrum | 00cc7332f8547e21785e31f76786e4887322403e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from threading import Lock
import hashlib
# from .bitcoin import Hash, hash_encode
from .transaction import Transaction
from .util import ThreadJob, bh2u
class Synchronizer(ThreadJob):
    '''The synchronizer keeps the wallet up-to-date with its set of
    addresses and their transactions. It subscribes over the network
    to wallet addresses, gets the wallet to generate new addresses
    when necessary, requests the transaction history of any addresses
    we don't have the full history of, and requests binary transaction
    data of any transactions the wallet doesn't have.
    External interface: __init__() and add() member functions.
    '''
    def __init__(self, wallet, network):
        self.wallet = wallet
        self.network = network
        # Addresses queued (possibly from other threads) for subscription.
        self.new_addresses = set()
        # Entries are (tx_hash, tx_height) tuples
        self.requested_tx = {}
        self.requested_histories = {}
        self.requested_addrs = set()
        # Protects new_addresses, which is mutated from GUI/proxy threads.
        self.lock = Lock()
        self.initialized = False
        self.initialize()
    def parse_response(self, response):
        # Split a server response into (params, result); (None, None) on error.
        if response.get('error'):
            self.print_error("response error:", response)
            return None, None
        return response['params'], response['result']
    def is_up_to_date(self):
        # Up to date when there are no outstanding requests of any kind.
        return (not self.requested_tx and not self.requested_histories
                and not self.requested_addrs)
    def release(self):
        # Detach the status callback when this job is removed.
        self.network.unsubscribe(self.on_address_status)
    def add(self, address):
        '''This can be called from the proxy or GUI threads.'''
        with self.lock:
            self.new_addresses.add(address)
    def subscribe_to_addresses(self, addresses):
        # Queue a set of addresses for server-side status subscriptions.
        if addresses:
            self.requested_addrs |= addresses
    def get_status(self, h):
        # Electrum protocol status hash of a history: sha256 over the
        # concatenation of "txid:height:" entries; None for empty history.
        if not h:
            return None
        status = ''
        for tx_hash, height in h:
            status += tx_hash + ':%d:' % height
        return bh2u(hashlib.sha256(status.encode('ascii')).digest())
    def on_address_status(self, response):
        # Server announced an address status; request the full history if it
        # differs from what the wallet already has.
        if self.wallet.synchronizer is None and self.initialized:
            return  # we have been killed, this was just an orphan callback
        params, result = self.parse_response(response)
        if not params:
            return
        addr = params[0]
        history = self.wallet.history.get(addr, [])
        if self.get_status(history) != result:
            # note that at this point 'result' can be None;
            # if we had a history for addr but now the server is telling us
            # there is no history
            if addr not in self.requested_histories:
                self.requested_histories[addr] = result
                self.network.request_address_history(addr, self.on_address_history)
        # remove addr from list only after it is added to requested_histories
        if addr in self.requested_addrs:  # Notifications won't be in
            self.requested_addrs.remove(addr)
    def on_address_history(self, response):
        # Full history for an address arrived; validate it against the
        # announced status, store it, and fetch any missing transactions.
        if self.wallet.synchronizer is None and self.initialized:
            return  # we have been killed, this was just an orphan callback
        params, result = self.parse_response(response)
        if not params:
            return
        addr = params[0]
        try:
            server_status = self.requested_histories[addr]
        except KeyError:
            # note: server_status can be None even if we asked for the history,
            # so it is not sufficient to test that
            self.print_error("receiving history (unsolicited)", addr, len(result))
            return
        self.print_error("receiving history", addr, len(result))
        hashes = set(map(lambda item: item['tx_hash'], result))
        hist = list(map(lambda item: (item['tx_hash'], item['height']), result))
        # tx_fees
        tx_fees = [(item['tx_hash'], item.get('fee')) for item in result]
        tx_fees = dict(filter(lambda x:x[1] is not None, tx_fees))
        # Note if the server hasn't been patched to sort the items properly
        if hist != sorted(hist, key=lambda x:x[1]):
            interface = self.network.interface
            # note: we don't actually know which interface was used if it was *just* changed
            if interface: interface.print_error("serving improperly sorted address histories")
        # Check that txids are unique
        if len(hashes) != len(result):
            self.print_error("error: server history has non-unique txids: %s"% addr)
        # Check that the status corresponds to what was announced
        elif self.get_status(hist) != server_status:
            self.print_error("error: status mismatch: %s" % addr)
        else:
            # Store received history
            self.wallet.receive_history_callback(addr, hist, tx_fees)
            # Request transactions we don't have
            self.request_missing_txs(hist)
        # Remove request; this allows up_to_date to be True
        self.requested_histories.pop(addr)
    def on_tx_response(self, response):
        # A requested raw transaction arrived; deserialize, hand it to the
        # wallet, and fire GUI callbacks.
        if self.wallet.synchronizer is None and self.initialized:
            return  # we have been killed, this was just an orphan callback
        params, result = self.parse_response(response)
        if not params:
            return
        tx_hash = params[0]
        #assert tx_hash == hash_encode(Hash(bytes.fromhex(result)))
        tx = Transaction(result)
        try:
            tx.deserialize()
        except Exception:
            self.print_msg("cannot deserialize transaction, skipping", tx_hash)
            return
        tx_height = self.requested_tx.pop(tx_hash)
        self.wallet.receive_tx_callback(tx_hash, tx, tx_height)
        self.print_error("received tx %s height: %d bytes: %d" %
                         (tx_hash, tx_height, len(tx.raw)))
        # callbacks
        self.network.trigger_callback('new_transaction', tx)
        if not self.requested_tx:
            self.network.trigger_callback('updated')
    def request_missing_txs(self, hist):
        # "hist" is a list of [tx_hash, tx_height] lists
        transaction_hashes = []
        for tx_hash, tx_height in hist:
            if tx_hash in self.requested_tx:
                continue
            if tx_hash in self.wallet.transactions:
                continue
            transaction_hashes.append(tx_hash)
            self.requested_tx[tx_hash] = tx_height
        self.network.get_transactions(transaction_hashes, self.on_tx_response)
    def initialize(self):
        '''Check the initial state of the wallet. Subscribe to all its
        addresses, and request any transactions in its address history
        we don't have.
        '''
        for history in self.wallet.history.values():
            # Old electrum servers returned ['*'] when all history for
            # the address was pruned. This no longer happens but may
            # remain in old wallets.
            if history == ['*']:
                continue
            self.request_missing_txs(history)
        if self.requested_tx:
            self.print_error("missing tx", self.requested_tx)
        self.subscribe_to_addresses(set(self.wallet.get_addresses()))
        self.initialized = True
    def run(self):
        '''Called from the network proxy thread main loop.'''
        # 1. Create new addresses
        self.wallet.synchronize()
        # 2. Subscribe to new addresses
        with self.lock:
            addresses = self.new_addresses
            self.new_addresses = set()
        self.subscribe_to_addresses(addresses)
        # 3. Detect if situation has changed
        up_to_date = self.is_up_to_date()
        if up_to_date != self.wallet.is_up_to_date():
            self.wallet.set_up_to_date(up_to_date)
            self.network.trigger_callback('updated')
| 41.730233 | 94 | 0.649354 |
4ed57258c00c9e7c55904926f1a91436820006bb | 897 | py | Python | setup_new_service.py | sellpy/stockholm-ai | 187bda91d6bc5cfef89083b15836ec18e819a1b8 | [
"MIT"
] | 2 | 2018-03-26T16:31:38.000Z | 2018-03-28T08:55:07.000Z | setup_new_service.py | sellpy/stockholm-ai | 187bda91d6bc5cfef89083b15836ec18e819a1b8 | [
"MIT"
] | null | null | null | setup_new_service.py | sellpy/stockholm-ai | 187bda91d6bc5cfef89083b15836ec18e819a1b8 | [
"MIT"
] | null | null | null | import os
import subprocess
from aws_helpers import create_repository, setup_elb, create_ecs_service, deregister_old_taskdefinitions
version = 1  # NOTE(review): unused below -- confirm whether still needed
service_path = 'services.dev.sellpy.net'  # NOTE(review): unused below
service = "stockholm-ai"
env = "dev"
cluster = "microservices"  # NOTE(review): unused below
load_balancer_name = "sellpy-services"  # NOTE(review): unused below
# Create an image repository for storing docker files.
create_repository(service)
# Log docker in to AWS ECR using the local AWS CLI credentials.
subprocess.call(
    "eval $( aws ecr get-login --no-include-email)",
    shell=True)
# Build the image from the local Dockerfile.
subprocess.call(
    "docker build -t temp_image .",
    shell=True)
# Tag the image for the ECR repository as <service>:<env>.
subprocess.call(
    "docker tag temp_image 966836717103.dkr.ecr.eu-west-1.amazonaws.com/" + ":".join([service, env]),
    shell=True)
# Push the tagged image to ECR.
subprocess.call(
    "docker push 966836717103.dkr.ecr.eu-west-1.amazonaws.com/" + ":".join([service, env]),
    shell=True)
# Register the ECS task definition described in task_definition_2.json.
subprocess.call(
    "aws ecs register-task-definition --cli-input-json file://task_definition_2.json --region eu-west-1",
    shell=True)
6178e2eab604c1ede0b22f9dc268fff459d4283f | 7,151 | py | Python | legacy/v1midiSort.py | montoyamoraga/midi-files-sort | 6c99632bc9f7f0b4319d0514bd259b5e7075f2a6 | [
"MIT"
] | null | null | null | legacy/v1midiSort.py | montoyamoraga/midi-files-sort | 6c99632bc9f7f0b4319d0514bd259b5e7075f2a6 | [
"MIT"
] | null | null | null | legacy/v1midiSort.py | montoyamoraga/midi-files-sort | 6c99632bc9f7f0b4319d0514bd259b5e7075f2a6 | [
"MIT"
] | null | null | null | # Python script for TODO
# December 2021
# runs on Python 3.x on Mac OS 12.0.1
################
# import modules
################
# sys for command line arguments
import sys
# os for listing files and directories
import os
# shutil for copy and paste files
import shutil
# Path for creating new directories and files
from pathlib import Path
# csv for CSV files
import csv
# pandas for .xls to CSV
import pandas as pd
# mido for MIDI files
from mido import MetaMessage
from mido import MidiFile
###################
# default variables
###################
# Paths of the source library, the generated library, and its files folder.
libraryPathOriginal = "libraryOriginal"
libraryPathNew = "libraryNew"
libraryPathFiles = "files"
libraryCSVFileName = "libraryNew.csv"
# Metadata spreadsheet (All_Rolls.xls) location and conversion target.
libraryMetadataFilename = "All_Rolls"
libraryMetadataFolder = "DOCUMENTATION"
libraryMetadataExtensionOriginal = ".xls"
libraryMetadataExtensionNew = ".csv"
# Filename suffixes that identify piano-roll MIDI files.
libraryRollsSuffixes = ["emR", "emP"]
# variable for storing the names of each MIDI file
midiFilesNames = []
midiFilesPaths = []
# variable for storing a subset of MIDI files: only 1 word ones
midiFilesShortNames = []
midiFilesShortPaths = []
##############################
# create files and directories
##############################
# if it doesnt exist, create new directory for storing the modified library
def createDirectories():
    """Ensure the output directory tree for the modified library exists."""
    target = Path(".") / libraryPathNew / libraryPathFiles
    target.mkdir(parents=True, exist_ok=True)
# create new file with CSV list
def createFiles():
    """Create (or truncate) the empty CSV index file in the new library.

    The original also constructed a ``csv.writer`` that was never used;
    opening the file in write mode is enough to create/empty it, and the
    context manager guarantees the handle is closed.
    """
    with open("./" + libraryPathNew + "/" + libraryCSVFileName, "w"):
        pass
###########
# CSV files
###########
def readCSVFile(filename, column, delimiter):
    """Return the values of one column of a delimited file as a list."""
    with open(filename, newline='') as handle:
        parsed = csv.reader(handle, delimiter=delimiter, quotechar='|')
        return [row[column] for row in parsed]
def createListMIDIFiles():
    """Write the one-word MIDI file names and their paths to the CSV index.

    Each row is "<name> <path>" (space-delimited), one per entry in the
    module-level midiFilesShortNames/midiFilesShortPaths lists.
    """
    with open("./" + libraryPathNew + "/" + libraryCSVFileName, "w", newline="") as csvFile:
        csvWriter = csv.writer(csvFile, delimiter=" ", quotechar='|', quoting=csv.QUOTE_MINIMAL)
        # zip the parallel name/path lists instead of indexing by range(len(...))
        for name, path in zip(midiFilesShortNames, midiFilesShortPaths):
            csvWriter.writerow([name, path])
#################################
# parse metadata from AllRolls.xls
#################################
def readLibraryMetadata():
    """Convert the All_Rolls .xls metadata sheet to CSV in the new library."""
    sourcePath = ("./" + libraryPathOriginal + "/" + libraryMetadataFolder
                  + "/" + libraryMetadataFilename + libraryMetadataExtensionOriginal)
    targetPath = ("./" + libraryPathNew + "/"
                  + libraryMetadataFilename + libraryMetadataExtensionNew)
    # read the Excel sheet with pandas, then write it back out as CSV
    sheet = pd.read_excel(sourcePath)
    sheet.to_csv(targetPath, index=None, header=True)
############
# MIDI files
############
# find all MIDI files in libraryOriginal
def findMIDIFiles():
    """Collect all MIDI files under libraryOriginal into the module lists.

    Fills midiFilesNames/midiFilesPaths with every .mid/.MID file found, and
    midiFilesShortNames/midiFilesShortPaths with the subset whose base name
    is a single word.
    """
    cwd = os.getcwd()
    for root, directories, files in os.walk(cwd + "/" + libraryPathOriginal + "/"):
        for filename in files:
            filepath = os.path.join(root, filename)
            # endswith accepts a tuple, so one call covers both extensions
            if not filepath.endswith((".mid", ".MID")):
                continue
            # compute the base name and relative path once instead of four times
            baseName = os.path.splitext(os.path.basename(filepath))[0]
            relPath = os.path.relpath(filepath)
            midiFilesNames.append(baseName)
            midiFilesPaths.append(relPath)
            # keep a separate list of files whose name is a single word
            if len(baseName.split()) == 1:
                midiFilesShortNames.append(baseName)
                midiFilesShortPaths.append(relPath)
# open a MIDI file
def readMIDIFile(filename):
    """Open the given path with mido and return the MidiFile object."""
    return MidiFile(filename)
# print the meta messages of MIDI file
def printMetaMessages(file):
    """Print every meta message of each track in a mido MidiFile."""
    for index, track in enumerate(file.tracks):
        print('Track {}: {}'.format(index, track.name))
        # only meta messages (tempo, key, text, ...) are of interest here
        for message in track:
            if not message.is_meta:
                continue
            print(message)
# copy MIDI files from original folder to new folder
# only do the 1 word ones
def copyMIDIFiles():
    """Copy the one-word MIDI files (listed in the CSV index) to the new library."""
    # retrieve paths of original MIDI files from the index written earlier
    midiPaths = readCSVFile("./" + libraryPathNew + "/" + libraryCSVFileName, column=1, delimiter=" ")
    # iterate the paths directly instead of indexing with range(len(...))
    for path in midiPaths:
        shutil.copy(path, './' + libraryPathNew + "/" + libraryPathFiles)
# check if any of the copied files matches with an entry on AllRolls.csv
def matchMIDIFiles():
    """Count how many short MIDI file names match entries in All_Rolls.csv.

    A file name ending in one of the roll suffixes (emR/emP) is matched with
    the suffix stripped; other names are matched as-is. Duplicate entries in
    All_Rolls.csv count once per occurrence, as before. The total is printed.
    """
    from collections import Counter
    # column 5 of All_Rolls.csv holds the roll file names; the other columns
    # (title, composer, pianist) were read but never used in the original
    AllRollsNames = readCSVFile(
        "./" + libraryPathNew + "/" + libraryMetadataFilename + libraryMetadataExtensionNew,
        column=5, delimiter=",")
    # pre-count the names once for O(1) lookups instead of an O(n*m) inner
    # scan (which also shadowed the outer loop variable `i`)
    nameCounts = Counter(AllRollsNames)
    matches = 0
    for shortName in midiFilesShortNames:
        # strip the roll suffix (emR/emP) if present
        if shortName[-3:] in libraryRollsSuffixes:
            shortName = shortName[:-3]
        matches += nameCounts[shortName]
    print(matches)
#########
# running
#########
# NOTE: this module runs its whole pipeline at import time (no main() guard).
# create directories and files
createDirectories()
createFiles()
# find all MIDI files
findMIDIFiles()
# check the contents and length
# print(midiFilesPaths)
# print(midiFilesNames)
# print(len(midiFilesPaths))
# print(len(midiFilesNames))
# create CSV file with MIDI files
createListMIDIFiles()
# read metadata
readLibraryMetadata()
# copy MIDI files from original to new
copyMIDIFiles()
# see if there is a match in the MIDI files and the All_Rolls.csv file
matchMIDIFiles()
# open a MIDI file
# myFile = readMIDIFile("./libraryDummy/A41emP.mid")
myFile = readMIDIFile("./libraryDummy/01 Moon River.mid")
# newTitle = MetaMessage('text', text="Title Something blabla", time=0)
# newArtist = MetaMessage('text', text="Artist WHATEVER", time=0)
# newAlbum = MetaMessage('text', text="Album MAYBE", time=0)
# print(newTitle.dict()['type'])
# for i, track in enumerate(myFile.tracks):
#     myFile.tracks[i].append(newTitle)
#     myFile.tracks[i].append(newArtist)
#     myFile.tracks[i].append(newAlbum)
#     myFile.tracks[i].insert(myFile.tracks[i].index('end_of_track'), newTitle)
#     myFile.tracks[i].insert(10, newArtist)
#     myFile.tracks[i].insert(len(myFile.tracks[i]) - 2, newArtist)
#     myFile.tracks[i].insert(len(myFile.tracks[i]) - 2, newAlbum)
# myFile.append(newTitle)
# print meta messages
printMetaMessages(myFile)
# save back over the same file (in-place overwrite)
myFile.save(myFile.filename)
# print the command-line arguments after the script name
# print(sys.argv[1:])
130f90736e0c65db554f011d25bc2829a0d805e2 | 4,992 | py | Python | tools/scripts/plot_crossover.py | leroyjvargis/workflows | 4d5c3a11c678c10cd0c8b7e043f66e4e62cc790b | [
"Apache-2.0"
] | 558 | 2020-04-27T12:05:39.000Z | 2022-03-28T15:20:22.000Z | tools/scripts/plot_crossover.py | leroyjvargis/workflows | 4d5c3a11c678c10cd0c8b7e043f66e4e62cc790b | [
"Apache-2.0"
] | 103 | 2020-04-27T21:44:55.000Z | 2022-03-29T21:36:10.000Z | tools/scripts/plot_crossover.py | leroyjvargis/workflows | 4d5c3a11c678c10cd0c8b7e043f66e4e62cc790b | [
"Apache-2.0"
] | 61 | 2020-04-22T11:02:14.000Z | 2022-03-14T19:32:17.000Z | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2020-2021 Micron Technology, Inc. All rights reserved.
from bokeh.plotting import figure, output_file, save, gridplot
from bokeh.layouts import layout, column, row
from bokeh.models import Div
import os, re
import argparse
import threading
from lib.tree_shape import TreeShape
from lib.crossover import Crossover
def parse_cmdline():
    """Build the argument parser and parse the command-line options."""
    description = (
        "Plot Crossover data\n\n"
        "example: %(prog)s --ouput_file=/tmp/plot.html --test_dir=/var/tmp/test\n"
    )
    parser = argparse.ArgumentParser(
        description=description, formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument(
        "-o", "--out_file", type=str, help="html output file", required=False
    )
    parser.add_argument("-i", "--test_dir", type=str, help="test directory", required=False)
    return parser.parse_args()
def make_plots(arg_dict):
    # Build the plots for one (vlen, nkey) combination and store the resulting
    # bokeh layout in arg_dict["plots"]; designed to run on a worker thread.
    u = Util()  # NOTE(review): Util is not imported in this module -- confirm where it comes from
    vlen = arg_dict["vlen"]
    nkey = arg_dict["nkey"]
    tree_path = arg_dict["tree_path"]
    crossover_path = arg_dict["crossover_path"]
    title = "vlen: {} nkey: {} size: {}".format(
        vlen, u.humanize(nkey), u.humanize(nkey * vlen)
    )
    heading = Div(
        text=f"""
    <hr style="width:100%">
    <h2>{title}</h2>
    """,
        width=1000,
    )
    ts = TreeShape(fanout=16, width=500, height=500)
    c = Crossover(width=1500, height=500)
    ybuf = u.file_to_yaml(tree_path)
    d = ts.process_yaml(ybuf)
    ts.add_tree_data(d)
    # Average kvset counts for cursor (c) and get (g) operations at pivot 2.
    tot_kvsets, c_kvsets_avg, g_kvsets_avg = ts.avg_nkvset(ybuf, pivot=2)
    tree_plot, slider = ts.plot_graph(
        title="nkvsets Get: {} Cursor: {} Total: {}".format(
            g_kvsets_avg, c_kvsets_avg, tot_kvsets
        )
    )
    df = c.df_for_nkeys(
        nkey_dir=crossover_path, c_avg_nkvset=c_kvsets_avg, g_avg_nkvset=g_kvsets_avg
    )
    crossover_plot = c.plot(df=df)
    # Output
    arg_dict["plots"] = column(heading, row(tree_plot, crossover_plot))
def scan_dirs(basepath):
    """
    Scan a crossover test-results tree.

    Expected layout:
        <basepath>/rr_<vlen>/load/cn_metrics_raw_<nkey>.log
        <basepath>/rr_<vlen>/run/keys_<nkey>/

    Returns a dictionary with the following mapping
    (vlen, nkey) --> [tree_path, crossover_path]
    """
    # Compile the patterns once instead of once per directory entry,
    # and use `is None` rather than `== None`.
    vlen_pat = re.compile(r"rr_(\d+)")
    tree_pat = re.compile(r"cn_metrics_raw_(\d+).log")
    nkey_pat = re.compile(r"keys_(\d+)")
    pdict = {}
    for vlen_dir in sorted(os.listdir(basepath)):
        d = vlen_pat.search(vlen_dir)
        if d is None:
            continue
        vlen = int(d.group(1))
        loadpath = "{}/{}/load/".format(basepath, vlen_dir)
        for tree in os.listdir(loadpath):
            pat = tree_pat.search(tree)
            if pat is None:
                continue
            nkey = int(pat.group(1))
            tree_path = "{}/{}/load/{}".format(basepath, vlen_dir, tree)
            pdict[(vlen, nkey)] = [tree_path]
        runpath = "{}/{}/run/".format(basepath, vlen_dir)
        for nkey_dir in os.listdir(runpath):
            # A run dir without a matching load log raises KeyError here,
            # same as before -- a malformed tree should fail loudly.
            nkey = int(nkey_pat.search(nkey_dir).group(1))
            filepath = "{}/{}/run/{}".format(basepath, vlen_dir, nkey_dir)
            pdict[(vlen, nkey)].append(filepath)
    return pdict
def main():
    """Scan the test directory, render one plot row per (vlen, nkey) combo
    in parallel, and save everything to a single bokeh HTML page."""
    opt = parse_cmdline()
    outpath = opt.out_file
    pdict = scan_dirs(opt.test_dir)
    args = []
    # One worker argument dict per (vlen, nkey); sorted for stable row order.
    for k in sorted(pdict, key=lambda s: (s[0], s[1])):
        arg_dict = {}
        arg_dict["vlen"] = k[0]
        arg_dict["nkey"] = k[1]
        arg_dict["tree_path"] = pdict[k][0]
        arg_dict["crossover_path"] = pdict[k][1]
        args.append(arg_dict)
    # Fan out plot construction; each thread writes its result back into
    # its own arg dict under "plots".
    threads = []
    for arg in args:
        t = threading.Thread(target=make_plots, args=(arg,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    intro = Div(
        text="""
    <h1>Crossover</h1>
    <p>
    This set of plots compares get performance with cursor performance as it varies with the burst length.<br>
    The keys in the kvs is evenly distributed across 64 prefixes. At each stage the test adds more suffixes<br>
    for each prefix and this load is followed by warming up mcache and then performing get and cursor operation<br>
    for various burstlens.<br>
    </p>
    <p>
    Each row represents a distinct tuple of number of keys and value length. The first plot shows the tree shape right<br>
    after load (after allowing the cn tree shape to quiesce). The next 3 plots show the following:
    <ul>
    <li>Throughput: Number of cursor reads or point gets per second.</li>
    <li>Latency Mean: A plot of the mean latency of each value read.</li>
    <li>Latency StdDev: A plot of the Standard deviation values for the latencies (noremalized to the mean).</li>
    </ul>
    </p>
    <p>
    KVS Pfxlen: 8bytes (Equal to the prefix length of the multi-segmented keys).<br>
    System Memory: 256G<br>
    Value Length: 256bytes<br>
    </p>
    """,
        width=1000,
    )
    # Assemble: intro text first, then one plot row per combo.
    subplots = [intro]
    for r in args:
        subplots.append(r["plots"])
    output_file(outpath)
    canvas = layout(subplots)
    save(canvas)
# Script entry point.
if __name__ == "__main__":
    main()
| 28.689655 | 122 | 0.616186 |
0a949ea738fd0c265344fb2ea1a4cf520dbe72b5 | 2,806 | py | Python | src/models/craft/basenet/vgg16_bn.py | gregbugaj/marie-ai | f51a74f19ab5d7231c9f8a426284feff1671b974 | [
"MIT"
] | 4 | 2021-09-23T22:38:48.000Z | 2022-01-19T12:03:02.000Z | src/models/craft/basenet/vgg16_bn.py | gregbugaj/marie-icr | f51a74f19ab5d7231c9f8a426284feff1671b974 | [
"MIT"
] | 17 | 2021-12-22T16:37:21.000Z | 2022-03-16T16:07:34.000Z | src/models/craft/basenet/vgg16_bn.py | gregbugaj/marie-ai | f51a74f19ab5d7231c9f8a426284feff1671b974 | [
"MIT"
] | null | null | null | from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.init as init
from torchvision import models
from torchvision.models.vgg import model_urls
def init_weights(modules):
    """Initialize a collection of layers in-place.

    Conv2d weights get Xavier-uniform init (bias zeroed when present),
    BatchNorm2d is set to identity (weight=1, bias=0), and Linear layers
    get N(0, 0.01) weights with zero bias.
    """
    for layer in modules:
        if isinstance(layer, nn.Conv2d):
            init.xavier_uniform_(layer.weight.data)
            if layer.bias is not None:
                layer.bias.data.zero_()
        elif isinstance(layer, nn.BatchNorm2d):
            layer.weight.data.fill_(1)
            layer.bias.data.zero_()
        elif isinstance(layer, nn.Linear):
            layer.weight.data.normal_(0, 0.01)
            layer.bias.data.zero_()
class vgg16_bn(torch.nn.Module):
    """VGG16-BN backbone split into five feature slices (as used by CRAFT).

    slice1..slice4 are taken from torchvision's pretrained vgg16_bn feature
    stack; slice5 replaces fc6/fc7 with dilated convolutions.
    """
    def __init__(self, pretrained=True, freeze=True):
        super(vgg16_bn, self).__init__()
        # HACK: downgrades the weight URL to plain http (works around an SSL
        # issue, but disables TLS for the download).
        # NOTE(review): `model_urls` was removed from torchvision.models.vgg
        # in newer torchvision releases -- confirm the pinned version.
        model_urls['vgg16_bn'] = model_urls['vgg16_bn'].replace('https://', 'http://')
        vgg_pretrained_features = models.vgg16_bn(pretrained=pretrained).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        # Layer index boundaries below correspond to torchvision's vgg16_bn
        # feature list; do not change them independently of that model.
        for x in range(12):         # conv2_2
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(12, 19):         # conv3_3
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(19, 29):         # conv4_3
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(29, 39):         # conv5_3
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        # fc6, fc7 without atrous conv
        self.slice5 = torch.nn.Sequential(
                nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
                nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
                nn.Conv2d(1024, 1024, kernel_size=1)
        )
        if not pretrained:
            init_weights(self.slice1.modules())
            init_weights(self.slice2.modules())
            init_weights(self.slice3.modules())
            init_weights(self.slice4.modules())
        init_weights(self.slice5.modules())        # no pretrained model for fc6 and fc7
        if freeze:
            for param in self.slice1.parameters():      # only first conv
                param.requires_grad= False
    def forward(self, X):
        """Return the five intermediate feature maps as a named tuple."""
        h = self.slice1(X)
        h_relu2_2 = h
        h = self.slice2(h)
        h_relu3_2 = h
        h = self.slice3(h)
        h_relu4_3 = h
        h = self.slice4(h)
        h_relu5_3 = h
        h = self.slice5(h)
        h_fc7 = h
        vgg_outputs = namedtuple("VggOutputs", ['fc7', 'relu5_3', 'relu4_3', 'relu3_2', 'relu2_2'])
        out = vgg_outputs(h_fc7, h_relu5_3, h_relu4_3, h_relu3_2, h_relu2_2)
        return out
| 37.413333 | 99 | 0.596222 |
0e88dd75bcb736e7db91d8bcfcab95c00acfed1c | 766 | py | Python | source_code/5-2-scrapy.py | linzexinmasterchief/easy-scraping-tutorial | e52c45d27ffce90a7bfe12ea1da09f049e052099 | [
"MIT"
] | 2 | 2020-05-16T07:47:45.000Z | 2020-05-16T07:48:48.000Z | source_code/5-2-scrapy.py | ept106354026/easy-scraping-tutorial | e52c45d27ffce90a7bfe12ea1da09f049e052099 | [
"MIT"
] | null | null | null | source_code/5-2-scrapy.py | ept106354026/easy-scraping-tutorial | e52c45d27ffce90a7bfe12ea1da09f049e052099 | [
"MIT"
] | 1 | 2018-04-06T07:05:29.000Z | 2018-04-06T07:05:29.000Z | import scrapy
class MofanSpider(scrapy.Spider):
    """Crawl morvanzhou.github.io, yielding page titles and following
    same-site relative links."""
    name = "mofan"
    start_urls = [
        'https://morvanzhou.github.io/',
    ]
    # unseen = set()
    # seen = set()      # we don't need these two as scrapy will deal with them automatically
    def parse(self, response):
        # Emit the page's first <h1> (quotes stripped) together with its URL.
        yield {     # return some results
            'title': response.css('h1::text').extract_first(default='Missing').strip().replace('"', ""),
            'url': response.url,
        }
        urls = response.css('a::attr(href)').re(r'^/.+?/$')     # find all sub urls
        for url in urls:
            yield response.follow(url, callback=self.parse)     # it will filter duplication automatically
# lastly, run this in terminal
# scrapy runspider 5-2-scrapy.py -o res.json | 31.916667 | 106 | 0.592689 |
2fc583575db05e93ce3bf5fdbcce33169a48a365 | 13,340 | py | Python | conans/client/conf/config_installer.py | Wonders11/conan | 28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8 | [
"MIT"
] | 1 | 2022-01-21T05:31:13.000Z | 2022-01-21T05:31:13.000Z | conans/client/conf/config_installer.py | Wonders11/conan | 28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8 | [
"MIT"
] | null | null | null | conans/client/conf/config_installer.py | Wonders11/conan | 28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8 | [
"MIT"
] | null | null | null | import json
import os
import shutil
from datetime import datetime
from dateutil.tz import gettz
from contextlib import contextmanager
from six.moves.urllib.parse import urlparse
from conans import load
from conans.client import tools
from conans.client.cache.remote_registry import load_registry_txt, migrate_registry_file
from conans.client.downloaders.file_downloader import FileDownloader
from conans.client.tools import Git
from conans.client.tools.files import unzip
from conans.errors import ConanException
from conans.util.files import mkdir, rmdir, walk, save, touch, remove
from conans.client.cache.cache import ClientCache
def _hide_password(resource):
"""
Hide password from url/file path
:param resource: string with url or file path
:return: resource with hidden password if present
"""
password = urlparse(resource).password
return resource.replace(password, "<hidden>") if password else resource
def _handle_remotes(cache, remote_file):
    # FIXME: Should we encourage to pass the remotes in json?
    # Parse a remotes.txt file and (re)define the cache's remote registry.
    remotes, _ = load_registry_txt(load(remote_file))
    cache.registry.define(remotes)
@contextmanager
def tmp_config_install_folder(cache):
    """Yield a freshly-emptied temp folder inside the cache; removed on exit."""
    tmp_folder = os.path.join(cache.cache_folder, "tmp_config_install")
    # necessary for Mac OSX, where the temp folders in /var/ are symlinks to /private/var/
    tmp_folder = os.path.realpath(tmp_folder)
    rmdir(tmp_folder)
    mkdir(tmp_folder)
    try:
        yield tmp_folder
    finally:
        rmdir(tmp_folder)
def _process_git_repo(config, cache, output):
    """Clone the configured git repo into a temp folder and install from it."""
    output.info("Trying to clone repo: %s" % config.uri)
    with tmp_config_install_folder(cache) as tmp_folder:
        with tools.chdir(tmp_folder):
            try:
                args = config.args or ""
                git = Git(verify_ssl=config.verify_ssl, output=output)
                git.clone(config.uri, args=args)
                output.info("Repo cloned!")
            except Exception as e:
                raise ConanException("Can't clone repo: %s" % str(e))
        _process_folder(config, tmp_folder, cache, output)
def _process_zip_file(config, zippath, cache, output, tmp_folder, first_remove=False):
    """Extract an archive into tmp_folder and install from it.

    first_remove deletes the archive right after extraction (used for
    downloaded files we no longer need).
    """
    unzip(zippath, tmp_folder, output=output)
    if first_remove:
        os.unlink(zippath)
    _process_folder(config, tmp_folder, cache, output)
def _handle_conan_conf(current_conan_conf, new_conan_conf_path):
    # Merge the incoming conan.conf into the current one and persist it
    # (ConfigParser.read() overlays the new values over existing sections).
    current_conan_conf.read(new_conan_conf_path)
    with open(current_conan_conf.filename, "w") as f:
        current_conan_conf.write(f)
def _filecopy(src, filename, dst):
    """Copy src/filename to dst/filename without preserving permissions."""
    # https://github.com/conan-io/conan/issues/6556
    # This is just a local convenience for "conan config install", using copyfile to avoid
    # copying with permissions that later cause bugs
    src = os.path.join(src, filename)
    dst = os.path.join(dst, filename)
    # Clear the destination file
    if os.path.exists(dst):
        if os.path.isdir(dst):  # dst was a directory and now src is a file
            rmdir(dst)
        else:
            remove(dst)
    shutil.copyfile(src, dst)
def _process_file(directory, filename, config, cache, output, folder):
    """Install one file from a config source into the local cache.

    Well-known filenames (settings.yml, conan.conf, remotes.txt, registry.*)
    get special handling; anything else is copied verbatim, preserving its
    relative path under `folder` (optionally under config.target_folder).
    """
    if filename == "settings.yml":
        output.info("Installing settings.yml")
        _filecopy(directory, filename, cache.cache_folder)
    elif filename == "conan.conf":
        output.info("Processing conan.conf")
        _handle_conan_conf(cache.config, os.path.join(directory, filename))
    elif filename == "remotes.txt":
        output.info("Defining remotes from remotes.txt")
        _handle_remotes(cache, os.path.join(directory, filename))
    elif filename in ("registry.txt", "registry.json"):
        # Legacy registry files: replace the current remotes file, then
        # migrate to the modern format.
        try:
            os.remove(cache.remotes_path)
        except OSError:
            pass
        finally:
            _filecopy(directory, filename, cache.cache_folder)
            migrate_registry_file(cache, output)
    elif filename == "remotes.json":
        # Fix for Conan 2.0
        raise ConanException("remotes.json install is not supported yet. Use 'remotes.txt'")
    else:
        # This is ugly, should be removed in Conan 2.0
        if filename in ("README.md", "LICENSE.txt"):
            output.info("Skip %s" % filename)
        else:
            relpath = os.path.relpath(directory, folder)
            if config.target_folder:
                target_folder = os.path.join(cache.cache_folder, config.target_folder,
                                             relpath)
            else:
                target_folder = os.path.join(cache.cache_folder, relpath)
            if os.path.exists(target_folder):
                if os.path.isfile(target_folder):  # Existed as a file and now should be a folder
                    remove(target_folder)
            mkdir(target_folder)
            output.info("Copying file %s to %s" % (filename, target_folder))
            _filecopy(directory, filename, target_folder)
def _process_folder(config, folder, cache, output):
    """Walk a config source folder (skipping .git) and install every file."""
    if not os.path.isdir(folder):
        raise ConanException("No such directory: '%s'" % str(folder))
    if config.source_folder:
        folder = os.path.join(folder, config.source_folder)
    for root, dirs, files in walk(folder):
        dirs[:] = [d for d in dirs if d != ".git"]
        for f in files:
            _process_file(root, f, config, cache, output, folder)
def _process_download(config, cache, output, requester):
    """Download the configured archive URL and install its contents."""
    with tmp_config_install_folder(cache) as tmp_folder:
        output.info("Trying to download  %s" % _hide_password(config.uri))
        zippath = os.path.join(tmp_folder, os.path.basename(config.uri))
        try:
            downloader = FileDownloader(requester=requester, output=output, verify=config.verify_ssl,
                                        config_retry=None, config_retry_wait=None)
            downloader.download(url=config.uri, file_path=zippath)
            _process_zip_file(config, zippath, cache, output, tmp_folder, first_remove=True)
        except Exception as e:
            raise ConanException("Error while installing config from %s\n%s" % (config.uri, str(e)))
class _ConfigOrigin(object):
    """One stored 'conan config install' source, serialized to/from the
    JSON config-install file."""
    def __init__(self, data):
        self.type = data.get("type")
        self.uri = data.get("uri")
        self.verify_ssl = data.get("verify_ssl")
        self.args = data.get("args")
        self.source_folder = data.get("source_folder")
        self.target_folder = data.get("target_folder")
    def __eq__(self, other):
        # NOTE(review): verify_ssl is excluded from equality -- two origins
        # differing only in verify_ssl compare equal; confirm this is
        # intentional (it makes repeated installs update the stored entry).
        return (self.type == other.type and self.uri == other.uri and
                self.args == other.args and self.source_folder == other.source_folder
                and self.target_folder == other.target_folder)
    def __ne__(self, other):
        return not self.__eq__(other)
    def json(self):
        # Serializable dict; the inverse of __init__(data).
        return {"type": self.type,
                "uri": self.uri,
                "verify_ssl": self.verify_ssl,
                "args": self.args,
                "source_folder": self.source_folder,
                "target_folder": self.target_folder}
    @staticmethod
    def from_item(uri, config_type, verify_ssl, args, source_folder, target_folder):
        """Build an origin from CLI args, sniffing the type from the uri
        (.git suffix, existing dir/file, http prefix) when not explicit."""
        config = _ConfigOrigin({})
        if config_type:
            config.type = config_type
        else:
            if uri.endswith(".git"):
                config.type = "git"
            elif os.path.isdir(uri):
                config.type = "dir"
            elif os.path.isfile(uri):
                config.type = "file"
            elif uri.startswith("http"):
                config.type = "url"
            else:
                raise ConanException("Unable to deduce type config install: %s" % uri)
        config.source_folder = source_folder
        config.target_folder = target_folder
        config.args = args
        config.verify_ssl = verify_ssl
        if os.path.exists(uri):
            uri = os.path.abspath(uri)
        config.uri = uri
        return config
def _is_compressed_file(filename):
open(filename, "r") # Check if the file exist and can be opened
import zipfile
if zipfile.is_zipfile(filename):
return True
if (filename.endswith(".tar.gz") or filename.endswith(".tgz") or
filename.endswith(".tbz2") or filename.endswith(".tar.bz2") or
filename.endswith(".tar") or filename.endswith(".gz") or
filename.endswith(".tar.xz") or filename.endswith(".txz")):
return True
return False
def _process_config(config, cache, output, requester):
    """Dispatch one config origin to the right installer (git/dir/file/url).

    Any failure (including ConanExceptions from the helpers) is re-wrapped
    in a single "Failed conan config install" ConanException.
    """
    try:
        if config.type == "git":
            _process_git_repo(config, cache, output)
        elif config.type == "dir":
            _process_folder(config, config.uri, cache, output)
        elif config.type == "file":
            if _is_compressed_file(config.uri):
                with tmp_config_install_folder(cache) as tmp_folder:
                    _process_zip_file(config, config.uri, cache, output, tmp_folder)
            else:
                # A plain, uncompressed file: install it directly.
                dirname, filename = os.path.split(config.uri)
                _process_file(dirname, filename, config, cache, output, dirname)
        elif config.type == "url":
            _process_download(config, cache, output, requester=requester)
        else:
            raise ConanException("Unable to process config install: %s" % config.uri)
    except Exception as e:
        raise ConanException("Failed conan config install: %s" % str(e))
def _save_configs(configs_file, configs):
    """Persist the list of config-install origins to *configs_file* as JSON."""
    serialized = [origin.json() for origin in configs]
    save(configs_file, json.dumps(serialized, indent=True))
def _load_configs(configs_file):
    """Read *configs_file* and return its entries as _ConfigOrigin objects."""
    try:
        entries = json.loads(load(configs_file))
    except Exception as e:
        raise ConanException("Error loading configs-install file: %s\n%s"
                             % (configs_file, str(e)))
    return [_ConfigOrigin(entry) for entry in entries]
def configuration_install(app, uri, verify_ssl, config_type=None,
                          args=None, source_folder=None, target_folder=None):
    """Install configuration into the local cache.

    With uri=None: if non-default options were passed, re-run the last
    stored origin with those options; otherwise re-apply every stored
    origin. With a uri: process it and add/update it in the stored origin
    list.
    """
    cache, output, requester = app.cache, app.out, app.requester
    configs = []
    configs_file = cache.config_install_file
    if os.path.isfile(configs_file):
        configs = _load_configs(configs_file)
    if uri is None:
        if config_type or args or not verify_ssl:  # Not the defaults
            if not configs:
                raise ConanException("Called config install without arguments")
            # Modify the last one
            config = configs[-1]
            # BUG FIX: this previously assigned to `config.config_type`,
            # which is not an attribute of _ConfigOrigin (the field is
            # named `type`), so the stored origin's type was never updated.
            config.type = config_type or config.type
            config.args = args or config.args
            config.verify_ssl = verify_ssl or config.verify_ssl
            _process_config(config, cache, output, requester)
            _save_configs(configs_file, configs)
        else:
            if not configs:
                raise ConanException("Called config install without arguments")
            # Execute the previously stored ones
            for config in configs:
                output.info("Config install:  %s" % _hide_password(config.uri))
                _process_config(config, cache, output, requester)
            touch(cache.config_install_file)
    else:
        # Execute and store the new one
        config = _ConfigOrigin.from_item(uri, config_type, verify_ssl, args,
                                         source_folder, target_folder)
        _process_config(config, cache, output, requester)
        if config not in configs:
            configs.append(config)
        else:
            # Replace the matching stored entry with the fresh one
            # (equality ignores verify_ssl, see _ConfigOrigin.__eq__).
            configs = [(c if c != config else config) for c in configs]
        _save_configs(configs_file, configs)
def _is_scheduled_intervals(file, interval):
    """ Check if time interval is bigger than last file change
    :param file: file path to stat last change
    :param interval: required time interval (datetime.timedelta)
    :return: True if last change - current time is bigger than interval. Otherwise, False.
    """
    # Compare in the local timezone (gettz() with no args) so mtime and
    # "now" are both tz-aware.
    timestamp = os.path.getmtime(file)
    sched = datetime.fromtimestamp(timestamp, tz=gettz())
    sched += interval
    now = datetime.now(gettz())
    return now > sched
def is_config_install_scheduled(api):
    """ Validate if the next config install is scheduled to occur now
    When config_install_interval is not configured, config install should not run
    When configs file is empty, scheduled config install should not run
    When config_install_interval is configured, config install will respect the delta from:
    last conan install execution (sched file) + config_install_interval value < now
    :param api: Conan API instance
    :return: True, if it should occur now. Otherwise, False.
    """
    cache = ClientCache(api.cache_folder, api.out)
    interval = cache.config.config_install_interval
    config_install_file = cache.config_install_file
    # NOTE: when interval is None, and when the configs file is empty,
    # this function falls through and implicitly returns None (falsy) --
    # callers treat that the same as False.
    if interval is not None:
        if not os.path.exists(config_install_file):
            raise ConanException("config_install_interval defined, but no config_install file")
        scheduled = _is_scheduled_intervals(config_install_file, interval)
        if scheduled and not _load_configs(config_install_file):
            api.out.warn("Skipping scheduled config install, "
                         "no config listed in config_install file")
            # Touch the file so the next check restarts the interval.
            os.utime(config_install_file, None)
        else:
            return scheduled
| 39.820896 | 101 | 0.646027 |
281e8904bc28e4498beeae42ea02e5b7e7094e8d | 2,649 | py | Python | tests/bugs/core_5982_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_5982_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_5982_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | #coding:utf-8
#
# id: bugs.core_5982
# title: error read permission for BLOB field, when it is input/output procedure`s parametr
# decription:
# Confirmed bug on WI-V3.0.4.33034 and WI-T4.0.0.1340.
# Checked on:
# 4.0.0.1421: OK, 2.098s.
# 3.0.5.33097: OK, 1.294s.
#
# 24.06.2020: changed min_version to 2.5 because problem was fixed in 2.5.9.27151.
#
# tracker_id: CORE-5982
# min_versions: ['2.5.9']
# versions: 2.5.9
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5.9
# resources: None
# No output substitutions needed for this test.
substitutions_1 = []
# Empty init script; all setup happens inside the test script itself.
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
set bail on;
set term ^;
execute block as
begin
begin
execute statement 'drop user tmp$c5982' with autonomous transaction;
when any do begin end
end
end^
set term ;^
commit;
create user tmp$c5982 password '123';
commit;
recreate table my_table (
my_num integer
,my_data blob
);
commit;
insert into my_table(my_num , my_data) values (1, 'qwerty');
commit;
set term ^;
create or alter procedure sp_worker(my_data blob) as
declare variable my_value blob;
begin
my_value = my_data ;
rdb$set_context('USER_SESSION', 'SP_WORKER', 'DONE BY ' || current_user );
end
^
create or alter procedure sp_main as
declare variable my_data blob;
begin
select my_data
from my_table
where my_num = 1
into: my_data;
execute procedure sp_worker(my_data);
end
^
set term ;^
commit;
grant select on table my_table to procedure sp_main;
grant execute on procedure sp_worker to procedure sp_main;
grant execute on procedure sp_main to public;
commit;
set list on;
connect '$(DSN)' user 'tmp$c5982' password '123';
execute procedure sp_main;
select rdb$get_context('USER_SESSION', 'SP_WORKER') as result from rdb$database;
commit;
connect '$(DSN)' user 'SYSDBA' password 'masterkey';
drop user tmp$c5982;
commit;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
RESULT DONE BY TMP$C5982
"""
@pytest.mark.version('>=2.5.9')
def test_1(act_1: Action):
    # Run the ISQL script and compare whitespace-normalized stdout against
    # the expected block (verifies the blob-parameter permission fix).
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_expected_stdout == act_1.clean_stdout
| 24.302752 | 100 | 0.606644 |
48bed9c05770daf890ce08b614fe89d098ecbc78 | 13,971 | py | Python | journalsdb.py | Impactstory/jump-api | e9c048694905e4606b68ae4650696a134bd5a349 | [
"MIT"
] | null | null | null | journalsdb.py | Impactstory/jump-api | e9c048694905e4606b68ae4650696a134bd5a349 | [
"MIT"
] | null | null | null | journalsdb.py | Impactstory/jump-api | e9c048694905e4606b68ae4650696a134bd5a349 | [
"MIT"
] | null | null | null | # coding: utf-8
import datetime
import argparse
import simplejson as json
from cached_property import cached_property
from time import time
# import requests
from psycopg2 import sql
from psycopg2.extras import execute_values
from enum import Enum
from app import db
from app import get_db_cursor
from util import elapsed
from util import chunks
from util import sql_bool
from util import sql_escape_string
class JiscDefaultPrices(Enum):
    # Fallback list prices (GBP) per publisher, used when a journal has no
    # known subscription price (see JournalMetadata.get_subscription_price
    # with use_high_price_if_unknown=True).
    TaylorFrancis = 954.10
    Sage = 659.99
    Wiley = 1350.47
    SpringerNature = 1476.53
    Elsevier = 3775
class JournalsDBRaw(db.Model):
    """Raw journalsdb rows as ingested; JSON payloads are stored as text."""
    __tablename__ = "journalsdb_raw"
    issn_l = db.Column(db.Text, primary_key=True)
    issns = db.Column(db.Text)  # JSON list of ISSNs
    title = db.Column(db.Text)
    publisher = db.Column(db.Text)
    dois_by_issued_year = db.Column(db.Text)  # JSON [[year, count], ...]
    subscription_pricing = db.Column(db.Text)  # JSON {"prices": [...]}
    apc_pricing = db.Column(db.Text)  # JSON {"apc_prices": [...]}
    open_access = db.Column(db.Text)  # JSON incl. "is_gold_journal"
    def __repr__(self):
        return "<{} ({}) '{}' {}>".format(self.__class__.__name__, self.issn_l, self.title, self.publisher)
class JournalMetadata(db.Model):
    """One computed row per journal, derived from a JournalsDBRaw record
    (see recompute_journal_metadata)."""
    __tablename__ = "journalsdb_computed"
    created = db.Column(db.DateTime)
    issn_l = db.Column(db.Text, primary_key=True)
    issns_string = db.Column(db.Text)  # JSON list of ISSNs
    title = db.Column(db.Text)
    publisher = db.Column(db.Text)
    is_current_subscription_journal = db.Column(db.Boolean)
    is_gold_journal_in_most_recent_year = db.Column(db.Boolean)
    is_currently_publishing = db.Column(db.Boolean)
    subscription_price_usd = db.Column(db.Numeric(asdecimal=False))
    subscription_price_gbp = db.Column(db.Numeric(asdecimal=False))
    apc_price_usd = db.Column(db.Numeric(asdecimal=False))
    apc_price_gbp = db.Column(db.Numeric(asdecimal=False))
    num_dois_in_2020 = db.Column(db.Numeric(asdecimal=False))
    def __init__(self, journal_raw):
        # Derive every computed column from the raw journalsdb row.
        self.created = datetime.datetime.utcnow().isoformat()
        for attr in ("issn_l", "title", "publisher"):
            setattr(self, attr, getattr(journal_raw, attr))
        self.issns_string = journal_raw.issns
        # Each name below has a matching set_<name>() method on this class.
        for attr in ("is_current_subscription_journal", "is_gold_journal_in_most_recent_year", "is_currently_publishing", "num_dois_in_2020", ):
            setter = getattr(self, "set_{}".format(attr))
            setter(journal_raw)
        self.set_subscription_prices(journal_raw)
        self.set_apc_prices(journal_raw)
        super(JournalMetadata, self).__init__()
    @cached_property
    def issns(self):
        # issns_string holds a JSON-encoded list.
        return json.loads(self.issns_string)
    @cached_property
    def display_issns(self):
        return ",".join(self.issns)
    @cached_property
    def display_issn_l(self):
        return "issn:{}".format(self.issn_l)
    @cached_property
    def is_hybrid(self):
        # Hybrid = subscription journal that is not fully gold OA.
        return not self.is_gold_journal_in_most_recent_year
    @cached_property
    def publisher_code(self):
        # Normalize publisher display names to JiscDefaultPrices enum names.
        if self.publisher == "Elsevier":
            return "Elsevier"
        elif self.publisher == "Springer Nature":
            return "SpringerNature"
        elif self.publisher == "Wiley":
            return "Wiley"
        elif self.publisher == "SAGE":
            return "Sage"
        elif self.publisher == "Taylor & Francis":
            return "TaylorFrancis"
        return self.publisher
    def get_insert_list(self):
        # Value tuple matching get_insert_column_names(), in order.
        return (
            self.created,
            self.issn_l,
            self.issns_string,
            sql_escape_string(self.title),
            sql_escape_string(self.publisher),
            sql_bool(self.is_current_subscription_journal),
            sql_bool(self.is_gold_journal_in_most_recent_year),
            sql_bool(self.is_currently_publishing),
            self.subscription_price_usd,
            self.subscription_price_gbp,
            self.apc_price_usd,
            self.apc_price_gbp,
            self.num_dois_in_2020,)
    @classmethod
    def get_insert_column_names(cls):
        # Column order must stay in sync with get_insert_list().
        return ["created",
                "issn_l",
                "issns_string",
                "title",
                "publisher",
                "is_current_subscription_journal",
                "is_gold_journal_in_most_recent_year",
                "is_currently_publishing",
                "subscription_price_usd",
                "subscription_price_gbp",
                "apc_price_usd",
                "apc_price_gbp",
                "num_dois_in_2020"
                ]
    def set_is_current_subscription_journal(self, journal_raw):
        # Recomputes its dependencies first so it is safe to call standalone
        # (redundant when invoked from __init__, but idempotent).
        self.set_is_currently_publishing(journal_raw)
        self.set_is_gold_journal_in_most_recent_year(journal_raw)
        self.set_subscription_prices(journal_raw)
        self.is_current_subscription_journal = True
        if not (self.subscription_price_usd or self.subscription_price_gbp):
            if not self.is_currently_publishing:
                self.is_current_subscription_journal = False
        if self.is_gold_journal_in_most_recent_year:
            self.is_current_subscription_journal = False
    def set_is_gold_journal_in_most_recent_year(self, journal_raw):
        self.is_gold_journal_in_most_recent_year = False
        if journal_raw.open_access:
            self.is_gold_journal_in_most_recent_year = (json.loads(journal_raw.open_access)["is_gold_journal"] == True)
    def set_is_currently_publishing(self, journal_raw):
        # "Currently publishing" = at least one DOI issued in 2021.
        self.is_currently_publishing = False
        if journal_raw.dois_by_issued_year:
            dois_tuple = json.loads(journal_raw.dois_by_issued_year)
            for (year, num) in dois_tuple:
                if year == 2021 and num > 0:
                    self.is_currently_publishing = True
        # Special-cased ISSN forced to "publishing" -- presumably Scientific
        # American; confirm why the DOI data misses it.
        if self.issn_l == '0036-8733':
            self.is_currently_publishing = True
    def set_num_dois_in_2020(self, journal_raw):
        self.num_dois_in_2020 = 0
        if journal_raw.dois_by_issued_year:
            dois_tuple = json.loads(journal_raw.dois_by_issued_year)
            for (year, num) in dois_tuple:
                if year == 2020:
                    self.num_dois_in_2020 = num
    def set_subscription_prices(self, journal_raw):
        # Pull USD and GBP prices out of the raw JSON price list.
        if journal_raw.subscription_pricing:
            subscription_dict = json.loads(journal_raw.subscription_pricing)
            for price_dict in subscription_dict["prices"]:
                if price_dict["currency"] == "USD":
                    self.subscription_price_usd = float(price_dict["price"])
                if price_dict["currency"] == "GBP":
                    self.subscription_price_gbp = float(price_dict["price"])
    def set_apc_prices(self, journal_raw):
        if journal_raw.apc_pricing:
            apc_dict = json.loads(journal_raw.apc_pricing)
            for price_dict in apc_dict["apc_prices"]:
                if price_dict["currency"] == "USD":
                    self.apc_price_usd = float(price_dict["price"])
                if price_dict["currency"] == "GBP":
                    self.apc_price_gbp = float(price_dict["price"])
    def get_subscription_price(self, currency="USD", use_high_price_if_unknown=False):
        """Return the subscription price in *currency*, or None.

        With use_high_price_if_unknown, unknown GBP prices fall back to the
        publisher's JISC default price (GBP only)."""
        response = None
        if currency == "USD":
            if self.subscription_price_usd:
                response = float(self.subscription_price_usd)
        elif currency == "GBP":
            if self.subscription_price_gbp:
                response = float(self.subscription_price_gbp)
        if not response:
            if use_high_price_if_unknown and currency == "GBP":
                JISC_DEFAULT_PRICE_IN_GBP = JiscDefaultPrices[self.publisher_code].value
                response = JISC_DEFAULT_PRICE_IN_GBP
        return response
    def get_apc_price(self, currency="USD"):
        """Return the APC price in *currency*, or None if unknown."""
        response = None
        if currency == "USD":
            if self.apc_price_usd:
                response = float(self.apc_price_usd)
        elif currency == "GBP":
            if self.apc_price_gbp:
                response = float(self.apc_price_gbp)
        return response
    def __repr__(self):
        return "<{} ({}) '{}' {}>".format(self.__class__.__name__, self.issn_l, self.title, self.publisher)
def recompute_journal_metadata():
    """Rebuild journalsdb_computed from journalsdb_raw.

    Destructive: replaces the *_bak_yesterday backup tables, empties
    journalsdb_computed, bulk-inserts recomputed rows, then refreshes the
    flat materialized view.
    """
    journals_raw = JournalsDBRaw.query.all()
    print(len(journals_raw))
    new_computed_journals = []
    print("making backups and getting tables ready to run")
    with get_db_cursor() as cursor:
        cursor.execute("drop table journalsdb_raw_bak_yesterday;")
        cursor.execute("drop table journalsdb_computed_bak_yesterday;")
        cursor.execute("create table journalsdb_raw_bak_yesterday as (select * from journalsdb_raw);")
        cursor.execute("create table journalsdb_computed_bak_yesterday as (select * from journalsdb_computed);")
    # do it as its own to force commit
    with get_db_cursor() as cursor:
        # don't truncate raw!  is populated by xplenty.
        # further more truncate hangs, so do truncation this way instead
        cursor.execute("delete from journalsdb_computed;")
    print("tables ready for insertion")
    for journal_raw in journals_raw:
        new_journal_metadata = JournalMetadata(journal_raw)
        new_computed_journals.append(new_journal_metadata)
    print("starting commits")
    start_time = time()
    insert_values = [j.get_insert_list() for j in new_computed_journals]
    cols = JournalMetadata.get_insert_column_names()
    with get_db_cursor() as cursor:
        qry = sql.SQL("INSERT INTO journalsdb_computed ({}) VALUES %s").format(
            sql.SQL(', ').join(map(sql.Identifier, cols)))
        execute_values(cursor, qry, insert_values, page_size=1000)
    print("done committing journals, took {} seconds total".format(elapsed(start_time)))
    print("now refreshing flat view")
    with get_db_cursor() as cursor:
        cursor.execute("refresh materialized view journalsdb_computed_flat;")
        cursor.execute("analyze journalsdb_computed;")
    print("done writing to db, took {} seconds total".format(elapsed(start_time)))
class MissingJournalMetadata(object):
    """Null-object stand-in returned when an ISSN is not found in journalsdb.

    Mirrors the read API of JournalMetadata (title, issns, prices) but
    returns unknown/None values everywhere.
    """
    def __init__(self, issn_l):
        self.issn_l = issn_l
        print("in MissingJournalMetadata missing journal {} from journalsdb: https://api.journalsdb.org/journals/{}".format(issn_l, issn_l))
        # NOTE: reporting the missing journal to the journalsdb API is
        # intentionally disabled (it used to requests.post to
        # https://api.journalsdb.org/missing_journal).
        super(MissingJournalMetadata, self).__init__()
    @cached_property
    def display_issn_l(self):
        return "issn:{}".format(self.issn_l)
    @cached_property
    def issns(self):
        return [self.issn_l]
    @cached_property
    def is_hybrid(self):
        return None
    @cached_property
    def display_issns(self):
        return ",".join(self.issns)
    @cached_property
    def title(self):
        return "Unrecognized Journal"
    @cached_property
    def publisher(self):
        return "Unrecognized Journal"
    def get_apc_price(self, currency):
        # Unknown journal: no APC price in any currency.
        return None
    # BUG FIX: this method was decorated with @cached_property although it
    # takes arguments; merely accessing the attribute invoked it with only
    # `self` and raised TypeError. It is now a plain method with the same
    # signature as JournalMetadata.get_subscription_price().
    def get_subscription_price(self, currency, use_high_price_if_unknown=False):
        return None
def get_journal_metadata(issn):
    """Look up metadata by ANY of a journal's ISSNs; returns a
    MissingJournalMetadata stub when unknown."""
    global all_journal_metadata_flat
    my_journal_metadata = all_journal_metadata_flat.get(issn, None)
    if not my_journal_metadata:
        my_journal_metadata = MissingJournalMetadata(issn_l=issn)
    return my_journal_metadata
def get_journal_metadata_issnl_only(issn_l):
    """Look up metadata by linking ISSN (issn_l) only; returns a
    MissingJournalMetadata stub when unknown."""
    global all_journal_metadata
    my_journal_metadata = all_journal_metadata.get(issn_l, None)
    if not my_journal_metadata:
        my_journal_metadata = MissingJournalMetadata(issn_l=issn_l)
    return my_journal_metadata
def get_journal_metadata_for_publisher(publisher):
    """Return {issn_l: JournalMetadata} for every journal of *publisher*.

    Accepts either the internal publisher code (e.g. "SpringerNature") or
    the journalsdb display name (e.g. "Springer Nature").
    """
    lookup_journaldb_publisher = {
        "SpringerNature": "Springer Nature",
        "Sage": "SAGE",
        "TaylorFrancis": "Taylor & Francis"
    }
    publisher_normalized = lookup_journaldb_publisher.get(publisher, publisher)
    global all_journal_metadata
    response = {}
    for issn_l, journal_metadata in all_journal_metadata.items():
        if journal_metadata.publisher == publisher_normalized:
            response[issn_l] = journal_metadata
    return response
def get_journal_metadata_for_publisher_currently_subscription(publisher):
    """Like get_journal_metadata_for_publisher, restricted to journals that
    are currently subscription journals."""
    publisher_journals = get_journal_metadata_for_publisher(publisher)
    return {
        issn_l: journal_metadata
        for issn_l, journal_metadata in publisher_journals.items()
        if journal_metadata.is_current_subscription_journal
    }
# Load every JournalMetadata row once at import time and index it two ways:
#   all_journal_metadata:      issn_l   -> JournalMetadata
#   all_journal_metadata_flat: any ISSN -> JournalMetadata
print("loading all journal metadata...", end=' ')
start_time = time()
all_journal_metadata_list = JournalMetadata.query.all()
# Detach the ORM objects from the session so they remain usable later.
# (Was a side-effect-only list comprehension; a plain loop is the idiom.)
for my_journal_metadata in all_journal_metadata_list:
    db.session.expunge(my_journal_metadata)
# (Was dict(list(zip(...))); a dict comprehension avoids the extra lists.)
all_journal_metadata = {
    journal_object.issn_l: journal_object
    for journal_object in all_journal_metadata_list
}
all_journal_metadata_flat = {}
for issn_l, journal_metadata in all_journal_metadata.items():
    for issn in journal_metadata.issns:
        all_journal_metadata_flat[issn] = journal_metadata
print("loaded all journal metadata in {} seconds.".format(elapsed(start_time)))
# Usage:
#   python journalsdb.py --recompute
#   heroku run --size=performance-l python journalsdb.py --recompute -r heroku
if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--recompute",
        action="store_true",
        default=False,
        help="Update journal metadata",
    )
    cli_args = arg_parser.parse_args()
    if cli_args.recompute:
        recompute_journal_metadata()
| 37.861789 | 144 | 0.680839 |
9bcbd33fcaed5f2e1c5fac75f18a910da399b0ab | 2,739 | py | Python | pbpl/compton/extrude_vrml.py | ucla-pbpl/pbpl-compton | a5afcdffc778f61a4726d7c5a231af2bca466900 | [
"MIT"
] | 2 | 2019-09-24T23:52:58.000Z | 2020-06-03T20:59:33.000Z | pbpl/compton/extrude_vrml.py | ucla-pbpl/pbpl-compton | a5afcdffc778f61a4726d7c5a231af2bca466900 | [
"MIT"
] | null | null | null | pbpl/compton/extrude_vrml.py | ucla-pbpl/pbpl-compton | a5afcdffc778f61a4726d7c5a231af2bca466900 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys, os
import argparse
from argparse import RawDescriptionHelpFormatter
import numpy as np
import re as regex
def add_bool_arg(parser, name, default=False, help=None):
    """Register paired --<name> / --no-<name> boolean flags on *parser*.

    Both flags write to the single destination argparse derives from
    ``name`` (hyphens become underscores), so ``args.<name>`` reflects
    whichever flag was given, or *default* when neither is present.

    BUG FIX: previously the --no-<name> flag had its own auto-derived dest
    ('no_<name>') and set_defaults used the hyphenated name, so neither the
    no-flag nor the default ever reached ``args.<name>``.
    """
    dest = name.replace('-', '_')
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument('--' + name, dest=dest, action='store_true', help=help)
    group.add_argument('--no-' + name, dest=dest, action='store_false')
    parser.set_defaults(**{dest: default})
def get_parser():
    """Build the command-line argument parser for the VRML extruder."""
    p = argparse.ArgumentParser(
        formatter_class=RawDescriptionHelpFormatter,
        description='Extrude VRML lines into 3D shapes.',
        epilog=('Example:\n'
                ' > pbpl-compton-extrude-vrml g4_00.wrl\n\n'
                "Reads 'g4_00.wrl' and overwrites 'g4_00.wrl'"))
    p.add_argument(
        '--output', metavar='VRML', type=str, default=None,
        help='Specify output filename (default overwrites input)')
    p.add_argument(
        '--radius', metavar='FLOAT', type=float, default=1.0,
        help='Cross section radius (default=1)')
    p.add_argument(
        '--num-points', metavar='INT', type=int, default=8,
        help='Number of points in circular cross section (default=8)')
    add_bool_arg(
        p, 'name-solids', False,
        'Use Geant4 volume names in VRML (default=no)')
    p.add_argument(
        'input', metavar='INPUT', type=str,
        help='Input filename (VRML format)')
    return p
def get_args():
    """Parse command-line arguments, defaulting --output to the input file.

    BUG FIX: the default output used to be '<input>.h5', but both the
    --output help text ("default overwrites input") and the parser epilog
    ("Reads 'g4_00.wrl' and overwrites 'g4_00.wrl'") document in-place
    overwriting of the VRML input — and '.h5' is not a VRML extension.
    """
    parser = get_parser()
    args = parser.parse_args()
    if args.output is None:
        args.output = args.input
    return args
def extrude(vin, num_points, r0):
    """Replace every IndexedLineSet in *vin* with an Extrusion of radius *r0*.

    The spine is the original point list; the cross section is a closed
    circle of *num_points* segments.
    """
    line_set_pat = regex.compile(
        r'geometry\s+IndexedLineSet\s*\{\s*coord\s*Coordinate\s*\{'
        r'\s*point\s*\[([^]]*)[^}]*\}[^}]*\}')
    # Closed circular cross section: the first point is repeated at 2*pi.
    theta = np.linspace(0, 2 * np.pi, num_points + 1)
    circle = np.array((r0 * np.cos(theta), r0 * np.sin(theta))).T
    cross = ''.join('{:.2f} {:.2f}, '.format(px, py) for px, py in circle)
    def repl(match):
        return ('geometry Extrusion {\nspine [\n' + match[1] + ']\n'
                + 'crossSection [' + cross + ']\n'
                + ' }\n')
    return line_set_pat.sub(repl, vin)
def name_solids(vin):
    """Turn Geant4 '#---------- SOLID: <name>' markers into VRML DEF names."""
    solid_pat = regex.compile(
        r'#---------- SOLID: (\S+)\s+Shape {')
    return solid_pat.sub(lambda m: 'DEF ' + m.group(1) + ' Shape {', vin)
def main():
    """Entry point: read the VRML input, extrude line sets, optionally
    re-attach Geant4 solid names, and write the result."""
    args = get_args()
    with open(args.input, 'r') as source:
        contents = source.read()
    result = extrude(contents, args.num_points, args.radius)
    if args.name_solids:
        result = name_solids(result)
    with open(args.output, 'w') as sink:
        sink.write(result)
if __name__ == '__main__':
    sys.exit(main())
| 33.402439 | 70 | 0.608251 |
d623525629006fa2cc7549cadc49bdef29f0026e | 1,546 | py | Python | tests/integration_tests/test_suite.py | raineydavid/mindsdb | 4e2458825bad43440067376d1d747db646d07bf3 | [
"MIT"
] | null | null | null | tests/integration_tests/test_suite.py | raineydavid/mindsdb | 4e2458825bad43440067376d1d747db646d07bf3 | [
"MIT"
] | null | null | null | tests/integration_tests/test_suite.py | raineydavid/mindsdb | 4e2458825bad43440067376d1d747db646d07bf3 | [
"MIT"
] | null | null | null | from run_example import run_example
from generated_data_tests import *
import multiprocessing
import os
# Run the generated data tests
# Each synthetic-data test is exercised against both ML backends.
for backend in ['ludwig', 'lightwood']:
    test_one_label_prediction_wo_strings(backend)
    test_timeseries(backend)
    test_multilabel_prediction(backend)
    test_one_label_prediction(backend)
# Run the CI tests
# NOTE(review): shells out relative to the current working directory —
# this assumes the script is launched from tests/integration_tests.
os.system('cd ..; cd ci_tests; python3 full_test.py')
# Run the example datasets
# Each entry: dataset name, whether to use a subsample, and the minimum
# accuracy expected for the run to be considered a success.
datasets = [{
    'name':'default_of_credit',
    'sample':True,
    'expect_accuracy_above':72
},{
    'name':'imdb_movie_review',
    'sample':False,
    'expect_accuracy_above':83
},{
    'name':'cifar_100',
    'sample':True,
    'expect_accuracy_above': 40 # For full dataset: 69
}]
for dataset in datasets:
    dataset_name = dataset['name']
    res = run_example(dataset_name, sample=dataset['sample'])
    acc = res['accuracy']
    ex_acc = dataset['expect_accuracy_above']
    # Warn (but do not fail hard) when a dataset misses its accuracy floor.
    if acc < ex_acc:
        print('\n\n\n============WARNING===============\n\n\n')
        print(f'Expected an accuracy above {ex_acc} for dataset {dataset_name}.')
        print(f'Got accuracy of {acc} instead.')
        print('\n\n\n==================================\n\n\n')
    else:
        print('\n\n\n============SUCCESS===============\n\n\n')
        print(f'Example dataset {dataset_name}, ran with success')
        print(f'Got accuracy of {acc} !')
        print('\n\n\n==================================\n\n\n')
# Disabled parallel variant of the loop above, kept for reference:
#with multiprocessing.Pool(max(len(datasets),6)) as pool:
#    pool.map(run_example,datasets)
| 28.62963 | 81 | 0.61837 |
18e32f821a1882d8d685d1602a851ce5092c6da5 | 14,260 | py | Python | mne/cuda.py | dgwakeman/mne-python | 3cc7a3f8456d78c828355f1860dd7e0297e59c73 | [
"BSD-3-Clause"
] | null | null | null | mne/cuda.py | dgwakeman/mne-python | 3cc7a3f8456d78c828355f1860dd7e0297e59c73 | [
"BSD-3-Clause"
] | null | null | null | mne/cuda.py | dgwakeman/mne-python | 3cc7a3f8456d78c828355f1860dd7e0297e59c73 | [
"BSD-3-Clause"
] | null | null | null | # Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from scipy.fftpack import fft, ifft
from .utils import sizeof_fmt, logger, get_config
# Support CUDA for FFTs; requires scikits.cuda and pycuda
# Module-level CUDA state: init_cuda() flips _cuda_capable to True and
# compiles the three complex128 elementwise GPU kernels below on success.
_cuda_capable = False
_multiply_inplace_c128 = _halve_c128 = _real_c128 = None
def get_cuda_memory():
    """Return the free CUDA device memory as a human-readable string.

    Returns
    -------
    memory : str
        Amount of available memory; the formatted value for 0 bytes when
        CUDA has not been enabled.
    """
    if not _cuda_capable:
        logger.warning('CUDA not enabled, returning zero for memory')
        return sizeof_fmt(0)
    from pycuda.driver import mem_get_info
    free_bytes = mem_get_info()[0]
    return sizeof_fmt(free_bytes)
def init_cuda(ignore_config=False):
    """Initialize CUDA functionality
    This function attempts to load the necessary interfaces
    (hardware connectivity) to run CUDA-based filtering. This
    function should only need to be run once per session.
    If the config var (set via mne.set_config or in ENV)
    MNE_USE_CUDA == 'true', this function will be executed when
    the first CUDA setup is performed. If this variable is not
    set, this function can be manually executed.
    """
    global _cuda_capable, _multiply_inplace_c128, _halve_c128, _real_c128
    if _cuda_capable:
        return
    if not ignore_config and (get_config('MNE_USE_CUDA', 'false').lower() !=
                              'true'):
        logger.info('CUDA not enabled in config, skipping initialization')
        return
    # Triage possible errors for informative messaging
    _cuda_capable = False
    try:
        from pycuda import gpuarray, driver  # noqa
        from pycuda.elementwise import ElementwiseKernel
    except ImportError:
        logger.warning('module pycuda not found, CUDA not enabled')
        return
    try:
        # Initialize CUDA; happens with importing autoinit
        import pycuda.autoinit  # noqa
    except ImportError:
        logger.warning('pycuda.autoinit could not be imported, likely '
                       'a hardware error, CUDA not enabled')
        return
    # Make sure scikits.cuda is installed
    try:
        from scikits.cuda import fft as cudafft
    except ImportError:
        logger.warning('module scikits.cuda not found, CUDA not '
                       'enabled')
        return
    # let's construct our own CUDA multiply in-place function
    _multiply_inplace_c128 = ElementwiseKernel(
        'pycuda::complex<double> *a, pycuda::complex<double> *b',
        'b[i] *= a[i]', 'multiply_inplace')
    _halve_c128 = ElementwiseKernel(
        'pycuda::complex<double> *a', 'a[i] /= 2.0', 'halve_value')
    _real_c128 = ElementwiseKernel(
        'pycuda::complex<double> *a', 'a[i] = real(a[i])', 'real_value')
    # Make sure we can use 64-bit FFTs
    try:
        cudafft.Plan(16, np.float64, np.complex128)  # will get auto-GC'ed
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit during initialization.
        logger.warning('Device does not support 64-bit FFTs, '
                       'CUDA not enabled')
        return
    _cuda_capable = True
    # Figure out limit for CUDA FFT calculations
    logger.info('Enabling CUDA with %s available memory' % get_cuda_memory())
###############################################################################
# Repeated FFT multiplication
def setup_cuda_fft_multiply_repeated(n_jobs, h_fft):
    """Set up repeated CUDA FFT multiplication with a given filter
    Parameters
    ----------
    n_jobs : int | str
        If n_jobs == 'cuda', the function will attempt to set up for CUDA
        FFT multiplication.
    h_fft : array
        The filtering function that will be used repeatedly.
        If n_jobs='cuda', this function will be shortened (since CUDA
        assumes FFTs of real signals are half the length of the signal)
        and turned into a gpuarray.
    Returns
    -------
    n_jobs : int
        Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise
        original n_jobs is passed.
    cuda_dict : dict
        Dictionary with the following CUDA-related variables:
        use_cuda : bool
            Whether CUDA should be used.
        fft_plan : instance of FFTPlan
            FFT plan to use in calculating the FFT.
        ifft_plan : instance of FFTPlan
            FFT plan to use in calculating the IFFT.
        x_fft : instance of gpuarray
            Empty allocated GPU space for storing the result of the
            frequency-domain multiplication.
        x : instance of gpuarray
            Empty allocated GPU space for the data to filter.
        h_fft : array | instance of gpuarray
            This will either be a gpuarray (if CUDA enabled) or np.ndarray.
            If CUDA is enabled, h_fft will be modified appropriately for use
            with filter.fft_multiply().
    Notes
    -----
    This function is designed to be used with fft_multiply_repeated().
    """
    # Defaults describe the CPU fallback; overwritten only on CUDA success.
    cuda_dict = dict(use_cuda=False, fft_plan=None, ifft_plan=None,
                     x_fft=None, x=None)
    n_fft = len(h_fft)
    # An FFT of a real signal has only n // 2 + 1 unique (non-redundant) bins,
    # so that is all we need to allocate/transfer on the GPU.
    cuda_fft_len = int((n_fft - (n_fft % 2)) / 2 + 1)
    if n_jobs == 'cuda':
        n_jobs = 1
        init_cuda()
        if _cuda_capable:
            from pycuda import gpuarray
            from scikits.cuda import fft as cudafft
            # set up all arrays necessary for CUDA
            # try setting up for float64
            try:
                # do the IFFT normalization now so we don't have to later
                h_fft = gpuarray.to_gpu(h_fft[:cuda_fft_len]
                                        .astype('complex_') / len(h_fft))
                cuda_dict.update(
                    use_cuda=True,
                    fft_plan=cudafft.Plan(n_fft, np.float64, np.complex128),
                    ifft_plan=cudafft.Plan(n_fft, np.complex128, np.float64),
                    x_fft=gpuarray.empty(cuda_fft_len, np.complex128),
                    x=gpuarray.empty(int(n_fft), np.float64))
                logger.info('Using CUDA for FFT FIR filtering')
            except Exception:
                # GPU allocation failed (e.g. out of device memory); the
                # CPU defaults in cuda_dict remain in effect.
                logger.info('CUDA not used, could not instantiate memory '
                            '(arrays may be too large), falling back to '
                            'n_jobs=1')
        else:
            logger.info('CUDA not used, CUDA could not be initialized, '
                        'falling back to n_jobs=1')
    return n_jobs, cuda_dict, h_fft
def fft_multiply_repeated(h_fft, x, cuda_dict=None):
    """Do FFT multiplication by a filter function (possibly using CUDA)
    Parameters
    ----------
    h_fft : 1-d array or gpuarray
        The filtering array to apply.
    x : 1-d array
        The array to filter.
    cuda_dict : dict | None
        Dictionary constructed using setup_cuda_multiply_repeated().
        If None (default), the CPU code path is used.
    Returns
    -------
    x : 1-d array
        Filtered version of x.
    """
    if cuda_dict is None:
        # BUG FIX: was a mutable default argument (dict(use_cuda=False)),
        # shared across all calls; None is the safe sentinel.
        cuda_dict = dict(use_cuda=False)
    if not cuda_dict['use_cuda']:
        # do the fourier-domain operations
        x = np.real(ifft(h_fft * fft(x), overwrite_x=True)).ravel()
    else:
        from scikits.cuda import fft as cudafft
        # do the fourier-domain operations, results in second param
        cuda_dict['x'].set(x.astype(np.float64))
        cudafft.fft(cuda_dict['x'], cuda_dict['x_fft'], cuda_dict['fft_plan'])
        _multiply_inplace_c128(h_fft, cuda_dict['x_fft'])
        # If we wanted to do it locally instead of using our own kernel:
        # cuda_seg_fft.set(cuda_seg_fft.get() * h_fft)
        cudafft.ifft(cuda_dict['x_fft'], cuda_dict['x'],
                     cuda_dict['ifft_plan'], False)
        x = np.array(cuda_dict['x'].get(), dtype=x.dtype, subok=True,
                     copy=False)
    return x
###############################################################################
# FFT Resampling
def setup_cuda_fft_resample(n_jobs, W, new_len):
    """Set up CUDA FFT resampling
    Parameters
    ----------
    n_jobs : int | str
        If n_jobs == 'cuda', the function will attempt to set up for CUDA
        FFT resampling.
    W : array
        The filtering function to be used during resampling.
        If n_jobs='cuda', this function will be shortened (since CUDA
        assumes FFTs of real signals are half the length of the signal)
        and turned into a gpuarray.
    new_len : int
        The size of the array following resampling.
    Returns
    -------
    n_jobs : int
        Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise
        original n_jobs is passed.
    cuda_dict : dict
        Dictionary with the following CUDA-related variables:
        use_cuda : bool
            Whether CUDA should be used.
        fft_plan : instance of FFTPlan
            FFT plan to use in calculating the FFT.
        ifft_plan : instance of FFTPlan
            FFT plan to use in calculating the IFFT.
        x_fft : instance of gpuarray
            Empty allocated GPU space for storing the result of the
            frequency-domain multiplication.
        x : instance of gpuarray
            Empty allocated GPU space for the data to resample.
        W : array | instance of gpuarray
            This will either be a gpuarray (if CUDA enabled) or np.ndarray.
            If CUDA is enabled, W will be modified appropriately for use
            with filter.fft_multiply().
    Notes
    -----
    This function is designed to be used with fft_resample().
    """
    # Defaults describe the CPU fallback; overwritten only on CUDA success.
    cuda_dict = dict(use_cuda=False, fft_plan=None, ifft_plan=None,
                     x_fft=None, x=None, y_fft=None, y=None)
    n_fft_x, n_fft_y = len(W), new_len
    # Real-signal FFTs have only n // 2 + 1 unique bins; size GPU buffers
    # for the larger of the input/output spectra.
    cuda_fft_len_x = int((n_fft_x - (n_fft_x % 2)) // 2 + 1)
    cuda_fft_len_y = int((n_fft_y - (n_fft_y % 2)) // 2 + 1)
    if n_jobs == 'cuda':
        n_jobs = 1
        init_cuda()
        if _cuda_capable:
            # try setting up for float64
            from pycuda import gpuarray
            from scikits.cuda import fft as cudafft
            try:
                # do the IFFT normalization now so we don't have to later
                W = gpuarray.to_gpu(W[:cuda_fft_len_x]
                                    .astype('complex_') / n_fft_y)
                cuda_dict.update(
                    use_cuda=True,
                    fft_plan=cudafft.Plan(n_fft_x, np.float64, np.complex128),
                    ifft_plan=cudafft.Plan(n_fft_y, np.complex128, np.float64),
                    x_fft=gpuarray.zeros(max(cuda_fft_len_x,
                                             cuda_fft_len_y), np.complex128),
                    x=gpuarray.empty(max(int(n_fft_x),
                                         int(n_fft_y)), np.float64))
                logger.info('Using CUDA for FFT resampling')
            except Exception:
                # GPU allocation failed (e.g. out of device memory); the
                # CPU defaults in cuda_dict remain in effect.
                logger.info('CUDA not used, could not instantiate memory '
                            '(arrays may be too large), falling back to '
                            'n_jobs=1')
        else:
            logger.info('CUDA not used, CUDA could not be initialized, '
                        'falling back to n_jobs=1')
    return n_jobs, cuda_dict, W
def fft_resample(x, W, new_len, npad, to_remove,
                 cuda_dict=None):
    """Do FFT resampling with a filter function (possibly using CUDA)
    Parameters
    ----------
    x : 1-d array
        The array to resample.
    W : 1-d array or gpuarray
        The filtering function to apply.
    new_len : int
        The size of the output array (before removing padding).
    npad : int
        Amount of padding to apply before resampling.
    to_remove : int
        Number of samples to remove after resampling.
    cuda_dict : dict | None
        Dictionary constructed using setup_cuda_multiply_repeated().
        If None (default), the CPU code path is used.
    Returns
    -------
    x : 1-d array
        Filtered version of x.
    """
    if cuda_dict is None:
        # BUG FIX: was a mutable default argument (dict(use_cuda=False)),
        # shared across all calls; None is the safe sentinel.
        cuda_dict = dict(use_cuda=False)
    # add some padding at beginning and end to make this work a little cleaner
    x = _smart_pad(x, npad)
    old_len = len(x)
    shorter = new_len < old_len
    if not cuda_dict['use_cuda']:
        N = int(min(new_len, old_len))
        sl_1 = slice((N + 1) // 2)
        y_fft = np.zeros(new_len, np.complex128)
        x_fft = fft(x).ravel() * W
        y_fft[sl_1] = x_fft[sl_1]
        sl_2 = slice(-(N - 1) // 2, None)
        y_fft[sl_2] = x_fft[sl_2]
        y = np.real(ifft(y_fft, overwrite_x=True)).ravel()
    else:
        from scikits.cuda import fft as cudafft
        cuda_dict['x'].set(np.concatenate((x, np.zeros(max(new_len - old_len,
                                                           0), x.dtype))))
        # do the fourier-domain operations, results put in second param
        cudafft.fft(cuda_dict['x'], cuda_dict['x_fft'], cuda_dict['fft_plan'])
        _multiply_inplace_c128(W, cuda_dict['x_fft'])
        # This is not straightforward, but because x_fft and y_fft share
        # the same data (and only one half of the full DFT is stored), we
        # don't have to transfer the slice like we do in scipy. All we
        # need to worry about is the Nyquist component, either halving it
        # or taking just the real component...
        use_len = new_len if shorter else old_len
        func = _real_c128 if shorter else _halve_c128
        if use_len % 2 == 0:
            nyq = int((use_len - (use_len % 2)) // 2)
            func(cuda_dict['x_fft'], slice=slice(nyq, nyq + 1))
        cudafft.ifft(cuda_dict['x_fft'], cuda_dict['x'],
                     cuda_dict['ifft_plan'], scale=False)
        y = cuda_dict['x'].get()[:new_len if shorter else None]
    # now let's trim it back to the correct size (if there was padding)
    if to_remove > 0:
        keep = np.ones((new_len), dtype='bool')
        keep[:to_remove] = False
        keep[-to_remove:] = False
        y = np.compress(keep, y)
    return y
###############################################################################
# Misc
# this has to go in mne.cuda instead of mne.filter to avoid import errors
def _smart_pad(x, n_pad):
"""Pad vector x
"""
# need to pad with zeros if len(x) <= npad
z_pad = np.zeros(max(n_pad - len(x) + 1, 0), dtype=x.dtype)
return np.r_[z_pad, 2 * x[0] - x[n_pad:0:-1], x,
2 * x[-1] - x[-2:-n_pad - 2:-1], z_pad]
| 38.75 | 79 | 0.58878 |
9aa959a27bd1ff733e2993fdd530a3c8b4d58b95 | 179 | py | Python | tools.py | cclai999/pyxl-stock | 3c0bb2f3e17f88770d16e9cb7171d56757a451b4 | [
"MIT"
] | null | null | null | tools.py | cclai999/pyxl-stock | 3c0bb2f3e17f88770d16e9cb7171d56757a451b4 | [
"MIT"
] | null | null | null | tools.py | cclai999/pyxl-stock | 3c0bb2f3e17f88770d16e9cb7171d56757a451b4 | [
"MIT"
] | 1 | 2020-12-27T01:55:19.000Z | 2020-12-27T01:55:19.000Z | import requests
def get_html_to_file(url: str, fname: str):
    """Download *url* and write the response body text to *fname*.

    Raises requests.HTTPError for non-2xx responses.
    """
    resp = requests.get(url)
    resp.raise_for_status()
    # Use a context manager so the file is closed even if write() raises,
    # and write with an explicit encoding instead of the locale default.
    with open(fname, "w", encoding=resp.encoding or "utf-8") as f:
        f.write(resp.text)
e78eb5498fbaab2ee69f1fa96924cffb24117d06 | 658 | py | Python | CodeWars/reflection.py | SabariVig/python-programs | 682f11df8157d4a070c294eded341f86845e38a8 | [
"MIT"
] | null | null | null | CodeWars/reflection.py | SabariVig/python-programs | 682f11df8157d4a070c294eded341f86845e38a8 | [
"MIT"
] | 1 | 2018-07-11T10:49:29.000Z | 2018-07-11T10:51:25.000Z | CodeWars/reflection.py | SabariVig/python-programs | 682f11df8157d4a070c294eded341f86845e38a8 | [
"MIT"
] | null | null | null | ################## CODE WARS KATA ##################
#KYU LEVEL : 6
#KATA TITLE : Framed Reflection
#KATA LINK : https://www.codewars.com/kata/framed-reflection/train/python
#####################################################
def mirror(text):
    """Reverse each word of *text* and frame the result with asterisks.

    Each word appears on its own line as '* <reversed word> *', padded so
    every line matches the longest word; the frame is closed top and
    bottom with a row of '*'. Assumes *text* contains at least one word.

    BUG FIX: the frame width was previously taken from the *first* word
    only, so inputs whose later words were longer produced broken frames.
    """
    words = text.split()
    width = max(len(word) for word in words)
    border = "*" * (width + 4)
    lines = [border]
    for word in words:
        lines.append("* " + word[::-1].ljust(width) + " *")
    lines.append(border)
    return "\n".join(lines)
################## TEST CASES ##################
# mirror("Hello World")
print(mirror("Hello World"), "*********\n* olleH *\n* dlroW *\n*********");
# print(mirror("Codewars"), "************\n* srawedoC *\n************");
| 24.37037 | 75 | 0.428571 |
c8ca6dc58b440770b67528e46f34aceb958aa7f3 | 540 | py | Python | ch6/exercises/many_users.py | hewittaj/python_crash_course | 52a3341eec79c2eb6c7f9f1cb7f0806c3b2d61aa | [
"MIT"
] | null | null | null | ch6/exercises/many_users.py | hewittaj/python_crash_course | 52a3341eec79c2eb6c7f9f1cb7f0806c3b2d61aa | [
"MIT"
] | null | null | null | ch6/exercises/many_users.py | hewittaj/python_crash_course | 52a3341eec79c2eb6c7f9f1cb7f0806c3b2d61aa | [
"MIT"
] | null | null | null | users = {
'aeinstein': {
'first': 'albert',
'last': 'einstein',
'location': 'princeton',
},
'mcurie': {
'first': 'marie',
'last': 'curie',
'location': 'paris',
}
}
# Print a short profile report for every registered user.
for username, profile in users.items():
    print(f"\nUsername: {username}")
    full_name = " ".join([profile['first'], profile['last']])
    print(f"\tFull name: {full_name.title()}")
    print(f"\tLocation: {profile['location'].title()}")
deb1a632f467ea3e0f90c69af4b0cb0e28670f51 | 10,497 | py | Python | datasets/static_detmot.py | anonymous4669/MOTR | c60d8108f169668e65d59e833d0052ba043c2a4d | [
"MIT"
] | null | null | null | datasets/static_detmot.py | anonymous4669/MOTR | c60d8108f169668e65d59e833d0052ba043c2a4d | [
"MIT"
] | null | null | null | datasets/static_detmot.py | anonymous4669/MOTR | c60d8108f169668e65d59e833d0052ba043c2a4d | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------
# Copyright (c) 2021 4669 (for eccv submission only). All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
MOT dataset which returns image_id for evaluation.
"""
from pathlib import Path
import cv2
import numpy as np
import torch
import torch.utils.data
import os.path as osp
from PIL import Image, ImageDraw
import copy
import datasets.transforms as T
from models.structures import Instances
class DetMOTDetection:
    """Detection dataset that fakes short MOT clips from static images by
    repeating each frame ``num_frames_per_batch`` times per sample."""

    def __init__(self, args, data_txt_path: str, seqs_folder, transforms):
        """Read the image list from *data_txt_path* and set up sampling.

        args carries sampler_lengths/sampler_steps (curriculum over epochs),
        sample_mode/sample_interval, and vis; *transforms* is applied to the
        whole per-sample frame list.
        """
        self.args = args
        self._transforms = transforms
        self.num_frames_per_batch = max(args.sampler_lengths)
        self.sample_mode = args.sample_mode
        self.sample_interval = args.sample_interval
        self.vis = args.vis
        self.video_dict = {}
        with open(data_txt_path, 'r') as file:
            self.img_files = file.readlines()
            self.img_files = [osp.join(seqs_folder, x.strip()) for x in self.img_files]
            self.img_files = list(filter(lambda x: len(x) > 0, self.img_files))
        # Label files mirror the image layout under labels_with_ids/*.txt.
        self.label_files = [(x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt'))
                            for x in self.img_files]
        # The number of images per sample: 1 + (num_frames - 1) * interval.
        # The number of valid samples: num_images - num_image_per_sample + 1.
        self.item_num = len(self.img_files) - (self.num_frames_per_batch - 1) * self.sample_interval
        self._register_videos()
        # video sampler.
        self.sampler_steps: list = args.sampler_steps
        self.lengths: list = args.sampler_lengths
        print("sampler_steps={} lenghts={}".format(self.sampler_steps, self.lengths))
        if self.sampler_steps is not None and len(self.sampler_steps) > 0:
            # Enable sampling length adjustment.
            assert len(self.lengths) > 0
            assert len(self.lengths) == len(self.sampler_steps) + 1
            for i in range(len(self.sampler_steps) - 1):
                assert self.sampler_steps[i] < self.sampler_steps[i + 1]
            self.item_num = len(self.img_files) - (self.lengths[-1] - 1) * self.sample_interval
            self.period_idx = 0
            self.num_frames_per_batch = self.lengths[0]
            self.current_epoch = 0

    def _register_videos(self):
        """Assign a sequential id to each distinct video (label directory)."""
        for label_name in self.label_files:
            video_name = '/'.join(label_name.split('/')[:-1])
            if video_name not in self.video_dict:
                print("register {}-th video: {} ".format(len(self.video_dict) + 1, video_name))
                self.video_dict[video_name] = len(self.video_dict)
                # object-id offsets below allocate 100000 ids per video
                assert len(self.video_dict) <= 300

    def set_epoch(self, epoch):
        """Update the sampling length for *epoch* per the sampler schedule."""
        self.current_epoch = epoch
        if self.sampler_steps is None or len(self.sampler_steps) == 0:
            # fixed sampling length.
            return
        for i in range(len(self.sampler_steps)):
            if epoch >= self.sampler_steps[i]:
                self.period_idx = i + 1
        print("set epoch: epoch {} period_idx={}".format(epoch, self.period_idx))
        self.num_frames_per_batch = self.lengths[self.period_idx]

    def step_epoch(self):
        # one epoch finishes.
        print("Dataset: epoch {} finishes".format(self.current_epoch))
        self.set_epoch(self.current_epoch + 1)

    @staticmethod
    def _targets_to_instances(targets: dict, img_shape) -> Instances:
        """Pack a targets dict into an Instances object of size *img_shape*."""
        gt_instances = Instances(tuple(img_shape))
        gt_instances.boxes = targets['boxes']
        gt_instances.labels = targets['labels']
        gt_instances.obj_ids = targets['obj_ids']
        gt_instances.area = targets['area']
        return gt_instances

    def _pre_single_frame(self, idx: int):
        """Load image *idx* and its labels; return (PIL image, targets dict)."""
        img_path = self.img_files[idx].replace('.jpg', '.png')
        label_path = self.label_files[idx]
        img = Image.open(img_path)
        targets = {}
        # BUG FIX: was `img._size` (Pillow-private attribute); `size` is the
        # public (width, height) property.
        w, h = img.size
        assert w > 0 and h > 0, "invalid image {} with shape {} {}".format(img_path, w, h)
        if osp.isfile(label_path):
            labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
            # normalized cewh to pixel xyxy format
            labels = labels0.copy()
            labels[:, 2] = w * (labels0[:, 2] - labels0[:, 4] / 2)
            labels[:, 3] = h * (labels0[:, 3] - labels0[:, 5] / 2)
            labels[:, 4] = w * (labels0[:, 2] + labels0[:, 4] / 2)
            labels[:, 5] = h * (labels0[:, 3] + labels0[:, 5] / 2)
        else:
            raise ValueError('invalid label path: {}'.format(label_path))
        video_name = '/'.join(label_path.split('/')[:-1])
        obj_idx_offset = self.video_dict[video_name] * 100000  # 100000 unique ids is enough for a video.
        targets['boxes'] = []
        targets['area'] = []
        targets['iscrowd'] = []
        targets['labels'] = []
        targets['obj_ids'] = []
        targets['image_id'] = torch.as_tensor(idx)
        targets['size'] = torch.as_tensor([h, w])
        targets['orig_size'] = torch.as_tensor([h, w])
        for label in labels:
            targets['boxes'].append(label[2:6].tolist())
            targets['area'].append(label[4] * label[5])
            targets['iscrowd'].append(0)
            targets['labels'].append(0)
            # negative raw ids (unlabeled) are kept as-is, without the offset
            obj_id = label[1] + obj_idx_offset if label[1] >= 0 else label[1]
            targets['obj_ids'].append(obj_id)  # relative id
        targets['area'] = torch.as_tensor(targets['area'])
        targets['iscrowd'] = torch.as_tensor(targets['iscrowd'])
        targets['labels'] = torch.as_tensor(targets['labels'])
        targets['obj_ids'] = torch.as_tensor(targets['obj_ids'])
        targets['boxes'] = torch.as_tensor(targets['boxes'], dtype=torch.float32).reshape(-1, 4)
        targets['boxes'][:, 0::2].clamp_(min=0, max=w)
        targets['boxes'][:, 1::2].clamp_(min=0, max=h)
        return img, targets

    def _get_sample_range(self, start_idx):
        """Return (start, stop, step) of frame indices for one sample."""
        # take default sampling method for normal dataset.
        assert self.sample_mode in ['fixed_interval', 'random_interval'], 'invalid sample mode: {}'.format(self.sample_mode)
        if self.sample_mode == 'fixed_interval':
            sample_interval = self.sample_interval
        elif self.sample_mode == 'random_interval':
            sample_interval = np.random.randint(1, self.sample_interval + 1)
        default_range = start_idx, start_idx + (self.num_frames_per_batch - 1) * sample_interval + 1, sample_interval
        return default_range

    def pre_continuous_frames(self, idx):
        """Load the frame at *idx* repeatedly to simulate a short clip."""
        targets = []
        images = []
        # repeat current img and target for twice
        for i in range(self.num_frames_per_batch):
            img_i, targets_i = self._pre_single_frame(idx)
            images.append(img_i)
            targets.append(targets_i)
        return images, targets

    def __getitem__(self, idx):
        """Return {'imgs': [...], 'gt_instances': [...]} for sample *idx*."""
        images, targets = self.pre_continuous_frames(idx)
        data = {}
        if self._transforms is not None:
            images, targets = self._transforms(images, targets)
        gt_instances = []
        for img_i, targets_i in zip(images, targets):
            gt_instances_i = self._targets_to_instances(targets_i, img_i.shape[1:3])
            gt_instances.append(gt_instances_i)
        data.update({
            'imgs': images,
            'gt_instances': gt_instances,
        })
        if self.args.vis:
            data['ori_img'] = [target_i['ori_img'] for target_i in targets]
        return data

    def __len__(self):
        return self.item_num
class DetMOTDetectionValidation(DetMOTDetection):
    """Validation variant of DetMOTDetection using args.val_data_txt_path."""
    def __init__(self, args, seqs_folder, transforms):
        args.data_txt_path = args.val_data_txt_path
        # BUG FIX: the parent __init__ signature is
        # (args, data_txt_path, seqs_folder, transforms); the old call passed
        # only three arguments, shifting seqs_folder into the data_txt_path
        # slot and dropping transforms' position.
        super().__init__(args, args.val_data_txt_path, seqs_folder, transforms)
def make_detmot_transforms(image_set, args=None):
    """Build the MOT augmentation pipeline for the 'train' or 'val' split.

    Raises ValueError for any other *image_set* value.
    """
    normalize = T.MotCompose([
        T.MotToTensor(),
        T.MotNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    scales = [608, 640, 672, 704, 736, 768, 800, 832, 864, 896, 928, 960, 992]
    if image_set == 'val':
        return T.MotCompose([
            T.MotRandomResize([800], max_size=1536),
            normalize,
        ])
    if image_set == 'train':
        pipeline = []
        if args.cj:
            print('Training with RandomColorJitter.')
            pipeline.append(T.MoTColorJitter(brightness=0.5, contrast=0.5,
                                             saturation=0.5, hue=0))
        pipeline.append(T.MotRandomHorizontalFlip())
        pipeline.append(T.FixedMotRandomShift(bs=1))
        if args.crop:
            print('Training with RandomCrop.')
            pipeline.append(T.MotRandomSelect(
                T.MotRandomResize(scales, max_size=1536),
                T.MotCompose([
                    T.MotRandomResize([400, 500, 600]),
                    T.FixedMotRandomCrop(384, 600),
                    T.MotRandomResize(scales, max_size=1536),
                ])
            ))
        else:
            pipeline.append(T.MotRandomResize(scales, max_size=1536))
        pipeline.append(normalize)
        return T.MotCompose(pipeline)
    raise ValueError(f'unknown {image_set}')
def build(image_set, args):
    """Create the DetMOTDetection dataset for the 'train' or 'val' split.

    Raises
    ------
    ValueError
        If *image_set* is neither 'train' nor 'val'.
    """
    root = Path(args.mot_path)
    assert root.exists(), f'provided MOT path {root} does not exist'
    transforms = make_detmot_transforms(image_set, args)
    # Consolidate the previously duplicated construction; an explicit error
    # branch replaces the old fall-through (which left `dataset` unbound).
    if image_set == 'train':
        data_txt_path = args.data_txt_path_train
    elif image_set == 'val':
        data_txt_path = args.data_txt_path_val
    else:
        raise ValueError(f'unknown {image_set}')
    return DetMOTDetection(args, data_txt_path=data_txt_path,
                           seqs_folder=root, transforms=transforms)
| 41.988 | 124 | 0.592836 |
c59e3bc41c902f15b9f3348bd84e333cef7df67b | 1,458 | py | Python | 8a/app/main/views.py | fredsonchaves07/flask-course | 4e9a3a94c3c49595c1d810794ba7533499811b58 | [
"MIT"
] | 1 | 2021-01-05T01:29:22.000Z | 2021-01-05T01:29:22.000Z | 8a/app/main/views.py | fredsonchaves07/flask-course | 4e9a3a94c3c49595c1d810794ba7533499811b58 | [
"MIT"
] | null | null | null | 8a/app/main/views.py | fredsonchaves07/flask-course | 4e9a3a94c3c49595c1d810794ba7533499811b58 | [
"MIT"
] | null | null | null | from datetime import datetime
from flask import render_template, session, redirect, url_for, current_app
from . import main
from .forms import NameForm
from .. import db
from ..models import User
@main.route('/', methods=['GET', 'POST'])
def index():
    """Home page: greet the user and record submitted names.

    On a valid POST, the name is stored (creating a User row the first
    time), an admin notification is sent for new users, and the request is
    redirected (POST-redirect-GET). GET renders the form.
    """
    form = NameForm()
    if form.validate_on_submit():
        user = User.query.filter_by(name=form.name.data).first()
        if user is None:
            user = User(name=form.name.data)
            db.session.add(user)
            db.session.commit()
            session['known'] = False
            if current_app.config['FLASK_ADMIN']:
                # BUG FIX: was `app.config[...]` — `app` is not defined in a
                # blueprint module; use current_app.
                # NOTE(review): send_email is not imported in this module's
                # visible imports — confirm it is available at runtime.
                send_email(
                    current_app.config['FLASK_ADMIN'],
                    'New User',
                    'mail/new_user',
                    user=user
                )
        else:
            session['known'] = True
        session['name'] = form.name.data
        form.name.data = ''
        return redirect(url_for('main.index'))
    # BUG FIX: there were two render_template returns, one of them
    # unreachable; merged into a single call carrying all keyword args.
    return render_template('index.html',
                           form=form,
                           name=session.get('name'),
                           known=session.get('known', False),
                           current_time=datetime.utcnow())
@main.route('/user/<name>')
def user(name):
    """Render the profile page for the given user name."""
    return render_template('user.html', name=name)
| 28.588235 | 74 | 0.515775 |
309386312de56b30b7b1d3b06ce13e47c99e3b13 | 5,476 | py | Python | weaviate/gql/aggregate.py | ooxoo-bv/weaviate-python-client | f646a5c16b1c0cc7940b3ffa17a71efb6e96063a | [
"BSD-3-Clause"
] | 14 | 2019-11-04T14:18:21.000Z | 2022-03-31T09:11:51.000Z | weaviate/gql/aggregate.py | ooxoo-bv/weaviate-python-client | f646a5c16b1c0cc7940b3ffa17a71efb6e96063a | [
"BSD-3-Clause"
] | 91 | 2019-11-04T11:26:42.000Z | 2022-03-22T10:22:44.000Z | weaviate/gql/aggregate.py | ooxoo-bv/weaviate-python-client | f646a5c16b1c0cc7940b3ffa17a71efb6e96063a | [
"BSD-3-Clause"
] | 7 | 2021-05-14T14:53:42.000Z | 2022-03-31T15:09:55.000Z | """
GraphQL `Aggregate` command.
"""
import json
from typing import List, Optional
from weaviate.connect import Connection
from weaviate.util import _capitalize_first_letter
from .filter import Where, GraphQL
class AggregateBuilder(GraphQL):
    """Fluent builder for Weaviate GraphQL ``Aggregate`` queries."""

    def __init__(self, class_name: str, connection: Connection):
        """Initialize an AggregateBuilder instance.

        Parameters
        ----------
        class_name : str
            Class name of the objects to be aggregated.
        connection : weaviate.connect.Connection
            Connection object to an active and running Weaviate instance.
        """
        super().__init__(connection)
        self._class_name = _capitalize_first_letter(class_name)
        self._with_meta_count = False
        self._fields: List[str] = []
        self._where: Optional[Where] = None
        self._group_by_properties: Optional[List[str]] = None
        self._uses_filter = False

    def with_meta_count(self) -> 'AggregateBuilder':
        """Request the ``meta { count }`` aggregation.

        Returns
        -------
        weaviate.gql.aggregate.AggregateBuilder
            Updated AggregateBuilder.
        """
        self._with_meta_count = True
        return self

    def with_fields(self, field: str) -> 'AggregateBuilder':
        """Add a field clause to the aggregate query.

        Parameters
        ----------
        field : str
            Field to include in the aggregate query,
            e.g. '<property_name> { count }'.

        Returns
        -------
        weaviate.gql.aggregate.AggregateBuilder
            Updated AggregateBuilder.
        """
        self._fields.append(field)
        return self

    def with_where(self, content: dict) -> 'AggregateBuilder':
        """Attach a 'where' filter to the aggregate query.

        Parameters
        ----------
        content : dict
            The where-filter description. Either a single operand, e.g.::

                {'path': ["wordCount"],
                 'operator': 'GreaterThan',
                 'valueInt': 1000}

            or a compound filter combining several operands::

                {'operator': 'And',
                 'operands': [
                     {'path': ["wordCount"],
                      'operator': 'GreaterThan',
                      'valueInt': 1000},
                     {'path': ["wordCount"],
                      'operator': 'LessThan',
                      'valueInt': 1500},
                 ]}

        Returns
        -------
        weaviate.gql.aggregate.AggregateBuilder
            Updated AggregateBuilder.
        """
        self._where = Where(content)
        self._uses_filter = True
        return self

    def with_group_by_filter(self, properties: List[str]) -> 'AggregateBuilder':
        """Attach a 'groupBy' filter to the aggregate query.

        May require an additional group-by clause to be added via
        ``with_fields(..)``.

        Parameters
        ----------
        properties : list of str
            Properties to group by; ["property1", "property2"] produces the
            filter 'groupBy: ["property1", "property2"]'.

        Returns
        -------
        weaviate.gql.aggregate.AggregateBuilder
            Updated AggregateBuilder.
        """
        self._group_by_properties = properties
        self._uses_filter = True
        return self

    def build(self) -> str:
        """Assemble the configured query.

        Returns
        -------
        str
            The GraphQL query as a string.
        """
        # Opening path: '{Aggregate{<ClassName>'.
        query = "{Aggregate{" + self._class_name
        # Optional filter arguments rendered as '(<where><groupBy>)'.
        if self._uses_filter:
            filter_args = []
            if self._where is not None:
                filter_args.append(str(self._where))
            if self._group_by_properties is not None:
                filter_args.append(
                    "groupBy: " + json.dumps(self._group_by_properties))
            query = query + "(" + "".join(filter_args) + ")"
        # Body: requested aggregations, then close all braces.
        body_parts = []
        if self._with_meta_count:
            body_parts.append("meta{count}")
        body_parts.extend(self._fields)
        return query + "{" + "".join(body_parts) + "}}}"
| 27.796954 | 99 | 0.496348 |
f8668f4c0e56e2ec28528994aad4910ff6b21ad0 | 14,333 | py | Python | tfx/orchestration/portable/mlmd/execution_lib.py | dhruvesh09/tfx | dc99696cc87752a4e79d4aff85d42f93e06943f1 | [
"Apache-2.0"
] | 1 | 2019-10-02T18:03:55.000Z | 2019-10-02T18:03:55.000Z | tfx/orchestration/portable/mlmd/execution_lib.py | dhruvesh09/tfx | dc99696cc87752a4e79d4aff85d42f93e06943f1 | [
"Apache-2.0"
] | null | null | null | tfx/orchestration/portable/mlmd/execution_lib.py | dhruvesh09/tfx | dc99696cc87752a4e79d4aff85d42f93e06943f1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Portable libraries for execution related APIs."""
import collections
import itertools
import re
from typing import Dict, Iterable, List, Mapping, Optional, Sequence, Set, Tuple
from absl import logging
from tfx import types
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import event_lib
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact_utils
from tfx.utils import proto_utils
from tfx.utils import typing_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
# Custom-property key under which set_execution_result() stores the
# serialized ExecutionResult proto on an execution.
_EXECUTION_RESULT = '__execution_result__'
# Decoration for execution-property keys that carry a value schema:
# '__schema__<property_name>__' (see get_schema_key / is_schema_key).
_PROPERTY_SCHEMA_PREFIX = '__schema__'
_PROPERTY_SCHEMA_SUFFIX = '__'
def is_execution_successful(execution: metadata_store_pb2.Execution) -> bool:
  """Tells whether an execution finished successfully.

  Args:
    execution: An execution message.

  Returns:
    True iff the execution's last known state is COMPLETE or CACHED.
  """
  successful_states = (metadata_store_pb2.Execution.COMPLETE,
                       metadata_store_pb2.Execution.CACHED)
  return execution.last_known_state in successful_states
def is_execution_active(execution: metadata_store_pb2.Execution) -> bool:
  """Tells whether an execution is still active.

  Args:
    execution: An execution message.

  Returns:
    True iff the execution's last known state is NEW or RUNNING.
  """
  active_states = (metadata_store_pb2.Execution.NEW,
                   metadata_store_pb2.Execution.RUNNING)
  return execution.last_known_state in active_states
def is_schema_key(key: str) -> bool:
  """Tells whether `key` names a property schema stored in execution properties.

  Schema keys look like '__schema__<property_name>__' where the middle part
  must be at least empty but the prefix and suffix may not overlap.
  """
  return bool(re.fullmatch(r'^__schema__.*__$', key))
def get_schema_key(key: str) -> str:
  """Returns the execution-property key that stores the schema for `key`."""
  return ''.join((_PROPERTY_SCHEMA_PREFIX, key, _PROPERTY_SCHEMA_SUFFIX))
def sort_executions_newest_to_oldest(
    executions: Iterable[metadata_store_pb2.Execution]
) -> List[metadata_store_pb2.Execution]:
  """Sorts MLMD executions by creation time, newest first.

  Args:
    executions: An iterable of MLMD executions.

  Returns:
    A new list ordered by descending `create_time_since_epoch`.
  """
  def _created_at(execution):
    return execution.create_time_since_epoch

  return sorted(executions, key=_created_at, reverse=True)
def prepare_execution(
    metadata_handler: metadata.Metadata,
    execution_type: metadata_store_pb2.ExecutionType,
    state: metadata_store_pb2.Execution.State,
    exec_properties: Optional[Mapping[str, types.ExecPropertyTypes]] = None,
) -> metadata_store_pb2.Execution:
  """Creates an execution proto based on the information provided.

  The returned execution is a local proto only; it is not written to MLMD
  here (see `put_execution` for that).

  Args:
    metadata_handler: A handler to access MLMD store.
    execution_type: A metadata_pb2.ExecutionType message describing the type of
      the execution.
    state: The state of the execution.
    exec_properties: Execution properties that need to be attached.

  Returns:
    A metadata_store_pb2.Execution message.
  """
  execution = metadata_store_pb2.Execution()
  execution.last_known_state = state
  # Registering the type also resolves its id when it already exists in MLMD.
  execution.type_id = common_utils.register_type_if_not_exist(
      metadata_handler, execution_type).id
  exec_properties = exec_properties or {}
  # For every execution property, put it in execution.properties if its key is
  # in execution type schema. Otherwise, put it in execution.custom_properties.
  for k, v in exec_properties.items():
    value = pipeline_pb2.Value()
    value = data_types_utils.set_parameter_value(value, v)
    if value.HasField('schema'):
      # Stores schema in custom_properties for non-primitive types to allow
      # parsing in later stages. The schema travels under the decorated key
      # produced by get_schema_key(k), next to the value itself.
      data_types_utils.set_metadata_value(
          execution.custom_properties[get_schema_key(k)],
          proto_utils.proto_to_json(value.schema))
    if (execution_type.properties.get(k) ==
        data_types_utils.get_metadata_value_type(v)):
      # Declared in the execution type's schema with a matching value type,
      # so it belongs in the typed `properties` map.
      execution.properties[k].CopyFrom(value.field_value)
    else:
      execution.custom_properties[k].CopyFrom(value.field_value)
  logging.debug('Prepared EXECUTION:\n %s', execution)
  return execution
def _create_artifact_and_event_pairs(
    metadata_handler: metadata.Metadata,
    artifact_dict: typing_utils.ArtifactMultiMap,
    event_type: metadata_store_pb2.Event.Type,
) -> List[Tuple[metadata_store_pb2.Artifact, metadata_store_pb2.Event]]:
  """Creates a list of [Artifact, Event] tuples.

  The result of this function will be used in a MLMD put_execution() call.

  Args:
    metadata_handler: A handler to access MLMD store.
    artifact_dict: The source of artifacts to work on. For each artifact in the
      dict, creates a tuple for that. Note that all artifacts of the same key in
      the artifact_dict are expected to share the same artifact type.
    event_type: The event type of the event to be attached to the artifact

  Returns:
    A list of [Artifact, Event] tuples
  """
  result = []
  for key, artifact_list in artifact_dict.items():
    artifact_type = None
    for index, artifact in enumerate(artifact_list):
      # TODO(b/153904840): If artifact id is present, skip putting the artifact
      # into the pair when MLMD API is ready.
      event = event_lib.generate_event(
          event_type=event_type, key=key, index=index)
      if artifact_type is None:
        # Register (or fetch) the type once per key; subsequent artifacts
        # under the same key reuse the registered type instead of issuing
        # another MLMD round-trip (the original re-registered every time).
        artifact_type = common_utils.register_type_if_not_exist(
            metadata_handler, artifact.artifact_type)
      else:
        assert artifact_type.name == artifact.artifact_type.name, (
            'Artifacts under the same key should share the same artifact type.')
      artifact.set_mlmd_artifact_type(artifact_type)
      result.append((artifact.mlmd_artifact, event))
  return result
def put_execution(
    metadata_handler: metadata.Metadata,
    execution: metadata_store_pb2.Execution,
    contexts: Sequence[metadata_store_pb2.Context],
    input_artifacts: Optional[typing_utils.ArtifactMultiMap] = None,
    output_artifacts: Optional[typing_utils.ArtifactMultiMap] = None,
    input_event_type: metadata_store_pb2.Event.Type = metadata_store_pb2.Event
    .INPUT,
    output_event_type: metadata_store_pb2.Event.Type = metadata_store_pb2.Event
    .OUTPUT
) -> metadata_store_pb2.Execution:
  """Writes an execution-centric subgraph to MLMD.

  This function mainly leverages metadata.put_execution() method to write the
  execution centric subgraph to MLMD.

  Args:
    metadata_handler: A handler to access MLMD.
    execution: The execution to be written to MLMD.
    contexts: MLMD contexts to associated with the execution.
    input_artifacts: Input artifacts of the execution. Each artifact will be
      linked with the execution through an event with type input_event_type.
      Each artifact will also be linked with every context in the `contexts`
      argument.
    output_artifacts: Output artifacts of the execution. Each artifact will be
      linked with the execution through an event with type output_event_type.
      Each artifact will also be linked with every context in the `contexts`
      argument.
    input_event_type: The type of the input event, default to be INPUT.
    output_event_type: The type of the output event, default to be OUTPUT.

  Returns:
    An MLMD execution that is written to MLMD, with id populated.
  """
  # Inputs first, outputs second; put_execution links each (artifact, event)
  # pair to the execution in this order.
  artifact_and_events = []
  if input_artifacts:
    artifact_and_events.extend(
        _create_artifact_and_event_pairs(
            metadata_handler=metadata_handler,
            artifact_dict=input_artifacts,
            event_type=input_event_type))
  if output_artifacts:
    artifact_and_events.extend(
        _create_artifact_and_event_pairs(
            metadata_handler=metadata_handler,
            artifact_dict=output_artifacts,
            event_type=output_event_type))
  # Single MLMD call writes execution, artifacts, events and context
  # associations transactionally and returns the generated ids.
  execution_id, artifact_ids, contexts_ids = (
      metadata_handler.store.put_execution(
          execution=execution,
          artifact_and_events=artifact_and_events,
          contexts=contexts,
          reuse_context_if_already_exist=True))
  # Backfill the MLMD-assigned ids into the caller's protos so callers see
  # fully-populated messages; returned id lists are parallel to the inputs.
  execution.id = execution_id
  for artifact_and_event, a_id in zip(artifact_and_events, artifact_ids):
    artifact, _ = artifact_and_event
    artifact.id = a_id
  for context, c_id in zip(contexts, contexts_ids):
    context.id = c_id
  return execution
def get_executions_associated_with_all_contexts(
    metadata_handler: metadata.Metadata,
    contexts: Iterable[metadata_store_pb2.Context]
) -> List[metadata_store_pb2.Execution]:
  """Returns executions associated with every one of the given contexts.

  Args:
    metadata_handler: A handler to access MLMD.
    contexts: MLMD contexts for which to fetch associated executions.

  Returns:
    Executions associated with all given contexts; empty when `contexts` is
    empty or the intersection is empty.
  """
  # Intersect execution ids across contexts, keyed by execution id.
  common = None
  for context in contexts:
    candidates = metadata_handler.store.get_executions_by_context(context.id)
    if common is None:
      common = {e.id: e for e in candidates}
    else:
      common = {e.id: e for e in candidates if e.id in common}
  return list(common.values()) if common else []
def get_artifact_ids_by_event_type_for_execution_id(
    metadata_handler: metadata.Metadata,
    execution_id: int) -> Dict['metadata_store_pb2.Event.Type', Set[int]]:
  """Groups the artifact ids of one execution's events by event type.

  Args:
    metadata_handler: A handler to access MLMD.
    execution_id: Id of the execution for which to get artifact ids.

  Returns:
    A `dict` mapping event type to `set` of artifact ids.
  """
  grouped_ids = collections.defaultdict(set)
  for event in metadata_handler.store.get_events_by_execution_ids(
      [execution_id]):
    grouped_ids[event.type].add(event.artifact_id)
  return grouped_ids
def get_artifacts_dict(
    metadata_handler: metadata.Metadata, execution_id: int,
    event_type: 'metadata_store_pb2.Event.Type'
) -> typing_utils.ArtifactMultiDict:
  """Returns a map from key to an ordered list of artifacts for the given execution id.

  The dict is constructed purely from information stored in MLMD for the
  execution given by `execution_id`. The "key" is the tag associated with the
  `InputSpec` or `OutputSpec` in the pipeline IR.

  Args:
    metadata_handler: A handler to access MLMD.
    execution_id: Id of the execution for which to get artifacts.
    event_type: Event type to filter by.

  Returns:
    A dict mapping key to an ordered list of artifacts.

  Raises:
    ValueError: If the events are badly formed and correct ordering of
      artifacts cannot be determined or if all the artifacts could not be
      fetched from MLMD.
  """
  events = metadata_handler.store.get_events_by_execution_ids([execution_id])
  # Create a map from "key" to list of (index, artifact_id)s.
  indexed_artifact_ids_dict = collections.defaultdict(list)
  for event in events:
    if event.type != event_type:
      continue
    key, index = event_lib.get_artifact_path(event)
    artifact_id = event.artifact_id
    indexed_artifact_ids_dict[key].append((index, artifact_id))
  # Create a map from "key" to ordered list of artifact ids.
  artifact_ids_dict = {}
  for key, indexed_artifact_ids in indexed_artifact_ids_dict.items():
    ordered_artifact_ids = sorted(indexed_artifact_ids, key=lambda x: x[0])
    # There shouldn't be any missing or duplicate indices.
    indices = [idx for idx, _ in ordered_artifact_ids]
    if indices != list(range(0, len(indices))):
      raise ValueError(
          f'Cannot construct artifact ids dict due to missing or duplicate '
          f'indices: {indexed_artifact_ids_dict}')
    artifact_ids_dict[key] = [aid for _, aid in ordered_artifact_ids]
  # Fetch all the relevant artifacts.
  all_artifact_ids = list(itertools.chain(*artifact_ids_dict.values()))
  mlmd_artifacts = metadata_handler.store.get_artifacts_by_id(all_artifact_ids)
  if len(all_artifact_ids) != len(mlmd_artifacts):
    # Fix: artifact ids are ints; str.join requires strings, so the original
    # `', '.join(all_artifact_ids)` raised TypeError and masked this error.
    raise ValueError('Could not find all mlmd artifacts for ids: {}'.format(
        ', '.join(str(aid) for aid in all_artifact_ids)))
  # Fetch artifact types and create a map keyed by artifact type id.
  artifact_type_ids = set(a.type_id for a in mlmd_artifacts)
  artifact_types = metadata_handler.store.get_artifact_types_by_id(
      artifact_type_ids)
  artifact_types_by_id = {a.id: a for a in artifact_types}
  # Create a map from artifact id to `types.Artifact` instances.
  artifacts_by_id = {
      a.id: artifact_utils.deserialize_artifact(artifact_types_by_id[a.type_id],
                                                a) for a in mlmd_artifacts
  }
  # Create a map from "key" to ordered list of `types.Artifact` to be returned.
  # The ordering of artifacts is in accordance with their "index" derived from
  # the events above.
  result = collections.defaultdict(list)
  for key, artifact_ids in artifact_ids_dict.items():
    for artifact_id in artifact_ids:
      result[key].append(artifacts_by_id[artifact_id])
  return result
def set_execution_result(execution_result: execution_result_pb2.ExecutionResult,
                         execution: metadata_store_pb2.Execution):
  """Stores `execution_result` as a custom property of `execution`.

  Args:
    execution_result: The result of execution, typically produced by an
      executor.
    execution: The execution proto to annotate.
  """
  serialized_result = json_format.MessageToJson(execution_result)
  execution.custom_properties[_EXECUTION_RESULT].string_value = (
      serialized_result)
| 38.842818 | 93 | 0.748901 |
dba37b157e26a9173b655af3a2974754b0f6c757 | 19,978 | py | Python | appenv.py | gocept/batou | 4d239996f464c406cde82c48155e5b8273a9063d | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | appenv.py | gocept/batou | 4d239996f464c406cde82c48155e5b8273a9063d | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | appenv.py | gocept/batou | 4d239996f464c406cde82c48155e5b8273a9063d | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | #!/usr/bin/env python3
# appenv - a single file 'application in venv bootstrapping and updating
# mechanism for python-based (CLI) applications
# Assumptions:
#
# - the appenv file is placed in a repo with the name of the application
# - the name of the application/file is an entrypoint XXX
# - python3.X+ with ensurepip
# - a requirements.txt file next to the appenv file
# TODO
#
# - provide a `clone` meta command to create a new project based on this one
# maybe use an entry point to allow further initialisation of the clone.
import argparse
import glob
import hashlib
import http.client
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
import venv
def cmd(c, merge_stderr=True, quiet=False):
    """Run shell command *c* and return its output as bytes.

    merge_stderr: when True, interleave stderr into the returned output.
    quiet: currently unused; kept for interface compatibility.

    Raises ValueError carrying the decoded command output when the command
    exits non-zero.
    """
    # TODO revisit the cmd() architecture w/ python 3
    # XXX better IO management for interactive output and seeing original
    # errors and output at appropriate places ...
    try:
        kwargs = {"shell": True}
        if merge_stderr:
            kwargs["stderr"] = subprocess.STDOUT
        return subprocess.check_output([c], **kwargs)
    except subprocess.CalledProcessError as e:
        print("{} returned with exit code {}".format(c, e.returncode))
        # errors="replace" so non-ASCII command output cannot raise a
        # UnicodeDecodeError here and mask the original failure.
        output = e.output.decode("ascii", errors="replace")
        print(output)
        raise ValueError(output)
def get(host, path, f):
    """Download https://<host><path> and stream the body into file object *f*."""
    connection = http.client.HTTPSConnection(host)
    connection.request("GET", path)
    response = connection.getresponse()
    assert response.status == 200, (response.status, host, path,
                                    response.read()[:100])
    # Stream in 16 KiB chunks to keep memory bounded.
    while True:
        chunk = response.read(16 * 1024)
        if not chunk:
            break
        f.write(chunk)
    connection.close()
def ensure_venv(target):
    """Create (or repair) a virtualenv with a working pip at *target*.

    Returns early if the venv already looks usable (bin/pip3 exists);
    otherwise deletes any partial venv and rebuilds it, working around
    Debian installations that ship without ensurepip/distutils.
    """
    if os.path.exists(os.path.join(target, "bin", "pip3")):
        # XXX Support probing the target whether it works properly and rebuild
        # if necessary
        return
    if os.path.exists(target):
        # NOTE(review): the trailing ')' below is inside the printed string —
        # looks like a typo in the message, not in the logic.
        print("Deleting unclean target)")
        cmd("rm -rf {target}".format(target=target))
    version = sys.version.split()[0]
    python_maj_min = ".".join(str(x) for x in sys.version_info[:2])
    print("Creating venv ...")
    venv.create(target, with_pip=False)
    try:
        # This is trying to detect whether we're on a proper Python stdlib
        # or on a broken Debian. See various StackOverflow questions about
        # this.
        import distutils.util  # noqa: F401 imported but unused
        import ensurepip  # noqa: F401 imported but unused
    except ImportError:
        # Okay, lets repair this, if we can. May need privilege escalation
        # at some point.
        # We could do: apt-get -y -q install python3-distutils python3-venv
        # on some systems but it requires root and is specific to Debian.
        # I decided to go a more sledge hammer route.
        # XXX we can speed this up by storing this in ~/.appenv/overlay instead
        # of doing the download for every venv we manage
        print("Activating broken distutils/ensurepip stdlib workaround ...")
        tmp_base = tempfile.mkdtemp()
        try:
            # Fetch the matching CPython source tarball and copy the missing
            # stdlib modules into the venv's site-packages.
            download = os.path.join(tmp_base, "download.tar.gz")
            with open(download, mode="wb") as f:
                get("www.python.org",
                    "/ftp/python/{v}/Python-{v}.tgz".format(v=version), f)
            cmd("tar xf {} -C {}".format(download, tmp_base))
            assert os.path.exists(
                os.path.join(tmp_base, "Python-{}".format(version)))
            for module in ["ensurepip", "distutils"]:
                print(module)
                shutil.copytree(
                    os.path.join(tmp_base, "Python-{}".format(version), "Lib",
                                 module),
                    os.path.join(target, "lib",
                                 "python{}.{}".format(*sys.version_info[:2]),
                                 "site-packages", module))
            # (always) prepend the site packages so we can actually have a
            # fixed distutils installation.
            site_packages = os.path.abspath(
                os.path.join(target, "lib", "python" + python_maj_min,
                             "site-packages"))
            with open(os.path.join(site_packages, "batou.pth"), "w") as f:
                f.write("import sys; sys.path.insert(0, '{}')\n".format(
                    site_packages))
        finally:
            shutil.rmtree(tmp_base)
    print("Ensuring pip ...")
    cmd("{target}/bin/python -m ensurepip --default-pip".format(target=target))
    cmd("{target}/bin/python -m pip install --upgrade pip".format(
        target=target))
def ensure_minimal_python():
    """Re-exec under the OLDEST preferred Python version, if configured.

    Used before updating the lockfile so that requirements are resolved with
    the minimum Python version the project supports. Exits with status 66
    when the minimal preferred interpreter is not available. Does nothing
    (beyond printing a hint) when no preference is declared.
    """
    current_python = os.path.realpath(sys.executable)
    preferences = None
    if os.path.exists('requirements.txt'):
        with open('requirements.txt') as f:
            for line in f:
                # Expected format:
                # # appenv-python-preference: 3.1,3.9,3.4
                if not line.startswith("# appenv-python-preference: "):
                    continue
                preferences = line.split(':')[1]
                preferences = [x.strip() for x in preferences.split(',')]
                preferences = list(filter(None, preferences))
                break
    if not preferences:
        # We have no preferences defined, use the current python.
        # NOTE(review): "with with" is a typo inside the printed message.
        print("Update lockfile with with {}.".format(current_python))
        print("If you want to use a different version, set it via")
        print("  `# appenv-python-preference:` in requirements.txt.")
        return
    # Sort numerically (e.g. 3.4 < 3.10) and consider only the oldest entry.
    preferences.sort(key=lambda s: [int(u) for u in s.split('.')])
    for version in preferences[0:1]:
        python = shutil.which("python{}".format(version))
        if not python:
            # not a usable python
            continue
        python = os.path.realpath(python)
        if python == current_python:
            # found a preferred python and we're already running as it
            break
        # Try whether this Python works
        try:
            subprocess.check_call([python, "-c", "print(1)"],
                                  stdout=subprocess.DEVNULL,
                                  stderr=subprocess.DEVNULL)
        except subprocess.CalledProcessError:
            continue
        # Replace the current process with the preferred interpreter;
        # APPENV_BEST_PYTHON prevents a re-exec loop (see ensure_best_python).
        argv = [os.path.basename(python)] + sys.argv
        os.environ["APPENV_BEST_PYTHON"] = python
        os.execv(python, argv)
    else:
        # for/else: no usable minimal interpreter was found (and no break).
        print("Could not find the minimal preferred Python version.")
        print("To ensure a working requirements.lock on all Python versions")
        print("make Python {} available on this system.".format(
            preferences[0]))
        sys.exit(66)
def ensure_best_python(base):
    """Re-exec under the NEWEST preferred (or available) Python version.

    Changes into *base* first so requirements.txt is found. Preference order
    comes from the `# appenv-python-preference:` marker in requirements.txt,
    falling back to newest-first 3.19..3.4. Exits with status 65 when no
    preferred interpreter is usable.
    """
    os.chdir(base)
    if "APPENV_BEST_PYTHON" in os.environ:
        # Don't do this twice to avoid being surprised with
        # accidental infinite loops.
        return
    import shutil  # NOTE(review): redundant — shutil is imported at module level.
    # use newest Python available if nothing else is requested
    preferences = ['3.{}'.format(x) for x in reversed(range(4, 20))]
    if os.path.exists('requirements.txt'):
        with open('requirements.txt') as f:
            for line in f:
                # Expected format:
                # # appenv-python-preference: 3.1,3.9,3.4
                if not line.startswith("# appenv-python-preference: "):
                    continue
                preferences = line.split(':')[1]
                preferences = [x.strip() for x in preferences.split(',')]
                preferences = list(filter(None, preferences))
                break
    current_python = os.path.realpath(sys.executable)
    for version in preferences:
        python = shutil.which("python{}".format(version))
        if not python:
            # not a usable python
            continue
        python = os.path.realpath(python)
        if python == current_python:
            # found a preferred python and we're already running as it
            break
        # Try whether this Python works
        try:
            subprocess.check_call([python, "-c", "print(1)"],
                                  stdout=subprocess.DEVNULL,
                                  stderr=subprocess.DEVNULL)
        except subprocess.CalledProcessError:
            continue
        # Replace the current process; the env marker above stops recursion.
        argv = [os.path.basename(python)] + sys.argv
        os.environ["APPENV_BEST_PYTHON"] = python
        os.execv(python, argv)
    else:
        # for/else: no usable preferred interpreter was found (and no break).
        print("Could not find a preferred Python version.")
        print("Preferences: {}".format(', '.join(preferences)))
        sys.exit(65)
class AppEnv(object):
    """Manages hash-addressed virtualenvs for a requirements-locked app.

    Each environment lives under ``<base>/.appenv/<hash>`` where the hash
    covers the interpreter path, requirements.lock and this script itself,
    so any change results in a fresh environment.
    """

    base = None  # The directory where we add the environments. Co-located
                 # with the application script - not necessarily the appenv
                 # script so we can link to an appenv script from multiple
                 # locations.
    env_dir = None  # The current specific venv that we're working with.
    appenv_dir = None  # The directory where to place specific venvs.

    def __init__(self, base):
        self.base = base
        # This used to be computed based on the application name but
        # as we can have multiple application names now, we always put the
        # environments into '.appenv'. They're hashed anyway.
        self.appenv_dir = os.path.join(self.base, '.appenv')
        # Allow simplifying a lot of code by assuming that all the
        # meta-operations happen in the base directory. Store the original
        # working directory here so we switch back at the appropriate time.
        self.original_cwd = os.path.abspath(os.curdir)

    def meta(self):
        """Entry point when invoked as `appenv`: dispatch a meta command."""
        # Parse the appenv arguments
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers()
        p = subparsers.add_parser(
            "update-lockfile", help="Update the lock file.")
        p.set_defaults(func=self.update_lockfile)
        p = subparsers.add_parser("init", help="Create a new appenv project.")
        p.set_defaults(func=self.init)
        p = subparsers.add_parser("reset", help="Reset the environment.")
        p.set_defaults(func=self.reset)
        p = subparsers.add_parser(
            "python", help="Spawn the embedded Python interpreter REPL")
        p.set_defaults(func=self.python)
        p = subparsers.add_parser(
            "run",
            help="Run a script from the bin/ directory of the virtual env.")
        p.add_argument("script", help="Name of the script to run.")
        p.set_defaults(func=self.run_script)
        args, remaining = parser.parse_known_args()
        if not hasattr(args, 'func'):
            parser.print_usage()
        else:
            args.func(args, remaining)

    def run(self, command, argv):
        """Ensure the venv exists, then exec `<env>/bin/<command>` with argv.

        Does not return: the current process is replaced via os.execv.
        """
        self._prepare()
        cmd = os.path.join(self.env_dir, 'bin', command)
        argv = [cmd] + argv
        os.environ['APPENV_BASEDIR'] = self.base
        # Run the target from where the user originally invoked us.
        os.chdir(self.original_cwd)
        os.execv(cmd, argv)

    def _assert_requirements_lock(self):
        """Exit (status 67) unless requirements.lock exists and matches.

        The lockfile embeds a hash of requirements.txt; a mismatch means the
        lockfile is stale and must be regenerated.
        """
        if not os.path.exists('requirements.lock'):
            print('No requirements.lock found. Generate it using'
                  ' ./appenv update-lockfile')
            sys.exit(67)
        with open('requirements.lock') as f:
            locked_hash = None
            for line in f:
                if line.startswith("# appenv-requirements-hash: "):
                    locked_hash = line.split(':')[1].strip()
                    break
        if locked_hash != self._hash_requirements():
            print('requirements.txt seems out of date (hash mismatch). '
                  'Regenerate using ./appenv update-lockfile')
            sys.exit(67)

    def _hash_requirements(self):
        """Return the sha256 hex digest of requirements.txt's raw bytes."""
        with open('requirements.txt', 'rb') as f:
            hash_content = f.read()
        return hashlib.new("sha256", hash_content).hexdigest()

    def _prepare(self):
        """Resolve (building if necessary) the venv for the current lockfile.

        Sets ``self.env_dir`` to a directory named by a short hash of the
        interpreter, requirements.lock and this script. Expired sibling
        environments are removed; a half-built environment (no
        ``appenv.ready`` marker) is rebuilt.
        """
        # copy used requirements.txt into the target directory so we can use
        # that to check later
        # - when to clean up old versions? keep like one or two old revisions?
        # - enumerate the revisions and just copy the requirements.txt, check
        #   for ones that are clean or rebuild if necessary
        os.chdir(self.base)
        self._assert_requirements_lock()
        hash_content = []
        requirements = open("requirements.lock", "rb").read()
        hash_content.append(os.fsencode(os.path.realpath(sys.executable)))
        hash_content.append(requirements)
        hash_content.append(open(__file__, "rb").read())
        env_hash = hashlib.new("sha256",
                               b"".join(hash_content)).hexdigest()[:8]
        env_dir = os.path.join(self.appenv_dir, env_hash)
        # Keep only the current env (and the scratch "unclean" dir).
        whitelist = set([env_dir, os.path.join(self.appenv_dir, "unclean")])
        for path in glob.glob(
                "{appenv_dir}/*".format(appenv_dir=self.appenv_dir)):
            if path not in whitelist:
                print("Removing expired path: {path} ...".format(path=path))
                if not os.path.isdir(path):
                    os.unlink(path)
                else:
                    shutil.rmtree(path)
        if os.path.exists(env_dir):
            # check whether the existing environment is OK, it might be
            # nice to rebuild in a separate place if necessary to avoid
            # interruptions to running services, but that isn't what we're
            # using it for at the moment
            try:
                if not os.path.exists(
                        "{env_dir}/appenv.ready".format(env_dir=env_dir)):
                    raise Exception()
            except Exception:
                print("Existing envdir not consistent, deleting")
                cmd("rm -rf {env_dir}".format(env_dir=env_dir))
        if not os.path.exists(env_dir):
            ensure_venv(env_dir)
            with open(os.path.join(env_dir, "requirements.lock"), "wb") as f:
                f.write(requirements)
            print("Installing ...")
            # --no-deps: the lockfile is expected to be complete.
            cmd("{env_dir}/bin/python -m pip install --no-deps -r"
                " {env_dir}/requirements.lock".format(env_dir=env_dir))
            cmd("{env_dir}/bin/python -m pip check".format(env_dir=env_dir))
            # Marker file signals a fully-built environment (see above).
            with open(os.path.join(env_dir, "appenv.ready"), "w") as f:
                f.write("Ready or not, here I come, you can't hide\n")
        self.env_dir = env_dir

    def init(self, args=None, remaining=None):
        """Interactively scaffold a new appenv project.

        Copies this script into the target directory as ``appenv``, symlinks
        the chosen command name to it and writes a requirements.txt with the
        main dependency.
        """
        print("Let's create a new appenv project.\n")
        command = None
        while not command:
            command = input("What should the command be named? ").strip()
        dependency = input(
            "What is the main dependency as found on PyPI? [{}] ".format(
                command)).strip()
        if not dependency:
            dependency = command
        default_target = os.path.abspath(
            os.path.join(self.original_cwd, command))
        target = input("Where should we create this? [{}] ".format(
            default_target)).strip()
        if target:
            target = os.path.join(self.original_cwd, target)
        else:
            target = default_target
        target = os.path.abspath(target)
        if not os.path.exists(target):
            os.makedirs(target)
        print()
        print("Creating appenv setup in {} ...".format(target))
        # Clone this very script into the new project.
        with open(__file__, "rb") as bootstrap_file:
            bootstrap_data = bootstrap_file.read()
        os.chdir(target)
        with open('appenv', "wb") as new_appenv:
            new_appenv.write(bootstrap_data)
        os.chmod('appenv', 0o755)
        if os.path.exists(command):
            os.unlink(command)
        os.symlink('appenv', command)
        with open("requirements.txt", "w") as requirements_txt:
            requirements_txt.write(dependency + "\n")
        print()
        print("Done. You can now `cd {}` and call"
              " `./{}` to bootstrap and run it.".format(
                  os.path.relpath(target, self.original_cwd), command))

    def python(self, args, remaining):
        """Meta command: run the venv's Python interpreter."""
        self.run('python', remaining)

    def run_script(self, args, remaining):
        """Meta command: run an arbitrary script from the venv's bin/."""
        self.run(args.script, remaining)

    def reset(self, args=None, remaining=None):
        """Delete every managed environment under .appenv."""
        print(
            "Resetting ALL application environments in {appenvdir} ...".format(
                appenvdir=self.appenv_dir))
        cmd("rm -rf {appenvdir}".format(appenvdir=self.appenv_dir))

    def update_lockfile(self, args=None, remaining=None):
        """Regenerate requirements.lock from requirements.txt.

        Installs requirements.txt into a throw-away venv, freezes the
        result, merges it with URL/-e pins from requirements.txt and writes
        requirements.lock prefixed with the requirements hash.
        """
        ensure_minimal_python()
        os.chdir(self.base)
        print("Updating lockfile")
        tmpdir = os.path.join(self.appenv_dir, "updatelock")
        if os.path.exists(tmpdir):
            cmd("rm -rf {tmpdir}".format(tmpdir=tmpdir))
        ensure_venv(tmpdir)
        print("Installing packages ...")
        cmd("{tmpdir}/bin/python -m pip install -r requirements.txt".format(
            tmpdir=tmpdir))
        # Hack because we might not have pkg_resources, but the venv should
        tmp_paths = cmd(
            "{tmpdir}/bin/python -c"
            " 'import sys; print(\"\\n\".join(sys.path))'".format(
                tmpdir=tmpdir),
            merge_stderr=False).decode(sys.getfilesystemencoding())
        for line in tmp_paths.splitlines():
            line = line.strip()
            if not line:
                continue
            sys.path.append(line)
        import pkg_resources
        extra_specs = []
        result = cmd(
            "{tmpdir}/bin/python -m pip freeze".format(tmpdir=tmpdir),
            merge_stderr=False).decode('ascii')
        pinned_versions = {}
        for line in result.splitlines():
            if line.strip().startswith('-e '):
                # We'd like to pick up the original -e statement here.
                continue
            spec = list(pkg_resources.parse_requirements(line))[0]
            pinned_versions[spec.project_name] = spec
        requested_versions = {}
        with open('requirements.txt') as f:
            for line in f.readlines():
                if line.strip().startswith('-e '):
                    extra_specs.append(line.strip())
                    continue
                # filter comments, in particular # appenv-python-preferences
                if line.strip().startswith('#'):
                    continue
                spec = list(pkg_resources.parse_requirements(line))[0]
                requested_versions[spec.project_name] = spec
        final_versions = {}
        for spec in requested_versions.values():
            # Pick versions with URLs to ensure we don't get the screwed up
            # results from pip freeze.
            if spec.url:
                final_versions[spec.project_name] = spec
        for spec in pinned_versions.values():
            # Ignore versions we already picked
            if spec.project_name in final_versions:
                continue
            final_versions[spec.project_name] = spec
        lines = [str(spec) for spec in final_versions.values()]
        lines.extend(extra_specs)
        lines.sort()
        with open(os.path.join(self.base, "requirements.lock"), "w") as f:
            f.write('# appenv-requirements-hash: {}\n'.format(
                self._hash_requirements()))
            f.write('\n'.join(lines))
            f.write('\n')
        cmd("rm -rf {tmpdir}".format(tmpdir=tmpdir))
def main():
    """Entry point: run the appenv meta CLI or the wrapped application.

    Behavior depends on the file name this script was invoked as: the
    name `appenv` gets the management CLI, any other name is treated as
    the application to bootstrap and run.
    """
    base = os.path.dirname(__file__)
    ensure_best_python(base)
    # clear PYTHONPATH variable to get a defined environment
    # XXX this is a bit of history. not sure whether its still needed. keeping
    # it for good measure
    if "PYTHONPATH" in os.environ:
        del os.environ["PYTHONPATH"]

    # Determine whether we're being called as appenv or as an application name
    application_name = os.path.splitext(os.path.basename(__file__))[0]

    appenv = AppEnv(base)
    try:
        if application_name == 'appenv':
            appenv.meta()
        else:
            appenv.run(application_name, sys.argv[1:])
    finally:
        # Always restore the caller's working directory.
        os.chdir(appenv.original_cwd)
if __name__ == "__main__":
main()
| 38.717054 | 79 | 0.588147 |
4ce52645b34e30690290d320f8c60c591374fe8d | 749 | py | Python | my_site/my_site/urls.py | bobruk76/D3 | b720f97e70b257b447cec2a6526b7dce01788fed | [
"MIT"
] | null | null | null | my_site/my_site/urls.py | bobruk76/D3 | b720f97e70b257b447cec2a6526b7dce01788fed | [
"MIT"
] | 5 | 2021-03-30T13:41:55.000Z | 2021-09-22T19:15:53.000Z | my_site/my_site/urls.py | bobruk76/D3 | b720f97e70b257b447cec2a6526b7dce01788fed | [
"MIT"
] | null | null | null | """my_site URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
    # Only the Django admin is routed here; project apps register no URLs yet.
    path('admin/', admin.site.urls),
]
| 34.045455 | 77 | 0.708945 |
b5bacc890b75efe5cb27c2944c105fcdd8835e33 | 5,115 | py | Python | lib/galaxy/managers/configuration.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | 6 | 2018-11-03T22:43:35.000Z | 2022-02-15T17:51:33.000Z | lib/galaxy/managers/configuration.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | 7 | 2016-12-07T22:19:37.000Z | 2019-01-30T15:04:26.000Z | lib/galaxy/managers/configuration.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | 10 | 2017-04-10T21:40:22.000Z | 2022-02-21T16:50:10.000Z | """
Serializers for Galaxy config file data: ConfigSerializer for all users
and a more expanded set of data for admin in AdminConfigSerializer.
Used by both the API and bootstrapped data.
"""
# TODO: this is a bit of an odd duck. It uses the serializer structure from managers
# but doesn't have a model like them. It might be better in config.py or a
# totally new area, but I'm leaving it in managers for now for class consistency.
from galaxy.web.framework.base import server_starttime
from galaxy.managers import base
import logging
log = logging.getLogger( __name__ )
class ConfigSerializer( base.ModelSerializer ):
    """Configuration (galaxy.ini) settings viewable by all users.

    Unlike most serializers in this package it has no backing model: it
    serializes attributes read directly off the app's config object.
    """

    def __init__( self, app ):
        super( ConfigSerializer, self ).__init__( app )

        # Expose a single 'all' view containing every registered serializer.
        self.default_view = 'all'
        self.add_view( 'all', self.serializers.keys() )

    def default_serializer( self, config, key ):
        # Fallback: read the attribute straight off the config object.
        return getattr( config, key, None )

    def add_serializers( self ):
        def _defaults_to( default ):
            # Build a serializer returning config.<key> or `default`.
            return lambda i, k, **c: getattr( i, k, default )

        self.serializers = {
            # TODO: this is available from user data, remove
            'is_admin_user'     : lambda *a, **c: False,

            'brand'             : _defaults_to( '' ),
            # TODO: this doesn't seem right
            'logo_url'          : lambda i, k, **c: self.url_for( i.get( k, '/' ) ),
            'logo_src'          : lambda i, k, **c: self.url_for( '/static/images/galaxyIcon_noText.png' ),
            'terms_url'         : _defaults_to( '' ),

            # TODO: don't hardcode here - hardcode defaults once in config.py
            'wiki_url'          : _defaults_to( "http://galaxyproject.org/" ),
            'search_url'        : _defaults_to( "http://galaxyproject.org/search/usegalaxy/" ),
            'mailing_lists'     : _defaults_to( "https://wiki.galaxyproject.org/MailingLists" ),
            'screencasts_url'   : _defaults_to( "https://vimeo.com/galaxyproject" ),
            'citation_url'      : _defaults_to( "https://wiki.galaxyproject.org/CitingGalaxy" ),
            'support_url'       : _defaults_to( "https://wiki.galaxyproject.org/Support" ),
            'lims_doc_url'      : _defaults_to( "https://usegalaxy.org/u/rkchak/p/sts" ),
            'biostar_url'       : _defaults_to( '' ),
            'biostar_url_redirect' : lambda *a, **c: self.url_for( controller='biostar', action='biostar_redirect',
                                                                   qualified=True ),

            'communication_server_host' : _defaults_to( None ),
            'communication_server_port' : _defaults_to( None ),
            'persistent_communication_rooms' : _defaults_to( None ),
            'allow_user_creation'   : _defaults_to( False ),
            'use_remote_user'       : _defaults_to( None ),
            'remote_user_logout_href' : _defaults_to( '' ),
            'datatypes_disable_auto' : _defaults_to( False ),
            'allow_user_dataset_purge' : _defaults_to( False ),
            'ga_code'               : _defaults_to( None ),
            'enable_unique_workflow_defaults' : _defaults_to( False ),

            # TODO: is there no 'correct' way to get an api url? controller='api', action='tools' is a hack
            # at any rate: the following works with path_prefix but is still brittle
            # TODO: change this to (more generic) upload_path and incorporate config.nginx_upload_path into building it
            'nginx_upload_path'     : lambda i, k, **c: getattr( i, k, False ) or self.url_for( '/api/tools' ),
            'ftp_upload_dir'        : _defaults_to( None ),
            'ftp_upload_site'       : _defaults_to( None ),
            'version_major'         : _defaults_to( None ),
            'require_login'         : _defaults_to( None ),
            'inactivity_box_content' : _defaults_to( None ),
            'message_box_content'   : _defaults_to( None ),
            'message_box_visible'   : _defaults_to( False ),
            'message_box_class'     : _defaults_to( 'info' ),
            # NOTE(review): key name has a triple-t typo ('startttime');
            # kept as-is because API consumers may depend on it — confirm
            # before renaming.
            'server_startttime'     : lambda i, k, **c: server_starttime,
        }
class AdminConfigSerializer( ConfigSerializer ):
    """Configuration attributes viewable only by admin users.

    Extends the base serializer map with admin-only settings and
    overrides the `is_admin_user` flag to True.
    """

    def add_serializers( self ):
        super( AdminConfigSerializer, self ).add_serializers()

        def _defaults_to( default ):
            def serialize( item, key, **context ):
                return getattr( item, key, default )
            return serialize

        # TODO: this is available from user serialization: remove
        self.serializers[ 'is_admin_user' ] = lambda *a: True

        # Admin-only config attributes, each read with a fallback default.
        for key, default in (
                ( 'library_import_dir', None ),
                ( 'user_library_import_dir', None ),
                ( 'allow_library_path_paste', False ),
                ( 'allow_user_deletion', False ) ):
            self.serializers[ key ] = _defaults_to( default )
| 50.643564 | 120 | 0.579863 |
d168308ca3b3756d79f49c1e0daf99e7f01a6e5c | 2,744 | py | Python | test/unitTests/nodeTests/testSignalNode.py | pieter-hendriks/STL-monitoring | 114b73b1f4b0687b11b8842b3c4a1c8af7b0d9df | [
"MIT"
] | null | null | null | test/unitTests/nodeTests/testSignalNode.py | pieter-hendriks/STL-monitoring | 114b73b1f4b0687b11b8842b3c4a1c8af7b0d9df | [
"MIT"
] | null | null | null | test/unitTests/nodeTests/testSignalNode.py | pieter-hendriks/STL-monitoring | 114b73b1f4b0687b11b8842b3c4a1c8af7b0d9df | [
"MIT"
] | null | null | null | import unittest
from stl.signals import SignalList, Signal, BooleanSignal
from .helpers import getCosSignal, getShiftedCosSignal
from stl.tree import SignalNode
# SignalNode is a node on the tree returning a given signal name from the signal list.
# SignalNode is a node on the tree returning a given signal name from the signal list.
class SignalNodeTest(unittest.TestCase):
    """Unit tests for SignalNode lookups against a SignalList."""

    def setUp(self):
        pass

    # Renamed from 'signalNotFoundTest': unittest only discovers methods
    # whose names start with 'test', so under the old name this case was
    # silently never executed.
    def testSignalNotFound(self):
        node: SignalNode = SignalNode()
        node.processToken('notfound')
        # In case of signal with specified name not in list, we expect a runtime failure
        # TODO: Determine if more appropriate (non-failing) error handling is desired. This suffices for now, though.
        with self.assertRaisesRegex(RuntimeError, "Signal with name 'notfound' not found."):
            node.booleanValidate(SignalList(), None)
        with self.assertRaisesRegex(RuntimeError, "Signal with name 'notfound' not found."):
            node.quantitativeValidate(SignalList(), None)

    def testEmptySignal(self):
        # Create node to test
        node: SignalNode = SignalNode()
        node.processToken("empty")
        # Test both Boolean and Non-Boolean empty signals.
        for emptySignal in [BooleanSignal("empty"), Signal("empty")]:
            # Boolean validation should return boolean empty signal no matter what input
            self.assertEqual(BooleanSignal("empty"), node.booleanValidate(SignalList([emptySignal]), None))
            # Quant validation should return quant empty signal no matter what input
            self.assertEqual(Signal("empty"), node.quantitativeValidate(SignalList([emptySignal]), None))
        self.assertNotEqual(BooleanSignal("empty"), Signal("empty"))

    def testBooleanSignal(self):
        # Create node to test
        node: SignalNode = SignalNode()
        node.processToken("cos")
        # Create signal to test with
        cosSignal = getCosSignal(10, name='cos', booleanSignal = True)
        # The returned signal must be equal to the signal we created as input
        self.assertEqual(cosSignal, node.booleanValidate(SignalList([cosSignal]), None))
        # Type conversion must applied when necessary
        self.assertEqual(Signal.fromBooleanSignal(cosSignal), node.quantitativeValidate(SignalList([cosSignal]), None))

    def testNonBooleanSignal(self):
        # Create node to test
        node: SignalNode = SignalNode()
        node.processToken("cos")
        # Create signal to test with
        cosSignal = getCosSignal(10, name='cos', booleanSignal = False)
        if cosSignal.getName() != 'cos':
            raise RuntimeError(f"{cosSignal.getName()}")
        # The returned signal must be equal to the signal we created as input
        self.assertEqual(cosSignal, node.quantitativeValidate(SignalList([cosSignal]), None))
        # And booleanized version thereof in case of boolean evaluation
        self.assertEqual(BooleanSignal.fromSignal(cosSignal), node.booleanValidate(SignalList([cosSignal]), None))
if __name__ == "__main__":
unittest.main() | 41.575758 | 113 | 0.761297 |
973f959247022b2c9c322120fcb34b5266a289be | 1,005 | py | Python | saleor/dashboard/category/filters.py | prayjourney/saleor | 0ec214a3deb606a951a268f7f83e659857be4dd7 | [
"BSD-3-Clause"
] | 1 | 2020-10-24T14:25:53.000Z | 2020-10-24T14:25:53.000Z | saleor/dashboard/category/filters.py | prayjourney/saleor | 0ec214a3deb606a951a268f7f83e659857be4dd7 | [
"BSD-3-Clause"
] | 1 | 2022-02-10T08:13:42.000Z | 2022-02-10T08:13:42.000Z | saleor/dashboard/category/filters.py | dotslash227/foodezmn-ecommerce | 4a49053d7dd4386f01d52cb76a8f9bd64470888f | [
"BSD-3-Clause"
] | 1 | 2020-09-29T14:21:31.000Z | 2020-09-29T14:21:31.000Z | from django.utils.translation import pgettext_lazy
from django_filters import CharFilter, OrderingFilter
from ...core.filters import SortedFilterSet
from ...product.models import Category
SORT_BY_FIELDS = {
'name': pgettext_lazy('Category list sorting option', 'name'),
'description': pgettext_lazy(
'Category list sorting option', 'description'),
'is_hidden': pgettext_lazy('Category list sorting option', 'is hidden')}
IS_HIDDEN_CHOICES = (
('1', pgettext_lazy('Is hidden filter choice', 'Hidden')),
('0', pgettext_lazy('Is hidden filter choice', 'Not hidden')))
class CategoryFilter(SortedFilterSet):
    """Dashboard filter set for the Category list view."""
    # Case-insensitive substring match on the category name.
    name = CharFilter(
        label=pgettext_lazy('Category list filter label', 'Name'),
        lookup_expr='icontains')
    # Sorting control; permitted fields and their translated labels come
    # from the module-level SORT_BY_FIELDS mapping.
    sort_by = OrderingFilter(
        label=pgettext_lazy('Category list sorting filter label', 'Sort by'),
        fields=SORT_BY_FIELDS.keys(),
        field_labels=SORT_BY_FIELDS)

    class Meta:
        model = Category
        # No extra auto-generated filters beyond those declared above.
        fields = []
| 33.5 | 77 | 0.697512 |
f2d0913d305c968b2bf671b9e96f61ae7d8d9665 | 1,518 | py | Python | backend/battle/battles/base_stats.py | DeborahAlmeida/pokebattle | c8cf741eaca2372cd3aca02709c65b12e237fe11 | [
"MIT"
] | null | null | null | backend/battle/battles/base_stats.py | DeborahAlmeida/pokebattle | c8cf741eaca2372cd3aca02709c65b12e237fe11 | [
"MIT"
] | 5 | 2021-04-08T17:52:40.000Z | 2021-08-16T13:46:15.000Z | backend/battle/battles/base_stats.py | DeborahAlmeida/pokebattle | c8cf741eaca2372cd3aca02709c65b12e237fe11 | [
"MIT"
] | null | null | null | from pokemon.helpers import get_pokemon_from_api
from battle.models import PokemonTeam
def get_total_point_pokemon(data_pokemons):
    """Return True when the first three pokemons' combined base-stat

    totals fit the team budget checked by ``sum_all_points``.
    Fetches each pokemon's data from the external API first.
    """
    # Indexing 0..2 explicitly (via range) preserves the IndexError a
    # too-short input produces.
    fetched = [get_pokemon_from_api(data_pokemons[i]) for i in range(3)]
    stat_totals = sum_all_pokemons(fetched)
    return sum_all_points(stat_totals)
def sum_all_points(info):
    """Return True if the first three stat totals sum to at most 600.

    `info` is a sequence of per-pokemon base-stat totals (as produced by
    ``sum_all_pokemons``); 600 is the team point budget. The comparison
    already yields a bool, so the former if/else returning True/False
    was redundant.
    """
    return info[0] + info[1] + info[2] <= 600
def sum_all_pokemons(pokemons):
    """Return the base-stat total (attack + defense + hp) for each pokemon.

    Replaces the former append-loop with a comprehension; result order
    matches the input order.
    """
    return [sumValid(pokemon) for pokemon in pokemons]


def sumValid(pokemon):
    """Return attack + defense + hp for one pokemon mapping."""
    return pokemon["attack"] + pokemon["defense"] + pokemon["hp"]
def get_pokemons_team(battle, trainer):
    """Return the trainer's pokemons for a battle, keyed by slot.

    Returns {"pokemon_1": ..., "pokemon_2": ..., "pokemon_3": ...} when
    the trainer has a team in this battle, otherwise False (callers must
    handle the mixed dict/bool return).
    """
    pokemons_team = {
        "pokemon_1": None,
        "pokemon_2": None,
        "pokemon_3": None,
    }
    # select_related avoids one query per pokemon; order_by preserves the
    # slot order chosen when the team was built.
    pkn_teams = PokemonTeam.objects.filter(
        team__battle=battle,
        team__trainer=trainer).select_related('pokemon').order_by('order')
    if pkn_teams:
        # NOTE(review): assumes exactly three PokemonTeam rows exist;
        # fewer would raise IndexError here — TODO confirm upstream
        # validation guarantees a full team.
        pokemons_team["pokemon_1"] = pkn_teams[0].pokemon
        pokemons_team["pokemon_2"] = pkn_teams[1].pokemon
        pokemons_team["pokemon_3"] = pkn_teams[2].pokemon
        return pokemons_team
    else:
        return False
| 27.107143 | 74 | 0.685112 |
461b27e94c0d540e9da66c956bfa168c243f9eb6 | 3,181 | py | Python | homeassistant/components/knx/switch.py | alindeman/home-assistant | b274b10f3874c196f0db8f9cfa5f47eb756d1f8e | [
"Apache-2.0"
] | 4 | 2019-07-03T22:36:57.000Z | 2019-08-10T15:33:25.000Z | homeassistant/components/knx/switch.py | alindeman/home-assistant | b274b10f3874c196f0db8f9cfa5f47eb756d1f8e | [
"Apache-2.0"
] | 7 | 2019-08-23T05:26:02.000Z | 2022-03-11T23:57:18.000Z | homeassistant/components/knx/switch.py | alindeman/home-assistant | b274b10f3874c196f0db8f9cfa5f47eb756d1f8e | [
"Apache-2.0"
] | 3 | 2019-04-28T16:35:45.000Z | 2020-05-28T15:21:59.000Z | """Support for KNX/IP switches."""
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice
from homeassistant.const import CONF_ADDRESS, CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import ATTR_DISCOVER_DEVICES, DATA_KNX
CONF_STATE_ADDRESS = 'state_address'
DEFAULT_NAME = 'KNX Switch'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_STATE_ADDRESS): cv.string,
})
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up switch(es) for KNX platform."""
    # No discovery payload means the platform was configured directly in
    # configuration.yaml; otherwise entities come from xknx.yaml discovery.
    if discovery_info is None:
        async_add_entities_config(hass, config, async_add_entities)
    else:
        async_add_entities_discovery(hass, discovery_info, async_add_entities)
@callback
def async_add_entities_discovery(hass, discovery_info, async_add_entities):
    """Set up switches for KNX platform configured via xknx.yaml."""
    devices = hass.data[DATA_KNX].xknx.devices
    async_add_entities([
        KNXSwitch(devices[device_name])
        for device_name in discovery_info[ATTR_DISCOVER_DEVICES]
    ])
@callback
def async_add_entities_config(hass, config, async_add_entities):
    """Set up switch for KNX platform configured within platform."""
    # Imported lazily so xknx is only required once the platform is set up.
    import xknx
    switch = xknx.devices.Switch(
        hass.data[DATA_KNX].xknx,
        name=config.get(CONF_NAME),
        group_address=config.get(CONF_ADDRESS),
        group_address_state=config.get(CONF_STATE_ADDRESS))
    # Register with xknx so the device participates in bus communication.
    hass.data[DATA_KNX].xknx.devices.add(switch)
    async_add_entities([KNXSwitch(switch)])
class KNXSwitch(SwitchDevice):
    """Representation of a KNX switch.

    Thin wrapper delegating state and commands to an xknx Switch device;
    updates are push-based via the device-updated callback, so polling
    is disabled.
    """

    def __init__(self, device):
        """Initialize of KNX switch."""
        # The underlying xknx Switch device.
        self.device = device

    @callback
    def async_register_callbacks(self):
        """Register callbacks to update hass after device was changed."""
        async def after_update_callback(device):
            """Call after device was updated."""
            await self.async_update_ha_state()
        self.device.register_device_updated_cb(after_update_callback)

    async def async_added_to_hass(self):
        """Store register state change callback."""
        self.async_register_callbacks()

    @property
    def name(self):
        """Return the name of the KNX device."""
        return self.device.name

    @property
    def available(self):
        """Return true if entity is available."""
        # Availability tracks the KNX/IP tunnel connection state.
        return self.hass.data[DATA_KNX].connected

    @property
    def should_poll(self):
        """Return the polling state. Not needed within KNX."""
        return False

    @property
    def is_on(self):
        """Return true if device is on."""
        return self.device.state

    async def async_turn_on(self, **kwargs):
        """Turn the device on."""
        await self.device.set_on()

    async def async_turn_off(self, **kwargs):
        """Turn the device off."""
        await self.device.set_off()
| 32.131313 | 78 | 0.696322 |
f2220076f0a329081039b67b512d1ff8389332ee | 613 | py | Python | apps/home/urls.py | naisyahisa/keagat | d9a02ef57cc48482ee79b31a43a41c03a59e1144 | [
"MIT"
] | null | null | null | apps/home/urls.py | naisyahisa/keagat | d9a02ef57cc48482ee79b31a43a41c03a59e1144 | [
"MIT"
] | null | null | null | apps/home/urls.py | naisyahisa/keagat | d9a02ef57cc48482ee79b31a43a41c03a59e1144 | [
"MIT"
] | 1 | 2022-02-13T06:58:54.000Z | 2022-02-13T06:58:54.000Z | from django.urls import path, re_path
from django.conf import settings
from django.conf.urls.static import static
from apps.home import views
urlpatterns = [
    # The home page
    path('', views.index, name='home'),

    path('inbox/', views.helpdesk_inbox, name='inbox'),

    # Matches any html file
    # NOTE(review): r'^.*\.*' actually matches *any* path (the trailing
    # '\.*' is zero-or-more literal dots), not only .html URLs — confirm
    # this catch-all is intentional before relying on it.
    re_path(r'^.*\.*', views.pages, name='pages'),
]
# + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# print('urlpattern',urlpatterns)
# print('static', static)
# if settings.DEBUG:
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# print(urlpatterns) | 29.190476 | 82 | 0.712887 |
0038f4bbf3dedc9d832daa9824f0ed57fc0c4879 | 32,001 | py | Python | tensorflow/python/kernel_tests/reader_ops_test.py | dongjiewhu/tensorflow | fac9b8830e967bb5dd9adc61f86a9b354c31744d | [
"Apache-2.0"
] | 1 | 2017-06-28T10:56:40.000Z | 2017-06-28T10:56:40.000Z | tensorflow/python/kernel_tests/reader_ops_test.py | LaiPeter/tensorflow | 08ed32dbb9e8f67eec9efce3807b5bdb3933eb2f | [
"Apache-2.0"
] | null | null | null | tensorflow/python/kernel_tests/reader_ops_test.py | LaiPeter/tensorflow | 08ed32dbb9e8f67eec9efce3807b5bdb3933eb2f | [
"Apache-2.0"
] | 2 | 2017-07-16T13:54:08.000Z | 2018-05-21T09:02:34.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Reader ops from io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gzip
import os
import threading
import zlib
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import compat
# pylint: disable=invalid-name
TFRecordCompressionType = tf_record.TFRecordCompressionType
# pylint: enable=invalid-name
# Edgar Allan Poe's 'Eldorado'
_TEXT = b"""Gaily bedight,
A gallant knight,
In sunshine and in shadow,
Had journeyed long,
Singing a song,
In search of Eldorado.
But he grew old
This knight so bold
And o'er his heart a shadow
Fell as he found
No spot of ground
That looked like Eldorado.
And, as his strength
Failed him at length,
He met a pilgrim shadow
'Shadow,' said he,
'Where can it be
This land of Eldorado?'
'Over the Mountains
Of the Moon'
Down the Valley of the Shadow,
Ride, boldly ride,'
The shade replied,
'If you seek for Eldorado!'
"""
class IdentityReaderTest(test.TestCase):
  """Tests for IdentityReader: key and value both echo the queued work item."""

  def _ExpectRead(self, sess, key, value, expected):
    # Helper: perform one read and check key == value == expected.
    k, v = sess.run([key, value])
    self.assertAllEqual(expected, k)
    self.assertAllEqual(expected, v)

  def testOneEpoch(self):
    with self.test_session() as sess:
      reader = io_ops.IdentityReader("test_reader")
      work_completed = reader.num_work_units_completed()
      produced = reader.num_records_produced()
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      queued_length = queue.size()
      key, value = reader.read(queue)

      # Nothing read yet: every counter starts at zero.
      self.assertAllEqual(0, work_completed.eval())
      self.assertAllEqual(0, produced.eval())
      self.assertAllEqual(0, queued_length.eval())

      queue.enqueue_many([["A", "B", "C"]]).run()
      queue.close().run()
      self.assertAllEqual(3, queued_length.eval())

      self._ExpectRead(sess, key, value, b"A")
      self.assertAllEqual(1, produced.eval())

      self._ExpectRead(sess, key, value, b"B")

      self._ExpectRead(sess, key, value, b"C")
      self.assertAllEqual(3, produced.eval())
      self.assertAllEqual(0, queued_length.eval())

      # Queue is closed and drained, so a further read must raise.
      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        sess.run([key, value])

      self.assertAllEqual(3, work_completed.eval())
      self.assertAllEqual(3, produced.eval())
      self.assertAllEqual(0, queued_length.eval())

  def testMultipleEpochs(self):
    with self.test_session() as sess:
      reader = io_ops.IdentityReader("test_reader")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      enqueue = queue.enqueue_many([["DD", "EE"]])
      key, value = reader.read(queue)

      # Re-enqueueing the same work items simulates multiple epochs.
      enqueue.run()
      self._ExpectRead(sess, key, value, b"DD")
      self._ExpectRead(sess, key, value, b"EE")
      enqueue.run()
      self._ExpectRead(sess, key, value, b"DD")
      self._ExpectRead(sess, key, value, b"EE")
      enqueue.run()
      self._ExpectRead(sess, key, value, b"DD")
      self._ExpectRead(sess, key, value, b"EE")
      queue.close().run()
      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        sess.run([key, value])

  def testSerializeRestore(self):
    with self.test_session() as sess:
      reader = io_ops.IdentityReader("test_reader")
      produced = reader.num_records_produced()
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      queue.enqueue_many([["X", "Y", "Z"]]).run()
      key, value = reader.read(queue)

      self._ExpectRead(sess, key, value, b"X")
      self.assertAllEqual(1, produced.eval())
      # Snapshot the reader state after one record.
      state = reader.serialize_state().eval()

      self._ExpectRead(sess, key, value, b"Y")
      self._ExpectRead(sess, key, value, b"Z")
      self.assertAllEqual(3, produced.eval())

      queue.enqueue_many([["Y", "Z"]]).run()
      queue.close().run()
      # Restoring rewinds the produced counter and replays the remainder.
      reader.restore_state(state).run()
      self.assertAllEqual(1, produced.eval())
      self._ExpectRead(sess, key, value, b"Y")
      self._ExpectRead(sess, key, value, b"Z")
      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        sess.run([key, value])
      self.assertAllEqual(3, produced.eval())

      self.assertEqual(bytes, type(state))

      # Malformed states must be rejected, at graph-build or run time.
      with self.assertRaises(ValueError):
        reader.restore_state([])
      with self.assertRaises(ValueError):
        reader.restore_state([state, state])
      with self.assertRaisesOpError(
          "Could not parse state for IdentityReader 'test_reader'"):
        reader.restore_state(state[1:]).run()
      with self.assertRaisesOpError(
          "Could not parse state for IdentityReader 'test_reader'"):
        reader.restore_state(state[:-1]).run()
      with self.assertRaisesOpError(
          "Could not parse state for IdentityReader 'test_reader'"):
        reader.restore_state(state + b"ExtraJunk").run()
      with self.assertRaisesOpError(
          "Could not parse state for IdentityReader 'test_reader'"):
        reader.restore_state(b"PREFIX" + state).run()
      with self.assertRaisesOpError(
          "Could not parse state for IdentityReader 'test_reader'"):
        reader.restore_state(b"BOGUS" + state[5:]).run()

  def testReset(self):
    with self.test_session() as sess:
      reader = io_ops.IdentityReader("test_reader")
      work_completed = reader.num_work_units_completed()
      produced = reader.num_records_produced()
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      queued_length = queue.size()
      key, value = reader.read(queue)

      queue.enqueue_many([["X", "Y", "Z"]]).run()
      self._ExpectRead(sess, key, value, b"X")
      self.assertLess(0, queued_length.eval())
      self.assertAllEqual(1, produced.eval())

      self._ExpectRead(sess, key, value, b"Y")
      self.assertLess(0, work_completed.eval())
      self.assertAllEqual(2, produced.eval())

      # reset() zeroes the counters but leaves queued work intact.
      reader.reset().run()
      self.assertAllEqual(0, work_completed.eval())
      self.assertAllEqual(0, produced.eval())
      self.assertAllEqual(1, queued_length.eval())
      self._ExpectRead(sess, key, value, b"Z")

      queue.enqueue_many([["K", "L"]]).run()
      self._ExpectRead(sess, key, value, b"K")
class WholeFileReaderTest(test.TestCase):
  """Tests for WholeFileReader: key is the filename, value the full contents."""

  def setUp(self):
    super(WholeFileReaderTest, self).setUp()
    # Three temp files with distinct contents, removed again in tearDown.
    self._filenames = [
        os.path.join(self.get_temp_dir(), "whole_file.%d.txt" % i)
        for i in range(3)
    ]
    self._content = [b"One\na\nb\n", b"Two\nC\nD", b"Three x, y, z"]
    for fn, c in zip(self._filenames, self._content):
      with open(fn, "wb") as h:
        h.write(c)

  def tearDown(self):
    for fn in self._filenames:
      os.remove(fn)
    super(WholeFileReaderTest, self).tearDown()

  def _ExpectRead(self, sess, key, value, index):
    # Helper: one read must yield (filename, file contents) for `index`.
    k, v = sess.run([key, value])
    self.assertAllEqual(compat.as_bytes(self._filenames[index]), k)
    self.assertAllEqual(self._content[index], v)

  def testOneEpoch(self):
    with self.test_session() as sess:
      reader = io_ops.WholeFileReader("test_reader")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      queue.enqueue_many([self._filenames]).run()
      queue.close().run()
      key, value = reader.read(queue)

      self._ExpectRead(sess, key, value, 0)
      self._ExpectRead(sess, key, value, 1)
      self._ExpectRead(sess, key, value, 2)

      # Closed and drained: the next read must raise.
      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        sess.run([key, value])

  def testInfiniteEpochs(self):
    with self.test_session() as sess:
      reader = io_ops.WholeFileReader("test_reader")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      enqueue = queue.enqueue_many([self._filenames])
      key, value = reader.read(queue)

      # Re-enqueue mid-epoch: the reader just keeps consuming in order.
      enqueue.run()
      self._ExpectRead(sess, key, value, 0)
      self._ExpectRead(sess, key, value, 1)
      enqueue.run()
      self._ExpectRead(sess, key, value, 2)
      self._ExpectRead(sess, key, value, 0)
      self._ExpectRead(sess, key, value, 1)
      enqueue.run()
      self._ExpectRead(sess, key, value, 2)
      self._ExpectRead(sess, key, value, 0)
class TextLineReaderTest(test.TestCase):
  """Tests for TextLineReader: one record per line, LF or CRLF endings."""

  def setUp(self):
    super(TextLineReaderTest, self).setUp()
    self._num_files = 2
    self._num_lines = 5

  def _LineText(self, f, l):
    # The expected payload of line `l` in file `f`.
    return compat.as_bytes("%d: %d" % (f, l))

  def _CreateFiles(self, crlf=False):
    filenames = []
    for i in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
      filenames.append(fn)
      with open(fn, "wb") as f:
        for j in range(self._num_lines):
          f.write(self._LineText(i, j))
          # Always include a newline after the record unless it is
          # at the end of the file, in which case we include it sometimes.
          if j + 1 != self._num_lines or i == 0:
            f.write(b"\r\n" if crlf else b"\n")
    return filenames

  def _testOneEpoch(self, files):
    with self.test_session() as sess:
      reader = io_ops.TextLineReader(name="test_reader")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      key, value = reader.read(queue)

      queue.enqueue_many([files]).run()
      queue.close().run()
      for i in range(self._num_files):
        for j in range(self._num_lines):
          k, v = sess.run([key, value])
          # Keys are "<filename>:<1-based line number>".
          self.assertAllEqual("%s:%d" % (files[i], j + 1), compat.as_text(k))
          self.assertAllEqual(self._LineText(i, j), v)

      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        k, v = sess.run([key, value])

  def testOneEpochLF(self):
    self._testOneEpoch(self._CreateFiles(crlf=False))

  def testOneEpochCRLF(self):
    self._testOneEpoch(self._CreateFiles(crlf=True))

  def testSkipHeaderLines(self):
    files = self._CreateFiles()
    with self.test_session() as sess:
      # skip_header_lines=1 drops the first line of every file.
      reader = io_ops.TextLineReader(skip_header_lines=1, name="test_reader")
      queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      key, value = reader.read(queue)

      queue.enqueue_many([files]).run()
      queue.close().run()
      for i in range(self._num_files):
        for j in range(self._num_lines - 1):
          k, v = sess.run([key, value])
          self.assertAllEqual("%s:%d" % (files[i], j + 2), compat.as_text(k))
          self.assertAllEqual(self._LineText(i, j + 1), v)

      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        k, v = sess.run([key, value])
class FixedLengthRecordReaderTest(test.TestCase):
def setUp(self):
super(FixedLengthRecordReaderTest, self).setUp()
self._num_files = 2
self._num_records = 7
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
self._hop_bytes = 2
self._num_overlapped_records = 3
def _Record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _OverlappedRecord(self, f, r):
record_str = "".join([
str(i)[0]
for i in range(r * self._hop_bytes,
r * self._hop_bytes + self._record_bytes)
])
return compat.as_bytes(record_str)
def _CreateFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
for j in range(self._num_records):
f.write(self._Record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
def _CreateOverlappedRecordFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(),
"fixed_length_overlapped_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
all_records_str = "".join([
str(i)[0]
for i in range(self._record_bytes + self._hop_bytes *
(self._num_overlapped_records - 1))
])
f.write(compat.as_bytes(all_records_str))
f.write(b"F" * self._footer_bytes)
return filenames
def testOneEpoch(self):
files = self._CreateFiles()
with self.test_session() as sess:
reader = io_ops.FixedLengthRecordReader(
header_bytes=self._header_bytes,
record_bytes=self._record_bytes,
footer_bytes=self._footer_bytes,
hop_bytes=0,
name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
self.assertAllEqual("%s:%d" % (files[i], j), compat.as_text(k))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
def testOneEpochWithHopBytes(self):
    """Reads overlapping records produced with a non-zero hop_bytes.

    With hop_bytes smaller than record_bytes consecutive records share
    bytes, so the expected values come from _OverlappedRecord.
    (Fix: removed a leftover debug ``print(v)`` from the read loop.)
    """
    files = self._CreateOverlappedRecordFiles()
    with self.test_session() as sess:
        reader = io_ops.FixedLengthRecordReader(
            header_bytes=self._header_bytes,
            record_bytes=self._record_bytes,
            footer_bytes=self._footer_bytes,
            hop_bytes=self._hop_bytes,
            name="test_reader")
        queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
        key, value = reader.read(queue)
        queue.enqueue_many([files]).run()
        queue.close().run()
        for i in range(self._num_files):
            for j in range(self._num_overlapped_records):
                k, v = sess.run([key, value])
                # Keys have the form "<filename>:<record index>".
                self.assertAllEqual("%s:%d" % (files[i], j), compat.as_text(k))
                self.assertAllEqual(self._OverlappedRecord(i, j), v)
        # Once the closed queue is drained, a further read must fail.
        with self.assertRaisesOpError("is closed and has insufficient elements "
                                      "\\(requested 1, current size 0\\)"):
            k, v = sess.run([key, value])
class TFRecordReaderTest(test.TestCase):
    """Tests for io_ops.TFRecordReader over plain, zlib- and gzip-compressed
    TFRecord files."""

    def setUp(self):
        super(TFRecordReaderTest, self).setUp()
        self._num_files = 2
        self._num_records = 7

    def _Record(self, f, r):
        """Expected payload of record *r* in file *f*."""
        return compat.as_bytes("Record %d of file %d" % (r, f))

    def _CreateFiles(self):
        """Write one TFRecord file per test file; return the file paths."""
        filenames = []
        for i in range(self._num_files):
            fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
            filenames.append(fn)
            writer = tf_record.TFRecordWriter(fn)
            for j in range(self._num_records):
                writer.write(self._Record(i, j))
            # Fix: close the writer so every record is flushed to disk
            # before a reader opens the file (the Zlib/Iterator tests in
            # this module already do this).
            writer.close()
        return filenames

    def testOneEpoch(self):
        """Reads every record of every file exactly once, then hits EOF."""
        files = self._CreateFiles()
        with self.test_session() as sess:
            reader = io_ops.TFRecordReader(name="test_reader")
            queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
            key, value = reader.read(queue)
            queue.enqueue_many([files]).run()
            queue.close().run()
            for i in range(self._num_files):
                for j in range(self._num_records):
                    k, v = sess.run([key, value])
                    # Keys begin with the source filename.
                    self.assertTrue(compat.as_text(k).startswith("%s:" % files[i]))
                    self.assertAllEqual(self._Record(i, j), v)
            # Once the closed queue is drained, a further read must fail.
            with self.assertRaisesOpError("is closed and has insufficient elements "
                                          "\\(requested 1, current size 0\\)"):
                k, v = sess.run([key, value])

    def testReadUpTo(self):
        """read_up_to returns at most batch_size records and reads them all."""
        files = self._CreateFiles()
        with self.test_session() as sess:
            reader = io_ops.TFRecordReader(name="test_reader")
            queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
            batch_size = 3
            key, value = reader.read_up_to(queue, batch_size)
            queue.enqueue_many([files]).run()
            queue.close().run()
            num_k = 0
            num_v = 0

            while True:
                try:
                    k, v = sess.run([key, value])
                    # Test reading *up to* batch_size records
                    self.assertLessEqual(len(k), batch_size)
                    self.assertLessEqual(len(v), batch_size)
                    num_k += len(k)
                    num_v += len(v)
                except errors_impl.OutOfRangeError:
                    break

            # Test that we have read everything
            self.assertEqual(self._num_files * self._num_records, num_k)
            self.assertEqual(self._num_files * self._num_records, num_v)

    def testReadZlibFiles(self):
        """Zlib-compressed copies must be readable with ZLIB options."""
        files = self._CreateFiles()
        zlib_files = []
        for i, fn in enumerate(files):
            with open(fn, "rb") as f:
                cdata = zlib.compress(f.read())
                zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
                with open(zfn, "wb") as f:
                    f.write(cdata)
                zlib_files.append(zfn)

        with self.test_session() as sess:
            options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
            reader = io_ops.TFRecordReader(name="test_reader", options=options)
            queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
            key, value = reader.read(queue)
            queue.enqueue_many([zlib_files]).run()
            queue.close().run()
            for i in range(self._num_files):
                for j in range(self._num_records):
                    k, v = sess.run([key, value])
                    self.assertTrue(
                        compat.as_text(k).startswith("%s:" % zlib_files[i]))
                    self.assertAllEqual(self._Record(i, j), v)

    def testReadGzipFiles(self):
        """Gzip-compressed copies must be readable with GZIP options."""
        files = self._CreateFiles()
        gzip_files = []
        for i, fn in enumerate(files):
            with open(fn, "rb") as f:
                cdata = f.read()
                zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
                with gzip.GzipFile(zfn, "wb") as f:
                    f.write(cdata)
                gzip_files.append(zfn)

        with self.test_session() as sess:
            options = tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)
            reader = io_ops.TFRecordReader(name="test_reader", options=options)
            queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
            key, value = reader.read(queue)
            queue.enqueue_many([gzip_files]).run()
            queue.close().run()
            for i in range(self._num_files):
                for j in range(self._num_records):
                    k, v = sess.run([key, value])
                    self.assertTrue(
                        compat.as_text(k).startswith("%s:" % gzip_files[i]))
                    self.assertAllEqual(self._Record(i, j), v)
class TFRecordWriterZlibTest(test.TestCase):
    """Round-trip tests for ZLIB-compressed TFRecord files, checking that
    TFRecordWriter/Reader interoperate with the plain zlib/gzip libraries."""

    def setUp(self):
        super(TFRecordWriterZlibTest, self).setUp()
        self._num_files = 2
        self._num_records = 7

    def _Record(self, f, r):
        """Expected payload of record *r* in file *f*."""
        return compat.as_bytes("Record %d of file %d" % (r, f))

    def _CreateFiles(self):
        """Write ZLIB-compressed TFRecord files; return the file paths."""
        filenames = []
        for i in range(self._num_files):
            fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
            filenames.append(fn)
            options = tf_record.TFRecordOptions(
                compression_type=TFRecordCompressionType.ZLIB)
            writer = tf_record.TFRecordWriter(fn, options=options)
            for j in range(self._num_records):
                writer.write(self._Record(i, j))
            # Close and drop the writer so the compressed stream is fully
            # flushed before the file is read back.
            writer.close()
            del writer
        return filenames

    def _WriteRecordsToFile(self, records, name="tf_record"):
        """Write *records* to an uncompressed TFRecord file; return its path."""
        fn = os.path.join(self.get_temp_dir(), name)
        writer = tf_record.TFRecordWriter(fn, options=None)
        for r in records:
            writer.write(r)
        writer.close()
        del writer
        return fn

    def _ZlibCompressFile(self, infile, name="tfrecord.z"):
        # zlib compress the file and write compressed contents to file.
        with open(infile, "rb") as f:
            cdata = zlib.compress(f.read())
        zfn = os.path.join(self.get_temp_dir(), name)
        with open(zfn, "wb") as f:
            f.write(cdata)
        return zfn

    def testOneEpoch(self):
        """Reads back every record of the ZLIB-compressed files once."""
        files = self._CreateFiles()
        with self.test_session() as sess:
            options = tf_record.TFRecordOptions(
                compression_type=TFRecordCompressionType.ZLIB)
            reader = io_ops.TFRecordReader(name="test_reader", options=options)
            queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
            key, value = reader.read(queue)
            queue.enqueue_many([files]).run()
            queue.close().run()
            for i in range(self._num_files):
                for j in range(self._num_records):
                    k, v = sess.run([key, value])
                    # Keys begin with the source filename.
                    self.assertTrue(
                        compat.as_text(k).startswith("%s:" % files[i]))
                    self.assertAllEqual(self._Record(i, j), v)
            # Once the closed queue is drained, a further read must fail.
            with self.assertRaisesOpError("is closed and has insufficient elements "
                                          "\\(requested 1, current size 0\\)"):
                k, v = sess.run([key, value])

    def testZLibFlushRecord(self):
        """Extra Z_FULL_FLUSH blocks / trailing flushes must not break reads."""
        fn = self._WriteRecordsToFile([b"small record"], "small_record")
        with open(fn, "rb") as h:
            buff = h.read()

        # creating more blocks and trailing blocks shouldn't break reads
        compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS)

        output = b""
        for c in buff:
            # Python 3 iterates bytes as ints; re-wrap each as a 1-byte value.
            if isinstance(c, int):
                c = six.int2byte(c)
            output += compressor.compress(c)
            output += compressor.flush(zlib.Z_FULL_FLUSH)

        output += compressor.flush(zlib.Z_FULL_FLUSH)
        output += compressor.flush(zlib.Z_FULL_FLUSH)
        output += compressor.flush(zlib.Z_FINISH)

        # overwrite the original file with the compressed data
        with open(fn, "wb") as h:
            h.write(output)

        with self.test_session() as sess:
            options = tf_record.TFRecordOptions(
                compression_type=TFRecordCompressionType.ZLIB)
            reader = io_ops.TFRecordReader(name="test_reader", options=options)
            queue = data_flow_ops.FIFOQueue(1, [dtypes.string], shapes=())
            key, value = reader.read(queue)
            queue.enqueue(fn).run()
            queue.close().run()
            k, v = sess.run([key, value])
            self.assertTrue(compat.as_text(k).startswith("%s:" % fn))
            self.assertAllEqual(b"small record", v)

    def testZlibReadWrite(self):
        """Verify that files produced are zlib compatible."""
        original = [b"foo", b"bar"]
        fn = self._WriteRecordsToFile(original, "zlib_read_write.tfrecord")
        zfn = self._ZlibCompressFile(fn, "zlib_read_write.tfrecord.z")

        # read the compressed contents and verify.
        actual = []
        for r in tf_record.tf_record_iterator(
                zfn,
                options=tf_record.TFRecordOptions(
                    tf_record.TFRecordCompressionType.ZLIB)):
            actual.append(r)
        self.assertEqual(actual, original)

    def testZlibReadWriteLarge(self):
        """Verify that writing large contents also works."""
        # Make it large (about 5MB)
        original = [_TEXT * 10240]
        fn = self._WriteRecordsToFile(original, "zlib_read_write_large.tfrecord")
        zfn = self._ZlibCompressFile(fn, "zlib_read_write_large.tfrecord.z")

        # read the compressed contents and verify.
        actual = []
        for r in tf_record.tf_record_iterator(
                zfn,
                options=tf_record.TFRecordOptions(
                    tf_record.TFRecordCompressionType.ZLIB)):
            actual.append(r)
        self.assertEqual(actual, original)

    def testGzipReadWrite(self):
        """Verify that files produced are gzip compatible."""
        original = [b"foo", b"bar"]
        fn = self._WriteRecordsToFile(original, "gzip_read_write.tfrecord")

        # gzip compress the file and write compressed contents to file.
        with open(fn, "rb") as f:
            cdata = f.read()
        gzfn = os.path.join(self.get_temp_dir(), "tf_record.gz")
        with gzip.GzipFile(gzfn, "wb") as f:
            f.write(cdata)

        actual = []
        for r in tf_record.tf_record_iterator(
                gzfn, options=tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)):
            actual.append(r)
        self.assertEqual(actual, original)
class TFRecordIteratorTest(test.TestCase):
    """Tests for tf_record.tf_record_iterator over compressed and truncated
    TFRecord files."""

    def setUp(self):
        super(TFRecordIteratorTest, self).setUp()
        self._num_records = 7

    def _Record(self, r):
        """Expected payload of record *r*."""
        return compat.as_bytes("Record %d" % r)

    def _WriteCompressedRecordsToFile(
            self,
            records,
            name="tfrecord.z",
            compression_type=tf_record.TFRecordCompressionType.ZLIB):
        """Write *records* to a compressed TFRecord file; return its path."""
        fn = os.path.join(self.get_temp_dir(), name)
        options = tf_record.TFRecordOptions(compression_type=compression_type)
        writer = tf_record.TFRecordWriter(fn, options=options)
        for r in records:
            writer.write(r)
        # Close and drop the writer so the compressed stream is flushed.
        writer.close()
        del writer
        return fn

    def _ZlibDecompressFile(self, infile, name="tfrecord", wbits=zlib.MAX_WBITS):
        """zlib-decompress *infile* into a new temp file; return its path."""
        with open(infile, "rb") as f:
            cdata = zlib.decompress(f.read(), wbits)
        zfn = os.path.join(self.get_temp_dir(), name)
        with open(zfn, "wb") as f:
            f.write(cdata)
        return zfn

    def testIterator(self):
        """The iterator yields each record once, then raises StopIteration."""
        fn = self._WriteCompressedRecordsToFile(
            [self._Record(i) for i in range(self._num_records)],
            "compressed_records")
        options = tf_record.TFRecordOptions(
            compression_type=TFRecordCompressionType.ZLIB)
        reader = tf_record.tf_record_iterator(fn, options)
        for i in range(self._num_records):
            record = next(reader)
            self.assertAllEqual(self._Record(i), record)
        with self.assertRaises(StopIteration):
            record = next(reader)

    def testWriteZlibRead(self):
        """Verify compression with TFRecordWriter is zlib library compatible."""
        original = [b"foo", b"bar"]
        fn = self._WriteCompressedRecordsToFile(original,
                                                "write_zlib_read.tfrecord.z")
        zfn = self._ZlibDecompressFile(fn, "write_zlib_read.tfrecord")
        actual = []
        for r in tf_record.tf_record_iterator(zfn):
            actual.append(r)
        self.assertEqual(actual, original)

    def testWriteZlibReadLarge(self):
        """Verify compression for large records is zlib library compatible."""
        # Make it large (about 5MB)
        original = [_TEXT * 10240]
        fn = self._WriteCompressedRecordsToFile(original,
                                                "write_zlib_read_large.tfrecord.z")
        zfn = self._ZlibDecompressFile(fn, "write_zlib_read_large.tf_record")
        actual = []
        for r in tf_record.tf_record_iterator(zfn):
            actual.append(r)
        self.assertEqual(actual, original)

    def testWriteGzipRead(self):
        """GZIP-written records must read back after gunzip to a plain file."""
        original = [b"foo", b"bar"]
        fn = self._WriteCompressedRecordsToFile(
            original,
            "write_gzip_read.tfrecord.gz",
            compression_type=TFRecordCompressionType.GZIP)

        # Decompress with the gzip module, then iterate the plain file.
        with gzip.GzipFile(fn, "rb") as f:
            cdata = f.read()
        zfn = os.path.join(self.get_temp_dir(), "tf_record")
        with open(zfn, "wb") as f:
            f.write(cdata)

        actual = []
        for r in tf_record.tf_record_iterator(zfn):
            actual.append(r)
        self.assertEqual(actual, original)

    def testBadFile(self):
        """Verify that tf_record_iterator throws an exception on bad TFRecords."""
        fn = os.path.join(self.get_temp_dir(), "bad_file")
        with tf_record.TFRecordWriter(fn) as writer:
            writer.write(b"123")
        fn_truncated = os.path.join(self.get_temp_dir(), "bad_file_truncated")
        with open(fn, "rb") as f:
            with open(fn_truncated, "wb") as f2:
                # DataLossError requires that we've written the header, so this must
                # be at least 12 bytes.
                f2.write(f.read(14))
        with self.assertRaises(errors_impl.DataLossError):
            for _ in tf_record.tf_record_iterator(fn_truncated):
                pass
class AsyncReaderTest(test.TestCase):
    """Checks that blocked reader reads do not stall session execution."""

    def testNoDeadlockFromQueue(self):
        """Tests that reading does not block main execution threads."""
        # Restrict the session to single inter-/intra-op threads so any
        # read that held an execution thread while blocked would deadlock.
        config = config_pb2.ConfigProto(
            inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)

        with self.test_session(config=config) as sess:
            thread_data_t = collections.namedtuple("thread_data_t",
                                                   ["thread", "queue", "output"])
            thread_data = []

            # Create different readers, each with its own queue.
            for i in range(3):
                queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
                reader = io_ops.TextLineReader()
                _, line = reader.read(queue)
                output = []
                t = threading.Thread(
                    target=AsyncReaderTest._RunSessionAndSave,
                    args=(sess, [line], output))
                thread_data.append(thread_data_t(t, queue, output))

            # Start all readers. They are all blocked waiting for queue entries.
            sess.run(variables.global_variables_initializer())
            for d in thread_data:
                d.thread.start()

            # Unblock the readers: write a one-line file for each, enqueue
            # it, then wait for the corresponding thread to finish and
            # check it read exactly that line.
            for i, d in enumerate(reversed(thread_data)):
                fname = os.path.join(self.get_temp_dir(), "deadlock.%s.txt" % i)
                with open(fname, "wb") as f:
                    f.write(("file-%s" % i).encode())
                d.queue.enqueue_many([[fname]]).run()
                d.thread.join()
                self.assertEqual([[("file-%s" % i).encode()]], d.output)

    @staticmethod
    def _RunSessionAndSave(sess, args, output):
        """Thread target: run *args* in *sess* and append the result."""
        output.append(sess.run(args))
class LMDBReaderTest(test.TestCase):
    """Tests for io_ops.LMDBReader against the checked-in test database,
    which holds the ten pairs str(i) -> chr(ord('a') + i)."""

    def setUp(self):
        super(LMDBReaderTest, self).setUp()

    def testReadFromFile(self):
        """Reads all ten key/value pairs when given the .mdb file path."""
        with self.test_session() as sess:
            reader = io_ops.LMDBReader(name="test_read_from_file")
            # Relative path — resolved against the test's working directory.
            path = os.path.join("tensorflow", "core", "lib", "lmdb", "testdata",
                                "data.mdb")
            queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
            key, value = reader.read(queue)
            queue.enqueue([path]).run()
            queue.close().run()
            for i in range(10):
                k, v = sess.run([key, value])
                self.assertAllEqual(compat.as_bytes(k), compat.as_bytes(str(i)))
                self.assertAllEqual(compat.as_bytes(v),
                                    compat.as_bytes(str(chr(ord('a') + i))))
            # Once the closed queue is drained, a further read must fail.
            with self.assertRaisesOpError("is closed and has insufficient elements "
                                          "\\(requested 1, current size 0\\)"):
                k, v = sess.run([key, value])

    def testReadFromFolder(self):
        """Same pairs must be readable when given the database directory."""
        with self.test_session() as sess:
            reader = io_ops.LMDBReader(name="test_read_from_folder")
            path = os.path.join("tensorflow", "core", "lib", "lmdb", "testdata")
            queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
            key, value = reader.read(queue)
            queue.enqueue([path]).run()
            queue.close().run()
            for i in range(10):
                k, v = sess.run([key, value])
                self.assertAllEqual(compat.as_bytes(k), compat.as_bytes(str(i)))
                self.assertAllEqual(compat.as_bytes(v),
                                    compat.as_bytes(str(chr(ord('a') + i))))
            # Once the closed queue is drained, a further read must fail.
            with self.assertRaisesOpError("is closed and has insufficient elements "
                                          "\\(requested 1, current size 0\\)"):
                k, v = sess.run([key, value])
# Run the reader test suites when this module is executed directly.
if __name__ == "__main__":
    test.main()
| 35.321192 | 88 | 0.640761 |
265452f9d0098d6703590198e5a30cdf5327d653 | 523 | py | Python | app/quickbooks/urls.py | jaydhanani99/recko | d569f34593e18691602f0618f0388fe0da2b86e0 | [
"MIT"
] | null | null | null | app/quickbooks/urls.py | jaydhanani99/recko | d569f34593e18691602f0618f0388fe0da2b86e0 | [
"MIT"
] | null | null | null | app/quickbooks/urls.py | jaydhanani99/recko | d569f34593e18691602f0618f0388fe0da2b86e0 | [
"MIT"
] | null | null | null | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from quickbooks import views
router = DefaultRouter(trailing_slash=False)
router.register('quickbooks', views.QuickbooksViewSet)
app_name = 'quickbooks'
urlpatterns = [
path('', include(router.urls)),
path('quickbooks/auth/response', views.QuickbooksAuthResponseView.as_view(), name="quickbooks_auth_response"),
path('quickbooks/auth/request', views.QuickbooksAuthRequestView.as_view(), name="quickbooks_auth_request")
] | 34.866667 | 114 | 0.793499 |
0c4e94e07091396142d2395faba2e7c1299cd641 | 2,907 | py | Python | bot.py | Borahb/Allie | 7f467e8499bab0962773ec98717fe4a659404093 | [
"MIT"
] | null | null | null | bot.py | Borahb/Allie | 7f467e8499bab0962773ec98717fe4a659404093 | [
"MIT"
] | null | null | null | bot.py | Borahb/Allie | 7f467e8499bab0962773ec98717fe4a659404093 | [
"MIT"
] | null | null | null | #importing libraries
import pickle
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import numpy as np
from keras.models import load_model
import json
import random
import discord
import requests
import nest_asyncio
nest_asyncio.apply()
# Dependencies: the trained Keras intent classifier plus the artifacts
# pickled alongside it — the intents definition file, the tokenized
# vocabulary ("words") and the intent labels ("classes").
# NOTE(review): the file handles opened here are never closed explicitly.
model = load_model('chatbot_model.h5')
intents = json.loads(open('intents.json').read())
words = pickle.load(open('words.pkl', 'rb'))
classes = pickle.load(open('classes.pkl', 'rb'))
def clean_up_sentence(sentence):
    """Preprocess input: tokenize *sentence* and return its lowercased,
    lemmatized word list."""
    tokens = nltk.word_tokenize(sentence)
    return [lemmatizer.lemmatize(token.lower()) for token in tokens]
def bow(sentence, words, show_details=True):
    """Return a bag-of-words array for *sentence* over vocabulary *words*.

    Each slot is 1 when the corresponding vocabulary word occurs in the
    lemmatized sentence, else 0.  When *show_details* is true, every
    vocabulary hit is printed.
    """
    tokens = clean_up_sentence(sentence)
    bag = [0] * len(words)
    for token in tokens:
        for idx, vocab_word in enumerate(words):
            if vocab_word == token:
                # Mark the vocabulary position as present.
                bag[idx] = 1
                if show_details:
                    print("found in bag: %s" % vocab_word)
    return np.array(bag)
def predict_class(sentence, model):
    """Classify *sentence* and return candidate intents, best first.

    Each element is {"intent": <label>, "probability": <score as str>};
    predictions at or below the confidence threshold are dropped.
    """
    features = bow(sentence, words, show_details=False)
    scores = model.predict(np.array([features]))[0]
    threshold = 0.25
    # Keep (index, score) pairs above the threshold, strongest first.
    candidates = sorted(
        ((idx, score) for idx, score in enumerate(scores) if score > threshold),
        key=lambda pair: pair[1],
        reverse=True)
    return [{"intent": classes[idx], "probability": str(score)}
            for idx, score in candidates]
def getResponse(ints, intents_json):
    """Pick a reply for the top-ranked intent in *ints*.

    Args:
        ints: Non-empty list of {"intent": tag, ...} dicts, best first
            (as produced by predict_class).
        intents_json: Parsed intents file: {"intents": [{"tag": ...,
            "responses": [...]}, ...]}.

    Returns:
        A randomly chosen response string for the matching tag, or a
        fallback message when the tag is not present in *intents_json*
        (previously this path raised UnboundLocalError because ``result``
        was never assigned).
    """
    tag = ints[0]['intent']
    for intent in intents_json['intents']:
        if intent['tag'] == tag:
            return random.choice(intent['responses'])
    return "Sorry, I do not understand"
def chatbot_response(msg):
    """Produce the bot's reply for one user message *msg*."""
    predictions = predict_class(msg, model)
    if predictions:
        return getResponse(predictions, intents)
    # No intent cleared the confidence threshold.
    return "Sorry, I do not understand"
# --------------------------- Discord client ---------------------------
client = discord.Client()


@client.event
async def on_ready():
    """Announce the bot in a channel once the gateway connection is up."""
    print('We have logged in !')
    # NOTE(review): CHANNEL_ID is not defined anywhere in this file —
    # it looks like a scrubbed placeholder; define it before running.
    general_channel = client.get_channel(CHANNEL_ID)
    await general_channel.send('Hello Everyone !')
@client.event
async def on_message(message):
    """Run every incoming message through the classifier and reply."""
    # Ignore the bot's own messages so it never answers itself.
    if message.author == client.user:
        return
    msgc = message.content
    inp = msgc
    results = chatbot_response(inp)
    # NOTE(review): replies always go to this hard-coded channel id,
    # not to message.channel — confirm this is intentional.
    general_channel = client.get_channel(803876773360041997)
    await general_channel.send(results)


# NOTE(review): API_KEY is not defined in this file (scrubbed secret);
# supply it, e.g. from an environment variable, before running.
client.run(API_KEY)
| 27.951923 | 96 | 0.645683 |
5a32293a6df798fd749f83ee0ac0d2634618ab4a | 426 | py | Python | oppgavefem.py | NicoDerp/sannsynlighet | 427d0016bfc136efe5d69227ceb33520b2008fa5 | [
"Apache-2.0"
] | null | null | null | oppgavefem.py | NicoDerp/sannsynlighet | 427d0016bfc136efe5d69227ceb33520b2008fa5 | [
"Apache-2.0"
] | null | null | null | oppgavefem.py | NicoDerp/sannsynlighet | 427d0016bfc136efe5d69227ceb33520b2008fa5 | [
"Apache-2.0"
] | null | null | null | from random import randint
antallkast = 10000 # Øk for mer nøyaktighet
myntsum = 0
for i in range(antallkast):
s = sum([randint(0,1) for _ in range(3)]) # Antall kronestykker som ble mynt ut av de tre kronestrykkene
if s==2: # Sjekker om 2 ut av 3 kronestykker ble mynt
myntsum += 1
relativ_frekvens = myntsum / antallkast # Sannsynligheten
print("Antall kast:", antallkast)
print("Relativ frekvens:", relativ_frekvens)
| 32.769231 | 105 | 0.744131 |
6d17b10b67797d206aa9538677a50a7a95cd9d26 | 713 | py | Python | scripts/solve_farmer.py | SmilingHeretic/paradigm-ctf-2021-solutions-brownie | adb7fb543be4ee6062246675cfd4eb3b5ef594dc | [
"MIT"
] | null | null | null | scripts/solve_farmer.py | SmilingHeretic/paradigm-ctf-2021-solutions-brownie | adb7fb543be4ee6062246675cfd4eb3b5ef594dc | [
"MIT"
] | null | null | null | scripts/solve_farmer.py | SmilingHeretic/paradigm-ctf-2021-solutions-brownie | adb7fb543be4ee6062246675cfd4eb3b5ef594dc | [
"MIT"
] | null | null | null | from brownie import (
network,
accounts,
config,
interface,
FarmerSetup,
CompDaiFarmer,
CompFaucet
)
from scripts.helpful_scripts import (
get_account,
check_solution
)
from web3 import Web3
def main():
    """Deploy the Farmer CTF challenge and run the three-step solve.

    Uses two local accounts: index 0 deploys/funds the challenge,
    index 1 plays the attacker.
    """
    deployer = get_account(index=0)
    attacker = get_account(index=1)
    # setup challenge: the setup contract is funded with 100 ether and
    # exposes the farmer contract under test via setup.farmer().
    setup = FarmerSetup.deploy({"from": deployer, "value": Web3.toWei(100, 'ether')})
    farmer = CompDaiFarmer.at(setup.farmer())
    # solve challenge
    # it looks like I hacked the success conditions...
    farmer.claim({"from": attacker})
    farmer.recycle({"from": attacker})
    farmer.mint({"from": attacker})
    # check the solution
    check_solution(setup)
9994defd486a3b4e16cc34a03d10446564ea1cf0 | 2,347 | py | Python | swagger_server/models/legacy_model.py | cropsinsilico/cis-apiserver | d93986c4da685c33485ebff41596427e14899fdc | [
"BSD-3-Clause"
] | null | null | null | swagger_server/models/legacy_model.py | cropsinsilico/cis-apiserver | d93986c4da685c33485ebff41596427e14899fdc | [
"BSD-3-Clause"
] | null | null | null | swagger_server/models/legacy_model.py | cropsinsilico/cis-apiserver | d93986c4da685c33485ebff41596427e14899fdc | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class LegacyModel(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    def __init__(self, name: str=None, path: str=None):  # noqa: E501
        """LegacyModel - a model defined in Swagger

        :param name: The name of this LegacyModel.  # noqa: E501
        :type name: str
        :param path: The path of this LegacyModel.  # noqa: E501
        :type path: str
        """
        # Attribute name -> declared Swagger type; consumed by the
        # (de)serialization helpers in swagger_server.util.
        self.swagger_types = {
            'name': str,
            'path': str
        }

        # Attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'name': 'name',
            'path': 'path'
        }

        self._name = name
        self._path = path

    @classmethod
    def from_dict(cls, dikt) -> 'LegacyModel':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The LegacyModel of this LegacyModel.  # noqa: E501
        :rtype: LegacyModel
        """
        return util.deserialize_model(dikt, cls)

    @property
    def name(self) -> str:
        """Gets the name of this LegacyModel.

        The short name / identifier of this model  # noqa: E501

        :return: The name of this LegacyModel.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name: str):
        """Sets the name of this LegacyModel.

        The short name / identifier of this model  # noqa: E501

        :param name: The name of this LegacyModel.
        :type name: str
        """
        self._name = name

    @property
    def path(self) -> str:
        """Gets the path of this LegacyModel.

        Path to the YAML file on disk representing this model  # noqa: E501

        :return: The path of this LegacyModel.
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path: str):
        """Sets the path of this LegacyModel.

        Path to the YAML file on disk representing this model  # noqa: E501

        :param path: The path of this LegacyModel.
        :type path: str
        """
        self._path = path
| 24.705263 | 80 | 0.581594 |
85cbc6a8dd3fe57c66d2c19749461f8d3b0f9744 | 1,423 | py | Python | tests/handler_console_test.py | ricshaw/NiftyNet | 8bd7516fbadff9871414360c81eab6e8bf332048 | [
"Apache-2.0"
] | 1 | 2018-12-27T03:39:00.000Z | 2018-12-27T03:39:00.000Z | tests/handler_console_test.py | ricshaw/NiftyNet | 8bd7516fbadff9871414360c81eab6e8bf332048 | [
"Apache-2.0"
] | 9 | 2020-11-13T19:08:08.000Z | 2022-02-10T02:25:56.000Z | tests/handler_console_test.py | ricshaw/NiftyNet | 8bd7516fbadff9871414360c81eab6e8bf332048 | [
"Apache-2.0"
] | 1 | 2019-03-21T18:12:38.000Z | 2019-03-21T18:12:38.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import tensorflow as tf
from tests.application_driver_test import get_initialised_driver
from niftynet.engine.application_iteration import IterationMessage
from niftynet.engine.signal import GRAPH_CREATED, SESS_STARTED, ITER_FINISHED
class EventConsoleTest(tf.test.TestCase):
    """Checks that the ConsoleLogger handler reports stats for an iteration."""

    def test_init(self):
        """Runs one training iteration and inspects the console summary."""
        ITER_FINISHED.connect(self.iteration_listener)
        app_driver = get_initialised_driver()
        app_driver.load_event_handlers(
            ['niftynet.engine.handler_model.ModelRestorer',
             'niftynet.engine.handler_console.ConsoleLogger',
             'niftynet.engine.handler_sampler.SamplerThreading'])
        graph = app_driver.create_graph(app_driver.app, 1, True)
        with self.test_session(graph=graph):
            GRAPH_CREATED.send(app_driver.app, iter_msg=None)
            SESS_STARTED.send(app_driver.app, iter_msg=None)
            msg = IterationMessage()
            msg.current_iter = 1
            app_driver.loop(app_driver.app, [msg])
        app_driver.app.stop()
        ITER_FINISHED.disconnect(self.iteration_listener)

    def iteration_listener(self, sender, **msg):
        """ITER_FINISHED callback: the console string must include stats."""
        msg = msg['iter_msg']
        # assertRegex replaces the long-deprecated assertRegexpMatches,
        # which was removed in Python 3.12.
        self.assertRegex(msg.to_console_string(), 'mean')
        self.assertRegex(msg.to_console_string(), 'var')
# Allow running this test module directly.
if __name__ == "__main__":
    tf.test.main()
| 37.447368 | 77 | 0.709065 |
3ba55542ec4f1261e6215b97302f6ffcc78f10a3 | 4,758 | py | Python | quati/reporter.py | onenoc/quati | ba372b2ad14076294af62cbcbc27e1b3ca8421c1 | [
"MIT"
] | 2 | 2021-01-30T21:20:36.000Z | 2021-01-30T22:15:07.000Z | quati/reporter.py | onenoc/quati | ba372b2ad14076294af62cbcbc27e1b3ca8421c1 | [
"MIT"
] | null | null | null | quati/reporter.py | onenoc/quati | ba372b2ad14076294af62cbcbc27e1b3ca8421c1 | [
"MIT"
] | 1 | 2021-01-18T23:12:18.000Z | 2021-01-18T23:12:18.000Z | import logging
from quati.stats import BestValueEpoch
logger = logging.getLogger(__name__)
def get_line_bar(template_head):
    """Build the separator row for *template_head*: a run of dashes with a
    '+' under every '|' column divider.

    NOTE(review): the dash run uses the *stripped* length while the '|'
    positions come from the unstripped string, so the header must have no
    leading whitespace and every '|' must fall within the stripped length
    — true for the templates built in Reporter.__init__.
    """
    separator = ['-'] * len(template_head.strip())
    for pos, ch in enumerate(template_head):
        if ch == '|':
            separator[pos] = '+'
    return ''.join(separator)
class Reporter:
    """
    Simple class to print stats on the screen using logger.info and
    optionally, tensorboard.

    Args:
        output_dir (str): Path location to save tensorboard artifacts.
        use_tensorboard (bool): Whether to log stats on tensorboard server.
    """

    def __init__(self, output_dir, use_tensorboard):
        self.tb_writer = None
        if use_tensorboard:
            logger.info('Starting tensorboard logger...')
            logger.info('Type `tensorboard --logdir runs/` in your terminal '
                        'to see live stats.')
            # Imported lazily so torch/tensorboard is only required when
            # tensorboard logging is actually enabled.
            from torch.utils.tensorboard import SummaryWriter
            self.tb_writer = SummaryWriter(output_dir)
        self.mode = None   # tag prefix for tensorboard scalars (set_mode)
        self.epoch = None  # current epoch number (set_epoch)
        self.output_dir = output_dir
        # Fixed-width console table header; the separator line under it is
        # derived from the '|' positions by get_line_bar, so the body
        # template widths below must stay aligned with the header text.
        self.template_head = 'Loss (val / epoch) | '
        self.template_head += 'Prec. '
        self.template_head += 'Rec. '
        self.template_head += 'F1 (val / epoch) | '
        self.template_head += 'ACC (val / epoch) | '
        self.template_head += 'MCC (val / epoch) | '
        self.template_line = get_line_bar(self.template_head)
        self.template_body = '{:7.4f} ({:.4f} / {:2d}) |'  # loss (best/best)
        self.template_body += '{:7.4f} '  # prec.
        self.template_body += '{:7.4f} '  # rec.
        self.template_body += '{:7.4f} ({:.4f} / {:2d}) |'  # F1 (best/best)
        self.template_body += '{:7.4f} ({:.4f} / {:2d}) |'  # ACC (best/best)
        self.template_body += '{:7.4f} ({:.4f} / {:2d}) |'  # MCC (best/best)
        self.template_footer = '---'

    def set_mode(self, mode):
        """Set the current mode (used as the tensorboard tag prefix)."""
        self.mode = mode

    def set_epoch(self, epoch):
        """Set the current epoch (used for tensorboard step numbering)."""
        self.epoch = epoch

    def show_head(self):
        """Log the table header and its separator line."""
        logger.info(self.template_head)
        logger.info(self.template_line)

    def show_footer(self):
        """Log the table footer."""
        logger.info(self.template_footer)

    def show_stats(self, stats_dict, epoch=None):
        """Log one table row of stats; append '< Ep. N' when *epoch* given.

        *stats_dict* must hold 'loss'/'acc'/'mcc' floats, a 'prec_rec_f1'
        triple, and matching 'best_*' BestValueEpoch entries.
        """
        text = self.template_body.format(
            stats_dict['loss'],
            stats_dict['best_loss'].value,
            stats_dict['best_loss'].epoch,
            stats_dict['prec_rec_f1'][0],
            stats_dict['prec_rec_f1'][1],
            stats_dict['prec_rec_f1'][2],
            stats_dict['best_prec_rec_f1'].value[2],
            stats_dict['best_prec_rec_f1'].epoch,
            stats_dict['acc'],
            stats_dict['best_acc'].value,
            stats_dict['best_acc'].epoch,
            stats_dict['mcc'],
            stats_dict['best_mcc'].value,
            stats_dict['best_mcc'].epoch,
        )
        if epoch is not None:
            text += '< Ep. {}'.format(epoch)
        logger.info(text)

    def report_progress(self, i, nb_iters, loss):
        """Print in-epoch progress on one console line; log moving loss."""
        print('Loss ({}/{}): {:.4f}'.format(i, nb_iters, loss), end='\r')
        if self.tb_writer is not None:
            # Global step across epochs (epochs are 1-based).
            j = (self.epoch - 1) * nb_iters + i
            mode_metric = '{}/{}'.format(self.mode, 'moving_loss')
            self.tb_writer.add_scalar(mode_metric, loss, j)

    def report_stats(self, stats_dict):
        """Log a full stats table and mirror scalars to tensorboard."""
        self.show_head()
        self.show_stats(stats_dict)
        self.show_footer()
        if self.tb_writer is not None:
            for metric, value in stats_dict.items():
                # 'best_*' entries are history, not current-epoch scalars.
                if isinstance(value, BestValueEpoch):
                    continue
                if metric == 'prec_rec_f1':
                    # Unpack the triple into three separate scalar tags.
                    mm_0 = '{}/{}'.format(self.mode, 'precision')
                    mm_1 = '{}/{}'.format(self.mode, 'recall')
                    mm_2 = '{}/{}'.format(self.mode, 'f1')
                    self.tb_writer.add_scalar(mm_0, value[0], self.epoch)
                    self.tb_writer.add_scalar(mm_1, value[1], self.epoch)
                    self.tb_writer.add_scalar(mm_2, value[2], self.epoch)
                else:
                    mode_metric = '{}/{}'.format(self.mode, metric)
                    self.tb_writer.add_scalar(mode_metric, value, self.epoch)

    def report_stats_history(self, stats_history, start=1):
        """Log one table with a row per epoch, numbered from *start*."""
        self.show_head()
        for i, stats_dict in enumerate(stats_history, start=start):
            self.show_stats(stats_dict, epoch=i)
        self.show_footer()

    def close(self):
        """Flush and close the tensorboard writer, if one was created."""
        if self.tb_writer is not None:
            # all_scalars_path = Path(self.output_dir, 'all_scalars.json')
            # self.tb_writer.export_scalars_to_json(str(all_scalars_path))
            self.tb_writer.close()
| 38.682927 | 77 | 0.568306 |
f2d670bc9c0cc47ab9a6a16f2f56213f0ce89e4a | 1,805 | py | Python | 2020/d04/d04.py | pravin/advent-2016 | ecb0f72b9152c13e9c05d3ed2510bf7b8aa0907c | [
"Apache-2.0"
] | null | null | null | 2020/d04/d04.py | pravin/advent-2016 | ecb0f72b9152c13e9c05d3ed2510bf7b8aa0907c | [
"Apache-2.0"
] | null | null | null | 2020/d04/d04.py | pravin/advent-2016 | ecb0f72b9152c13e9c05d3ed2510bf7b8aa0907c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
passports = []
data = {}
with open('input.txt') as fp:
for line in fp:
if line.strip() == '':
passports.append(data)
data = {}
continue
kv_pairs = line.split()
for kv in kv_pairs:
k, v = kv.split(':')
data[k] = v
passports.append(data)
fields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']
def common(skip):
num_valid = 0
for p in passports:
is_valid = True
for f in fields:
if not (f in p and isFieldValid(f, p[f], skip)):
is_valid = False
break
if is_valid:
num_valid += 1
return num_valid
def isFieldValid(field, value, skip):
if skip: return True
response = False
if field == 'byr':
v = int(value)
response = v >= 1920 and v <= 2002
elif field == 'iyr':
v = int(value)
response = v >= 2010 and v <= 2020
elif field == 'eyr':
v = int(value)
response = v >= 2020 and v <= 2030
elif field == 'hgt':
v = int(value[:-2])
if value.endswith('cm'):
response = v >= 150 and v <= 193
elif value.endswith('in'):
response = v >= 59 and v <= 76
elif field == 'hcl':
if value[0] == '#' and len(value) == 7:
try:
int(value[1:], 16)
response = True
except:
response = False
elif field == 'ecl':
response = value in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
elif field == 'pid':
response = value.isnumeric() and len(value) == 9
return response
def part1():
return common(True)
def part2():
return common(False)
print(part1())
print(part2())
| 25.069444 | 77 | 0.482548 |
05d27941a0b8989b31015820bdfc98f6f43b01c2 | 719 | py | Python | tests/test_utils/test_time_series.py | phil-lo/pyportlib | 96f8531c0c18c58d3476832de9d4c9b2c9285f62 | [
"MIT"
] | 2 | 2021-11-18T21:40:02.000Z | 2021-12-13T21:01:18.000Z | tests/test_utils/test_time_series.py | phil-lo/PortfolioCore | 3fbe7460c809a80e48615e934990dcd2d1f5003b | [
"CC0-1.0"
] | 1 | 2022-01-24T21:10:09.000Z | 2022-01-24T21:10:09.000Z | tests/test_utils/test_time_series.py | phil-lo/PortfolioCore | 3fbe7460c809a80e48615e934990dcd2d1f5003b | [
"CC0-1.0"
] | null | null | null | import pandas as pd
from pyportlib.utils.time_series import remove_leading_zeroes, remove_consecutive_zeroes
class TestTimeSeries:
    """Tests for the zero-trimming helpers in pyportlib's time_series module."""

    def test_remove_leading_zeroes(self):
        series = pd.Series(data=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1])
        trimmed = remove_leading_zeroes(series)
        # Interior zero runs must survive; only the leading run is dropped.
        assert trimmed[13] == 0
        assert trimmed[14] == 0
        assert trimmed[15] == 0
        assert trimmed.iloc[0] != 0

    def test_remove_consecutive_zeroes(self):
        series = pd.Series(data=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1])
        trimmed = remove_consecutive_zeroes(series)
        # First value non-zero, one interior zero kept, last value preserved.
        assert trimmed.iloc[0] != 0
        assert trimmed[15] == 0
        assert trimmed.iloc[-1] == 1
| 26.62963 | 88 | 0.589708 |
a8146e29ddf2a1f39d2c1c1271c461267d7f4e82 | 2,486 | py | Python | code/vocabulary.py | tremol/knausj_talon | 6af8b27fa27682f5354d89f03a8dcfa9d1a7ba1a | [
"MIT"
] | null | null | null | code/vocabulary.py | tremol/knausj_talon | 6af8b27fa27682f5354d89f03a8dcfa9d1a7ba1a | [
"MIT"
] | null | null | null | code/vocabulary.py | tremol/knausj_talon | 6af8b27fa27682f5354d89f03a8dcfa9d1a7ba1a | [
"MIT"
] | null | null | null | from talon import Module
from .user_settings import bind_list_to_csv, bind_word_map_to_csv
mod = Module()
# Declare the Talon list that holds user-defined vocabulary words.
mod.list("vocabulary", desc="additional vocabulary words")
# Default words that will need to be capitalized (particularly under w2l).
# NB. These defaults and those later in this file are ONLY used when
# auto-creating the corresponding settings/*.csv files. Those csv files
# determine the contents of user.vocabulary and dictate.word_map. Once they
# exist, the contents of the lists/dictionaries below are irrelevant.
_capitalize_defaults = [
    "I",
    "I'm",
    "I've",
    "I'll",
    "I'd",
    "Monday",
    "Mondays",
    "Tuesday",
    "Tuesdays",
    "Wednesday",
    "Wednesdays",
    "Thursday",
    "Thursdays",
    "Friday",
    "Fridays",
    "Saturday",
    "Saturdays",
    "Sunday",
    "Sundays",
    "January",
    "February",
    # March omitted because it's a regular word too
    "April",
    # May omitted because it's a regular word too
    "June",
    "July",
    "August",
    "September",
    "October",
    "November",
    "December",
]
# Default words that need to be remapped.
_word_map_defaults = {
    # E.g:
    # "cash": "cache",
}
# Map each lowercase form to its capitalized form, e.g. "monday" -> "Monday".
_word_map_defaults.update({word.lower(): word for word in _capitalize_defaults})
# "dictate.word_map" is used by `actions.dictate.replace_words` to rewrite words
# Talon recognized. Entries in word_map don't change the priority with which
# Talon recognizes some words over others.
bind_word_map_to_csv(
    "words_to_replace.csv",
    csv_headers=("Replacement", "Original"),
    default_values=_word_map_defaults,
)
# Default words that should be added to Talon's vocabulary.
# _simple_vocab_default = ["nmap", "admin", "Cisco", "Citrix", "VPN", "DNS", "Minecraft"]
_simple_vocab_default = ["admin", "Citrix", "VPN", "DNS", "Minecraft"]
# Defaults for different pronounciations of words that need to be added to
# Talon's vocabulary.
_default_vocabulary = {
    # "N map": "nmap",
    "under documented": "under-documented",
}
# Simple words are spoken exactly as written, so the spoken form maps to itself.
_default_vocabulary.update({word: word for word in _simple_vocab_default})
# "user.vocabulary" is used to explicitly add words/phrases that Talon doesn't
# recognize. Words in user.vocabulary (or other lists and captures) are
# "command-like" and their recognition is prioritized over ordinary words.
bind_list_to_csv(
    "user.vocabulary",
    "additional_words.csv",
    csv_headers=("Word(s)", "Spoken Form (If Different)"),
    default_values=_default_vocabulary,
)
| 29.247059 | 89 | 0.695897 |
0cd2ac1b5fb0af5583e66c931817ccd7ce8b7f71 | 7,792 | py | Python | tests/test_global_observation.py | null-pi/flatland-challenge | babc6895551f96da1c1b6bea6953db498cfa8644 | [
"MIT"
] | null | null | null | tests/test_global_observation.py | null-pi/flatland-challenge | babc6895551f96da1c1b6bea6953db498cfa8644 | [
"MIT"
] | 1 | 2020-09-04T13:57:01.000Z | 2020-09-09T19:21:33.000Z | tests/test_global_observation.py | null-pi/flatland-challenge | babc6895551f96da1c1b6bea6953db498cfa8644 | [
"MIT"
] | null | null | null | import numpy as np
from flatland.envs.agent_utils import EnvAgent, RailAgentStatus
from flatland.envs.observations import GlobalObsForRailEnv
from flatland.envs.rail_env import RailEnv, RailEnvActions
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
def test_get_global_observation():
    """End-to-end check of GlobalObsForRailEnv's observation channels.

    Builds a sparse rail env, advances one step, then for every agent
    verifies each observation channel (targets, directions, malfunction,
    speed, ready-to-depart counts) cell by cell against the env state.
    """
    number_of_agents = 20

    # NOTE(review): stochastic_data is defined but never passed to RailEnv;
    # malfunctions therefore use the env defaults — confirm this is intended.
    stochastic_data = {'prop_malfunction': 1.,  # Percentage of defective agents
                       'malfunction_rate': 30,  # Rate of malfunction occurence
                       'min_duration': 3,  # Minimal duration of malfunction
                       'max_duration': 20  # Max duration of malfunction
                       }

    speed_ration_map = {1.: 0.25,  # Fast passenger train
                        1. / 2.: 0.25,  # Fast freight train
                        1. / 3.: 0.25,  # Slow commuter train
                        1. / 4.: 0.25}  # Slow freight train

    env = RailEnv(width=50, height=50, rail_generator=sparse_rail_generator(max_num_cities=6,
                                                                            max_rails_between_cities=4,
                                                                            seed=15,
                                                                            grid_mode=False
                                                                            ),
                  schedule_generator=sparse_schedule_generator(speed_ration_map), number_of_agents=number_of_agents,
                  obs_builder_object=GlobalObsForRailEnv())
    env.reset()

    # One step forward for every agent so some agents become ACTIVE.
    obs, all_rewards, done, _ = env.step({i: RailEnvActions.MOVE_FORWARD for i in range(number_of_agents)})

    for i in range(len(env.agents)):
        agent: EnvAgent = env.agents[i]
        print("[{}] status={}, position={}, target={}, initial_position={}".format(i, agent.status, agent.position,
                                                                                   agent.target,
                                                                                   agent.initial_position))

    for i, agent in enumerate(env.agents):
        # obs[i] is a tuple; channel 1 holds agent states, channel 2 targets.
        obs_agents_state = obs[i][1]
        obs_targets = obs[i][2]

        # test first channel of obs_targets: own target
        nr_agents = np.count_nonzero(obs_targets[:, :, 0])
        assert nr_agents == 1, "agent {}: something wrong with own target, found {}".format(i, nr_agents)

        # test second channel of obs_targets: other agent's target
        for r in range(env.height):
            for c in range(env.width):
                _other_agent_target = 0
                for other_i, other_agent in enumerate(env.agents):
                    if other_agent.target == (r, c):
                        _other_agent_target = 1
                        break
                assert obs_targets[(r, c)][
                           1] == _other_agent_target, "agent {}: at {} expected to be other agent's target = {}".format(
                    i, (r, c),
                    _other_agent_target)

        # test first channel of obs_agents_state: direction at own position
        for r in range(env.height):
            for c in range(env.width):
                if (agent.status == RailAgentStatus.ACTIVE or agent.status == RailAgentStatus.DONE) and (
                        r, c) == agent.position:
                    assert np.isclose(obs_agents_state[(r, c)][0], agent.direction), \
                        "agent {} in status {} at {} expected to contain own direction {}, found {}" \
                            .format(i, agent.status, (r, c), agent.direction, obs_agents_state[(r, c)][0])
                elif (agent.status == RailAgentStatus.READY_TO_DEPART) and (r, c) == agent.initial_position:
                    # Agents waiting to depart report their direction at the spawn cell.
                    assert np.isclose(obs_agents_state[(r, c)][0], agent.direction), \
                        "agent {} in status {} at {} expected to contain own direction {}, found {}" \
                            .format(i, agent.status, (r, c), agent.direction, obs_agents_state[(r, c)][0])
                else:
                    assert np.isclose(obs_agents_state[(r, c)][0], -1), \
                        "agent {} in status {} at {} expected contain -1 found {}" \
                            .format(i, agent.status, (r, c), obs_agents_state[(r, c)][0])

        # test second channel of obs_agents_state: direction at other agents position
        for r in range(env.height):
            for c in range(env.width):
                has_agent = False
                for other_i, other_agent in enumerate(env.agents):
                    if i == other_i:
                        continue
                    if other_agent.status in [RailAgentStatus.ACTIVE, RailAgentStatus.DONE] and (
                            r, c) == other_agent.position:
                        assert np.isclose(obs_agents_state[(r, c)][1], other_agent.direction), \
                            "agent {} in status {} at {} should see other agent with direction {}, found = {}" \
                                .format(i, agent.status, (r, c), other_agent.direction, obs_agents_state[(r, c)][1])
                    has_agent = True
                if not has_agent:
                    assert np.isclose(obs_agents_state[(r, c)][1], -1), \
                        "agent {} in status {} at {} should see no other agent direction (-1), found = {}" \
                            .format(i, agent.status, (r, c), obs_agents_state[(r, c)][1])

        # test third and fourth channel of obs_agents_state: malfunction and speed of own or other agent in the grid
        for r in range(env.height):
            for c in range(env.width):
                has_agent = False
                for other_i, other_agent in enumerate(env.agents):
                    if other_agent.status in [RailAgentStatus.ACTIVE,
                                              RailAgentStatus.DONE] and other_agent.position == (r, c):
                        assert np.isclose(obs_agents_state[(r, c)][2], other_agent.malfunction_data['malfunction']), \
                            "agent {} in status {} at {} should see agent malfunction {}, found = {}" \
                                .format(i, agent.status, (r, c), other_agent.malfunction_data['malfunction'],
                                        obs_agents_state[(r, c)][2])
                        assert np.isclose(obs_agents_state[(r, c)][3], other_agent.speed_data['speed'])
                        has_agent = True
                if not has_agent:
                    assert np.isclose(obs_agents_state[(r, c)][2], -1), \
                        "agent {} in status {} at {} should see no agent malfunction (-1), found = {}" \
                            .format(i, agent.status, (r, c), obs_agents_state[(r, c)][2])
                    assert np.isclose(obs_agents_state[(r, c)][3], -1), \
                        "agent {} in status {} at {} should see no agent speed (-1), found = {}" \
                            .format(i, agent.status, (r, c), obs_agents_state[(r, c)][3])

        # test fifth channel of obs_agents_state: number of agents ready to depart in to this cell
        for r in range(env.height):
            for c in range(env.width):
                count = 0
                for other_i, other_agent in enumerate(env.agents):
                    if other_agent.status == RailAgentStatus.READY_TO_DEPART and other_agent.initial_position == (r, c):
                        count += 1
                assert np.isclose(obs_agents_state[(r, c)][4], count), \
                    "agent {} in status {} at {} should see {} agents ready to depart, found{}" \
                        .format(i, agent.status, (r, c), count, obs_agents_state[(r, c)][4])
| 61.354331 | 120 | 0.51694 |
4ed81cc27a82e442cd42a4597145398257609877 | 2,090 | py | Python | Plugins.Operators.py | islamamer666/Wikibooks_ETL_Pipeline | 5317cee7fe9e01638f78d348387ae9f20c4d5d4e | [
"MIT"
] | null | null | null | Plugins.Operators.py | islamamer666/Wikibooks_ETL_Pipeline | 5317cee7fe9e01638f78d348387ae9f20c4d5d4e | [
"MIT"
] | null | null | null | Plugins.Operators.py | islamamer666/Wikibooks_ETL_Pipeline | 5317cee7fe9e01638f78d348387ae9f20c4d5d4e | [
"MIT"
] | null | null | null | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataQualityOperator(BaseOperator):
    """Airflow operator that fails the DAG run when any target table is empty.

    :param redshift_conn_id: Airflow connection id of the Redshift cluster.
    :param tables: table names whose row counts are validated.
    :raises ValueError: when a table returns no records or a zero row count.
    """

    ui_color = '#89DA59'

    @apply_defaults
    def __init__(self,
                 redshift_conn_id="",
                 tables=None,
                 *args, **kwargs):
        super(DataQualityOperator, self).__init__(*args, **kwargs)
        self.redshift_conn_id = redshift_conn_id
        # Normalise None here instead of using a mutable default argument.
        self.tables = tables if tables is not None else []

    def execute(self, context=None):
        # Airflow calls execute(context); the original signature dropped it,
        # which raises TypeError when the scheduler runs the task.  The
        # default keeps direct execute() calls backward-compatible.
        redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        for table in self.tables:
            self.log.info("Starting data quality validation on table : {}".format(table))
            records = redshift_hook.get_records("select count(*) from {};".format(table))
            if len(records) < 1 or len(records[0]) < 1 or records[0][0] < 1:
                self.log.error("Data Quality validation failed for table : {}.".format(table))
                raise ValueError("Data Quality validation failed for table : {}".format(table))
            self.log.info("Data Quality Validation Passed on table : {}!!!".format(table))
class LoadAnalyticsOperator(BaseOperator):
    """Airflow operator that runs a list of analytics SQL statements on Redshift.

    :param redshift_conn_id: Airflow connection id of the Redshift cluster.
    :param sql_query: list of SQL statements to execute in order.
    """

    ui_color = '#80BD9E'

    @apply_defaults
    def __init__(self,
                 redshift_conn_id="",
                 sql_query=None,
                 *args, **kwargs):
        super(LoadAnalyticsOperator, self).__init__(*args, **kwargs)
        self.redshift_conn_id = redshift_conn_id
        # Normalise None here instead of using a mutable default argument.
        self.sql_query = sql_query if sql_query is not None else []

    def execute(self, context=None):
        # Airflow calls execute(context); accept it (see DataQualityOperator).
        redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        for query in self.sql_query:
            self.log.info("Running Analytics query : {}".format(query))
            # Bug fix: run the current statement, not the whole list — the
            # original passed self.sql_query here, re-running every query on
            # each loop iteration.
            redshift_hook.run(query)
            self.log.info("Query ran successfully!!")
| 37.321429 | 96 | 0.592344 |
d4f7c77584a95f4b02d2bc7ba246f27f37abd2fd | 1,849 | py | Python | anuvaad-etl/anuvaad-extractor/document-processor/word-detector/craft/src/utilities/request_parse.py | srihari-nagaraj/anuvaad | b09b01a033a033e97db6e404c088e0e6332053e4 | [
"MIT"
] | null | null | null | anuvaad-etl/anuvaad-extractor/document-processor/word-detector/craft/src/utilities/request_parse.py | srihari-nagaraj/anuvaad | b09b01a033a033e97db6e404c088e0e6332053e4 | [
"MIT"
] | null | null | null | anuvaad-etl/anuvaad-extractor/document-processor/word-detector/craft/src/utilities/request_parse.py | srihari-nagaraj/anuvaad | b09b01a033a033e97db6e404c088e0e6332053e4 | [
"MIT"
] | null | null | null | import src.utilities.app_context as app_context
from anuvaad_auditor.loghandler import log_exception
import copy
import json
import config
def log_error(method):
    """Decorator for accessors: log any failure and return None instead of raising.

    Typical failures are missing keys in the request payload; they are
    reported through the anuvaad auditor with the current application
    context attached.
    """
    from functools import wraps

    @wraps(method)  # preserve the wrapped accessor's name/docstring
    def wrapper(*args, **kwargs):
        try:
            return method(*args, **kwargs)
        except Exception as e:
            log_exception('Invalid request, required key missing of {}'.format(e), app_context.application_context, e)
            return None
    return wrapper
class File:
    """Read-only accessor for one input-file descriptor of a request.

    All getters are wrapped with ``log_error`` so a missing key logs the
    problem and yields None rather than raising.
    """

    def __init__(self, file):
        # `file` is the raw per-file dict from the request's inputs list.
        self.file = file

    @log_error
    def get_format(self):
        return self.file['file']['type']

    @log_error
    def get_name(self):
        return self.file['file']['name']

    @log_error
    def get_pages(self):
        return self.file['page_info']

    @log_error
    def get_words(self, page_index):
        return self.file['pages'][page_index]['words']

    @log_error
    def get_lines(self, page_index):
        return self.file['pages'][page_index]['lines']

    @log_error
    def get_regions(self, page_index):
        return self.file['pages'][page_index]['regions']

    @log_error
    def get_language(self):
        return self.file['config']['OCR']['language']

    @log_error
    def get_craft_config(self):
        # dict.get returns None when 'craft' is absent, matching the
        # original explicit membership test without the extra .keys() call.
        return self.file['config'].get('craft')

    @log_error
    def get_file(self):
        return self.file
def get_files(application_context):
    """Return a deep copy of the request's input file descriptors."""
    inputs = application_context['input']['inputs']
    return copy.deepcopy(inputs)
def get_languages(app_context):
    """Collect the OCR language of every input file in the request context."""
    return [
        File(descriptor).get_language()
        for descriptor in get_files(app_context.application_context)
    ]
| 22.82716 | 118 | 0.63656 |
d7b08861066df4b9ec7dbf353fe69d5399159682 | 4,950 | py | Python | src/kayako/objects/department.py | iXsystems/kayako-python-api-library | 5c43ae331904eac1a66301e2f40d29a4e52fd49d | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/kayako/objects/department.py | iXsystems/kayako-python-api-library | 5c43ae331904eac1a66301e2f40d29a4e52fd49d | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/kayako/objects/department.py | iXsystems/kayako-python-api-library | 5c43ae331904eac1a66301e2f40d29a4e52fd49d | [
"BSD-2-Clause-FreeBSD"
] | 8 | 2018-04-10T19:28:53.000Z | 2022-02-05T18:56:07.000Z | # -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2011, Evan Leis
#
# Distributed under the terms of the Lesser GNU General Public License (LGPL)
#-----------------------------------------------------------------------------
'''
Created on May 5, 2011
@author: evan
'''
from lxml import etree
from kayako.core.object import KayakoObject
__all__ = [
'Department',
]
class Department(KayakoObject):
    '''
    Kayako Department API Object.

    title                 The title of the department.
    module                The module the department should be associated with ('tickets' or 'livechat').
    type                  The accessibility level of the department ('public' or 'private').
    displayorder          A positive integer that the helpdesk will use to sort departments when displaying them (ascending).
    parentdepartmentid    A positive integer of the parent department for this department.
    uservisibilitycustom  1 or 0 boolean that controls whether or not to restrict visibility of this department to particular user groups (see usergroupid[]).
    usergroupid[]         A list of usergroup id's identifying the user groups to be assigned to this department.
    '''

    controller = '/Base/Department'

    __parameters__ = ['id', 'title', 'type', 'module', 'displayorder', 'parentdepartmentid', 'uservisibilitycustom', 'usergroupid']

    __required_add_parameters__ = ['title', 'module', 'type']
    __add_parameters__ = ['title', 'module', 'type', 'displayorder', 'parentdepartmentid', 'uservisibilitycustom', 'usergroupid']

    __required_save_parameters__ = ['title']
    __save_parameters__ = ['title', 'type', 'displayorder', 'parentdepartmentid', 'uservisibilitycustom', 'usergroupid']

    @classmethod
    def _parse_department(cls, department_tree):
        '''Build the constructor kwargs for a Department from a <department> element.'''
        usergroups_node = department_tree.find('usergroups')
        usergroups = []
        if usergroups_node is not None:
            usergroups = [cls._get_int(id_node) for id_node in usergroups_node.findall('id')]
        return dict(
            id=cls._get_int(department_tree.find('id')),
            title=cls._get_string(department_tree.find('title')),
            type=cls._get_string(department_tree.find('type')),
            module=cls._get_string(department_tree.find('module')),
            displayorder=cls._get_int(department_tree.find('displayorder')),
            parentdepartmentid=cls._get_int(department_tree.find('parentdepartmentid'), required=False),
            uservisibilitycustom=cls._get_boolean(department_tree.find('uservisibilitycustom')),
            usergroupid=usergroups,
        )

    def _update_from_response(self, department_tree):
        '''Sync this object's attributes from a server reply, touching only present nodes.'''
        usergroups_node = department_tree.find('usergroups')
        if usergroups_node is not None:
            self.usergroupid = [self._get_int(id_node) for id_node in usergroups_node.findall('id')]

        for attr in ('id', 'displayorder', 'parentdepartmentid'):
            node = department_tree.find(attr)
            if node is not None:
                setattr(self, attr, self._get_int(node, required=False))

        for attr in ('title', 'type', 'module'):
            node = department_tree.find(attr)
            if node is not None:
                setattr(self, attr, self._get_string(node))

        for attr in ('uservisibilitycustom',):
            node = department_tree.find(attr)
            if node is not None:
                setattr(self, attr, self._get_boolean(node, required=False))

    @classmethod
    def get_all(cls, api):
        '''Fetch every department from the helpdesk.'''
        tree = etree.parse(api._request(cls.controller, 'GET'))
        return [Department(api, **cls._parse_department(node)) for node in tree.findall('department')]

    @classmethod
    def get(cls, api, id):
        '''Fetch one department by id; return None when it does not exist.'''
        tree = etree.parse(api._request('%s/%s/' % (cls.controller, id), 'GET'))
        node = tree.find('department')
        if node is None:
            return None
        return Department(api, **cls._parse_department(node))

    def add(self):
        '''Create this department server-side and refresh generated fields from the reply.'''
        tree = etree.parse(self._add(self.controller))
        self._update_from_response(tree.find('department'))

    def save(self):
        '''Persist local changes and refresh this object from the reply.'''
        tree = etree.parse(self._save('%s/%s/' % (self.controller, self.id)))
        self._update_from_response(tree.find('department'))

    def delete(self):
        '''Remove this department from the helpdesk.'''
        self._delete('%s/%s/' % (self.controller, self.id))

    def __str__(self):
        return '<Department (%s): %s/%s>' % (self.id, self.title, self.module)
| 38.372093 | 158 | 0.62101 |
1bccdf3808bb1cc265b532f98f3a28ad8d3688b9 | 2,640 | py | Python | pfrock-plugins/pfrock-static-plugin/pfrock_static_plugin/handlers/files.py | knightliao/pfrock | 33587f11caeeccc11d0b8219b4e02df153905486 | [
"Apache-2.0"
] | 62 | 2016-02-24T10:47:17.000Z | 2019-04-27T01:36:56.000Z | pfrock-plugins/pfrock-static-plugin/pfrock_static_plugin/handlers/files.py | knightliao/pfrock | 33587f11caeeccc11d0b8219b4e02df153905486 | [
"Apache-2.0"
] | 1 | 2019-04-19T12:13:21.000Z | 2021-08-10T09:16:09.000Z | pfrock-plugins/pfrock-static-plugin/pfrock_static_plugin/handlers/files.py | knightliao/pfrock | 33587f11caeeccc11d0b8219b4e02df153905486 | [
"Apache-2.0"
] | 24 | 2016-03-01T14:59:29.000Z | 2019-09-02T08:12:00.000Z | #!/usr/bin/env python
# coding=utf8
from tornado.web import os
from pfrock_static_plugin.handlers import ROUTER_HEADER, ROUTER_STATIC_FILES, ROUTER_STATIC_FILE
from pfrock_static_plugin.handlers.file import FrockStaticFileHandler
from pfrock_static_plugin.handlers.utils.rule import VariableRuleParser
ROUTER_STATIC_FILE_RULE = 'rule'
class FrockStaticFilesHandler(FrockStaticFileHandler):
    """Static file handler that picks a file based on request-argument rules.

    Each configured rule pairs a variable expression (evaluated against the
    query arguments) with a file path; the first matching rule is served.
    """

    def post(self):
        return self.get()

    def delete(self):
        return self.get()

    def put(self):
        return self.get()

    def initialize(self, **kwargs):
        # Flatten the request arguments, keeping the last value of each key.
        argument_map = {}
        for argu in self.request.arguments:
            argument_map[argu] = self.request.arguments[argu][-1]

        has_found_rule = False
        target_file_path = None
        # Search for the first rule that matches the request arguments.
        cur_rule_files = kwargs.get(ROUTER_STATIC_FILES, [])
        for rule_file in cur_rule_files:
            rule = rule_file[ROUTER_STATIC_FILE_RULE]
            try:
                # An empty rule always matches.  Bug fix: the original used
                # 'rule is ""', an identity check that depends on string
                # interning; compare by value instead.
                if rule == "":
                    is_valid = True
                else:
                    variable_rule_parser = VariableRuleParser(rule, True, argument_map)
                    is_valid = variable_rule_parser.evaluate_variable()
                if is_valid is True:
                    has_found_rule = True
                    target_file_path = rule_file[ROUTER_STATIC_FILE]
                    break
            except Exception as e:
                # A broken rule is treated as "no match"; keep scanning and
                # fall through to 404 if nothing else matches.
                self.set_status(404)

        # Serve the matched file, or report 404 when no rule matched.
        if has_found_rule:
            super(FrockStaticFilesHandler, self).initialize(target_file_path, **kwargs)
        else:
            self.set_status(404)

    @staticmethod
    def get_handler(url, options):
        """Build a Tornado handler spec, keeping only rules whose file exists.

        Returns None when no rule points at an existing file.
        """
        valid_rule_files = []
        rule_files = options[ROUTER_STATIC_FILES] if ROUTER_STATIC_FILES in options else []
        for rule_file in rule_files:
            if ROUTER_STATIC_FILE_RULE not in rule_file:
                rule_file[ROUTER_STATIC_FILE_RULE] = ""
            if ROUTER_STATIC_FILE in rule_file:
                if os.path.exists(rule_file[ROUTER_STATIC_FILE]):
                    valid_rule_files.append(rule_file)

        # Optional extra response headers for the served file.
        headers = options[ROUTER_HEADER] if ROUTER_HEADER in options else {}

        if valid_rule_files:
            handler = (url, FrockStaticFilesHandler, {ROUTER_STATIC_FILES: valid_rule_files, ROUTER_HEADER: headers})
        else:
            handler = None
        return handler
| 32.195122 | 117 | 0.620076 |
35e659874251dd171b7e0ecd4850185aa5f20fa4 | 10,638 | py | Python | pronunciation.py | jmdict-kindle/jmdict-kindle | ec3acc655b51ee79743ee10d39ecbc93b61c0081 | [
"MIT"
] | 9 | 2022-01-23T02:09:14.000Z | 2022-03-19T03:28:47.000Z | pronunciation.py | jmdict-kindle/jmdict-kindle | ec3acc655b51ee79743ee10d39ecbc93b61c0081 | [
"MIT"
] | 6 | 2022-01-22T08:52:47.000Z | 2022-02-09T08:42:41.000Z | pronunciation.py | jmdict-kindle/jmdict-kindle | ec3acc655b51ee79743ee10d39ecbc93b61c0081 | [
"MIT"
] | 1 | 2022-03-04T17:23:14.000Z | 2022-03-04T17:23:14.000Z | import csv
from html import escape
import sys
# Hiragana characters, listed in the same order as `katakana` below so the
# two can be zipped into a one-to-one translation table.
hiragana = (
    "がぎぐげござじずぜぞだぢづでどばびぶべぼぱぴぷぺぽ"
    "あいうえおかきくけこさしすせそたちつてと"
    "なにぬねのはひふへほまみむめもやゆよらりるれろ"
    "わをんぁぃぅぇぉゃゅょっ"
)
katakana = (
    "ガギグゲゴザジズゼゾダヂヅデドバビブベボパピプペポ"
    "アイウエオカキクケコサシスセソタチツテト"
    "ナニヌネノハヒフヘホマミムメモヤユヨラリルレロ"
    "ワヲンァィゥェォャュョッ"
)
# str.translate needs ordinals as keys, so convert the hiragana side.
hiragana = [ord(char) for char in hiragana]
# Hiragana -> katakana mapping used to normalise readings before dict lookups.
translate_table = dict(zip(hiragana, katakana))

# Pitch states used by format_pronunciations' high/low state machine.
HIGH_STATE = 0
LOW_STATE = 1
class Pronunciation:
    """Pitch-accent lookup built from two accent databases.

    self.dict maps "kanjiexpr-KATAKANA_READING" to a dict with keys
    'nopronouncepos', 'nasalsoundpos', 'ac' (per-character accent string:
    0 = low, 1 = high, 2 = high followed by a downstep) and 'source'.
    """

    def __init__(self):
        self.dict = {}
        # Primary source: ACCDB already stores the per-character 'ac' string.
        with open("./pronunciation/ACCDB_unicode.csv", encoding="utf-8") as file:
            csv_reader = csv.DictReader(file, delimiter=",")
            for row in csv_reader:
                self.dict[
                    f"{row['kanjiexpr']}-{row['midashigo'].translate(translate_table)}"
                ] = {
                    "nopronouncepos": row["nopronouncepos"],
                    "nasalsoundpos": row["nasalsoundpos"],
                    "ac": row["ac"],
                    "source": "ACCDB_unicode.csv",
                }
        # Secondary source: accents.tsv stores an accent *position* per sound,
        # which has to be converted into the same 'ac' notation as above.
        with open("./pronunciation/accents.tsv", encoding="utf-8") as file:
            csv_reader = csv.DictReader(
                file, delimiter="\t", fieldnames=["kanjiexpr", "midashigo", "accent"]
            )
            for row in csv_reader:
                if row["midashigo"] == "":
                    row["midashigo"] = row[
                        "kanjiexpr"
                    ]  # if only kana then there is no midashigo
                accents = row["accent"].split(
                    ","
                )  # there can be multiple pronunciations per word
                # find best accent based on priority
                used_accent = ""
                priority = -1
                for accent in accents:
                    if (
                        "(" in accent
                    ):  # some accents contain info about when to use. Do not use elif since the same accen position can have multiple qualifiers
                        if "名" in accent and priority < 40:  # noun
                            priority = 40
                            used_accent = accent
                        if "副" in accent and priority < 35:  # adverb
                            priority = 35
                            used_accent = accent
                        if "代" in accent and priority < 30:  # pronoun
                            priority = 30
                            used_accent = accent
                        if "形動" in accent and priority < 10:  # adjectival noun
                            priority = 10
                            used_accent = accent
                        if "感" in accent and priority < 20:  # interjection
                            priority = 20
                            used_accent = accent
                        if not (
                            "名" in accent
                            or "副" in accent
                            or "代" in accent
                            or "形動" in accent
                            or "感" in accent
                        ):
                            raise Exception(f"unknown word type for: {accent}")
                    elif priority < 1:  # by default use first without qualifier
                        used_accent = accent
                        priority = 1
                if priority == -1:
                    raise Exception(
                        f"no accent selected for {row['kanjiexpr']} {row['midashigo']} {row['accent']}"
                    )
                # Turn accent position into notation used by ACCDB_unicode.csv
                accent_position_sound = int(
                    "".join((ch if ch in "0123456789" else "") for ch in used_accent)
                )  # get only number from string.
                falling_accent_position = accent_position_sound  # the accent position is per sound, but some sounds consist of two characters e.g. しゃ. That's why we need this distinction
                kana_count = len(row["midashigo"])
                if accent_position_sound > kana_count:
                    # Malformed entry: warn and skip it rather than crash.
                    sys.stdout.write(
                        f"the accent position for {row['midashigo']} is greater than the string length ({accent_position_sound})\n"
                    )
                    continue
                # The accent position in the file looks at a word per sound, however some sounds consist of two kana e.g. しゃ
                # correct for that here
                i = 0
                sound_changer = "ぁぃぅぇぉゃゅょァィゥェォャュョ"  # っ isn't in there by design as it is considered a separate sound by accents.tsv.
                while i < falling_accent_position - 1:
                    if row["midashigo"][i] in sound_changer:
                        falling_accent_position += 1
                    i += 1
                if kana_count > falling_accent_position:
                    if (
                        row["midashigo"][falling_accent_position] in sound_changer
                        or row["midashigo"][falling_accent_position] == "ー"
                    ):  # if position after falling_accent_position is as small kana. Not +1 because array starts at 0
                        falling_accent_position += 1
                ac = ""
                # create actual pronunciation string
                # check if the first sound consist of 2 characters
                first_sound_character_count = 1
                if kana_count > 1:
                    if row["midashigo"][1] in sound_changer:
                        first_sound_character_count = 2
                # leading zeroes do not have to be included but can
                # for more details on pitch accent see https://en.wikipedia.org/wiki/Japanese_pitch_accent
                if accent_position_sound == 0:
                    # Heiban: low first sound, then high with no downstep.
                    ac = "".join(
                        "0" for i in range(1, first_sound_character_count + 1)
                    ) + "".join(
                        "1"
                        for i in range(first_sound_character_count + 1, kana_count + 1)
                    )
                elif accent_position_sound == 1:
                    # Atamadaka: downstep right after the first sound.
                    ac = (
                        "".join("1" for i in range(1, first_sound_character_count))
                        + "2"
                        + "".join(
                            "0"
                            for i in range(
                                first_sound_character_count + 1, kana_count + 1
                            )
                        )
                    )
                else:
                    # Nakadaka/odaka: rise after the first sound, downstep at
                    # falling_accent_position.
                    ac = (
                        "".join("0" for i in range(1, first_sound_character_count + 1))
                        + "".join(
                            "1"
                            for i in range(
                                first_sound_character_count + 1, falling_accent_position
                            )
                        )
                        + "2"
                        + "".join(
                            "0"
                            for i in range(falling_accent_position + 1, kana_count + 1)
                        )
                    )
                translation = row["midashigo"].translate(translate_table)
                if (
                    f"{row['kanjiexpr']}-{translation}" not in self.dict
                ):  # prefer accents.tsv data
                    self.dict[f"{row['kanjiexpr']}-{translation}"] = {
                        "nopronouncepos": None,
                        "nasalsoundpos": None,
                        "ac": ac,
                        "source": "accents.tsv",
                    }
                else:
                    self.dict[f"{row['kanjiexpr']}-{translation}"]["ac"] = ac
                    self.dict[f"{row['kanjiexpr']}-{translation}"][
                        "source"
                    ] = "accents.tsv"

    def addPronunciation(self, entries):
        """Attach pitch-accent data to every reading of the given entries.

        Sets reading.pronunciation (dict or None) in place and returns the
        number of readings that received a pronunciation.
        """
        count = 0
        for entry in entries:
            for reading in entry.readings:
                # Pick the kanji key: reading restriction first, then the
                # entry's first kanji, finally the kana reading itself.
                if reading.re_restr != None:
                    kanji = reading.re_restr
                elif len(entry.kanjis) > 0:
                    kanji = entry.kanjis[0].keb
                else:
                    kanji = reading.reb
                key = f"{kanji}-{reading.reb.translate(translate_table)}"
                if key in self.dict:
                    reading.pronunciation = self.dict[key]
                    count += 1
                else:
                    reading.pronunciation = None
        return count
def format_pronunciations(reading):
    """Format an entry from the data in the original database to something that uses html.

    Walks the reading character by character, wrapping runs of high/low
    pitch in <span class="h">/<span class="l"> and inserting a downstep
    marker (ꜜ) plus nasal markers (°) where the accent data says so.
    Returns the plain escaped reading when no pronunciation is attached.
    """
    txt = reading.reb
    strlen = len(txt)
    if reading.pronunciation == None:
        return escape(txt, quote=False)
    acclen = len(reading.pronunciation["ac"])
    # Left-pad the accent string with '0' so it lines up with every character.
    accent = "0" * (strlen - acclen) + reading.pronunciation["ac"]

    # Get the nasal positions
    nasal = []
    if reading.pronunciation["nasalsoundpos"]:
        positions = reading.pronunciation["nasalsoundpos"].split("0")
        for p in positions:
            if p:
                nasal.append(int(p))
            if not p:
                # e.g. "20" would result in ['2', '']
                nasal[-1] = nasal[-1] * 10

    # Get the no pronounce positions
    nopron = []
    if reading.pronunciation["nopronouncepos"]:
        positions = reading.pronunciation["nopronouncepos"].split("0")
        for p in positions:
            if p:
                nopron.append(int(p))
            if not p:
                # e.g. "20" would result in ['2', '']
                nopron[-1] = nopron[-1] * 10

    outstr = ""

    # Open the first span according to the initial pitch.
    if int(accent[0]) > 0:
        state = HIGH_STATE
        outstr = outstr + '<span class="h">'
    else:
        state = LOW_STATE
        outstr = outstr + '<span class="l">'

    for i in range(strlen):
        a = int(accent[i])
        # Close/reopen spans when the pitch level changes.
        if state == HIGH_STATE:
            if a == 0:
                outstr = outstr + '</span><span class="l">'
                state = LOW_STATE
        else:
            if a > 0:
                outstr = outstr + '</span><span class="h">'
                state = HIGH_STATE
        outstr = outstr + escape(txt[i], quote=False)
        if (i + 1) in nopron:
            # outstr = outstr + "</span>" dont know what to do here
            outstr = outstr
        if (i + 1) in nasal:
            outstr = outstr + '<span class="nas">°</span>'
        if a == 2:
            # '2' marks a downstep: emit the marker and drop to low pitch.
            outstr = outstr + '</span><span class="l">ꜜ'
            state = LOW_STATE
    outstr = outstr + "</span>"
    return outstr
| 40.448669 | 187 | 0.461083 |
172ba22545f3be08c0129b9a8230506254b800cd | 4,034 | py | Python | json_to_yaml.py | ascdso2020/ascdso-devops-python-tools | a5cfe0579f7c52ac861c92044b3d7215af0b8918 | [
"MIT"
] | null | null | null | json_to_yaml.py | ascdso2020/ascdso-devops-python-tools | a5cfe0579f7c52ac861c92044b3d7215af0b8918 | [
"MIT"
] | null | null | null | json_to_yaml.py | ascdso2020/ascdso-devops-python-tools | a5cfe0579f7c52ac861c92044b3d7215af0b8918 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2019-12-19 17:54:21 +0000 (Thu, 19 Dec 2019)
#
# https://github.com/HariSekhon/DevOps-Python-tools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn and optionally send me feedback
# to help improve or steer this or other code I publish
#
# http://www.linkedin.com/in/harisekhon
#
"""
Tool to convert JSON to YAML
Reads any given files as JSON and prints the equivalent YAML to stdout for piping or redirecting to a file.
Directories if given are detected and recursed, processing all files in the directory tree ending in a .json suffix.
Works like a standard unix filter program - if no files are passed as arguments or '-' is passed then reads from
standard input.
Written to convert old AWS CloudFormation json templates to yaml
See also:
json2yaml.sh - https://github.com/HariSekhon/DevOps-Bash-tools
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#from __future__ import unicode_literals
import json
import os
import re
import sys
import yaml
libdir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'pylib'))
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon.utils import die, ERRORS, log, log_option
from harisekhon import CLI
except ImportError as _:
print('module import failed: %s' % _, file=sys.stderr)
print("Did you remember to build the project by running 'make'?", file=sys.stderr)
print("Alternatively perhaps you tried to copy this program out without it's adjacent libraries?", file=sys.stderr)
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.2.0'
class JsonToYaml(CLI):
    """CLI tool that converts JSON input (files, directories or stdin) to YAML."""

    def __init__(self):
        # Python 2.x
        super(JsonToYaml, self).__init__()
        # Python 3.x
        # super().__init__()
        # Used to select *.json files when recursing into directories.
        self.re_json_suffix = re.compile(r'.*\.json$', re.I)

    @staticmethod
    def json_to_yaml(content, filepath=None):
        """Parse `content` as JSON and return its YAML representation.

        Dies with an error message (mentioning `filepath` when given) if
        the content is not valid JSON.
        """
        try:
            parsed = json.loads(content)
        except (KeyError, ValueError) as exc:
            file_detail = ''
            if filepath is not None:
                file_detail = ' in file \'{0}\''.format(filepath)
            die("Failed to parse JSON{0}: {1}".format(file_detail, exc))
        return yaml.safe_dump(parsed)

    def run(self):
        """Validate all arguments first, then convert each one."""
        if not self.args:
            self.args.append('-')
        for path in self.args:
            if path == '-':
                continue
            if not os.path.exists(path):
                print("'%s' not found" % path)
                sys.exit(ERRORS['WARNING'])
            if os.path.isfile(path):
                log_option('file', path)
            elif os.path.isdir(path):
                log_option('directory', path)
            else:
                die("path '%s' could not be determined as either a file or directory" % path)
        for path in self.args:
            self.process_path(path)

    def process_path(self, path):
        """Convert a single file/stdin, or recurse into a directory for *.json files."""
        if path == '-' or os.path.isfile(path):
            self.process_file(path)
        elif os.path.isdir(path):
            for root, _dirs, filenames in os.walk(path):
                for filename in filenames:
                    filepath = os.path.join(root, filename)
                    if self.re_json_suffix.match(filepath):
                        self.process_file(filepath)
        else:
            die("failed to determine if path '%s' is a file or directory" % path)

    def process_file(self, filepath):
        """Read one JSON source (file or stdin) and print its YAML to stdout."""
        log.debug('processing filepath \'%s\'', filepath)
        if filepath == '-':
            filepath = '<STDIN>'
        if filepath == '<STDIN>':
            print(self.json_to_yaml(sys.stdin.read()))
        else:
            with open(filepath) as file_handle:
                content = file_handle.read()
            print('---')
            print(self.json_to_yaml(content, filepath=filepath))
if __name__ == '__main__':
    # Entry point: main() presumably parses options then calls run() --
    # behavior defined by the harisekhon CLI base class.
    JsonToYaml().main()
| 31.515625 | 119 | 0.618741 |
eb92c900fc9829c9316c06788ff01e987508921b | 7,776 | py | Python | models/modules/spade_modules/base_spade_distiller_modules.py | NguyenHoangAn0511/gan-compression | 6512c067d4adebc7451635991418b54ab76dd711 | [
"BSD-3-Clause"
] | 1,005 | 2020-03-20T04:13:59.000Z | 2022-03-30T01:16:45.000Z | models/modules/spade_modules/base_spade_distiller_modules.py | shadow2496/gan-compression | 9f3a2b51bedca040cc7d31c60ca71a77138f2c81 | [
"BSD-3-Clause"
] | 94 | 2020-03-20T08:36:57.000Z | 2022-03-12T00:20:02.000Z | models/modules/spade_modules/base_spade_distiller_modules.py | shadow2496/gan-compression | 9f3a2b51bedca040cc7d31c60ca71a77138f2c81 | [
"BSD-3-Clause"
] | 147 | 2020-03-20T04:49:35.000Z | 2022-03-23T10:44:25.000Z | import copy
import os
import torch
from torch import nn
from models import networks
from models.modules.loss import GANLoss, VGGLoss
from models.modules.spade_modules.spade_model_modules import SPADEModelModules
from models.modules.super_modules import SuperConv2d
from utils import util
class BaseSPADEDistillerModules(SPADEModelModules):
    """Training-only module bundle for distilling a SPADE teacher generator
    into a smaller student generator, with a shared discriminator and 1x1
    adaptor convolutions at selected intermediate layers."""

    def create_option(self, role):
        """Deep-copy self.opt with ngf/norm_G overridden for the given role
        ('teacher', 'student' or 'pretrained')."""
        assert role in ['teacher', 'student', 'pretrained']
        opt = copy.deepcopy(self.opt)
        opt.ngf = getattr(self.opt, '%s_ngf' % role)
        opt.norm_G = getattr(self.opt, '%s_norm_G' % role)
        return opt

    def __init__(self, opt):
        assert opt.isTrain
        opt = copy.deepcopy(opt)
        # These modules target a single device; keep only the first GPU id.
        if len(opt.gpu_ids) > 0:
            opt.gpu_ids = opt.gpu_ids[:1]
        self.gpu_ids = opt.gpu_ids
        # NOTE(review): super(SPADEModelModules, self) deliberately skips
        # SPADEModelModules.__init__ and invokes its base class instead --
        # all networks are constructed right here. Confirm before changing.
        super(SPADEModelModules, self).__init__()
        self.opt = opt
        self.model_names = ['G_student', 'G_teacher', 'D']
        teacher_opt = self.create_option('teacher')
        self.netG_teacher = networks.define_G(opt.teacher_netG, gpu_ids=self.gpu_ids, opt=teacher_opt)
        student_opt = self.create_option('student')
        self.netG_student = networks.define_G(opt.student_netG, init_type=opt.init_type,
                                              init_gain=opt.init_gain, gpu_ids=self.gpu_ids, opt=student_opt)
        # Only built when opt carries a 'distiller' attribute.
        if hasattr(opt, 'distiller'):
            pretrained_opt = self.create_option('pretrained')
            self.netG_pretrained = networks.define_G(opt.pretrained_netG, gpu_ids=self.gpu_ids, opt=pretrained_opt)
        self.netD = networks.define_D(opt.netD, init_type=opt.init_type,
                                      init_gain=opt.init_gain, gpu_ids=self.gpu_ids, opt=opt)
        # Layers at which student activations are matched to the teacher's,
        # via 1x1 adaptor convs (netAs) that fix up channel counts.
        self.mapping_layers = ['head_0', 'G_middle_1', 'up_1']
        self.netAs = nn.ModuleList()
        for i, mapping_layer in enumerate(self.mapping_layers):
            # 'up_1' has 4x ngf channels; the earlier layers have 16x ngf.
            if mapping_layer != 'up_1':
                fs, ft = opt.student_ngf * 16, opt.teacher_ngf * 16
            else:
                fs, ft = opt.student_ngf * 4, opt.teacher_ngf * 4
            if hasattr(opt, 'distiller'):
                netA = nn.Conv2d(in_channels=fs, out_channels=ft, kernel_size=1)
            else:
                # Supernet variant: adaptor must support variable student widths.
                netA = SuperConv2d(in_channels=fs, out_channels=ft, kernel_size=1)
            networks.init_net(netA, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netAs.append(netA)
        self.criterionGAN = GANLoss(opt.gan_mode)
        self.criterionFeat = nn.L1Loss()
        self.criterionVGG = VGGLoss()
        self.optimizers = []
        # Teacher is frozen in eval mode; only student/adaptors/D are trained.
        self.netG_teacher.eval()
        self.config = None

    def create_optimizers(self):
        """Build (optimizer_G, optimizer_D); uses TTUR unless opt.no_TTUR."""
        if self.opt.no_TTUR:
            beta1, beta2 = self.opt.beta1, self.opt.beta2
            G_lr, D_lr = self.opt.lr, self.opt.lr
        else:
            # Two time-scale update rule: slower generator, faster discriminator.
            beta1, beta2 = 0, 0.9
            G_lr, D_lr = self.opt.lr / 2, self.opt.lr * 2
        # Adaptor convs are optimized jointly with the student generator.
        G_params = list(self.netG_student.parameters())
        for netA in self.netAs:
            G_params += list(netA.parameters())
        optimizer_G = torch.optim.Adam(G_params, lr=G_lr, betas=(beta1, beta2))
        optimizer_D = torch.optim.Adam(list(self.netD.parameters()), lr=D_lr, betas=(beta1, beta2))
        return optimizer_G, optimizer_D

    def forward(self, input_semantics, real_B=None, mode='generate_fake'):
        """Dispatch on *mode*:
        'generate_fake' -> (teacher_output, student_output), no grads;
        'G_loss'/'D_loss' -> loss dicts (real_B required);
        'calibrate' -> one grad-free student pass, returns None."""
        # Propagate the currently selected student sub-network config, if any.
        if self.config is not None:
            self.netG_student.config = self.config
        if mode == 'generate_fake':
            with torch.no_grad():
                Tfake_B = self.netG_teacher(input_semantics)
                Sfake_B = self.netG_student(input_semantics)
            return Tfake_B, Sfake_B
        elif mode == 'G_loss':
            assert real_B is not None
            return self.compute_G_loss(input_semantics, real_B)
        elif mode == 'D_loss':
            assert real_B is not None
            return self.compute_D_loss(input_semantics, real_B)
        elif mode == 'calibrate':
            # Forward pass only -- presumably to refresh internal statistics;
            # confirm against the caller of 'calibrate'.
            with torch.no_grad():
                self.netG_student(input_semantics)
            return
        else:
            raise NotImplementedError('Unknown forward mode [%s]!!!' % mode)

    def profile(self, input_semantics, config=None):
        # Profiling is intentionally unsupported on the training-only modules.
        raise NotImplementedError('The distiller is only for training!!!')

    def calc_distill_loss(self, Tacts, Sacts):
        """Subclass hook: distillation loss over the (teacher, student)
        activations collected at self.mapping_layers. Must return
        (total_loss, losses_dict)."""
        raise NotImplementedError

    def compute_G_loss(self, input_semantics, real_B):
        """Generator-side losses: GAN + distillation + feature matching + VGG."""
        # Teacher runs without grads; student runs with grads for backprop.
        with torch.no_grad():
            Tfake_B, Tacts = self.netG_teacher(input_semantics, mapping_layers=self.mapping_layers)
        Sfake_B, Sacts = self.netG_student(input_semantics, mapping_layers=self.mapping_layers)
        loss_G_distill, losses = self.calc_distill_loss(Tacts, Sacts)
        pred_fake, pred_real = self.discriminate(input_semantics, Sfake_B, real_B)
        loss_G_gan = self.criterionGAN(pred_fake, True, for_discriminator=False) * self.opt.lambda_gan
        num_D = len(pred_fake)
        loss_G_feat = 0
        for i in range(num_D):
            # Skips the last element per discriminator -- presumably the final
            # prediction, with the rest being intermediate feature maps.
            num_intermediate_outputs = len(pred_fake[i]) - 1
            for j in range(num_intermediate_outputs):  # for each layer output
                unweighted_loss = self.criterionFeat(
                    pred_fake[i][j], pred_real[i][j].detach())
                loss_G_feat += unweighted_loss * self.opt.lambda_feat / num_D
        loss_G_vgg = self.criterionVGG(Sfake_B, real_B) * self.opt.lambda_vgg
        loss_G = loss_G_gan + loss_G_distill + loss_G_feat + loss_G_vgg
        losses.update({'loss_G': loss_G, 'G_gan': loss_G_gan,
                       'G_distill': loss_G_distill,
                       'G_feat': loss_G_feat, 'G_vgg': loss_G_vgg})
        return losses

    def compute_D_loss(self, input_semantics, real_B):
        """Discriminator loss on detached student fakes vs. real images."""
        with torch.no_grad():
            # No grads here so D updates never reach the student generator.
            fake_B = self.netG_student(input_semantics)
        pred_fake, pred_real = self.discriminate(input_semantics, fake_B, real_B)
        loss_D_fake = self.criterionGAN(pred_fake, False, for_discriminator=True)
        loss_D_real = self.criterionGAN(pred_real, True, for_discriminator=True)
        loss_D = loss_D_fake + loss_D_real
        losses = {'loss_D': loss_D, 'D_fake': loss_D_fake, 'D_real': loss_D_real}
        return losses

    def load_networks(self, verbose=True):
        """Restore teacher (required) and, when paths are set, student, D and
        the per-layer adaptor convs."""
        util.load_network(self.netG_teacher, self.opt.restore_teacher_G_path, verbose)
        if self.opt.restore_student_G_path is not None:
            util.load_network(self.netG_student, self.opt.restore_student_G_path, verbose)
        if self.opt.restore_D_path is not None:
            util.load_network(self.netD, self.opt.restore_D_path, verbose)
        if self.opt.restore_A_path is not None:
            # Adaptors are saved individually as <prefix>-<index>.pth.
            for i, netA in enumerate(self.netAs):
                path = '%s-%d.pth' % (self.opt.restore_A_path, i)
                util.load_network(netA, path, verbose)

    def save_networks(self, epoch, save_dir):
        """Save student G, D and every adaptor conv under *save_dir*,
        prefixing filenames with *epoch*."""
        def save_net(net, save_path):
            # State dicts are saved from CPU; the net is moved back to GPU
            # afterwards when CUDA is in use.
            if len(self.gpu_ids) > 0 and torch.cuda.is_available():
                torch.save(net.cpu().state_dict(), save_path)
                net.cuda(self.gpu_ids[0])
            else:
                torch.save(net.cpu().state_dict(), save_path)

        save_filename = '%s_net_%s.pth' % (epoch, 'G')
        save_path = os.path.join(save_dir, save_filename)
        net = getattr(self, 'net%s_student' % 'G')
        save_net(net, save_path)

        save_filename = '%s_net_%s.pth' % (epoch, 'D')
        save_path = os.path.join(save_dir, save_filename)
        net = getattr(self, 'net%s' % 'D')
        save_net(net, save_path)

        for i, net in enumerate(self.netAs):
            save_filename = '%s_net_%s-%d.pth' % (epoch, 'A', i)
            save_path = os.path.join(save_dir, save_filename)
            save_net(net, save_path)
| 46.285714 | 115 | 0.631044 |
c928426a5aad118124ad03b82ebfaa70844cd770 | 126 | py | Python | tests/ocd_backend/__init__.py | ngi-nix/poliscoops | 491d12f83a44afbb4f1ee525b29ae70dc564e0f7 | [
"CC-BY-4.0"
] | 6 | 2020-04-08T08:23:07.000Z | 2021-12-05T09:56:14.000Z | tests/ocd_backend/__init__.py | ngi-nix/poliscoops | 491d12f83a44afbb4f1ee525b29ae70dc564e0f7 | [
"CC-BY-4.0"
] | 65 | 2020-04-07T08:16:31.000Z | 2022-02-19T00:18:24.000Z | tests/ocd_backend/__init__.py | openstate/coronalert | 9aa24cc0ea75b85e9bda0cfcd6ff592a2c61c95e | [
"CC-BY-4.0"
] | 1 | 2021-08-03T09:49:14.000Z | 2021-08-03T09:49:14.000Z | # from .extractors import *
# from .items import *
# from .transformers import *
# from .loaders import *
from .misc import *
| 21 | 29 | 0.698413 |
3d4a70338b6096fbcf2d0026913847cd72c3bcdb | 3,905 | gyp | Python | chromium/third_party/WebKit/Source/wtf/wtf_tests.gyp | wedataintelligence/vivaldi-source | 22a46f2c969f6a0b7ca239a05575d1ea2738768c | [
"BSD-3-Clause"
] | null | null | null | chromium/third_party/WebKit/Source/wtf/wtf_tests.gyp | wedataintelligence/vivaldi-source | 22a46f2c969f6a0b7ca239a05575d1ea2738768c | [
"BSD-3-Clause"
] | null | null | null | chromium/third_party/WebKit/Source/wtf/wtf_tests.gyp | wedataintelligence/vivaldi-source | 22a46f2c969f6a0b7ca239a05575d1ea2738768c | [
"BSD-3-Clause"
] | null | null | null | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
  'includes': [
    '../build/win/precompile.gypi',
    'wtf.gypi',
  ],
  'targets': [
    # Main WTF unit-test executable (built as a shared library on Android).
    {
      'target_name': 'wtf_unittests',
      'type': 'executable',
      'dependencies': [
        'wtf_unittest_helpers',
        'wtf.gyp:wtf',
        '../config.gyp:unittest_config',
        '<(DEPTH)/base/base.gyp:test_support_base',
      ],
      'sources': [
        'testing/RunAllTests.cpp',
        '<@(wtf_unittest_files)',
      ],
      # Disable c4267 warnings until we fix size_t to int truncations.
      'msvs_disabled_warnings': [4127, 4510, 4512, 4610, 4706, 4068, 4267],
      'conditions': [
        # Link the allocator on desktop POSIX when one is configured.
        ['os_posix==1 and OS!="mac" and OS!="android" and OS!="ios" and use_allocator!="none"', {
          'dependencies': [
            '<(DEPTH)/base/base.gyp:base',
            '<(DEPTH)/base/allocator/allocator.gyp:allocator',
          ],
        }],
        ['OS=="android"', {
          'type': 'shared_library',
          'dependencies': [
            '<(DEPTH)/testing/android/native_test.gyp:native_test_native_code',
            '<(DEPTH)/tools/android/forwarder2/forwarder.gyp:forwarder2',
          ],
        }],
      ]
    },
    # Helper library shared by the tests above.
    {
      'target_name': 'wtf_unittest_helpers',
      'type': '<(component)',
      'include_dirs': [
        '..',
      ],
      'dependencies': [
        'wtf.gyp:wtf',
      ],
      'defines': [
        'WTF_UNITTEST_HELPERS_IMPLEMENTATION=1',
      ],
      'sources': [
        '<@(wtf_unittest_helper_files)',
      ],
    },
  ],
  'conditions': [
    # APK wrapper target used when tests are built as an Android library.
    ['OS=="android" and gtest_target_type=="shared_library"', {
      'targets': [{
        'target_name': 'wtf_unittests_apk',
        'type': 'none',
        'dependencies': [
          '<(DEPTH)/base/base.gyp:base_java',
          '<(DEPTH)/net/net.gyp:net_java',
          'wtf_unittests',
        ],
        'variables': {
          'test_suite_name': 'wtf_unittests',
        },
        'includes': [ '../../../../build/apk_test.gypi' ],
      }],
    }],
    # Isolate (swarming) runner target.
    ['test_isolation_mode != "noop"', {
      'targets': [
        {
          'target_name': 'wtf_unittests_run',
          'type': 'none',
          'dependencies': [
            'wtf_unittests',
          ],
          'includes': [
            '../../../../build/isolate.gypi',
          ],
          'sources': [
            'wtf_unittests.isolate',
          ],
        },
      ],
    }],
  ],
}
| 33.376068 | 97 | 0.596927 |
4942995ade816d865252aee3699bd61f4c14f725 | 15,546 | py | Python | tests/hazmat/primitives/utils.py | thatch/cryptography | 0bdcf3d62bcb2051f82a39daf46cc67e43a8f465 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/hazmat/primitives/utils.py | thatch/cryptography | 0bdcf3d62bcb2051f82a39daf46cc67e43a8f465 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/hazmat/primitives/utils.py | thatch/cryptography | 0bdcf3d62bcb2051f82a39daf46cc67e43a8f465 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import binascii
import itertools
import os
import pytest
from cryptography_patched.exceptions import (
AlreadyFinalized, AlreadyUpdated, InvalidSignature, InvalidTag,
NotYetFinalized
)
from cryptography_patched.hazmat.primitives import hashes, hmac
from cryptography_patched.hazmat.primitives.asymmetric import rsa
from cryptography_patched.hazmat.primitives.ciphers import Cipher
from cryptography_patched.hazmat.primitives.kdf.hkdf import HKDF, HKDFExpand
from cryptography_patched.hazmat.primitives.kdf.kbkdf import (
CounterLocation, KBKDFHMAC, Mode
)
from cryptography_patched.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from ...utils import load_vectors_from_file
def _load_all_params(path, file_names, param_loader):
    """Load and concatenate the test vectors from every file in *file_names*."""
    return [
        vector
        for name in file_names
        for vector in load_vectors_from_file(os.path.join(path, name), param_loader)
    ]
def generate_encrypt_test(param_loader, path, file_names, cipher_factory,
                          mode_factory):
    """Build a parametrized pytest method running encrypt_test per vector."""
    vectors = _load_all_params(path, file_names, param_loader)

    def test_encryption(self, backend, params):
        encrypt_test(backend, cipher_factory, mode_factory, params)

    return pytest.mark.parametrize("params", vectors)(test_encryption)
def encrypt_test(backend, cipher_factory, mode_factory, params):
    """Round-trip one encryption vector through the given backend."""
    assert backend.cipher_supported(
        cipher_factory(**params), mode_factory(**params)
    )
    pt_bytes = binascii.unhexlify(params["plaintext"])
    ct_bytes = binascii.unhexlify(params["ciphertext"])
    cipher = Cipher(
        cipher_factory(**params), mode_factory(**params), backend=backend
    )

    encryptor = cipher.encryptor()
    assert encryptor.update(pt_bytes) + encryptor.finalize() == ct_bytes

    decryptor = cipher.decryptor()
    assert decryptor.update(ct_bytes) + decryptor.finalize() == pt_bytes
def generate_aead_test(param_loader, path, file_names, cipher_factory,
                       mode_factory):
    """Build a parametrized pytest method running aead_test per vector."""
    vectors = _load_all_params(path, file_names, param_loader)

    def test_aead(self, backend, params):
        aead_test(backend, cipher_factory, mode_factory, params)

    return pytest.mark.parametrize("params", vectors)(test_aead)
def aead_test(backend, cipher_factory, mode_factory, params):
    """Run one AEAD vector.

    Vectors flagged ``fail: True`` must raise InvalidTag on decryption;
    otherwise the vector is encrypted (checking the produced tag) and then
    decrypted back to the plaintext.
    """
    # NOTE(review): plaintext stays unbound when "pt" is absent; the else
    # branch below assumes passing vectors always carry "pt" -- confirm
    # against the vector loaders.
    if params.get("pt") is not None:
        plaintext = params["pt"]
    ciphertext = params["ct"]
    aad = params["aad"]
    if params.get("fail") is True:
        # Negative vector: the tag must not authenticate.
        cipher = Cipher(
            cipher_factory(binascii.unhexlify(params["key"])),
            mode_factory(binascii.unhexlify(params["iv"]),
                         binascii.unhexlify(params["tag"]),
                         len(binascii.unhexlify(params["tag"]))),
            backend
        )
        decryptor = cipher.decryptor()
        decryptor.authenticate_additional_data(binascii.unhexlify(aad))
        actual_plaintext = decryptor.update(binascii.unhexlify(ciphertext))
        with pytest.raises(InvalidTag):
            decryptor.finalize()
    else:
        # Positive vector: encrypt and compare the (possibly truncated) tag.
        cipher = Cipher(
            cipher_factory(binascii.unhexlify(params["key"])),
            mode_factory(binascii.unhexlify(params["iv"]), None),
            backend
        )
        encryptor = cipher.encryptor()
        encryptor.authenticate_additional_data(binascii.unhexlify(aad))
        actual_ciphertext = encryptor.update(binascii.unhexlify(plaintext))
        actual_ciphertext += encryptor.finalize()
        tag_len = len(binascii.unhexlify(params["tag"]))
        assert binascii.hexlify(encryptor.tag[:tag_len]) == params["tag"]
        # Then decrypt with the expected tag and verify the round trip.
        cipher = Cipher(
            cipher_factory(binascii.unhexlify(params["key"])),
            mode_factory(binascii.unhexlify(params["iv"]),
                         binascii.unhexlify(params["tag"]),
                         min_tag_length=tag_len),
            backend
        )
        decryptor = cipher.decryptor()
        decryptor.authenticate_additional_data(binascii.unhexlify(aad))
        actual_plaintext = decryptor.update(binascii.unhexlify(ciphertext))
        actual_plaintext += decryptor.finalize()
        assert actual_plaintext == binascii.unhexlify(plaintext)
def generate_stream_encryption_test(param_loader, path, file_names,
                                    cipher_factory):
    """Build a parametrized pytest method for stream-cipher vectors."""
    vectors = _load_all_params(path, file_names, param_loader)

    def test_stream_encryption(self, backend, params):
        stream_encryption_test(backend, cipher_factory, params)

    return pytest.mark.parametrize("params", vectors)(test_stream_encryption)
def stream_encryption_test(backend, cipher_factory, params):
    """Verify one stream-cipher vector, honoring its keystream offset."""
    pt_bytes = binascii.unhexlify(params["plaintext"])
    ct_bytes = binascii.unhexlify(params["ciphertext"])
    offset = int(params["offset"])
    cipher = Cipher(cipher_factory(**params), None, backend=backend)

    encryptor = cipher.encryptor()
    # Discard `offset` bytes of keystream before the real input.
    encryptor.update(b"\x00" * offset)
    assert encryptor.update(pt_bytes) + encryptor.finalize() == ct_bytes

    decryptor = cipher.decryptor()
    decryptor.update(b"\x00" * offset)
    assert decryptor.update(ct_bytes) + decryptor.finalize() == pt_bytes
def generate_hash_test(param_loader, path, file_names, hash_cls):
    """Build a parametrized pytest method running hash_test per vector."""
    vectors = _load_all_params(path, file_names, param_loader)

    def test_hash(self, backend, params):
        hash_test(backend, hash_cls, params)

    return pytest.mark.parametrize("params", vectors)(test_hash)
def hash_test(backend, algorithm, params):
    """Check a single digest vector: hash(msg) must equal md."""
    msg, md = params
    digest = hashes.Hash(algorithm, backend=backend)
    digest.update(binascii.unhexlify(msg))
    expected = binascii.unhexlify(md.replace(" ", "").lower().encode("ascii"))
    assert digest.finalize() == expected
def generate_base_hash_test(algorithm, digest_size):
    """Return a test method asserting basic Hash behavior for *algorithm*."""
    def test_base_hash(self, backend):
        base_hash_test(backend, algorithm, digest_size)
    return test_base_hash
def base_hash_test(backend, algorithm, digest_size):
    """Exercise digest_size, copy independence and incremental updates."""
    m = hashes.Hash(algorithm, backend=backend)
    assert m.algorithm.digest_size == digest_size
    m_copy = m.copy()
    # A copy must be a distinct object with its own backend context.
    assert m != m_copy
    assert m._ctx != m_copy._ctx

    # Updating a copy and the original identically yields identical digests.
    m.update(b"abc")
    copy = m.copy()
    copy.update(b"123")
    m.update(b"123")
    assert copy.finalize() == m.finalize()
def generate_base_hmac_test(hash_cls):
    """Return a test method asserting basic HMAC behavior for *hash_cls*."""
    def test_base_hmac(self, backend):
        base_hmac_test(backend, hash_cls)
    return test_base_hmac
def base_hmac_test(backend, algorithm):
    """An HMAC copy must be a distinct object with its own backend context."""
    key = b"ab"
    h = hmac.HMAC(binascii.unhexlify(key), algorithm, backend=backend)
    h_copy = h.copy()
    assert h != h_copy
    assert h._ctx != h_copy._ctx
def generate_hmac_test(param_loader, path, file_names, algorithm):
    """Build a parametrized pytest method running hmac_test per vector."""
    vectors = _load_all_params(path, file_names, param_loader)

    def test_hmac(self, backend, params):
        hmac_test(backend, algorithm, params)

    return pytest.mark.parametrize("params", vectors)(test_hmac)
def hmac_test(backend, algorithm, params):
    """Check a single HMAC vector: HMAC(key, msg) must equal md."""
    msg, md, key = params
    mac = hmac.HMAC(binascii.unhexlify(key), algorithm, backend=backend)
    mac.update(binascii.unhexlify(msg))
    assert mac.finalize() == binascii.unhexlify(md.encode("ascii"))
def generate_pbkdf2_test(param_loader, path, file_names, algorithm):
    """Build a parametrized pytest method running pbkdf2_test per vector."""
    vectors = _load_all_params(path, file_names, param_loader)

    def test_pbkdf2(self, backend, params):
        pbkdf2_test(backend, algorithm, params)

    return pytest.mark.parametrize("params", vectors)(test_pbkdf2)
def pbkdf2_test(backend, algorithm, params):
    """Derive a key with PBKDF2-HMAC and compare against the vector."""
    # NOTE(review): password/salt from the vector files may contain literal
    # "\0" escape sequences; they are passed through here exactly as loaded.
    # Confirm whether the loader performs any required unescaping.
    kdf = PBKDF2HMAC(
        algorithm,
        int(params["length"]),
        params["salt"],
        int(params["iterations"]),
        backend
    )
    derived_key = kdf.derive(params["password"])
    assert binascii.hexlify(derived_key) == params["derived_key"]
def generate_aead_exception_test(cipher_factory, mode_factory):
    """Return a test method exercising AEAD state-machine exceptions."""
    def test_aead_exception(self, backend):
        aead_exception_test(backend, cipher_factory, mode_factory)
    return test_aead_exception
def aead_exception_test(backend, cipher_factory, mode_factory):
    """Check the AEAD context state machine raises in the right order:
    tag before finalize, AAD after update, and any use after finalize."""
    cipher = Cipher(
        cipher_factory(binascii.unhexlify(b"0" * 32)),
        mode_factory(binascii.unhexlify(b"0" * 24)),
        backend
    )
    encryptor = cipher.encryptor()
    encryptor.update(b"a" * 16)
    # Tag is only available once finalize() has run.
    with pytest.raises(NotYetFinalized):
        encryptor.tag
    # AAD must be supplied before any plaintext update.
    with pytest.raises(AlreadyUpdated):
        encryptor.authenticate_additional_data(b"b" * 16)
    encryptor.finalize()
    # After finalize, the context is dead for all operations.
    with pytest.raises(AlreadyFinalized):
        encryptor.authenticate_additional_data(b"b" * 16)
    with pytest.raises(AlreadyFinalized):
        encryptor.update(b"b" * 16)
    with pytest.raises(AlreadyFinalized):
        encryptor.finalize()
    cipher = Cipher(
        cipher_factory(binascii.unhexlify(b"0" * 32)),
        mode_factory(binascii.unhexlify(b"0" * 24), b"0" * 16),
        backend
    )
    decryptor = cipher.decryptor()
    decryptor.update(b"a" * 16)
    # Decryption contexts never expose a tag attribute.
    with pytest.raises(AttributeError):
        decryptor.tag
def generate_aead_tag_exception_test(cipher_factory, mode_factory):
    """Return a test method exercising AEAD tag-validation exceptions."""
    def test_aead_tag_exception(self, backend):
        aead_tag_exception_test(backend, cipher_factory, mode_factory)
    return test_aead_tag_exception
def aead_tag_exception_test(backend, cipher_factory, mode_factory):
    """Check tag-length validation: bad tags are rejected by the mode and
    encrypting with a tag already supplied is rejected by the cipher."""
    # NOTE(review): this first Cipher is reassigned before use below -- it
    # appears to exist only to exercise construction; confirm whether it can
    # simply be removed.
    cipher = Cipher(
        cipher_factory(binascii.unhexlify(b"0" * 32)),
        mode_factory(binascii.unhexlify(b"0" * 24)),
        backend
    )
    # A 3-byte tag is too short.
    with pytest.raises(ValueError):
        mode_factory(binascii.unhexlify(b"0" * 24), b"000")
    # A tag shorter than the declared minimum length is rejected too.
    with pytest.raises(ValueError):
        mode_factory(binascii.unhexlify(b"0" * 24), b"000000", 2)
    cipher = Cipher(
        cipher_factory(binascii.unhexlify(b"0" * 32)),
        mode_factory(binascii.unhexlify(b"0" * 24), b"0" * 16),
        backend
    )
    # Encryption must not be allowed when a tag was provided up front.
    with pytest.raises(ValueError):
        cipher.encryptor()
def hkdf_derive_test(backend, algorithm, params):
    """Run the full HKDF derive step (extract + expand) on one vector."""
    salt = binascii.unhexlify(params["salt"]) or None
    info = binascii.unhexlify(params["info"]) or None
    hkdf = HKDF(algorithm, int(params["l"]), salt=salt, info=info,
                backend=backend)
    okm = hkdf.derive(binascii.unhexlify(params["ikm"]))
    assert okm == binascii.unhexlify(params["okm"])
def hkdf_extract_test(backend, algorithm, params):
    """Run only the HKDF extract step and compare the PRK.

    Uses the private _extract() hook since the public API has no
    extract-only entry point.
    """
    hkdf = HKDF(
        algorithm,
        int(params["l"]),
        # Empty vector fields decode to b"" and are normalized to None.
        salt=binascii.unhexlify(params["salt"]) or None,
        info=binascii.unhexlify(params["info"]) or None,
        backend=backend
    )
    prk = hkdf._extract(binascii.unhexlify(params["ikm"]))
    assert prk == binascii.unhexlify(params["prk"])
def hkdf_expand_test(backend, algorithm, params):
    """Run only the HKDF expand step on one vector's PRK."""
    info = binascii.unhexlify(params["info"]) or None
    hkdf = HKDFExpand(algorithm, int(params["l"]), info=info, backend=backend)
    okm = hkdf.derive(binascii.unhexlify(params["prk"]))
    assert okm == binascii.unhexlify(params["okm"])
def generate_hkdf_test(param_loader, path, file_names, algorithm):
    """Build one test parametrized over (vector, phase) combinations, where
    the phases are the extract/expand/derive checks."""
    vectors = _load_all_params(path, file_names, param_loader)
    phases = [hkdf_extract_test, hkdf_expand_test, hkdf_derive_test]

    def test_hkdf(self, backend, params, hkdf_test):
        hkdf_test(backend, algorithm, params)

    cases = itertools.product(vectors, phases)
    return pytest.mark.parametrize(("params", "hkdf_test"), cases)(test_hkdf)
def generate_kbkdf_counter_mode_test(param_loader, path, file_names):
    """Build a parametrized pytest method for KBKDF counter-mode vectors."""
    vectors = _load_all_params(path, file_names, param_loader)

    def test_kbkdf(self, backend, params):
        kbkdf_counter_mode_test(backend, params)

    return pytest.mark.parametrize("params", vectors)(test_kbkdf)
def kbkdf_counter_mode_test(backend, params):
    """Run one KBKDF counter-mode vector, skipping unsupported PRFs or
    counter locations."""
    supported_algorithms = {
        'hmac_sha1': hashes.SHA1,
        'hmac_sha224': hashes.SHA224,
        'hmac_sha256': hashes.SHA256,
        'hmac_sha384': hashes.SHA384,
        'hmac_sha512': hashes.SHA512,
    }

    supported_counter_locations = {
        "before_fixed": CounterLocation.BeforeFixed,
        "after_fixed": CounterLocation.AfterFixed,
    }

    algorithm = supported_algorithms.get(params.get('prf'))
    if algorithm is None or not backend.hmac_supported(algorithm()):
        pytest.skip("KBKDF does not support algorithm: {}".format(
            params.get('prf')
        ))

    ctr_loc = supported_counter_locations.get(params.get("ctrlocation"))
    # The isinstance check is belt-and-braces: the mapping above only ever
    # yields CounterLocation members.
    if ctr_loc is None or not isinstance(ctr_loc, CounterLocation):
        pytest.skip("Does not support counter location: {}".format(
            params.get('ctrlocation')
        ))

    # Vector lengths are given in bits; the API takes bytes.
    # The three positional Nones are presumably llen/label/context -- the
    # fixed input data is supplied directly instead; confirm against the
    # KBKDFHMAC signature.
    ctrkdf = KBKDFHMAC(
        algorithm(),
        Mode.CounterMode,
        params['l'] // 8,
        params['rlen'] // 8,
        None,
        ctr_loc,
        None,
        None,
        binascii.unhexlify(params['fixedinputdata']),
        backend=backend)

    ko = ctrkdf.derive(binascii.unhexlify(params['ki']))
    assert binascii.hexlify(ko) == params["ko"]
def generate_rsa_verification_test(param_loader, path, file_names, hash_alg,
                                   pad_factory):
    """Build a parametrized pytest method over vectors matching *hash_alg*."""
    wanted = hash_alg.name.upper()
    vectors = [
        vector for vector in _load_all_params(path, file_names, param_loader)
        if vector["algorithm"] == wanted
    ]

    def test_rsa_verification(self, backend, params):
        rsa_verification_test(backend, params, hash_alg, pad_factory)

    return pytest.mark.parametrize("params", vectors)(test_rsa_verification)
def rsa_verification_test(backend, params, hash_alg, pad_factory):
    """Verify one RSA signature vector.

    Vectors flagged ``fail`` must raise InvalidSignature; all others must
    verify cleanly.
    """
    public_key = rsa.RSAPublicNumbers(
        e=params["public_exponent"], n=params["modulus"]
    ).public_key(backend)
    pad = pad_factory(params, hash_alg)
    signature = binascii.unhexlify(params["s"])
    msg = binascii.unhexlify(params["msg"])
    if params["fail"]:
        with pytest.raises(InvalidSignature):
            public_key.verify(signature, msg, pad, hash_alg)
    else:
        public_key.verify(signature, msg, pad, hash_alg)
def _check_rsa_private_numbers(skey):
    """Sanity-check an RSAPrivateNumbers: public part present, n = p*q, and
    the CRT coefficients consistent with d, p and q."""
    assert skey
    pkey = skey.public_numbers
    assert pkey
    assert pkey.e
    assert pkey.n
    assert skey.d
    assert skey.p * skey.q == pkey.n
    assert skey.dmp1 == rsa.rsa_crt_dmp1(skey.d, skey.p)
    assert skey.dmq1 == rsa.rsa_crt_dmq1(skey.d, skey.q)
    assert skey.iqmp == rsa.rsa_crt_iqmp(skey.p, skey.q)
def _check_dsa_private_numbers(skey):
    """Sanity-check a DSAPrivateNumbers: the public value must satisfy
    y = g^x mod p."""
    assert skey
    pkey = skey.public_numbers
    params = pkey.parameter_numbers
    assert pow(params.g, skey.x, params.p) == pkey.y
| 32.866808 | 79 | 0.680368 |
60b7fe5b1f843c8d6d2b4280c12bb90064593f0d | 3,498 | py | Python | flags/conditions/conditions.py | edomora97/django-flags | fb22253d77ad0a8dbcee4ebfb78b185348e0c5cf | [
"CC0-1.0"
] | null | null | null | flags/conditions/conditions.py | edomora97/django-flags | fb22253d77ad0a8dbcee4ebfb78b185348e0c5cf | [
"CC0-1.0"
] | null | null | null | flags/conditions/conditions.py | edomora97/django-flags | fb22253d77ad0a8dbcee4ebfb78b185348e0c5cf | [
"CC0-1.0"
] | null | null | null | import re
from distutils.util import strtobool
from django.contrib.auth import get_user_model
from django.utils import dateparse, timezone
from flags.conditions.registry import register
from flags.conditions.validators import (
validate_boolean,
validate_date,
validate_parameter,
validate_path_re,
validate_user,
)
class RequiredForCondition(AttributeError):
    """Raised when a kwarg that is required for a condition is not given."""
@register("boolean", validator=validate_boolean)
def boolean_condition(condition, **kwargs):
    """The flag state is the stored value itself: a bool or a truthy string
    like "true"/"false", "yes"/"no", "1"/"0"."""
    try:
        parsed = strtobool(condition.strip())
    except AttributeError:
        # Not string-like; fall back to plain truthiness.
        parsed = bool(condition)
    return parsed
@register("user", validator=validate_user)
def user_condition(username, request=None, **kwargs):
    """Enabled when the requesting user's username equals *username*."""
    if request is None:
        raise RequiredForCondition("request is required for condition 'user'")
    user = request.user
    if user.is_anonymous:
        return False
    current = getattr(user, get_user_model().USERNAME_FIELD)
    return current == username
@register("anonymous", validator=validate_boolean)
def anonymous_condition(boolean_value, request=None, **kwargs):
    """Enabled when the request's anonymity matches *boolean_value*."""
    if request is None:
        raise RequiredForCondition(
            "request is required for condition 'anonymous'"
        )
    is_anonymous = bool(request.user.is_anonymous)
    try:
        expected = strtobool(boolean_value.strip().lower())
    except AttributeError:
        # Not string-like; compare plain truthiness instead.
        expected = bool(boolean_value)
    return expected == is_anonymous
@register("parameter", validator=validate_parameter)
def parameter_condition(param_name, request=None, **kwargs):
    """Is the parameter name part of the GET parameters?

    The condition may be given as ``name`` (matching ``?name=True``) or as
    ``name=value`` (matching ``?name=value``). Only the first ``=`` splits
    name from value, so values may themselves contain ``=``.
    """
    if request is None:
        raise RequiredForCondition(
            "request is required for condition 'parameter'"
        )

    try:
        # Split on the first "=" only so values containing "=" survive
        # (the old unbounded split broke on e.g. "next=/a?b=c").
        param_name, param_value = param_name.split("=", 1)
    except ValueError:
        # No "=" given: default to the sentinel value "True".
        param_value = "True"

    return request.GET.get(param_name) == param_value
@register("path matches", validator=validate_path_re)
def path_condition(pattern, request=None, **kwargs):
    """Enabled when the request path matches *pattern* (regex search)."""
    if request is None:
        raise RequiredForCondition("request is required for condition 'path'")
    match = re.search(pattern, request.path)
    return match is not None
@register("after date", validator=validate_date)
def after_date_condition(date_or_str, **kwargs):
    """Is the current date after the given date?

    date_or_str is either a datetime object or an ISO 8601 string.
    """
    try:
        date = dateparse.parse_datetime(date_or_str)
    except TypeError:
        # Not a string; assume it is already a datetime-like object.
        date = date_or_str
    now = timezone.now()
    try:
        date_test = now > date
    except TypeError:
        # Covers parse_datetime() returning None for unparseable strings and
        # naive/aware comparison errors; treat the condition as not met.
        date_test = False
    return date_test
# Keep the old name of this condition function around for
# backwards-compatibility with existing imports.
date_condition = after_date_condition
@register("before date", validator=validate_date)
def before_date_condition(date_or_str, **kwargs):
    """Is the current date before the given date?

    date_or_str is either a datetime object or an ISO 8601 string.
    """
    try:
        when = dateparse.parse_datetime(date_or_str)
    except TypeError:
        # Not a string; assume a datetime-like object was passed.
        when = date_or_str
    try:
        return timezone.now() < when
    except TypeError:
        # Unparseable string (parse_datetime gave None) or an incomparable
        # value: treat the condition as not met.
        return False
| 28.439024 | 78 | 0.707547 |
a041c8ec0934db7d21e1413266fe0e3a7bd739bc | 645 | py | Python | tests/staticfile.py | movermeyer/tangelo | 470034ee9b3d7a01becc1ce5fddc7adc1d5263ef | [
"Apache-2.0"
] | 40 | 2015-01-09T02:56:33.000Z | 2019-03-01T05:34:13.000Z | tests/staticfile.py | movermeyer/tangelo | 470034ee9b3d7a01becc1ce5fddc7adc1d5263ef | [
"Apache-2.0"
] | 98 | 2015-01-05T12:51:29.000Z | 2019-01-23T20:16:48.000Z | tests/staticfile.py | movermeyer/tangelo | 470034ee9b3d7a01becc1ce5fddc7adc1d5263ef | [
"Apache-2.0"
] | 21 | 2015-01-05T19:11:49.000Z | 2020-08-19T04:16:16.000Z | import nose
import requests
import fixture
@nose.with_setup(fixture.start_tangelo, fixture.stop_tangelo)
def test_static_file():
    # NOTE: Python 2 test (print statements). Serving a static file should
    # return its exact contents with a text/plain content type.
    r = requests.get(fixture.url("static_file"))
    print r.text
    assert r.ok
    assert r.status_code == 200
    assert r.text == "Infinite Diversity in Infinite Combinations\n"
    assert "text/plain" in r.headers["Content-Type"]
    # An explicit mime_type query parameter should override the Content-Type
    # while leaving the body unchanged.
    r = requests.get(fixture.url(
        "static_file?mime_type=application/octet-stream"))
    assert r.ok
    assert r.status_code == 200
    assert r.text == "Infinite Diversity in Infinite Combinations\n"
    assert "application/octet-stream" in r.headers["Content-Type"]
| 30.714286 | 68 | 0.717829 |
ce85b8e61eb53f4c86bb47cdb1cdc330392f0539 | 1,510 | py | Python | alipay/aop/api/domain/AlipayCommerceIotDapplyRefundCreateModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayCommerceIotDapplyRefundCreateModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayCommerceIotDapplyRefundCreateModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceIotDapplyRefundCreateModel(object):
    """Request model carrying two optional fields, convertible to and from
    the plain-dict wire format used by the Alipay SDK."""

    def __init__(self):
        self._asset_apply_order_id = None
        self._memo = None

    @property
    def asset_apply_order_id(self):
        return self._asset_apply_order_id

    @asset_apply_order_id.setter
    def asset_apply_order_id(self, value):
        self._asset_apply_order_id = value

    @property
    def memo(self):
        return self._memo

    @memo.setter
    def memo(self, value):
        self._memo = value

    def to_alipay_dict(self):
        """Serialize to a dict, omitting unset (falsy) fields."""
        params = dict()
        for key in ('asset_apply_order_id', 'memo'):
            value = getattr(self, key)
            if not value:
                continue
            # Nested model objects know how to serialize themselves.
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a wire dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayCommerceIotDapplyRefundCreateModel()
        for key in ('asset_apply_order_id', 'memo'):
            if key in d:
                setattr(o, key, d[key])
        return o
| 26.964286 | 91 | 0.621192 |
14bc882dcf240935ad849e179ae0e9bc3d5de861 | 545 | py | Python | forest-example/classify_user.py | RedisLabs/spark-redis-ml | c43b817f170cdf3f2f5e2c6a92327294241bf0c3 | [
"BSD-3-Clause"
] | 70 | 2016-10-26T15:48:40.000Z | 2020-06-09T02:50:08.000Z | forest-example/classify_user.py | RedisLabs/spark-redis-ml | c43b817f170cdf3f2f5e2c6a92327294241bf0c3 | [
"BSD-3-Clause"
] | 5 | 2017-02-04T01:46:55.000Z | 2017-07-17T09:54:41.000Z | forest-example/classify_user.py | RedisLabs/spark-redis-ml | c43b817f170cdf3f2f5e2c6a92327294241bf0c3 | [
"BSD-3-Clause"
] | 15 | 2016-11-02T14:59:03.000Z | 2020-06-10T03:17:40.000Z | #!/usr/bin/python
import operator
import redis
# Connect to a local Redis instance holding the Redis-ML forest models.
config = {"host":"localhost", "port":6379}
r = redis.StrictRedis(**config)

# Serialized feature vector for user 1 — presumably written by an earlier
# training/ingest step; verify the key naming against that pipeline.
user_profile = r.get("user-1-profile")

results = {}
# Score the profile against each of the 10 per-movie forest models.
for i in range(1, 11):
    results[i] = r.execute_command("ML.FOREST.RUN", "movie-{}".format(i), user_profile)

print "Movies sorted by scores:"
# Order (movie_id, score) pairs by score, best first.
sorted_results = sorted(results.items(), key=operator.itemgetter(1), reverse=True)
for k,v in sorted_results:
    print "movie-{}:{}".format(k,v)
print ""
# The top-scoring movie is the recommendation.
print "Recommended movie: movie-{}".format(sorted_results[0][0])
697743be8b299d77e75d6342d55ad94de4e7724c | 6,305 | py | Python | antispam/ext/anti_mass_mention.py | 11Tuvork28/DPY-Anti-Spam | 8efe58b264460611baab9153d0799ffa390acaa0 | [
"MIT"
] | null | null | null | antispam/ext/anti_mass_mention.py | 11Tuvork28/DPY-Anti-Spam | 8efe58b264460611baab9153d0799ffa390acaa0 | [
"MIT"
] | null | null | null | antispam/ext/anti_mass_mention.py | 11Tuvork28/DPY-Anti-Spam | 8efe58b264460611baab9153d0799ffa390acaa0 | [
"MIT"
] | null | null | null | import logging
import datetime
import typing
from dataclasses import dataclass, asdict
import discord
from antispam.exceptions import UserNotFound
from antispam.base_extension import BaseExtension
from antispam.ext.user_tracking import UserTracking
log = logging.getLogger(__name__)
@dataclass
class MassMentionPunishment:
    """
    This dataclass is what is dispatched to
    `on_mass_mention_punishment`

    Parameters
    ----------
    user_id : int
        The associated users id
    guild_id : int
        The associated guilds id
    is_overall_punishment : bool
        If this is ``True``, it means the user
        has exceeded ``total_mentions_before_punishment``.
        Otherwise they have exceeded ``min_mentions_per_message``
    """

    # Note: AntiMassMention.propagate also returns asdict(<this object>)
    # to its caller after dispatching the event.
    user_id: int
    guild_id: int
    is_overall_punishment: bool
@dataclass
class Tracking:
    """Internal per-message record: how many unique mentions, and when."""

    mentions: int
    # Timestamp of the message carrying the mentions (message.created_at).
    timestamp: datetime.datetime
class AntiMassMention(BaseExtension):
    """A simple class used to track mentions"""

    def __init__(
        self,
        bot,
        *,
        total_mentions_before_punishment: int = 10,
        time_period: int = 15000,
        min_mentions_per_message: int = 5,
    ):
        """
        Parameters
        ----------
        bot : commands.Bot or commands.AutoShardedBot or discord.Client or discord.AutoShardedClient
            Our bot instance
        total_mentions_before_punishment : int
            How many mentions within the time period
            before we punish the user
            *Inclusive*
        time_period : int
            The time period valid for mentions
            *Is in milliseconds*
        min_mentions_per_message : int
            The minimum amount of mentions in a message
            before a punishment is issued
            *Inclusive*
        """
        super().__init__()
        self.bot = bot
        # Per-guild, per-user storage for mention Tracking records.
        self.data = UserTracking()

        # A per-message threshold larger than the overall threshold would
        # make the per-message check unreachable, so reject it up front.
        if min_mentions_per_message > total_mentions_before_punishment:
            raise ValueError(
                "Expected `min_mentions_per_message` to be less then or equal to `total_mentions_before_punishment`"
            )

        if time_period < 1:
            raise ValueError("Expected `time_period` to be positive")

        self.min_mentions_per_message = min_mentions_per_message
        self.total_mentions_before_punishment = total_mentions_before_punishment
        self.time_period = time_period

        log.debug("Initialized AntiMassMessage")

    async def propagate(
        self, message: discord.Message, data: typing.Optional[dict] = None
    ) -> dict:
        """
        Manages and stores any mass mentions per users

        Parameters
        ----------
        message : discord.Message
            The message to interact with
        data : None
            Not expected/wanted

        Returns
        -------
        dict
            A dictionary explaining what
            actions have been taken
        """
        user_id = message.author.id
        guild_id = message.guild.id
        log.debug(f"Propagating message for {user_id}, guild:{guild_id}")

        try:
            user = self.data.get_user(guild_id, user_id)
        except UserNotFound:
            # First message we see from this user: start an empty record.
            user = {"total_mentions": []}
            """
            {
                "total_mentions": [
                    {
                        int amount : Datetime timestamp
                    }
                ]
            }
            """

        # set() de-duplicates repeated mentions of the same member.
        mentions = set(message.mentions)
        user["total_mentions"].append(
            Tracking(mentions=len(mentions), timestamp=message.created_at)
        )
        self.data.set_user(guild_id, user_id, user)

        # NOTE(review): message.created_at is presumably a naive UTC datetime
        # while datetime.now() is local time — confirm; a mismatch would skew
        # the expiry window used below.
        self._clean_mention_timestamps(
            guild_id=guild_id,
            user_id=user_id,
            current_time=datetime.datetime.now(),
        )

        if len(mentions) >= self.min_mentions_per_message:
            # They mention too many people in this message so punish
            log.info("Dispatching punishment event, is_overall_punishment=False")
            payload = MassMentionPunishment(
                user_id=user_id,
                guild_id=guild_id,
                is_overall_punishment=False,
            )
            self.bot.dispatch(
                "mass_mention_punishment",
                payload,
            )
            return asdict(payload)

        # Re-read the record after cleaning, then check the rolling total.
        user = self.data.get_user(guild_id=guild_id, user_id=user_id)
        if (
            sum(item.mentions for item in user["total_mentions"])
            >= self.total_mentions_before_punishment
        ):
            # They have more mentions are cleaning then allowed,
            # So time to punish them
            log.info("Dispatching punishment event, is_overall_punishment=True")
            payload = MassMentionPunishment(
                user_id=user_id,
                guild_id=guild_id,
                is_overall_punishment=True,
            )
            self.bot.dispatch(
                "mass_mention_punishment",
                payload,
            )
            return asdict(payload)

        return {"action": "No action taken"}

    def _clean_mention_timestamps(
        self, guild_id: int, user_id: int, current_time: datetime.datetime
    ):
        """
        Cleans the internal cache for a user to only keep current mentions

        Parameters
        ----------
        guild_id : int
            The guild the users in
        user_id : int
            The user to clean

        Notes
        -----
        Expects the user to exist in ``self.data``. This
        does no form of validation for existence
        """
        log.debug(f"Cleaning timestamps for {user_id}, guild: {guild_id}")

        def _is_still_valid(timestamp):
            # A record is valid while it is younger than time_period ms.
            difference = current_time - timestamp
            offset = datetime.timedelta(milliseconds=self.time_period)
            if difference >= offset:
                return False
            return True

        user = self.data.get_user(guild_id=guild_id, user_id=user_id)
        valid_items = []
        for item in user["total_mentions"]:
            if _is_still_valid(item.timestamp):
                valid_items.append(item)

        user["total_mentions"] = valid_items
        self.data.set_user(guild_id=guild_id, user_id=user_id, user_data=user)
| 29.881517 | 116 | 0.589532 |
ffc50ab1a5eb96c6c45b068613b71abe225a494d | 255 | py | Python | PYTHON/Desafios Python - Mundo 1/024B.py | FR7/Meus-Projetos | 1c8e1a91eaf143cccdc10f0e7edd013d910de474 | [
"MIT"
] | null | null | null | PYTHON/Desafios Python - Mundo 1/024B.py | FR7/Meus-Projetos | 1c8e1a91eaf143cccdc10f0e7edd013d910de474 | [
"MIT"
] | null | null | null | PYTHON/Desafios Python - Mundo 1/024B.py | FR7/Meus-Projetos | 1c8e1a91eaf143cccdc10f0e7edd013d910de474 | [
"MIT"
] | null | null | null | #Crie um programa que leia o nome de uma cidade e diga se ela começa ou não com o nome: "Santo".
# Read a city name, normalize it (trim + uppercase), and report whether it
# starts with "Santo" (checks the first five characters).
cidade = str(input("Informe o nome da sua cidade: ")).strip().upper()
print("A cidade {} começa com o nome Santo?: {}".format(cidade, 'SANTO'in cidade[:5]))
3472820a8e4d39d6c2d54e10e94a830488991a10 | 4,153 | py | Python | discord/team.py | FrankKeller/discord.py | ed9f940c01490b94ee5d958ff105041a6e5d260c | [
"MIT"
] | null | null | null | discord/team.py | FrankKeller/discord.py | ed9f940c01490b94ee5d958ff105041a6e5d260c | [
"MIT"
] | 1 | 2020-01-21T16:21:11.000Z | 2020-01-21T16:21:11.000Z | discord/team.py | FrankKeller/discord.py | ed9f940c01490b94ee5d958ff105041a6e5d260c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2020 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from . import utils
from .user import BaseUser
from .asset import Asset
from .enums import TeamMembershipState, try_enum
__all__ = (
'Team',
'TeamMember',
)
class Team:
    """An application team for a bot provided by Discord.

    Attributes
    -------------
    id: :class:`int`
        The team ID.
    name: :class:`str`
        The team name
    icon: Optional[:class:`str`]
        The icon hash, if it exists.
    owner_id: :class:`int`
        The team's owner ID.
    members: List[:class:`TeamMember`]
        A list of the members in the team
    """

    __slots__ = ('_state', 'id', 'name', 'icon', 'owner_id', 'members')

    def __init__(self, state, data):
        self._state = state

        self.id = utils._get_as_snowflake(data, 'id')
        self.name = data['name']
        self.icon = data['icon']
        self.owner_id = utils._get_as_snowflake(data, 'owner_user_id')
        self.members = [
            TeamMember(self, self._state, raw_member)
            for raw_member in data['members']
        ]

    def __repr__(self):
        return '<{} id={} name={}>'.format(type(self).__name__, self.id, self.name)

    @property
    def icon_url(self):
        """:class:`.Asset`: Retrieves the team's icon asset."""
        return Asset._from_icon(self._state, self, 'team')

    @property
    def owner(self):
        """Optional[:class:`TeamMember`]: The team's owner."""
        return utils.get(self.members, id=self.owner_id)
class TeamMember(BaseUser):
    """Represents a team member in a team.

    .. container:: operations

        .. describe:: x == y

            Checks if two team members are equal.

        .. describe:: x != y

            Checks if two team members are not equal.

        .. describe:: hash(x)

            Return the team member's hash.

        .. describe:: str(x)

            Returns the team member's name with discriminator.

    Attributes
    -------------
    name: :class:`str`
        The team member's username.
    id: :class:`int`
        The team member's unique ID.
    discriminator: :class:`str`
        The team member's discriminator. This is given when the username has conflicts.
    avatar: Optional[:class:`str`]
        The avatar hash the team member has. Could be None.
    bot: :class:`bool`
        Specifies if the user is a bot account.
    team: :class:`Team`
        The team that the member is from.
    membership_state: :class:`TeamMembershipState`
        The membership state of the member (e.g. invited or accepted)
    """

    __slots__ = BaseUser.__slots__ + ('team', 'membership_state', 'permissions')

    def __init__(self, team, state, data):
        self.team = team
        raw_state = data['membership_state']
        self.membership_state = try_enum(TeamMembershipState, raw_state)
        self.permissions = data['permissions']
        # The nested 'user' payload carries the base-user fields.
        super().__init__(state=state, data=data['user'])

    def __repr__(self):
        template = ('<{0.__class__.__name__} id={0.id} name={0.name!r} '
                    'discriminator={0.discriminator!r} membership_state={0.membership_state!r}>')
        return template.format(self)
| 32.960317 | 104 | 0.659764 |
b1bb475e972655ff870c987bf364ac2b82d09edf | 512 | py | Python | pypersonalfin/utils/amount.py | guilhermebruzzi/pypersonalfin | 180619b36ed28e90b2891a9b2b9b4708d22cbdc8 | [
"MIT"
] | 1 | 2021-12-05T17:51:00.000Z | 2021-12-05T17:51:00.000Z | pypersonalfin/utils/amount.py | guilhermebruzzi/pypersonalfin | 180619b36ed28e90b2891a9b2b9b4708d22cbdc8 | [
"MIT"
] | null | null | null | pypersonalfin/utils/amount.py | guilhermebruzzi/pypersonalfin | 180619b36ed28e90b2891a9b2b9b4708d22cbdc8 | [
"MIT"
] | 1 | 2021-02-21T20:07:18.000Z | 2021-02-21T20:07:18.000Z | from .locale import is_brazil
def amount_to_str(amount, locale=None, include_currency=True):
    """Format an integer amount of cents as a human-readable money string.

    Uses a comma decimal separator and ``R$`` for Brazilian locales,
    otherwise a dot separator and ``$``.
    """
    # Two decimal places, US-style by default.
    text = "{:.2f}".format(amount / 100.0)

    if is_brazil(locale):
        text = text.replace('.', ',')
        return 'R${}'.format(text) if include_currency else text

    return '${}'.format(text) if include_currency else text
| 26.947368 | 62 | 0.675781 |
c87a527f93d3f42c2bd6be702fdfef4efe5bb298 | 8,392 | py | Python | src/datasets/transforms.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
] | 11 | 2021-10-21T07:19:05.000Z | 2022-03-27T09:26:03.000Z | src/datasets/transforms.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
] | 2 | 2021-11-12T09:49:41.000Z | 2022-03-06T03:15:00.000Z | src/datasets/transforms.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
] | 3 | 2021-12-22T03:10:02.000Z | 2022-02-03T09:41:47.000Z | '''
MIT License
Copyright (c) 2021 SLAB Group
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torchvision.transforms.functional as T
'''
As noted in https://pytorch.org/tutorials/beginner/data_loading_tutorial.html,
it's best to use torch to generate random numbers, otherwise each worker
must be initialized with different random seeds when using, e.g., numpy,
to prevent all images in the same batch ending up with identical augmentations.
'''
class Rotate(object):
    ''' Rotate image randomly by 90 deg intervals '''
    def __call__(self, data, bbox, keypts):
        # Pick 90, 180 or 270 degrees (torch.randint's high bound is exclusive).
        angle = 90*float(torch.randint(1, 4, (1,)))

        # Rotate image
        data = T.rotate(data, angle)

        # Rotate keypoints — keypts holds normalized [0, 1] x/y rows; clone
        # first so the in-place row swap below reads the original values.
        # NOTE(review): mapping assumes T.rotate's rotation direction matches
        # these formulas — confirm against torchvision documentation.
        x, y = keypts[0].clone(), keypts[1].clone()
        if angle == 90: # 90 deg
            keypts[0], keypts[1] = y, 1.0 - x
        elif angle == 180: # 180 deg
            keypts[0], keypts[1] = 1.0 - x, 1.0 - y
        elif angle == 270: # 270 deg
            keypts[0], keypts[1] = 1.0 - y, x

        return data, bbox, keypts
class Flip(object):
    ''' Flip image randomly either horizontally or vertically '''
    def __call__(self, data, bbox, keypts):
        # 50/50 choice between horizontal and vertical; the matching
        # normalized keypoint coordinate is mirrored around 0.5.
        if torch.rand(1) < 0.5:
            # horizontal flip
            data = T.hflip(data)
            keypts[0] = 1.0 - keypts[0]
        else:
            # vertical flip
            data = T.vflip(data)
            keypts[1] = 1.0 - keypts[1]

        return data, bbox, keypts
class BrightnessContrast(object):
    """ Adjust brightness and contrast of the image in a fashion of
    OpenCV's convertScaleAbs, where

        newImage = alpha * image + beta

    image: torch.Tensor image (0 ~ 1)
    alpha: multiplicative factor
    beta:  additive factor (0 ~ 255)
    """
    def __init__(self, alpha=(0.5, 2.0), beta=(-25, 25)):
        # alpha is stored in log-space so sampling is symmetric between
        # darkening (<1) and brightening (>1); beta is rescaled to [0, 1].
        self.alpha = torch.tensor(alpha).log()
        self.beta  = torch.tensor(beta)/255

    def __call__(self, image, bbox, keypts):
        # Contrast - multiplicative factor, log-uniform in [alpha_lo, alpha_hi]
        loga = torch.rand(1) * (self.alpha[1] - self.alpha[0]) + self.alpha[0]
        a = loga.exp()

        # Brightness - additive factor, uniform in [beta_lo, beta_hi]
        b = torch.rand(1) * (self.beta[1] - self.beta[0]) + self.beta[0]

        # Apply, keeping the result in the valid [0, 1] image range.
        image = torch.clamp(a*image + b, 0, 1)

        return image, bbox, keypts
class GaussianNoise(object):
    """ Add random Gaussian white noise

    image: torch.Tensor image (0 ~ 1)
    std:   noise standard deviation (0 ~ 255)
    """
    def __init__(self, std=25):
        # Convert from the [0, 255] pixel scale to the [0, 1] image scale.
        self.std = std/255

    def __call__(self, image, bbox, keypts):
        # Zero-mean white noise with the configured standard deviation.
        perturbation = self.std * torch.randn(image.shape, dtype=torch.float32)
        noisy = torch.clamp(image + perturbation, 0, 1)
        return noisy, bbox, keypts
class RandomCrop(object):
    ''' Crop the image with random bounding box fully containing
    the satellite foreground
    '''
    def __init__(self, output_shape, is_train=True):
        self.shape = output_shape
        self.is_train = is_train

    def __call__(self, image, bbox, keypts):
        # image is accessed via image.size (w, h) — assumes a PIL image,
        # not a tensor; TODO confirm against the dataset loader.
        # bbox: [xmin, xmax, ymin, ymax] (pix)
        # keypts: [2 x Nk] (pix)
        xmin, xmax, ymin, ymax = bbox
        w, h = xmax-xmin, ymax-ymin
        x, y = xmin+w/2.0, ymin+h/2.0

        # Original image shape
        org_w, org_h = image.size

        # Make sure the RoI is SQUARE, as is most input to the CNN
        roi_size = max((w, h))

        # Since the 2D bounding box is supposedly very "tight",
        # Give some extra room in both directions
        if self.is_train:
            # Enlarge tight RoI by random factor within [1, 1.5]
            roi_size = (1 + 0.5 * torch.rand(1)) * roi_size

            # Shift expanded RoI by random factor as well
            # Factor within range of [-f*roi_size, +f*roi_size]
            fx = 0.2 * (torch.rand(1)*2 - 1) * roi_size
            fy = 0.2 * (torch.rand(1)*2 - 1) * roi_size
        else:
            # For testing, just enlarge by fixed amount
            roi_size = (1 + 0.2) * roi_size
            fx = fy = 0

        # Construct new RoI, clipped to the image frame.
        xmin = max(0, int(x - roi_size/2.0 + fx))
        xmax = min(org_w, int(x + roi_size/2.0 + fx))
        ymin = max(0, int(y - roi_size/2.0 + fy))
        ymax = min(org_h, int(y + roi_size/2.0 + fy))
        bbox = torch.tensor([xmin, xmax, ymin, ymax], dtype=torch.float32)

        # Adjust keypoints (0 ~ 1) — re-normalized relative to the new RoI;
        # values may fall outside [0, 1] for keypoints outside the crop.
        keypts = torch.tensor(keypts, dtype=torch.float32)
        keypts[0] = (keypts[0] - xmin) / (xmax - xmin)
        keypts[1] = (keypts[1] - ymin) / (ymax - ymin)

        # Crop and resize
        image = T.resized_crop(image, ymin, xmin, ymax-ymin, xmax-xmin, self.shape)

        return image, bbox, keypts
class ResizeCrop(object):
    ''' Resize and crop image given bounding box'''
    def __init__(self, output_shape):
        self.shape = output_shape

    def __call__(self, image, bbox, keypts):
        # bbox: [xmin, xmax, ymin, ymax] (pix)
        xmin, xmax, ymin, ymax = bbox

        # Original image shape — assumes a PIL image (image.size is (w, h));
        # TODO confirm against the dataset loader.
        org_w, org_h = image.size

        # Make sure bbox is within image frame
        xmin = max(0, int(xmin))
        xmax = min(org_w, int(xmax))
        ymin = max(0, int(ymin))
        ymax = min(org_h, int(ymax))

        # Crop and resize to the fixed network input shape.
        image = T.resized_crop(image, ymin, xmin, ymax-ymin, xmax-xmin, self.shape)

        # For SPN, return original bounding box
        bbox = torch.tensor(bbox, dtype=torch.float32)

        return image, bbox, keypts
class ToTensor(object):
    ''' Same as torchvision.ToTensor(), but passes extra arguments'''
    def __call__(self, image, bbox, keypts):
        # float tensor [0, 1]; bbox and keypts are forwarded untouched so the
        # transform composes with the (image, bbox, keypts) pipeline.
        return T.to_tensor(image).type(torch.float32), bbox, keypts
class RandomApply(object):
    ''' Sameas torchvision.RandomApply(), but randomly apply EACH transform
    instead of the whole set of transforms
    '''
    def __init__(self, transforms, p):
        self.transforms = transforms
        self.p = p

    def __call__(self, image, bbox, keypts):
        # One independent Bernoulli(p) draw per transform.
        for transform in self.transforms:
            skip = not (torch.rand(1) < self.p)
            if skip:
                continue
            image, bbox, keypts = transform(image, bbox, keypts)
        return image, bbox, keypts
class Compose(object):
    """Chain (image, bbox, keypts) transforms, applying them in order."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, bbox, keypts):
        state = (image, bbox, keypts)
        for transform in self.transforms:
            state = transform(*state)
        return state
def build_transforms(model_name, input_size, p_aug=0.5, is_train=True):
    """Assemble the preprocessing/augmentation pipeline for a model.

    KRN gets a random crop (plus augmentations when training); SPN gets a
    deterministic resize-crop. Images are converted to tensors in between.
    """
    # First, resize & crop image according to the model family.
    if model_name == 'krn':
        pipeline = [RandomCrop(input_size, is_train)]
    elif model_name == 'spn':
        pipeline = [ResizeCrop(input_size)]

    # Image to tensor [0, 1] before augmentation.
    pipeline.append(ToTensor())

    # Photometric/geometric augmentation only applies to KRN training.
    if is_train and model_name == 'krn':
        pipeline.append(
            RandomApply(
                [Rotate(), Flip(),
                 BrightnessContrast(alpha=(0.5, 2.0), beta=(-25, 25)),
                 GaussianNoise(std=25)],
                p=p_aug)
        )

    return Compose(pipeline)
7ff0af30c0944c63bfe54db72999fa9aa7b3a9a6 | 2,220 | py | Python | scripts/visualization/stake.py | francescodisalvo05/portfolio_analysis | 1145d5b0307acde50a9b1be6be62483b16c70f34 | [
"MIT"
] | null | null | null | scripts/visualization/stake.py | francescodisalvo05/portfolio_analysis | 1145d5b0307acde50a9b1be6be62483b16c70f34 | [
"MIT"
] | null | null | null | scripts/visualization/stake.py | francescodisalvo05/portfolio_analysis | 1145d5b0307acde50a9b1be6be62483b16c70f34 | [
"MIT"
] | null | null | null | from math import pi
from typing import Dict, Text
import pandas as pd
from bokeh.layouts import gridplot, column
from bokeh.models import Div, HoverTool
from bokeh.palettes import Category20c
from bokeh.plotting import figure
from bokeh.transform import cumsum
from scripts.constants.constants import TOOLS
HEIGHT = 500
def stake_plot(stake_dict: Dict,
               title: Text):
    """Build a side-by-side pie + horizontal-bar view of portfolio stakes.

    stake_dict maps a label to its stake fraction; values are treated as
    fractions of a whole when computing wedge angles (value * 2*pi).
    Returns a bokeh layout (title Div above the two linked figures).
    """
    data = pd.Series(stake_dict).reset_index(name='value')
    data = data.sort_values(by='value', ascending=True)
    data['angle'] = data['value'] * 2 * pi

    # define color — Category20c palettes only exist for 3..20 entries, so
    # 1- and 2-entry dicts get hand-picked blues. NOTE(review): more than 20
    # entries would raise a KeyError here — confirm upstream bounds.
    if len(stake_dict) == 1:
        data['color'] = ['#3182bd']
    elif len(stake_dict) == 2:
        data['color'] = ['#3182bd', '#6baed6']
    else:
        data['color'] = Category20c[len(stake_dict)]

    bar_fig = figure(title='Bar Chart', toolbar_location=None,
                     tools=TOOLS, tooltips="@index: @value",
                     # x_range=(-0.5, 1.0),
                     y_range=data['index'],
                     height=HEIGHT
                     )

    wedge_fig = figure(title='Pie Chart', toolbar_location=None,
                       tools="hover", tooltips="@index: @value",
                       # x_range=(-0.5, 1.0),
                       height=HEIGHT
                       )

    # Pie charts need no axes or grid lines.
    wedge_fig.axis.axis_label = None
    wedge_fig.axis.visible = False
    wedge_fig.grid.grid_line_color = None

    wedge_fig.wedge(x=0, y=1, radius=0.6,
                    start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),
                    line_color="white", fill_color='color', legend_field='index', source=data)

    bar_fig.hbar(y='index', right='value',
                 left=0, height=0.4,
                 fill_color="color",
                 source=data,
                 name='value')

    # Select specific tool for the plot
    hover = bar_fig.select(dict(type=HoverTool))
    # Choose, which glyphs are active by glyph name
    hover.names = ["value"]
    # Creating tooltips
    hover.tooltips = [("Stake", "@value{( 0.00 )}")]

    grid = gridplot([[wedge_fig, bar_fig]])
    title = Div(text=title, style={'font-size': '200%'}, align='center')
    fig = column(title, grid)

    return fig
| 31.714286 | 94 | 0.581081 |
ac446b5ccc92b6f8c61217111375e67604fd653a | 34,229 | py | Python | kvmagent/kvmagent/plugins/localstorage.py | zstackio/zstack-utility | 919d686d46c68836cbcad51ab0b8bf53bc88abda | [
"ECL-2.0",
"Apache-2.0"
] | 55 | 2017-02-10T07:55:21.000Z | 2021-09-01T00:59:36.000Z | kvmagent/kvmagent/plugins/localstorage.py | zstackio/zstack-utility | 919d686d46c68836cbcad51ab0b8bf53bc88abda | [
"ECL-2.0",
"Apache-2.0"
] | 106 | 2017-02-13T09:58:27.000Z | 2022-02-15T09:51:48.000Z | kvmagent/kvmagent/plugins/localstorage.py | zstackio/zstack-utility | 919d686d46c68836cbcad51ab0b8bf53bc88abda | [
"ECL-2.0",
"Apache-2.0"
] | 68 | 2017-02-13T11:02:01.000Z | 2021-12-16T11:02:01.000Z | __author__ = 'frank'
import os
import os.path
import traceback
import zstacklib.utils.uuidhelper as uuidhelper
from kvmagent import kvmagent
from kvmagent.plugins.imagestore import ImageStoreClient
from zstacklib.utils import jsonobject
from zstacklib.utils import linux
from zstacklib.utils import shell
from zstacklib.utils import traceable_shell
from zstacklib.utils import rollback
from zstacklib.utils.bash import *
from zstacklib.utils.report import *
from zstacklib.utils.plugin import completetask
logger = log.get_logger(__name__)
class AgentCommand(object):
    """Base class for command payloads received from the management node."""
    def __init__(self):
        pass
class AgentResponse(object):
    """Base response sent back to the management node.

    Carries storage capacity figures plus the success flag / error message
    shared by all local-storage responses; all fields start as None.
    """

    def __init__(self):
        for field in ('totalCapacity', 'availableCapacity', 'success', 'error'):
            setattr(self, field, None)
class InitRsp(AgentResponse):
    """Response for INIT_PATH; additionally reports used local capacity."""
    def __init__(self):
        super(InitRsp, self).__init__()
        # Bytes already consumed on the local storage; None until computed.
        self.localStorageUsedCapacity = None
class CopyBitsFromRemoteCmd(AgentCommand):
    """Command to copy volume/snapshot files to a remote host over SSH."""
    # dstPassword is masked when the command is logged.
    @log.sensitive_fields("dstPassword")
    def __init__(self):
        super(CopyBitsFromRemoteCmd, self).__init__()
        self.sendCommandUrl = None
        # Local file paths to copy to the destination host.
        self.paths = []
        self.dstIp = None
        self.dstPassword = None
        self.dstUsername = None
        self.dstPort = 22
        self.stage = None
        self.volumeUuid = None
class RevertVolumeFromSnapshotRsp(AgentResponse):
    """Response for REVERT_SNAPSHOT_PATH; new volume path and its size."""
    def __init__(self):
        super(RevertVolumeFromSnapshotRsp, self).__init__()
        self.newVolumeInstallPath = None
        self.size = None
class ReinitImageRsp(AgentResponse):
    """Response for REINIT_IMAGE_PATH; path of the re-created root volume."""
    def __init__(self):
        super(ReinitImageRsp, self).__init__()
        self.newVolumeInstallPath = None
class MergeSnapshotRsp(AgentResponse):
    """Response for MERGE_SNAPSHOT_PATH; virtual and actual size of result."""
    def __init__(self):
        super(MergeSnapshotRsp, self).__init__()
        self.size = None
        self.actualSize = None
class RebaseAndMergeSnapshotsRsp(AgentResponse):
    """Response for MERGE_AND_REBASE_SNAPSHOT_PATH; sizes of merged result."""
    def __init__(self):
        super(RebaseAndMergeSnapshotsRsp, self).__init__()
        self.size = None
        self.actualSize = None
class CheckBitsRsp(AgentResponse):
    """Response for CHECK_BITS_PATH; whether the requested file exists."""
    def __init__(self):
        super(CheckBitsRsp, self).__init__()
        self.existing = False
class GetMd5Rsp(AgentResponse):
    """Response for GET_MD5_PATH; checksums of the requested files."""
    def __init__(self):
        super(GetMd5Rsp, self).__init__()
        self.md5s = None
class GetBackingFileRsp(AgentResponse):
    """Response for GET_BACKING_FILE_PATH; qcow2 backing file path and size."""
    def __init__(self):
        super(GetBackingFileRsp, self).__init__()
        self.size = None
        self.backingFilePath = None
class GetVolumeSizeRsp(AgentResponse):
    """Response for GET_VOLUME_SIZE; virtual and on-disk size of a volume."""
    def __init__(self):
        super(GetVolumeSizeRsp, self).__init__()
        self.actualSize = None
        self.size = None
class GetVolumeBaseImagePathRsp(AgentResponse):
    """Response for GET_BASE_IMAGE_PATH; base image path and size."""
    def __init__(self):
        super(GetVolumeBaseImagePathRsp, self).__init__()
        self.path = None
        self.size = None
class GetQCOW2ReferenceRsp(AgentResponse):
    """Response for GET_QCOW2_REFERENCE; paths referencing a qcow2 file."""
    def __init__(self):
        super(GetQCOW2ReferenceRsp, self).__init__()
        self.referencePaths = None
class ResizeVolumeRsp(AgentResponse):
    """Response for RESIZE_VOLUME_PATH; the volume's size after resizing."""
    def __init__(self):
        super(ResizeVolumeRsp, self).__init__()
        self.size = None
class ListResponse(AgentResponse):
    """Generic listing response carrying a collection of file paths."""
    def __init__(self):
        super(ListResponse, self).__init__()
        self.paths = []
class CheckInitializedFileRsp(AgentResponse):
    """Response for CHECK_INITIALIZED_FILE; whether the marker file exists."""
    def __init__(self):
        super(CheckInitializedFileRsp, self).__init__()
        self.existed = True
class GetDownloadBitsFromKvmHostProgressRsp(AgentResponse):
    """Response for download-progress queries; total bytes downloaded so far."""
    def __init__(self):
        super(GetDownloadBitsFromKvmHostProgressRsp, self).__init__()
        self.totalSize = None
class DownloadBitsFromKvmHostRsp(AgentResponse):
    """Response for DOWNLOAD_BITS_FROM_KVM_HOST_PATH; downloaded image format."""
    def __init__(self):
        super(DownloadBitsFromKvmHostRsp, self).__init__()
        self.format = None
class LocalStoragePlugin(kvmagent.KvmAgent):
INIT_PATH = "/localstorage/init"
GET_PHYSICAL_CAPACITY_PATH = "/localstorage/getphysicalcapacity"
CREATE_EMPTY_VOLUME_PATH = "/localstorage/volume/createempty"
CREATE_FOLDER_PATH = "/localstorage/volume/createfolder"
CREATE_VOLUME_FROM_CACHE_PATH = "/localstorage/volume/createvolumefromcache"
DELETE_BITS_PATH = "/localstorage/delete"
DELETE_DIR_PATH = "/localstorage/deletedir"
UPLOAD_BIT_PATH = "/localstorage/sftp/upload"
DOWNLOAD_BIT_PATH = "/localstorage/sftp/download"
UPLOAD_TO_IMAGESTORE_PATH = "/localstorage/imagestore/upload"
COMMIT_TO_IMAGESTORE_PATH = "/localstorage/imagestore/commit"
DOWNLOAD_FROM_IMAGESTORE_PATH = "/localstorage/imagestore/download"
REVERT_SNAPSHOT_PATH = "/localstorage/snapshot/revert"
MERGE_SNAPSHOT_PATH = "/localstorage/snapshot/merge"
MERGE_AND_REBASE_SNAPSHOT_PATH = "/localstorage/snapshot/mergeandrebase"
OFFLINE_MERGE_PATH = "/localstorage/snapshot/offlinemerge"
CREATE_TEMPLATE_FROM_VOLUME = "/localstorage/volume/createtemplate"
CHECK_BITS_PATH = "/localstorage/checkbits"
REBASE_ROOT_VOLUME_TO_BACKING_FILE_PATH = "/localstorage/volume/rebaserootvolumetobackingfile"
VERIFY_SNAPSHOT_CHAIN_PATH = "/localstorage/snapshot/verifychain"
REBASE_SNAPSHOT_BACKING_FILES_PATH = "/localstorage/snapshot/rebasebackingfiles"
COPY_TO_REMOTE_BITS_PATH = "/localstorage/copytoremote"
GET_MD5_PATH = "/localstorage/getmd5"
CHECK_MD5_PATH = "/localstorage/checkmd5"
GET_BACKING_FILE_PATH = "/localstorage/volume/getbackingfile"
GET_VOLUME_SIZE = "/localstorage/volume/getsize"
GET_BASE_IMAGE_PATH = "/localstorage/volume/getbaseimagepath"
GET_QCOW2_REFERENCE = "/localstorage/getqcow2reference"
CONVERT_QCOW2_TO_RAW = "/localstorage/imagestore/convert/raw"
RESIZE_VOLUME_PATH = "/localstorage/volume/resize"
REINIT_IMAGE_PATH = "/localstorage/reinit/image"
CHECK_INITIALIZED_FILE = "/localstorage/check/initializedfile"
CREATE_INITIALIZED_FILE = "/localstorage/create/initializedfile"
DOWNLOAD_BITS_FROM_KVM_HOST_PATH = "/localstorage/kvmhost/download"
CANCEL_DOWNLOAD_BITS_FROM_KVM_HOST_PATH = "/localstorage/kvmhost/download/cancel"
GET_DOWNLOAD_BITS_FROM_KVM_HOST_PROGRESS_PATH = "/localstorage/kvmhost/download/progress"
LOCAL_NOT_ROOT_USER_MIGRATE_TMP_PATH = "primary_storage_tmp_dir"
    def start(self):
        """Register every local-storage HTTP endpoint with the agent server."""
        http_server = kvmagent.get_http_server()
        http_server.register_async_uri(self.INIT_PATH, self.init)
        http_server.register_async_uri(self.GET_PHYSICAL_CAPACITY_PATH, self.get_physical_capacity)
        http_server.register_async_uri(self.CREATE_EMPTY_VOLUME_PATH, self.create_empty_volume)
        http_server.register_async_uri(self.CREATE_FOLDER_PATH, self.create_folder)
        http_server.register_async_uri(self.CREATE_VOLUME_FROM_CACHE_PATH, self.create_root_volume_from_template)
        http_server.register_async_uri(self.DELETE_BITS_PATH, self.delete)
        http_server.register_async_uri(self.DELETE_DIR_PATH, self.deletedir)
        http_server.register_async_uri(self.DOWNLOAD_BIT_PATH, self.download_from_sftp)
        http_server.register_async_uri(self.UPLOAD_BIT_PATH, self.upload_to_sftp)
        http_server.register_async_uri(self.UPLOAD_TO_IMAGESTORE_PATH, self.upload_to_imagestore)
        http_server.register_async_uri(self.COMMIT_TO_IMAGESTORE_PATH, self.commit_to_imagestore)
        http_server.register_async_uri(self.DOWNLOAD_FROM_IMAGESTORE_PATH, self.download_from_imagestore)
        http_server.register_async_uri(self.REVERT_SNAPSHOT_PATH, self.revert_snapshot)
        http_server.register_async_uri(self.REINIT_IMAGE_PATH, self.reinit_image)
        http_server.register_async_uri(self.MERGE_SNAPSHOT_PATH, self.merge_snapshot)
        http_server.register_async_uri(self.MERGE_AND_REBASE_SNAPSHOT_PATH, self.merge_and_rebase_snapshot)
        http_server.register_async_uri(self.OFFLINE_MERGE_PATH, self.offline_merge_snapshot)
        http_server.register_async_uri(self.CREATE_TEMPLATE_FROM_VOLUME, self.create_template_from_volume)
        http_server.register_async_uri(self.CHECK_BITS_PATH, self.check_bits)
        http_server.register_async_uri(self.REBASE_ROOT_VOLUME_TO_BACKING_FILE_PATH, self.rebase_root_volume_to_backing_file)
        http_server.register_async_uri(self.VERIFY_SNAPSHOT_CHAIN_PATH, self.verify_backing_file_chain)
        http_server.register_async_uri(self.REBASE_SNAPSHOT_BACKING_FILES_PATH, self.rebase_backing_files)
        # cmd= provides a template object so sensitive fields (dstPassword)
        # are masked when the incoming command is logged.
        http_server.register_async_uri(self.COPY_TO_REMOTE_BITS_PATH, self.copy_bits_to_remote, cmd=CopyBitsFromRemoteCmd())
        http_server.register_async_uri(self.GET_MD5_PATH, self.get_md5)
        http_server.register_async_uri(self.CHECK_MD5_PATH, self.check_md5)
        http_server.register_async_uri(self.GET_BACKING_FILE_PATH, self.get_backing_file_path)
        http_server.register_async_uri(self.GET_VOLUME_SIZE, self.get_volume_size)
        http_server.register_async_uri(self.GET_BASE_IMAGE_PATH, self.get_volume_base_image_path)
        http_server.register_async_uri(self.GET_QCOW2_REFERENCE, self.get_qcow2_reference)
        http_server.register_async_uri(self.CONVERT_QCOW2_TO_RAW, self.convert_qcow2_to_raw)
        http_server.register_async_uri(self.RESIZE_VOLUME_PATH, self.resize_volume)
        http_server.register_async_uri(self.CHECK_INITIALIZED_FILE, self.check_initialized_file)
        http_server.register_async_uri(self.CREATE_INITIALIZED_FILE, self.create_initialized_file)
        http_server.register_async_uri(self.DOWNLOAD_BITS_FROM_KVM_HOST_PATH, self.download_from_kvmhost)
        http_server.register_async_uri(self.CANCEL_DOWNLOAD_BITS_FROM_KVM_HOST_PATH, self.cancel_download_from_kvmhost)
        http_server.register_async_uri(self.GET_DOWNLOAD_BITS_FROM_KVM_HOST_PROGRESS_PATH, self.get_download_bits_from_kvmhost_progress)

        self.imagestore_client = ImageStoreClient()
    def stop(self):
        """Plugin teardown hook; this plugin holds no resources to release."""
        pass
    @kvmagent.replyerror
    def cancel_download_from_kvmhost(self, req):
        # Cancelling a KVM-host download is identical to cancelling an SFTP
        # download: kill the copy process and remove the partial file.
        return self.cancel_download_from_sftp(req)
@kvmagent.replyerror
def get_download_bits_from_kvmhost_progress(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = GetDownloadBitsFromKvmHostProgressRsp()
rsp.totalSize = linux.get_total_file_size(cmd.volumePaths)
return jsonobject.dumps(rsp)
    def cancel_download_from_sftp(self, req):
        """Kill any transfer touching the install path and drop the partial file."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        # NOTE(review): pkill -f matches the path anywhere in any command line,
        # so an unrelated process mentioning the same path would also be
        # killed -- confirm the path is unique enough in practice.
        shell.run("pkill -9 -f '%s'" % cmd.primaryStorageInstallPath)
        self.do_delete_bits(cmd.primaryStorageInstallPath)
        return jsonobject.dumps(rsp)
    @kvmagent.replyerror
    @completetask
    def download_from_kvmhost(self, req):
        """Download bits from another KVM host, re-using the SFTP code path.

        The task bookkeeping lets a retried request re-attach to a
        still-running download in this agent process instead of starting a
        second copy of the same file.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DownloadBitsFromKvmHostRsp()
        install_path = cmd.primaryStorageInstallPath
        # todo: assume agent will not restart, maybe need clean
        last_task = self.load_and_save_task(req, rsp, os.path.exists, install_path)
        if last_task and last_task.agent_pid == os.getpid():
            # Same agent process already runs this download: wait for it.
            rsp = self.wait_task_complete(last_task)
            return jsonobject.dumps(rsp)
        self.do_download_from_sftp(cmd)
        rsp.format = linux.get_img_fmt(install_path)
        return jsonobject.dumps(rsp)
@kvmagent.replyerror
def check_initialized_file(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
file_path = cmd.filePath
rsp = CheckInitializedFileRsp()
if file_path is None:
rsp.success = False
rsp.error = "input file path is None"
else:
rsp.existed = os.path.exists(file_path)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_initialized_file(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
file_path = cmd.filePath
rsp = AgentResponse()
if file_path is None:
rsp.success = False
rsp.error = "input file path is None"
else:
if not os.path.exists(file_path):
f = open(file_path, 'w')
f.close()
return jsonobject.dumps(rsp)
    @kvmagent.replyerror
    def resize_volume(self, req):
        """Resize a qcow2 volume and report its new virtual size."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        install_path = cmd.installPath
        rsp = ResizeVolumeRsp()
        # NOTE(review): install_path/size are not shell-quoted; paths with
        # spaces or shell metacharacters would break -- confirm inputs are
        # management-node controlled.
        shell.call("qemu-img resize %s %s" % (install_path, cmd.size))
        ret = linux.qcow2_virtualsize(install_path)
        rsp.size = ret
        return jsonobject.dumps(rsp)
    @kvmagent.replyerror
    def convert_qcow2_to_raw(self, req):
        # Conversion is fully delegated to the image store client.
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        return self.imagestore_client.convert_image_raw(cmd)
@kvmagent.replyerror
def get_qcow2_reference(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
out = shell.call('find %s/ -type f' % cmd.searchingDir)
rsp = GetQCOW2ReferenceRsp()
rsp.referencePaths = []
real_path = os.path.realpath(cmd.path)
for f in out.splitlines():
backing_file = linux.qcow2_get_backing_file(f)
if os.path.realpath(backing_file) == real_path:
rsp.referencePaths.append(f)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_volume_size(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = GetVolumeSizeRsp()
rsp.size, rsp.actualSize = linux.qcow2_size_and_actual_size(cmd.installPath)
return jsonobject.dumps(rsp)
    @kvmagent.replyerror
    def get_volume_base_image_path(self, req):
        """Resolve the image-cache template a volume was originally cloned from."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetVolumeBaseImagePathRsp()
        # Sanity check: the volume install dir is expected to be named after
        # the volume uuid.
        if not os.path.basename(cmd.volumeInstallDir).endswith(cmd.volumeUuid):
            raise Exception('maybe you pass a wrong install dir')
        path = linux.get_qcow2_base_image_recusively(cmd.volumeInstallDir, cmd.imageCacheDir)
        if not path:
            # No base image found: return an empty response (path unset).
            return jsonobject.dumps(rsp)
        rsp.path = path
        # Size of the whole backing chain, not just the top file.
        rsp.size = linux.get_qcow2_file_chain_size(path)
        return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_backing_file_path(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
out = linux.qcow2_get_backing_file(cmd.path)
rsp = GetBackingFileRsp()
if out:
rsp.backingFilePath = out
rsp.size = os.path.getsize(out)
return jsonobject.dumps(rsp)
    @kvmagent.replyerror
    def get_md5(self, req):
        """Compute the md5 of each requested file, reporting scaled progress.

        Progress is read from a temp file fed by ``pv`` and mapped into the
        [start, end] slice of the overall migration progress bar.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetMd5Rsp()
        rsp.md5s = []
        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl
        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "LocalStorageMigrateVolume"
        # pv writes its percentage into this temp file.
        PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
        total = 0
        written = 0
        for to in cmd.md5s:
            total = total + os.path.getsize(to.path)
        start = 0
        end = 10
        if cmd.stage:
            start, end = get_scale(cmd.stage)
        # Closure: reads `to` and `written` from the enclosing loop (late
        # binding), so it always reflects the file currently being hashed.
        def _get_progress(synced):
            logger.debug("getProgress in get_md5")
            if not os.path.exists(PFILE):
                return synced
            last = linux.tail_1(PFILE).strip()
            if not last or not last.isdigit():
                return synced
            percent = int(round((float(written) * 100 + os.path.getsize(to.path) * float(last)) / total * (end - start) / 100) + start)
            report.progress_report(str(percent), "report")
            return synced
        report.resourceUuid = cmd.volumeUuid
        if start == 0:
            report.progress_report("0", "start")
        else:
            report.progress_report(str(start), "report")
        for to in cmd.md5s:
            _, md5, _ = bash_progress_1("pv -n %s 2>%s | md5sum | cut -d ' ' -f 1" % (to.path, PFILE), _get_progress)
            rsp.md5s.append({
                'resourceUuid': to.resourceUuid,
                'path': to.path,
                'md5': md5
            })
            written += os.path.getsize(to.path)
            percent = int(round(float(written) / float(total) * (end - start) + start))
            # NOTE(review): an int is passed here while every other call in
            # this method passes a string -- confirm progress_report accepts both.
            report.progress_report(percent, "report")
        if os.path.exists(PFILE):
            os.remove(PFILE)
        return jsonobject.dumps(rsp)
    @kvmagent.replyerror
    def check_md5(self, req):
        """Re-hash each migrated file and fail if any md5 mismatches the source.

        Mirrors get_md5 but compares against the source-host md5s and raises
        on the first mismatch.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl
        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "LocalStorageMigrateVolume"
        # pv writes its percentage into this temp file.
        PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
        total = 0
        written = 0
        start = 90
        end = 100
        if cmd.stage:
            start, end = get_scale(cmd.stage)
        for to in cmd.md5s:
            total = total + os.path.getsize(to.path)
        # Closure: reads `to` and `written` from the enclosing loop (late binding).
        def _get_progress(synced):
            logger.debug("getProgress in check_md5")
            if not os.path.exists(PFILE):
                return synced
            last = linux.tail_1(PFILE).strip()
            if not last or not last.isdigit():
                return synced
            percent = int(round((float(written) * 100 + os.path.getsize(to.path) * float(last)) / total * (end - start) / 100) + start)
            report.progress_report(percent, "report")
            return synced
        report.resourceUuid = cmd.volumeUuid
        for to in cmd.md5s:
            _, dst_md5, _ = bash_progress_1("pv -n %s 2>%s | md5sum | cut -d ' ' -f 1" % (to.path, PFILE), _get_progress)
            if dst_md5 != to.md5:
                raise Exception("MD5 unmatch. The file[uuid:%s, path:%s]'s md5 (src host:%s, dst host:%s)" %
                                (to.resourceUuid, to.path, to.md5, dst_md5))
            written += os.path.getsize(to.path)
            percent = int(round(float(written) / float(total) * (end - start) + start))
            report.progress_report(percent, "report")
        if os.path.exists(PFILE):
            os.remove(PFILE)
        rsp = AgentResponse()
        if end == 100:
            report.progress_report("100", "finish")
        else:
            report.progress_report(str(end), "report")
        return jsonobject.dumps(rsp)
@staticmethod
def _get_disk_capacity(path):
if not path:
raise Exception('storage path cannot be None')
return linux.get_disk_capacity_by_df(path)
    @kvmagent.replyerror
    @in_bash
    def copy_bits_to_remote(self, req):
        """rsync the whole qcow2 chain of each volume to a remote root host.

        @in_bash substitutes the UPPER_CASE locals into the {{VAR}}
        placeholders of the bash command templates below.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        if cmd.dstUsername != 'root':
            raise Exception("cannot support migrate to non-root user host")
        # Flatten the backing chains of all requested paths into one list.
        chain = sum([linux.qcow2_get_file_chain(p) for p in cmd.paths], [])
        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl
        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "LocalStorageMigrateVolume"
        report.resourceUuid = cmd.volumeUuid
        PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
        PASSWORD_FILE = linux.write_to_temp_file(cmd.dstPassword)
        start = 10
        end = 90
        if cmd.stage:
            start, end = get_scale(cmd.stage)
        total = 0
        for path in set(chain):
            total = total + os.path.getsize(path)
        written = 0
        # Parses rsync's progress output (redirected to PFILE) and maps it
        # into the [start, end] slice of the migration progress bar.
        def _get_progress(synced):
            logger.debug("getProgress in localstorage-agent, synced: %s, total: %s" % (synced, total))
            if not os.path.exists(PFILE):
                return synced
            fpread = open(PFILE, 'r')
            lines = fpread.readlines()
            if not lines:
                fpread.close()
                return synced
            last = str(lines[-1]).strip().split('\r')[-1]
            if not last or len(last.split()) < 1:
                fpread.close()
                return synced
            line = last.split()[0]
            if not line.isdigit():
                # NOTE(review): this early return leaks fpread (no close());
                # harmless short-term but worth fixing.
                return synced
            if total > 0:
                synced = long(line)
                if synced < total:
                    percent = int(round(float(written + synced) / float(total) * (end - start) + start))
                    report.progress_report(percent, "report")
                    synced = written
            fpread.close()
            return synced
        for path in set(chain):
            PATH = path
            USER = cmd.dstUsername
            IP = cmd.dstIp
            PORT = (cmd.dstPort and cmd.dstPort or "22")
            DIR = os.path.dirname(path)
            # --relative reproduces the full source path on the destination.
            _, _, err = bash_progress_1(
                # Fixes ZSTAC-13430: handle extremely complex password like ~ ` !@#$%^&*()_+-=[]{}|?<>;:'"/ .
                'rsync -av --progress --relative {{PATH}} --rsh="/usr/bin/sshpass -f{{PASSWORD_FILE}} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p {{PORT}} -l {{USER}}" {{IP}}:/ 1>{{PFILE}}', _get_progress, False)
            if err:
                linux.rm_file_force(PASSWORD_FILE)
                linux.rm_file_force(PFILE)
                raise Exception('fail to migrate vm to host, because %s' % str(err))
            written += os.path.getsize(path)
            # Force the remote side to flush the copied file to disk.
            bash_errorout('/usr/bin/sshpass -f{{PASSWORD_FILE}} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p {{PORT}} {{USER}}@{{IP}} "/bin/sync {{PATH}}"')
            percent = int(round(float(written) / float(total) * (end - start) + start))
            report.progress_report(percent, "report")
        linux.rm_file_force(PASSWORD_FILE)
        linux.rm_file_force(PFILE)
        rsp = AgentResponse()
        rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
        return jsonobject.dumps(rsp)
@kvmagent.replyerror
def verify_backing_file_chain(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
for sp in cmd.snapshots:
if not os.path.exists(sp.path):
raise Exception('cannot find the file[%s]' % sp.path)
if sp.parentPath and not os.path.exists(sp.parentPath):
raise Exception('cannot find the backing file[%s]' % sp.parentPath)
if sp.parentPath:
out = linux.qcow2_get_backing_file(sp.path)
if sp.parentPath != out:
raise Exception("resource[Snapshot or Volume, uuid:%s, path:%s]'s backing file[%s] is not equal to %s" %
(sp.snapshotUuid, sp.path, out, sp.parentPath))
return jsonobject.dumps(AgentResponse())
@kvmagent.replyerror
def rebase_backing_files(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
for sp in cmd.snapshots:
if sp.parentPath:
linux.qcow2_rebase_no_check(sp.parentPath, sp.path)
return jsonobject.dumps(AgentResponse())
@kvmagent.replyerror
def check_bits(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = CheckBitsRsp()
rsp.existing = os.path.exists(cmd.path)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@rollback.rollback
def create_template_from_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
dirname = os.path.dirname(cmd.installPath)
if not os.path.exists(dirname):
os.makedirs(dirname, 0755)
@rollback.rollbackable
def _0():
linux.rm_file_force(cmd.insallPath)
_0()
t_shell = traceable_shell.get_shell(cmd)
linux.create_template(cmd.volumePath, cmd.installPath, shell=t_shell)
logger.debug('successfully created template[%s] from volume[%s]' % (cmd.installPath, cmd.volumePath))
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def revert_snapshot(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = RevertVolumeFromSnapshotRsp()
install_path = cmd.snapshotInstallPath
new_volume_path = os.path.join(os.path.dirname(install_path), '{0}.qcow2'.format(uuidhelper.uuid()))
linux.qcow2_clone_with_cmd(install_path, new_volume_path, cmd)
size = linux.qcow2_virtualsize(new_volume_path)
rsp.newVolumeInstallPath = new_volume_path
rsp.size = size
return jsonobject.dumps(rsp)
    @kvmagent.replyerror
    def reinit_image(self, req):
        """Re-create a volume as a fresh clone of its original cached image."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = ReinitImageRsp()
        install_path = cmd.imagePath
        dirname = os.path.dirname(cmd.volumePath)
        if not os.path.exists(dirname):
            os.makedirs(dirname, 0775)
        # Clone into a brand-new file; the old volume file is left untouched.
        new_volume_path = os.path.join(dirname, '{0}.qcow2'.format(uuidhelper.uuid()))
        linux.qcow2_clone_with_cmd(install_path, new_volume_path, cmd)
        rsp.newVolumeInstallPath = new_volume_path
        return jsonobject.dumps(rsp)
@kvmagent.replyerror
def merge_snapshot(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = MergeSnapshotRsp()
workspace_dir = os.path.dirname(cmd.workspaceInstallPath)
if not os.path.exists(workspace_dir):
os.makedirs(workspace_dir)
linux.create_template(cmd.snapshotInstallPath, cmd.workspaceInstallPath)
rsp.size, rsp.actualSize = linux.qcow2_size_and_actual_size(cmd.workspaceInstallPath)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def merge_and_rebase_snapshot(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
snapshots = cmd.snapshotInstallPaths
count = len(snapshots)
for i in range(count):
if i+1 < count:
target = snapshots[i]
backing_file = snapshots[i+1]
linux.qcow2_rebase_no_check(backing_file, target)
latest = snapshots[0]
rsp = RebaseAndMergeSnapshotsRsp()
workspace_dir = os.path.dirname(cmd.workspaceInstallPath)
if not os.path.exists(workspace_dir):
os.makedirs(workspace_dir)
linux.create_template(latest, cmd.workspaceInstallPath)
rsp.size, rsp.actualSize = linux.qcow2_size_and_actual_size(cmd.workspaceInstallPath)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
    @kvmagent.replyerror
    def offline_merge_snapshot(self, req):
        """Merge snapshot contents while the VM is offline.

        A partial rebase just re-links destPath onto srcPath; a full rebase
        flattens destPath's whole chain into a temp image that then replaces
        destPath.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        if not cmd.fullRebase:
            linux.qcow2_rebase(cmd.srcPath, cmd.destPath)
        else:
            # Flatten destPath into tmp, then swap it into place.
            tmp = os.path.join(os.path.dirname(cmd.destPath), '%s.qcow2' % uuidhelper.uuid())
            linux.create_template(cmd.destPath, tmp)
            shell.call("mv %s %s" % (tmp, cmd.destPath))
        rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
        return jsonobject.dumps(rsp)
@kvmagent.replyerror
def get_physical_capacity(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def rebase_root_volume_to_backing_file(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
linux.qcow2_rebase_no_check(cmd.backingFilePath, cmd.rootVolumePath)
return jsonobject.dumps(AgentResponse())
    @kvmagent.replyerror
    def init(self, req):
        """Initialize local storage: root dir, marker file, capacity report."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        if not os.path.exists(cmd.path):
            os.makedirs(cmd.path, 0755)
        if cmd.initFilePath:
            # Touch the initialization marker if it does not exist yet.
            if not os.path.exists(cmd.initFilePath):
                f = open(cmd.initFilePath, 'w')
                f.close()
        rsp = InitRsp()
        rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.path)
        rsp.localStorageUsedCapacity = linux.get_used_disk_apparent_size(cmd.path, 0, 1)
        return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_folder(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
try:
dirname = os.path.dirname(cmd.installUrl)
if not os.path.exists(dirname):
os.makedirs(dirname)
except Exception as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = 'unable to create folder at %s, because %s' % (cmd.installUrl, str(e))
rsp.success = False
return jsonobject.dumps(rsp)
logger.debug('successfully create folder at %s' % cmd.installUrl)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_empty_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
try:
self.do_create_empty_volume(cmd)
except Exception as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = 'unable to create empty volume[uuid:%s, name:%s], %s' % (cmd.volumeUuid, cmd.name, str(e))
rsp.success = False
return jsonobject.dumps(rsp)
logger.debug('successfully create empty volume[uuid:%s, size:%s] at %s' % (cmd.volumeUuid, cmd.size, cmd.installUrl))
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
def do_create_empty_volume(self, cmd):
dirname = os.path.dirname(cmd.installUrl)
if not os.path.exists(dirname):
os.makedirs(dirname)
if cmd.backingFile:
linux.qcow2_create_with_backing_file_and_cmd(cmd.backingFile, cmd.installUrl, cmd)
else:
linux.qcow2_create_with_cmd(cmd.installUrl, cmd.size, cmd)
    @kvmagent.replyerror
    def create_root_volume_from_template(self, req):
        """Clone a cached template into a new root volume."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        if not os.path.exists(cmd.templatePathInCache):
            rsp.error = "unable to find image in cache"
            rsp.success = False
            logger.debug('error: %s: %s' % (rsp.error, cmd.templatePathInCache))
            return jsonobject.dumps(rsp)
        dirname = os.path.dirname(cmd.installUrl)
        if not os.path.exists(dirname):
            os.makedirs(dirname, 0775)
        linux.qcow2_clone_with_cmd(cmd.templatePathInCache, cmd.installUrl, cmd)
        rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
        return jsonobject.dumps(rsp)
@kvmagent.replyerror
def delete(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
if cmd.path:
kvmagent.deleteImage(cmd.path)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def deletedir(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
linux.rm_dir_checked(cmd.path)
logger.debug('successfully delete %s' % cmd.path)
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def upload_to_sftp(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
def upload():
if not os.path.exists(cmd.primaryStorageInstallPath):
raise kvmagent.KvmError('cannot find %s' % cmd.primaryStorageInstallPath)
linux.scp_upload(cmd.hostname, cmd.sshKey, cmd.primaryStorageInstallPath, cmd.backupStorageInstallPath, cmd.username, cmd.sshPort)
try:
upload()
except kvmagent.KvmError as e:
logger.warn(linux.get_exception_stacktrace())
rsp.error = str(e)
rsp.success = False
return jsonobject.dumps(rsp)
    @kvmagent.replyerror
    def upload_to_imagestore(self, req):
        # Upload is fully delegated to the image store client.
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        return self.imagestore_client.upload_to_imagestore(cmd, req)
    @kvmagent.replyerror
    def commit_to_imagestore(self, req):
        # Commit is fully delegated to the image store client.
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        return self.imagestore_client.commit_to_imagestore(cmd, req)
@kvmagent.replyerror
def download_from_sftp(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentResponse()
try:
self.do_download_from_sftp(cmd)
except Exception as e:
content = traceback.format_exc()
logger.warn(content)
err = "unable to download %s/%s, because %s" % (cmd.hostname, cmd.backupStorageInstallPath, str(e))
rsp.error = err
rsp.success = False
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
    def do_download_from_sftp(self, cmd):
        # scp the file from the backup-storage host, honoring the bandwidth limit.
        linux.scp_download(cmd.hostname, cmd.sshKey, cmd.backupStorageInstallPath, cmd.primaryStorageInstallPath, cmd.username, cmd.sshPort, cmd.bandWidth)
        logger.debug('successfully download %s/%s to %s' % (cmd.hostname, cmd.backupStorageInstallPath, cmd.primaryStorageInstallPath))
@kvmagent.replyerror
def download_from_imagestore(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
cachedir = None if cmd.isData else cmd.storagePath
self.imagestore_client.download_from_imagestore(cachedir, cmd.hostname, cmd.backupStorageInstallPath, cmd.primaryStorageInstallPath)
if cmd.isData:
self.imagestore_client.clean_meta(cmd.primaryStorageInstallPath)
rsp = AgentResponse()
rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
return jsonobject.dumps(rsp)
| 40.080796 | 235 | 0.668381 |
c2d88515894b5e90e7a57a065e97a1510061bc94 | 3,404 | py | Python | src/eiffel_graphql_api/graphql/schemas/union.py | magnusbaeck/eiffel-graphql-api | c0cd0dc3fdad7787988599974ace2a4cebf70844 | [
"Apache-2.0"
] | null | null | null | src/eiffel_graphql_api/graphql/schemas/union.py | magnusbaeck/eiffel-graphql-api | c0cd0dc3fdad7787988599974ace2a4cebf70844 | [
"Apache-2.0"
] | null | null | null | src/eiffel_graphql_api/graphql/schemas/union.py | magnusbaeck/eiffel-graphql-api | c0cd0dc3fdad7787988599974ace2a4cebf70844 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import graphene
from graphene import relay
from .links import *
from .events import *
class NotFound(graphene.ObjectType):
    """GraphQL node returned when a linked Eiffel event cannot be resolved."""
    # Exposed GraphQL fields describing the dangling link.
    target = graphene.String()
    type = graphene.String()
    reason = graphene.String()
    # Kept for interface parity with event types backed by MongoDB documents.
    mongo = None
    def __init__(self, link, reason):
        # `link` is the raw link mapping (keys "target" and "type") that
        # failed to resolve; `reason` explains why resolution failed.
        self.mongo = {}
        self.link = link
        self.reason = reason
    def resolve_target(self, info):
        return self.link.get("target")
    def resolve_type(self, info):
        return self.link.get("type")
    def resolve_reason(self, info):
        return self.reason
class EiffelContextUnion(graphene.Union):
    # Possible targets of a CONTEXT link; NotFound covers dangling links.
    class Meta:
        types = (ActivityTriggered, TestSuiteStarted, NotFound)
class EiffelIutUnion(graphene.Union):
    # Possible targets of an IUT (item under test) link.
    class Meta:
        types = (ArtifactCreated, CompositionDefined, NotFound)
class EiffelElementUnion(graphene.Union):
    # Possible targets of an ELEMENT link.
    class Meta:
        types = (CompositionDefined, ArtifactCreated, SourceChangeSubmitted, NotFound)
class EiffelVerificationBasisUnion(graphene.Union):
    # Possible targets of a VERIFICATION_BASIS link.
    class Meta:
        types = (TestCaseFinished, TestSuiteFinished, NotFound)
class EiffelSubjectUnion(graphene.Union):
    # Possible targets of a SUBJECT link.
    class Meta:
        types = (CompositionDefined, ArtifactCreated, SourceChangeSubmitted,
                 SourceChangeCreated, NotFound)
class EiffelEventUnion(graphene.Union):
    # Union over every Eiffel event type; used where a link may point at
    # any kind of event. NotFound covers dangling links.
    class Meta:
        types = (ActivityTriggered, ActivityStarted, ActivityFinished, ActivityCanceled,
                 AnnouncementPublished, ArtifactCreated, ArtifactPublished, ArtifactReused,
                 CompositionDefined, ConfidenceLevelModified, EnvironmentDefined, FlowContextDefined,
                 IssueDefined, IssueVerified, SourceChangeCreated, SourceChangeSubmitted,
                 TestCaseCanceled, TestCaseFinished, TestCaseStarted, TestCaseTriggered,
                 TestExecutionRecipeCollectionCreated, TestSuiteFinished, TestSuiteStarted,
                 NotFound)
class EiffelLinkUnion(graphene.Union):
    # Union over every Eiffel link type a query may return.
    class Meta:
        types = (Context, FlowContext, Cause,
                 ActivityExecution, PreviousActivityExecution,
                 ModifiedAnnouncement, Composition, CompositionPreviousVersion,
                 Environment, ArtifactPreviousVersion, Artifact,
                 ReusedArtifact, Element, Subject, SubConfidenceLevel,
                 EnvironmentPreviousVersion, SuccessfulIssue, FailedIssue,
                 InconclusiveIssue, IUT, Base, SourceCreatedPreviousVersion,
                 PartiallyResolvedIssue, ResolvedIssue, DeresolvedIssue,
                 SourceChange, SourceSubmittedPreviousVersion, TestCaseExecution,
                 TestSuiteExecution, Tercc)
class ReverseConnection(relay.Connection):
    # Relay connection for paginating events that link *to* a given event.
    class Meta:
        node = EiffelEventUnion
| 34.734694 | 101 | 0.709166 |
e4f73305ec93a76ab8f972b47e474d19ff369e02 | 296 | py | Python | py_game_1/text.py | ankitsumitg/python-projects | 34a3df6fcd8544bf83aa9f3d47ec160e3838b1d1 | [
"MIT"
] | 1 | 2021-03-22T20:45:06.000Z | 2021-03-22T20:45:06.000Z | py_game_1/text.py | ankitsumitg/python-projects | 34a3df6fcd8544bf83aa9f3d47ec160e3838b1d1 | [
"MIT"
] | null | null | null | py_game_1/text.py | ankitsumitg/python-projects | 34a3df6fcd8544bf83aa9f3d47ec160e3838b1d1 | [
"MIT"
] | null | null | null | import pygame
class Text:
def __init__(self,screen,color):
self.screen = screen
self.color = color
def display(self,text,x,y):
font = pygame.font.Font('freesansbold.ttf', 25)
text = font.render(text, True, self.color)
self.screen.blit(text, (x,y))
| 29.6 | 55 | 0.618243 |
d8dafcad3365e3f7b37b808609e557883f39d86f | 21,764 | py | Python | env/Lib/site-packages/lightbulb/checks.py | AtharvVohra/iseakai-d-bot | 1257372bb5ab388a87a63a458aa6b173097227d3 | [
"Apache-2.0"
] | null | null | null | env/Lib/site-packages/lightbulb/checks.py | AtharvVohra/iseakai-d-bot | 1257372bb5ab388a87a63a458aa6b173097227d3 | [
"Apache-2.0"
] | null | null | null | env/Lib/site-packages/lightbulb/checks.py | AtharvVohra/iseakai-d-bot | 1257372bb5ab388a87a63a458aa6b173097227d3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright © tandemdude 2020-present
#
# This file is part of Lightbulb.
#
# Lightbulb is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lightbulb is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lightbulb. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
# Names re-exported as the public API of this module: the Check class plus
# every pre-built check factory.
__all__ = [
    "Check",
    "owner_only",
    "guild_only",
    "dm_only",
    "bot_only",
    "webhook_only",
    "human_only",
    "nsfw_channel_only",
    "has_roles",
    "has_role_permissions",
    "has_channel_permissions",
    "has_guild_permissions",
    "bot_has_guild_permissions",
    "bot_has_role_permissions",
    "bot_has_channel_permissions",
    "has_attachments",
]
import functools
import inspect
import operator
import typing as t
import warnings
import hikari
from lightbulb import context as context_
from lightbulb import errors
from lightbulb.utils import permissions
if t.TYPE_CHECKING:
from lightbulb import app
from lightbulb import commands
from lightbulb import plugins
# Signature shared by all check callbacks: they take the invocation context
# and return a bool (or a coroutine resolving to one). functools.partial is
# allowed so checks can be parameterized.
_CallbackT = t.Union[
    t.Callable[[context_.base.Context], t.Union[bool, t.Coroutine[t.Any, t.Any, bool]]], functools.partial
]
class _ExclusiveCheck:
    """Group of checks combined with ``|`` where only ONE needs to pass.

    Children are evaluated in order and the first passing check short-circuits
    evaluation; if none pass, a :obj:`errors.CheckFailure` aggregating every
    failure is raised.
    """

    def __init__(self, *checks: "Check") -> None:
        self._checks = list(checks)

    @property
    def __name__(self) -> str:
        # Property (not a plain method) so it mirrors Check.__name__ and can
        # be interpolated directly into failure messages.
        return repr(self)

    def __repr__(self) -> str:
        return f"ExclusiveCheck({', '.join(repr(c) for c in self._checks)})"

    def __or__(self, other: t.Union["_ExclusiveCheck", "Check"]) -> "_ExclusiveCheck":
        # Folding in another exclusive check flattens its children so the
        # group stays one flat list.
        if isinstance(other, _ExclusiveCheck):
            self._checks.extend(other._checks)
        else:
            self._checks.append(other)
        return self

    async def _evaluate(self, context: context_.base.Context) -> bool:
        failed = []
        for check in self._checks:
            try:
                res = check(context)
                if inspect.iscoroutine(res):
                    res = await res
                if not res:
                    raise errors.CheckFailure(f"Check {check.__name__!r} failed")
                return True
            except Exception as ex:
                if isinstance(ex, errors.CheckFailure) and not ex.__cause__:
                    # Bug fix: the wrapper's __cause__ was previously set to
                    # the wrapper itself (the name was rebound before the
                    # assignment); chain it to the original failure instead.
                    cause = ex
                    ex = errors.CheckFailure(str(cause), causes=[cause])
                    ex.__cause__ = cause
                failed.append(ex)
        if failed:
            if len(failed) == 1:
                raise failed[0]
            raise errors.CheckFailure(
                "None of the exclusive checks passed: " + ", ".join(str(ex) for ex in failed), causes=failed
            )
        return True

    def __call__(self, context: context_.base.Context) -> t.Coroutine[t.Any, t.Any, bool]:
        return self._evaluate(context)

    def add_to_object_hook(
        self, obj: t.Union[plugins.Plugin, app.BotApp, commands.base.CommandLike]
    ) -> t.Union[plugins.Plugin, app.BotApp, commands.base.CommandLike]:
        """Run every child check's add hook against *obj* and return it."""
        for check in self._checks:
            check.add_to_object_hook(obj)
        return obj
class Check:
    """
    Class representing a check. Check functions can be synchronous or asynchronous functions which take
    a single argument, which will be the context that the command is being invoked under, and return
    a boolean or raise a :obj:`.errors.CheckFailure` indicating whether the check passed or failed.

    Args:
        p_callback (CallbackT): Check function to use for prefix commands.
        s_callback (Optional[CallbackT]): Check function to use for slash commands.
        m_callback (Optional[CallbackT]): Check function to use for message commands.
        u_callback (Optional[CallbackT]): Check function to use for user commands.
        add_hook (Optional[Callable[[T], T]]): Function called when the check is added to an object.
    """
    __slots__ = ("prefix_callback", "slash_callback", "message_callback", "user_callback", "add_to_object_hook")
    def __init__(
        self,
        p_callback: _CallbackT,
        s_callback: t.Optional[_CallbackT] = None,
        m_callback: t.Optional[_CallbackT] = None,
        u_callback: t.Optional[_CallbackT] = None,
        add_hook: t.Optional[
            t.Callable[
                [t.Union[plugins.Plugin, app.BotApp, commands.base.CommandLike]],
                t.Union[plugins.Plugin, app.BotApp, commands.base.CommandLike],
            ]
        ] = None,
    ) -> None:
        # Unspecified per-command-type callbacks fall back to the prefix one.
        self.prefix_callback = p_callback
        self.slash_callback = s_callback or p_callback
        self.message_callback = m_callback or p_callback
        self.user_callback = u_callback or p_callback
        # Identity hook by default: adding the check to an object is a no-op.
        self.add_to_object_hook = add_hook or (lambda o: o)
    def __repr__(self) -> str:
        return f"Check({self.__name__.strip('_')})"
    def __or__(self, other: t.Union["Check", _ExclusiveCheck]) -> _ExclusiveCheck:
        # `check_a | check_b` builds an exclusive group (either may pass).
        return _ExclusiveCheck(self) | other
    @property
    def __name__(self) -> str:
        # Unwrap functools.partial so the underlying function's name is shown.
        if isinstance(self.prefix_callback, functools.partial):
            return self.prefix_callback.func.__name__
        return self.prefix_callback.__name__
    def __call__(self, context: context_.base.Context) -> t.Union[bool, t.Coroutine[t.Any, t.Any, bool]]:
        # Dispatch to the callback matching the concrete context type;
        # unknown context types pass unconditionally.
        if isinstance(context, context_.prefix.PrefixContext):
            return self.prefix_callback(context)
        elif isinstance(context, context_.slash.SlashContext):
            return self.slash_callback(context)
        elif isinstance(context, context_.message.MessageContext):
            return self.message_callback(context)
        elif isinstance(context, context_.user.UserContext):
            return self.user_callback(context)
        return True
async def _owner_only(context: context_.base.Context) -> bool:
    """Pass only when the invoker is one of the application's owners."""
    owner_ids = context.app.owner_ids
    if not owner_ids:
        # Owner IDs are fetched lazily and cached on the app.
        owner_ids = await context.app.fetch_owner_ids()
        context.app.owner_ids = owner_ids
    if context.author.id in owner_ids:
        return True
    raise errors.NotOwner("You are not the owner of this bot")
def _guild_only(context: context_.base.Context) -> bool:
    """Pass only when the command is invoked inside a guild."""
    if context.guild_id is not None:
        return True
    raise errors.OnlyInGuild("This command can only be used in a guild")
def _dm_only(context: context_.base.Context) -> bool:
    """Pass only when the command is invoked in a DM (no guild attached)."""
    if context.guild_id is None:
        return True
    raise errors.OnlyInDM("This command can only be used in DMs")
def _bot_only(context: context_.base.Context) -> bool:
    """Pass only when the invoker is a bot account."""
    if context.author.is_bot:
        return True
    raise errors.BotOnly("This command can only be used by bots")
def _webhook_only(context: context_.base.Context) -> bool:
    """Pass only when the invocation comes from a webhook message."""
    # Only prefix invocations carry a message, so anything else cannot be a
    # webhook; short-circuiting keeps the two original raises as one.
    if not isinstance(context, context_.prefix.PrefixContext) or context.event.message.webhook_id is None:
        raise errors.WebhookOnly("This command can only be used by webhooks")
    return True
def _human_only(context: context_.base.Context) -> bool:
    """Pass only when the invoker is a human (neither bot nor webhook)."""
    from_webhook = (
        isinstance(context, context_.prefix.PrefixContext) and context.event.message.webhook_id is not None
    )
    if context.author.is_bot or from_webhook:
        raise errors.HumanOnly("This command can only be used by humans")
    return True
def _nsfw_channel_only(context: context_.base.Context) -> bool:
    """Pass only when invoked from a guild channel flagged as NSFW."""
    if context.guild_id is None:
        raise errors.NSFWChannelOnly("This command can only be used in NSFW channels")
    channel = context.get_channel()
    # De Morgan of the original: require a guild channel AND the NSFW flag.
    if not (isinstance(channel, hikari.GuildChannel) and channel.is_nsfw):
        raise errors.NSFWChannelOnly("This command can only be used in NSFW channels")
    return True
def _has_roles(
    context: context_.base.Context, *, roles: t.Sequence[int], check_func: t.Callable[[t.Sequence[bool]], bool]
) -> bool:
    """Pass when *check_func* accepts the invoker's membership flags for *roles*."""
    _guild_only(context)
    assert context.member is not None
    # One boolean per requested role: does the member hold it?
    membership = [role_id in context.member.role_ids for role_id in roles]
    if check_func(membership):
        return True
    raise errors.MissingRequiredRole("You are missing one or more roles required in order to run this command")
def _has_guild_permissions(context: context_.base.Context, *, perms: hikari.Permissions) -> bool:
    """Check that the invoker holds every permission in ``perms``.

    Both role permissions and channel overwrites are taken into account;
    the guild owner passes unconditionally.

    Raises:
        :obj:`~.errors.InsufficientCache`: If the channel or guild is uncached.
        :obj:`~.errors.MissingRequiredPermission`: If any permission is missing.
    """
    _guild_only(context)
    channel = context.get_channel()
    guild = context.get_guild()
    if channel is None or guild is None:
        raise errors.InsufficientCache("Some objects required for this check could not be resolved from the cache")
    if guild.owner_id == context.author.id:
        # Guild owners implicitly hold every permission.
        return True
    assert context.member is not None and isinstance(channel, hikari.GuildChannel)
    # ~granted & required leaves exactly the required permissions not granted.
    missing = ~permissions.permissions_in(channel, context.member) & perms
    if missing is not hikari.Permissions.NONE:
        raise errors.MissingRequiredPermission(
            "You are missing one or more permissions required in order to run this command", perms=missing
        )
    return True
def _has_role_permissions(context: context_.base.Context, *, perms: hikari.Permissions) -> bool:
    """Check that the invoker's roles alone grant every permission in ``perms``.

    Channel permission overwrites are deliberately ignored.

    Raises:
        :obj:`~.errors.MissingRequiredPermission`: If any permission is missing.
    """
    _guild_only(context)
    member = context.member
    assert member is not None
    missing = ~permissions.permissions_for(member) & perms
    if missing is not hikari.Permissions.NONE:
        raise errors.MissingRequiredPermission(
            "You are missing one or more permissions required in order to run this command", perms=missing
        )
    return True
def _has_channel_permissions(context: context_.base.Context, *, perms: hikari.Permissions) -> bool:
    """Ensure the invoker has all of ``perms`` granted by channel overwrites only.

    Guild-level (role) permissions are deliberately excluded via
    ``include_guild_permissions=False``.

    Raises:
        OnlyInGuild: If invoked outside of a guild (via ``_guild_only``).
        InsufficientCache: If the channel cannot be resolved from the cache.
        MissingRequiredPermission: If any of ``perms`` is not granted.
    """
    _guild_only(context)
    channel = context.get_channel()
    if channel is None:
        raise errors.InsufficientCache("Some objects required for this check could not be resolved from the cache")
    assert context.member is not None and isinstance(channel, hikari.GuildChannel)
    # ~granted & required leaves exactly the required permissions not granted.
    missing_perms = ~permissions.permissions_in(channel, context.member, include_guild_permissions=False) & perms
    if missing_perms is not hikari.Permissions.NONE:
        raise errors.MissingRequiredPermission(
            "You are missing one or more permissions required in order to run this command", perms=missing_perms
        )
    return True
def _bot_has_guild_permissions(context: context_.base.Context, *, perms: hikari.Permissions) -> bool:
    """Ensure the bot itself has all of ``perms`` in the current channel.

    Role permissions and channel overwrites are both considered; the check is
    skipped entirely when the bot owns the guild.

    Raises:
        OnlyInGuild: If invoked outside of a guild (via ``_guild_only``).
        InsufficientCache: If the channel, guild, or bot member is not cached.
        BotMissingRequiredPermission: If any of ``perms`` is not granted.
    """
    _guild_only(context)
    channel, guild = context.get_channel(), context.get_guild()
    if channel is None or guild is None:
        raise errors.InsufficientCache("Some objects required for this check could not be resolved from the cache")
    member = guild.get_my_member()
    if member is None:
        raise errors.InsufficientCache("Some objects required for this check could not be resolved from the cache")
    # A guild-owning bot implicitly holds every permission.
    if guild.owner_id == member.id:
        return True
    assert isinstance(channel, hikari.GuildChannel)
    # ~granted & required leaves exactly the required permissions not granted.
    missing_perms = ~permissions.permissions_in(channel, member) & perms
    if missing_perms is not hikari.Permissions.NONE:
        raise errors.BotMissingRequiredPermission(
            "The bot is missing one or more permissions required in order to run this command", perms=missing_perms
        )
    return True
def _bot_has_role_permissions(context: context_.base.Context, *, perms: hikari.Permissions) -> bool:
    """Check that the bot's roles alone grant every permission in ``perms``.

    Channel permission overwrites are deliberately ignored.

    Raises:
        :obj:`~.errors.InsufficientCache`: If the guild or the bot's member
            object cannot be resolved from the cache.
        :obj:`~.errors.BotMissingRequiredPermission`: If any permission is missing.
    """
    _guild_only(context)
    guild = context.get_guild()
    member = guild.get_my_member() if guild is not None else None
    if member is None:
        raise errors.InsufficientCache("Some objects required for this check could not be resolved from the cache")
    missing = ~permissions.permissions_for(member) & perms
    if missing is not hikari.Permissions.NONE:
        raise errors.BotMissingRequiredPermission(
            "The bot is missing one or more permissions required in order to run this command", perms=missing
        )
    return True
def _bot_has_channel_permissions(context: context_.base.Context, *, perms: hikari.Permissions) -> bool:
    """Ensure the bot has all of ``perms`` granted by channel overwrites only.

    Guild-level (role) permissions are deliberately excluded via
    ``include_guild_permissions=False``.

    Raises:
        OnlyInGuild: If invoked outside of a guild (via ``_guild_only``).
        InsufficientCache: If the channel, guild, or bot member is not cached.
        BotMissingRequiredPermission: If any of ``perms`` is not granted.
    """
    _guild_only(context)
    channel, guild = context.get_channel(), context.get_guild()
    if channel is None or guild is None:
        raise errors.InsufficientCache("Some objects required for this check could not be resolved from the cache")
    member = guild.get_my_member()
    if member is None:
        raise errors.InsufficientCache("Some objects required for this check could not be resolved from the cache")
    assert isinstance(channel, hikari.GuildChannel)
    # ~granted & required leaves exactly the required permissions not granted.
    missing_perms = ~permissions.permissions_in(channel, member, include_guild_permissions=False) & perms
    if missing_perms is not hikari.Permissions.NONE:
        raise errors.BotMissingRequiredPermission(
            "The bot is missing one or more permissions required in order to run this command", perms=missing_perms
        )
    return True
def _has_attachments(context: context_.base.Context, *, file_exts: t.Sequence[str] = ()) -> bool:
    """Check that the invocation carries attachments, optionally filtered by extension.

    When ``file_exts`` is non-empty, *every* attachment's filename must end
    with one of the given extensions.

    Raises:
        :obj:`~.errors.MissingRequiredAttachment`: If there are no attachments,
            or any attachment has a disallowed extension.
    """
    attachments = context.attachments
    if not attachments:
        raise errors.MissingRequiredAttachment("Missing attachment(s) required to run the command")
    # str.endswith accepts a tuple of candidate suffixes.
    if file_exts and not all(a.filename.endswith(tuple(file_exts)) for a in attachments):
        raise errors.MissingRequiredAttachment("Missing attachment(s) required to run the command")
    return True
# Prebuilt, argument-free check instances. Each one wraps the matching
# private predicate above; checks that need configuration (roles,
# permissions, attachments) are created via the factory functions below.
owner_only = Check(_owner_only)
"""Prevents a command from being used by anyone other than the owner of the application."""
guild_only = Check(_guild_only)
"""Prevents a command from being used in direct messages."""
dm_only = Check(_dm_only)
"""Prevents a command from being used in a guild."""
bot_only = Check(_bot_only)
"""Prevents a command from being used by anyone other than a bot."""
webhook_only = Check(_webhook_only)
"""Prevents a command from being used by anyone other than a webhook."""
human_only = Check(_human_only)
"""Prevents a command from being used by anyone other than a human."""
nsfw_channel_only = Check(_nsfw_channel_only)
"""Prevents a command from being used in any channel other than one marked as NSFW."""
def has_roles(role1: int, *roles: int, mode: t.Callable[[t.Sequence[bool]], bool] = all) -> Check:
    """
    Create a check requiring the invoker to hold the given roles.

    This check supports slash commands.

    Args:
        role1 (:obj:`int`): Role ID to check for.
        *roles (:obj:`int`): Additional role IDs to check for.

    Keyword Args:
        mode (``all`` or ``any``): Whether every role (``all``) or at least one
            role (``any``) is required. Defaults to ``all``.

    Note:
        This check also fails in DMs, as roles only exist inside guilds.
    """
    # Only the two builtins are supported; `in` compares functions by identity.
    if mode is not any and mode is not all:
        raise TypeError("mode must be one of: any, all")
    required = [role1, *roles]
    return Check(functools.partial(_has_roles, roles=required, check_func=mode))
def has_guild_permissions(perm1: hikari.Permissions, *perms: hikari.Permissions) -> Check:
    """
    Create a check requiring the invoker to hold all given permissions,
    taking into account permissions granted by both roles and permission
    overwrites.

    Args:
        perm1 (:obj:`hikari.Permissions`): Permission to check for.
        *perms (:obj:`hikari.Permissions`): Additional permissions to check for.

    Note:
        This check also fails in DMs, as permissions do not exist there.

    Warning:
        This check is unavailable if your application is stateless and/or
        missing the intent :obj:`hikari.Intents.GUILDS` and will **always**
        raise an error on command invocation if either of these conditions
        are not met.
    """
    # Fold the varargs into one combined Permissions flag.
    combined = perm1
    for perm in perms:
        combined |= perm
    return Check(functools.partial(_has_guild_permissions, perms=combined))
def has_role_permissions(perm1: hikari.Permissions, *perms: hikari.Permissions) -> Check:
    """
    Prevents the command from being used by a member missing any of the required role
    permissions.

    Args:
        perm1 (:obj:`hikari.Permissions`): Permission to check for.
        *perms (:obj:`hikari.Permissions`): Additional permissions to check for.

    Note:
        This check will also prevent commands from being used in DMs, as you cannot have permissions
        in a DM channel.

    Warning:
        This check is unavailable if your application is stateless and/or missing the intent
        :obj:`hikari.Intents.GUILDS` and will **always** raise an error on command invocation if
        either of these conditions are not met.
    """
    # Fold the variadic permissions into a single combined Permissions flag.
    reduced = functools.reduce(operator.or_, [perm1, *perms])
    return Check(functools.partial(_has_role_permissions, perms=reduced))
def has_channel_permissions(perm1: hikari.Permissions, *perms: hikari.Permissions) -> Check:
    """
    Create a check requiring the invoker to hold all given channel
    permissions (permissions granted by a permission overwrite).

    Args:
        perm1 (:obj:`hikari.Permissions`): Permission to check for.
        *perms (:obj:`hikari.Permissions`): Additional permissions to check for.

    Note:
        This check also fails in DMs, as permissions do not exist there.

    Warning:
        This check is unavailable if your application is stateless and/or
        missing the intent :obj:`hikari.Intents.GUILDS` and will **always**
        raise an error on command invocation if either of these conditions
        are not met.
    """
    # Fold the varargs into one combined Permissions flag.
    combined = perm1
    for perm in perms:
        combined |= perm
    return Check(functools.partial(_has_channel_permissions, perms=combined))
def bot_has_guild_permissions(perm1: hikari.Permissions, *perms: hikari.Permissions) -> Check:
    """
    Prevents the command from being used if the bot is missing any of the required
    permissions (this takes into account permissions granted by both roles and permission overwrites).

    Args:
        perm1 (:obj:`hikari.Permissions`): Permission to check for.
        *perms (:obj:`hikari.Permissions`): Additional permissions to check for.

    Note:
        This check will also prevent commands from being used in DMs, as you cannot have permissions
        in a DM channel.

    Warning:
        This check is unavailable if your application is stateless and/or missing the intent
        :obj:`hikari.Intents.GUILDS` and will **always** raise an error on command invocation if
        either of these conditions are not met.
    """
    # Fold the variadic permissions into a single combined Permissions flag.
    reduced = functools.reduce(operator.or_, [perm1, *perms])
    return Check(functools.partial(_bot_has_guild_permissions, perms=reduced))
def bot_has_role_permissions(perm1: hikari.Permissions, *perms: hikari.Permissions) -> Check:
    """
    Create a check requiring the bot itself to hold all given role
    permissions.

    Args:
        perm1 (:obj:`hikari.Permissions`): Permission to check for.
        *perms (:obj:`hikari.Permissions`): Additional permissions to check for.

    Note:
        This check also fails in DMs, as permissions do not exist there.

    Warning:
        This check is unavailable if your application is stateless and/or
        missing the intent :obj:`hikari.Intents.GUILDS` and will **always**
        raise an error on command invocation if either of these conditions
        are not met.
    """
    # Fold the varargs into one combined Permissions flag.
    combined = perm1
    for perm in perms:
        combined |= perm
    return Check(functools.partial(_bot_has_role_permissions, perms=combined))
def bot_has_channel_permissions(perm1: hikari.Permissions, *perms: hikari.Permissions) -> Check:
    """
    Prevents the command from being used if the bot is missing any of the required channel
    permissions (permissions granted by a permission overwrite).

    Args:
        perm1 (:obj:`hikari.Permissions`): Permission to check for.
        *perms (:obj:`hikari.Permissions`): Additional permissions to check for.

    Note:
        This check will also prevent commands from being used in DMs, as you cannot have permissions
        in a DM channel.

    Warning:
        This check is unavailable if your application is stateless and/or missing the intent
        :obj:`hikari.Intents.GUILDS` and will **always** raise an error on command invocation if
        either of these conditions are not met.
    """
    # Fold the variadic permissions into a single combined Permissions flag.
    reduced = functools.reduce(operator.or_, [perm1, *perms])
    return Check(functools.partial(_bot_has_channel_permissions, perms=reduced))
def has_attachments(*extensions: str) -> Check:
    """
    Prevents the command from being used if the invocation message
    does not include any attachments.

    .. deprecated::
        Scheduled for removal in 2.5.0; use a ``hikari.Attachment`` option instead.

    Args:
        *extensions (:obj:`str`): If specified, attachments with different file extensions
            will cause the check to fail.

    Note:
        If ``extensions`` is specified then all attachments must conform to the restriction.
    """
    # Emit the deprecation eagerly, at check-creation time rather than at
    # invocation time, so the author sees it during startup.
    warnings.warn(
        "'has_attachments' is deprecated and scheduled for removal in version '2.5.0'. "
        "Use an option with type 'hikari.Attachment' instead.",
        DeprecationWarning,
    )
    return Check(functools.partial(_has_attachments, file_exts=extensions))
| 40.081031 | 115 | 0.699228 |
e6b114c0c5ca510c50d907cabceb1e73f7e73f46 | 481 | py | Python | leave/migrations/0004_auto_20200810_1107.py | mathemartins/solitonEMS | be593a1de735a2d60288e3c3ef3cf50d554ebfce | [
"Unlicense",
"MIT"
] | null | null | null | leave/migrations/0004_auto_20200810_1107.py | mathemartins/solitonEMS | be593a1de735a2d60288e3c3ef3cf50d554ebfce | [
"Unlicense",
"MIT"
] | null | null | null | leave/migrations/0004_auto_20200810_1107.py | mathemartins/solitonEMS | be593a1de735a2d60288e3c3ef3cf50d554ebfce | [
"Unlicense",
"MIT"
] | 1 | 2021-01-05T20:27:40.000Z | 2021-01-05T20:27:40.000Z | # Generated by Django 3.0.7 on 2020-08-10 08:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('leave', '0003_leaveplan_description'),
]
operations = [
migrations.AlterField(
model_name='leaveplan',
name='approval_status',
field=models.CharField(choices=[('PD', 'Pending'), ('AD', 'Approved'), ('RD', 'Rejected')], default='PD', max_length=2),
),
]
| 25.315789 | 132 | 0.598753 |
8e59adc6d7dedbbc4dfd7d02a62b80720de877f9 | 4,057 | py | Python | tasks/data_visualization/src/plotting.py | rongfang323/policy-data-analyzer | 73a03089753a26fe5e7031c5c68ba887428246a9 | [
"FTL",
"RSA-MD"
] | null | null | null | tasks/data_visualization/src/plotting.py | rongfang323/policy-data-analyzer | 73a03089753a26fe5e7031c5c68ba887428246a9 | [
"FTL",
"RSA-MD"
] | null | null | null | tasks/data_visualization/src/plotting.py | rongfang323/policy-data-analyzer | 73a03089753a26fe5e7031c5c68ba887428246a9 | [
"FTL",
"RSA-MD"
] | null | null | null | from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scprep
import phate
def visualize_embeddings_2D(embs, numeric_labels, tsne_perplexity, pca_k_n_comps=None, seed=69420):
    """Plot side-by-side 2D projections (PCA, t-SNE, optionally t-SNE-on-PCA) of ``embs``.

    Args:
        embs: 2D array-like of embeddings, one row per sample.
        numeric_labels: Per-sample labels used to colour the points.
        tsne_perplexity: Perplexity passed to both t-SNE runs.
        pca_k_n_comps: If truthy, also draw a third panel: t-SNE computed on a
            ``pca_k_n_comps``-component PCA reduction of the embeddings.
        seed: Random seed shared by all projections for reproducibility.
    """
    df = pd.DataFrame()
    df["y"] = np.array(numeric_labels)
    num_labels = len(set(numeric_labels))  # NOTE(review): unused — consider removing
    # Data for plot 1: plain 2-component PCA.
    pca = PCA(n_components=2, random_state=seed)
    pca_result = pca.fit_transform(embs)
    df['pca-1'] = pca_result[:, 0]
    df['pca-2'] = pca_result[:, 1]
    # Data for plot 2: t-SNE directly on the raw embeddings.
    tsne = TSNE(n_components=2, verbose=1, perplexity=tsne_perplexity, n_iter=1000, random_state=seed)
    tsne_results = tsne.fit_transform(embs)
    df["tsne-1"] = tsne_results[:, 0]
    df["tsne-2"] = tsne_results[:, 1]
    # Actual plotting: one wide figure, up to three panels.
    plt.figure(figsize=(24, 4))
    ax1 = plt.subplot(1, 3, 1)
    sns.scatterplot(
        x="pca-1", y="pca-2",
        hue=df.y.tolist(),
        palette="bright",
        data=df,
        legend=False,
        ax=ax1
    ).set(title="PCA projection")
    ax2 = plt.subplot(1, 3, 2)
    sns.scatterplot(
        x="tsne-1", y="tsne-2",
        hue=df.y.tolist(),
        palette="bright",
        data=df,
        # The middle panel only shows a legend when there is no third panel.
        legend=False if pca_k_n_comps else "auto",
        ax=ax2
    ).set(title="t-SNE projection")
    if pca_k_n_comps:
        # Data for plot 3: t-SNE on a k-component PCA reduction.
        pca_k = PCA(n_components=pca_k_n_comps, random_state=seed)
        pca_k_result = pca_k.fit_transform(embs)
        tsne = TSNE(n_components=2, verbose=1, perplexity=tsne_perplexity, n_iter=1000, random_state=seed)
        tsne_pca_results = tsne.fit_transform(pca_k_result)
        df[f"tsne-pca-{pca_k_n_comps}-1"] = tsne_pca_results[:, 0]
        df[f"tsne-pca-{pca_k_n_comps}-2"] = tsne_pca_results[:, 1]
        # Actual plotting
        ax3 = plt.subplot(1, 3, 3)
        sns.scatterplot(
            x=f"tsne-pca-{pca_k_n_comps}-1", y=f"tsne-pca-{pca_k_n_comps}-2",
            hue=df.y.tolist(),
            palette="bright",
            data=df,
            ax=ax3
        ).set(title="t-SNE on PCA projection")
    plt.legend(bbox_to_anchor=(1.01, 1), borderaxespad=0)
def visualize_PCA_embeddings_3D(embs, labels, fname=None, seed=69420):
    """Project ``embs`` onto 3 PCA components and show a rotating 3D scatter.

    Args:
        embs: 2D array-like of embeddings, one row per sample.
        labels: Per-sample labels used to colour the points.
        fname: Optional filename to save the animation to.
        seed: Random seed for PCA reproducibility.
    """
    components = PCA(n_components=3, random_state=seed).fit_transform(embs)
    coords = np.vstack([components[:, 0], components[:, 1], components[:, 2]]).T
    return scprep.plot.rotate_scatter3d(
        coords,
        c=np.array(labels),
        figsize=(10, 8),
        title="PCA 3 components",
        legend_anchor=(1.01, 1),
        filename=fname,
    )
def visualize_tSNE_embeddings_3D(embs, labels, perplexity=50, fname=None, seed=69420):
    """Embed ``embs`` into 3D with t-SNE and show a rotating 3D scatter.

    Args:
        embs: 2D array-like of embeddings, one row per sample.
        labels: Per-sample labels used to colour the points.
        perplexity: t-SNE perplexity.
        fname: Optional filename to save the animation to.
        seed: Random seed for t-SNE reproducibility.
    """
    reducer = TSNE(n_components=3, verbose=1, perplexity=perplexity, n_iter=1000, random_state=seed)
    coords3d = reducer.fit_transform(embs)
    stacked = np.vstack([coords3d[:, 0], coords3d[:, 1], coords3d[:, 2]]).T
    return scprep.plot.rotate_scatter3d(
        stacked,
        c=np.array(labels),
        figsize=(10, 8),
        title=f"t-SNE {perplexity} perplexity",
        legend_anchor=(1.01, 1),
        filename=fname,
    )
def visualize_phate_embeddings_2D(embs, labels, knn=4, decay=15, t=12, seed=69420):
    """Fit a 2D PHATE embedding of ``embs`` and return a coloured scatter plot.

    Args:
        embs: 2D array-like of embeddings, one row per sample.
        labels: Per-sample labels used to colour the points.
        knn, decay, t: PHATE hyperparameters, forwarded verbatim.
        seed: Random seed for reproducibility.

    Returns:
        The value of ``phate.plot.scatter2d`` (presumably a matplotlib axis —
        TODO confirm against the phate documentation).
    """
    phate_operator = phate.PHATE(knn=knn, decay=decay,
                                 t=t, random_state=seed)
    # fit_transform is called only for its side effect of fitting the
    # operator; scatter2d reads the embedding from the operator itself, so
    # the returned coordinates need not be kept (was an unused local).
    phate_operator.fit_transform(embs)
    return phate.plot.scatter2d(phate_operator, c=labels, legend_anchor=(1.01, 1))
def visualize_phate_embeddings_3D(embs, labels, knn=4, decay=15, t=12, seed=69420, fname=None):
    """Fit a 3D PHATE embedding of ``embs`` and show a rotating 3D scatter.

    Args:
        embs: 2D array-like of embeddings, one row per sample.
        labels: Per-sample labels used to colour the points.
        knn, decay, t: PHATE hyperparameters, forwarded verbatim.
        seed: Random seed for reproducibility.
        fname: Optional filename to save the animation to.

    Returns:
        The value of ``phate.plot.rotate_scatter3d`` (presumably an animation
        object — TODO confirm against the phate documentation).
    """
    phate_operator = phate.PHATE(knn=knn, decay=decay,
                                 t=t, random_state=seed)
    # fit_transform is called only for its side effect of fitting the
    # operator; rotate_scatter3d reads the embedding from the operator, so
    # the returned coordinates need not be kept (was an unused local).
    phate_operator.fit_transform(embs)
    return phate.plot.rotate_scatter3d(phate_operator, c=labels, legend_anchor=(1.01, 1),
                                       filename=fname)
| 38.638095 | 122 | 0.639882 |
132db73b42d39412b7073e9389cae6e4335cf400 | 3,750 | py | Python | pyreach/force_torque_sensor.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | 13 | 2021-09-01T01:10:22.000Z | 2022-03-05T10:01:52.000Z | pyreach/force_torque_sensor.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | null | null | null | pyreach/force_torque_sensor.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | 6 | 2021-09-20T21:17:53.000Z | 2022-03-14T18:42:48.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for interacting with the force torque sensor."""
import dataclasses
from typing import Callable, Optional
from pyreach import core
@dataclasses.dataclass(frozen=True)
class ForceTorqueSensorState:
  """Represents the state of the force torque sensor device.

  Immutable value object; instances compare by field values.

  Attributes:
    time: The time when the state is measured (presumably seconds since the
      UNIX epoch — TODO confirm against the device clock source).
    sequence: The sequence number of the force torque state.
    device_name: The name of the device.
    force: The force.
    torque: The torque.
  """

  time: float
  sequence: int
  device_name: str
  force: core.Force
  torque: core.Torque
class ForceTorqueSensor(object):
  """Interface for interacting with a force torque sensor device.

  Abstract interface: every method body is a stub that raises
  NotImplementedError, and concrete sensor implementations override them.
  """

  @property
  def device_name(self) -> str:
    """Return the force torque sensor device name."""
    raise NotImplementedError

  @property
  def state(self) -> Optional[ForceTorqueSensorState]:
    """Return the latest force torque sensor state."""
    raise NotImplementedError

  def add_update_callback(
      self,
      callback: Callable[[ForceTorqueSensorState], bool],
      finished_callback: Optional[Callable[[],
                                           None]] = None) -> Callable[[], None]:
    """Add a callback for force torque sensor state.

    Args:
      callback: Callback called when a new sensor state arrives. The callback
        function should return False for continuous state update. When the
        callback function returns True, it will stop receiving future updates.
      finished_callback: Optional callback, called when the callback is stopped.

    Returns:
      A function that when called stops the callback.
    """
    raise NotImplementedError

  def fetch_state(self,
                  timeout: float = 15.0) -> Optional[ForceTorqueSensorState]:
    """Fetch a new force torque sensor state (blocking).

    Args:
      timeout: The number of seconds to wait before giving up.

    Raises:
      PyReachError: if timeout.

    Returns:
      The latest sensor state.
    """
    raise NotImplementedError

  def async_fetch_state(self,
                        callback: Optional[Callable[[ForceTorqueSensorState],
                                                    None]] = None,
                        error_callback: Optional[Callable[[core.PyReachStatus],
                                                          None]] = None,
                        timeout: float = 15.0) -> None:
    """Fetch a new force torque sensor state asynchronously.

    Args:
      callback: Optional callback when a new force torque sensor state arrives.
      error_callback: Optional callback called if there is an error.
      timeout: The number of seconds to wait before giving up.
    """
    raise NotImplementedError

  def start_streaming(self, request_period: float = 0.1) -> None:
    """Start streaming of force torque sensor state.

    Args:
      request_period: The number of seconds between force torque sensor states.
        Defaults to .1 seconds between force torque sensor states.
    """
    raise NotImplementedError

  def stop_streaming(self) -> None:
    """Stop streaming force torque sensor states."""
    raise NotImplementedError
| 32.051282 | 80 | 0.674133 |
0e87428680f0b63e97751dddd9af768249c3f6dc | 8,013 | py | Python | tests/integration/prepare/test_prepare_esa_sentinel_l1.py | jaysnm/eo-datasets | e381d53efb208467406dd9e071d4bc2484c5bb5a | [
"Apache-2.0"
] | null | null | null | tests/integration/prepare/test_prepare_esa_sentinel_l1.py | jaysnm/eo-datasets | e381d53efb208467406dd9e071d4bc2484c5bb5a | [
"Apache-2.0"
] | null | null | null | tests/integration/prepare/test_prepare_esa_sentinel_l1.py | jaysnm/eo-datasets | e381d53efb208467406dd9e071d4bc2484c5bb5a | [
"Apache-2.0"
] | null | null | null | import datetime
import shutil
from pathlib import Path
import pytest
from eodatasets3.prepare import sentinel_l1c_prepare
from tests.common import check_prepare_outputs
# Zipped ESA Sentinel-2B L1C test scene bundled alongside the test suite.
DATASET_PATH: Path = Path(__file__).parent.parent / (
    "data/esa_s2_l1c/S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.zip"
)
@pytest.fixture()
def expected_dataset_document():
    """The full ODC metadata document expected for the bundled test scene.

    Matches the scene referenced by ``DATASET_PATH``; every measurement path
    points inside the (unzipped) SAFE archive.
    """
    return {
        "$schema": "https://schemas.opendatacube.org/dataset",
        "id": "7c1df12c-e580-5fa2-b51b-c30a59e73bbf",
        "crs": "epsg:32755",
        "geometry": {
            "coordinates": [
                [
                    [600300.0, 6100000.0],
                    [709800.0, 6100000.0],
                    [709800.0, 5990200.0],
                    [600000.0, 5990200.0],
                    [600000.0, 6099700.0],
                    [600000.0, 6100000.0],
                    [600300.0, 6100000.0],
                ]
            ],
            "type": "Polygon",
        },
        # Grids are keyed by pixel size in metres; "default" is the 100 m grid.
        "grids": {
            "300": {
                "shape": [366, 366],
                "transform": [
                    300.0,
                    0.0,
                    600000.0,
                    0.0,
                    -300.0,
                    6100000.0,
                    0.0,
                    0.0,
                    1.0,
                ],
            },
            "default": {
                "shape": [1098, 1098],
                "transform": [
                    100.0,
                    0.0,
                    600000.0,
                    0.0,
                    -100.0,
                    6100000.0,
                    0.0,
                    0.0,
                    1.0,
                ],
            },
            "50": {
                "shape": [2196, 2196],
                "transform": [
                    50.0,
                    0.0,
                    600000.0,
                    0.0,
                    -50.0,
                    6100000.0,
                    0.0,
                    0.0,
                    1.0,
                ],
            },
        },
        "label": "esa_s2bm_level1_1-0-20201011_55HFA_2020-10-11",
        "lineage": {},
        "measurements": {
            "blue": {
                "grid": "50",
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/GRANULE/"
                "L1C_T55HFA_A018789_20201011T000244/IMG_DATA/T55HFA_20201011T000249_B02.jp2",
            },
            "coastal_aerosol": {
                "grid": "300",
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/GRANULE/"
                "L1C_T55HFA_A018789_20201011T000244/IMG_DATA/T55HFA_20201011T000249_B01.jp2",
            },
            "green": {
                "grid": "50",
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/GRANULE/"
                "L1C_T55HFA_A018789_20201011T000244/IMG_DATA/T55HFA_20201011T000249_B03.jp2",
            },
            "nir_1": {
                "grid": "50",
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/GRANULE/"
                "L1C_T55HFA_A018789_20201011T000244/IMG_DATA/T55HFA_20201011T000249_B08.jp2",
            },
            "red": {
                "grid": "50",
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/GRANULE/"
                "L1C_T55HFA_A018789_20201011T000244/IMG_DATA/T55HFA_20201011T000249_B04.jp2",
            },
            "red_edge_1": {
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/GRANULE/"
                "L1C_T55HFA_A018789_20201011T000244/IMG_DATA/T55HFA_20201011T000249_B05.jp2"
            },
            "red_edge_2": {
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/GRANULE/"
                "L1C_T55HFA_A018789_20201011T000244/IMG_DATA/T55HFA_20201011T000249_B06.jp2"
            },
            "red_edge_3": {
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/GRANULE/"
                "L1C_T55HFA_A018789_20201011T000244/IMG_DATA/T55HFA_20201011T000249_B07.jp2"
            },
            "swir_1_cirrus": {
                "grid": "300",
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/GRANULE/"
                "L1C_T55HFA_A018789_20201011T000244/IMG_DATA/T55HFA_20201011T000249_B10.jp2",
            },
            "swir_2": {
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/GRANULE/"
                "L1C_T55HFA_A018789_20201011T000244/IMG_DATA/T55HFA_20201011T000249_B11.jp2"
            },
            "swir_3": {
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/GRANULE/"
                "L1C_T55HFA_A018789_20201011T000244/IMG_DATA/T55HFA_20201011T000249_B12.jp2"
            },
            "water_vapour": {
                "grid": "300",
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/GRANULE/"
                "L1C_T55HFA_A018789_20201011T000244/IMG_DATA/T55HFA_20201011T000249_B09.jp2",
            },
        },
        "product": {"name": "esa_s2bm_level1_1"},
        "properties": {
            "datetime": datetime.datetime(2020, 10, 11, 0, 6, 49, 882566),
            "eo:cloud_cover": 24.9912,
            "eo:gsd": 10,
            "eo:instrument": "MSI",
            "eo:platform": "sentinel-2b",
            "eo:constellation": "sentinel-2",
            "eo:sun_azimuth": 46.3307328858312,
            "eo:sun_elevation": 37.3713908882192,
            "odc:dataset_version": "1.0.20201011",
            "odc:file_format": "JPEG2000",
            "odc:processing_datetime": datetime.datetime(
                2020, 10, 11, 1, 47, 4, 112949
            ),
            "odc:producer": "esa.int",
            "odc:product_family": "level1",
            "odc:region_code": "55HFA",
            "sentinel:datastrip_id": "S2B_OPER_MSI_L1C_DS_EPAE_20201011T011446_S20201011T000244_N02.09",
            "sentinel:sentinel_tile_id": "S2B_OPER_MSI_L1C_TL_EPAE_20201011T011446_A018789_T55HFA_N02.09",
            "sentinel:product_name": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446",
            "sentinel:datatake_type": "INS-NOBS",
            "sat:orbit_state": "descending",
            "sat:relative_orbit": 30,
            "sentinel:datatake_start_datetime": datetime.datetime(
                2020, 10, 11, 1, 14, 46
            ),
            "sentinel:processing_baseline": "02.09",
            "sentinel:processing_center": "EPAE",
            "sentinel:reception_station": "EDRS",
        },
        "accessories": {
            "metadata:s2_datastrip": {
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/DATASTRIP/"
                "DS_EPAE_20201011T011446_S20201011T000244/MTD_DS.xml"
            },
            "metadata:s2_user_product": {
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/MTD_MSIL1C.xml"
            },
            "metadata:s2_tile": {
                "path": "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.SAFE/GRANULE/"
                "L1C_T55HFA_A018789_20201011T000244/MTD_TL.xml"
            },
        },
    }
def test_run(tmp_path, expected_dataset_document):
    """
    Run prepare on our test input scene, and check the created metadata matches expected.
    """
    scene_copy = tmp_path / DATASET_PATH.name
    shutil.copy(DATASET_PATH, scene_copy)
    expected_metadata_path = tmp_path / (
        "S2B_MSIL1C_20201011T000249_N0209_R030_T55HFA_20201011T011446.odc-metadata.yaml"
    )
    check_prepare_outputs(
        invoke_script=sentinel_l1c_prepare.main,
        run_args=[scene_copy],
        expected_doc=expected_dataset_document,
        expected_metadata_path=expected_metadata_path,
    )
| 39.865672 | 106 | 0.541994 |
552e059cc6add9214e44bc559624c6ea75469bda | 523 | py | Python | web/mooctracker/projects/models.py | Jaaga/mooc-tracker | b7be270d24fa2608042064dc87ae13740893bade | [
"MIT"
] | null | null | null | web/mooctracker/projects/models.py | Jaaga/mooc-tracker | b7be270d24fa2608042064dc87ae13740893bade | [
"MIT"
] | 1 | 2020-06-05T17:43:59.000Z | 2020-06-05T17:43:59.000Z | web/mooctracker/projects/models.py | Jaaga/mooc-tracker | b7be270d24fa2608042064dc87ae13740893bade | [
"MIT"
] | 2 | 2015-02-25T10:46:20.000Z | 2016-10-28T11:24:32.000Z | from django.db import models
class Project(models.Model):
project_name = models.CharField(max_length = 200)
url = models.URLField(default=None, blank=True, null = True, max_length=400)
project_description = models.TextField(default=None, blank=True, null = True, max_length=2000)
project_site_url = models.URLField(default=None, blank=True, null = True, max_length=400)
repository_url = models.URLField(default=None, blank=True, null = True, max_length=400)
def __unicode__(self):
return self.project_name | 47.545455 | 96 | 0.76673 |
da4a7a33acfec7ebe0305b550e165536b1ff5f7f | 20,077 | py | Python | extra_foam/gui/plot_widgets/graphics_widgets.py | zhujun98/EXtra-foam | 680d6d7fd4afdcbc41eb8e440feac54b6cecab33 | [
"BSD-3-Clause"
] | null | null | null | extra_foam/gui/plot_widgets/graphics_widgets.py | zhujun98/EXtra-foam | 680d6d7fd4afdcbc41eb8e440feac54b6cecab33 | [
"BSD-3-Clause"
] | null | null | null | extra_foam/gui/plot_widgets/graphics_widgets.py | zhujun98/EXtra-foam | 680d6d7fd4afdcbc41eb8e440feac54b6cecab33 | [
"BSD-3-Clause"
] | null | null | null | """
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Author: Jun Zhu <jun.zhu@xfel.eu>
Copyright (C) European X-Ray Free-Electron Laser Facility GmbH.
All rights reserved.
"""
import warnings
from itertools import chain
import numpy as np
from PyQt5.QtGui import QPainter
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QPointF, Qt
from PyQt5.QtWidgets import (
QCheckBox, QGraphicsGridLayout, QHBoxLayout, QLabel, QMenu, QSizePolicy,
QSlider, QWidget, QWidgetAction
)
from .. import pyqtgraph as pg
from ..pyqtgraph import Point
from ..pyqtgraph import functions as fn
from ..plot_widgets.plot_items import CurvePlotItem
from ..misc_widgets import FColor
class HistogramLUTItem(pg.GraphicsWidget):
"""GraphicsWidget for adjusting the display of an image.
Implemented based on pyqtgraph.HistogramLUTItem.
"""
lut_changed_sgn = pyqtSignal(object)
def __init__(self, image_item, parent=None):
super().__init__(parent=parent)
self._lut = None
gradient = pg.GradientEditorItem()
gradient.setOrientation('right')
gradient.loadPreset('grey')
self._gradient = gradient
self._gradient.show()
lri = pg.LinearRegionItem([0, 1], 'horizontal', swapMode='block')
lri.setZValue(1000)
lri.lines[0].addMarker('<|', 0.5)
lri.lines[1].addMarker('|>', 0.5)
self._lri = lri
self._hist = CurvePlotItem(pen=FColor.mkPen('k'))
self._hist.rotate(90)
vb = pg.ViewBox(parent=self)
vb.setMaximumWidth(152)
vb.setMinimumWidth(45)
vb.setMouseEnabled(x=False, y=True)
vb.addItem(self._hist)
vb.addItem(self._lri)
vb.enableAutoRange(pg.ViewBox.XYAxes)
self._vb = vb
self._axis = pg.AxisItem(
'left', linkView=self._vb, maxTickLength=-10, parent=self)
self.initUI()
self.initConnections()
image_item.image_changed_sgn.connect(self.onImageChanged)
# send function pointer, not the result
image_item.setLookupTable(self.getLookupTable)
self._image_item = image_item
# If image_item._image is None, the following line does not initialize
# image_item._levels
self.onImageChanged(auto_levels=True)
# synchronize levels
image_item.setLevels(self.getLevels())
def initUI(self):
layout = QGraphicsGridLayout()
layout.setContentsMargins(1, 1, 1, 1)
layout.setSpacing(0)
layout.addItem(self._axis, 0, 0)
layout.addItem(self._vb, 0, 1)
layout.addItem(self._gradient, 0, 2)
self.setLayout(layout)
def initConnections(self):
self._lri.sigRegionChanged.connect(self.regionChanging)
self._lri.sigRegionChangeFinished.connect(self.regionChanged)
self._gradient.sigGradientChanged.connect(self.gradientChanged)
self._vb.sigRangeChanged.connect(self.update)
def paint(self, p, *args):
"""Override."""
pen = self._lri.lines[0].pen
rgn = self.getLevels()
p1 = self._vb.mapFromViewToItem(
self, Point(self._vb.viewRect().center().x(), rgn[0]))
p2 = self._vb.mapFromViewToItem(
self, Point(self._vb.viewRect().center().x(), rgn[1]))
rect = self._gradient.mapRectToParent(self._gradient.gradRect.rect())
p.setRenderHint(QPainter.Antialiasing)
for pen in [fn.mkPen((0, 0, 0, 100), width=3), pen]:
p.setPen(pen)
p.drawLine(p1 + Point(0, 5), rect.bottomLeft())
p.drawLine(p2 - Point(0, 5), rect.topLeft())
p.drawLine(rect.topLeft(), rect.topRight())
p.drawLine(rect.bottomLeft(), rect.bottomRight())
def gradientChanged(self):
if self._gradient.isLookupTrivial():
# lambda x: x.astype(np.uint8))
self._image_item.setLookupTable(None)
else:
# send function pointer, not the result
self._image_item.setLookupTable(self.getLookupTable)
self._lut = None
self.lut_changed_sgn.emit(self)
    def getLookupTable(self, img=None, n=None, alpha=None):
        """Return the look-up table.

        The table is computed lazily from the gradient editor and cached
        until gradientChanged() invalidates it.

        :param img: image used only to pick a default table size from its
            dtype when *n* is not given.
        :param int n: number of LUT entries (256 for uint8 images, 512
            otherwise).
        :param alpha: forwarded to the gradient's getLookupTable().
        """
        if self._lut is None:
            if n is None:
                n = 256 if img.dtype == np.uint8 else 512
            self._lut = self._gradient.getLookupTable(n, alpha=alpha)
        return self._lut
    def regionChanging(self):
        """One line of the region is being dragged."""
        self._image_item.setLevels(self.getLevels())
        # Repaint the connector lines while dragging.
        self.update()
    def regionChanged(self):
        """Line dragging has finished."""
        self._image_item.setLevels(self.getLevels())
    def onImageChanged(self, auto_levels=False):
        """Rebuild the histogram curve after the image item changed.

        :param bool auto_levels: when True, reset the level region to span
            the full histogram range; otherwise mirror the image item's
            current levels.
        """
        hist, bin_centers = self._image_item.histogram()

        if hist is None:
            # No image data: show an empty histogram and keep levels as-is.
            self._hist.setData([], [])
            return

        self._hist.setData(bin_centers, hist)
        if auto_levels:
            self._lri.setRegion((bin_centers[0], bin_centers[-1]))
        else:
            # synchronize levels if ImageItem updated its image with
            # auto_levels = True
            self._lri.setRegion(self._image_item.getLevels())
    def setColorMap(self, cm):
        """Delegate the new colormap to the gradient editor."""
        self._gradient.setColorMap(cm)

    def getLevels(self):
        """Return the (lower, upper) levels selected by the region item."""
        return self._lri.getRegion()

    def setLevels(self, levels):
        """Called by HistogramLUTItem."""
        self._lri.setRegion(levels)
class PlotArea(pg.GraphicsWidget):
    """GraphicsWidget implementing a standard 2D plotting area with axes.
    Implemented based on pyqtgraph.PlotItem.
    It has the following functionalities:
    - Manage placement of a ViewBox, AxisItems, and LabelItems;
    - Manage a list of GraphicsItems displayed inside the ViewBox;
    - Implement a context menu with display options.
    """
    # Emitted when the "Cross cursor" checkbox in the context menu toggles.
    cross_toggled_sgn = pyqtSignal(bool)

    # Grid-layout rows reserved for the meter bar and the title.
    _METER_ROW = 0
    _TITLE_ROW = 1

    # Maximum number of annotation TextItems ever created/shown.
    _MAX_ANNOTATION_ITEMS = 10

    def __init__(self, name=None, *,
                 enable_meter=True, enable_transform=True, parent=None):
        """Initialization.

        :param str name: optional name registered for the main ViewBox.
        :param bool enable_meter: whether the "Meter" context menu is shown.
        :param bool enable_transform: whether the "Transform" context menu
            is shown.
        """
        super().__init__(parent=parent)

        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)

        # All graphics items added to this plot area.
        self._items = set()
        # Plot items attached to the main (left) y axis ...
        self._plot_items = set()
        # ... and those attached to the secondary (right) y axis.
        self._plot_items2 = set()
        self._annotation_items = []
        self._n_vis_annotation_items = 0

        self._vb = pg.ViewBox(parent=self)
        # Secondary ViewBox; created lazily in addItem(..., y2=True).
        self._vb2 = None

        if name is not None:
            self._vb.register(name)

        self._legend = None
        self._axes = {}
        self._meter = pg.LabelItem(
            '', size='11pt', justify='left', color='6A3D9A', parent=self)
        self._title = pg.LabelItem('', size='11pt', parent=self)

        # context menu
        self._show_cross_cb = QCheckBox("Cross cursor")

        self._show_x_grid_cb = QCheckBox("Show X Grid")
        self._show_y_grid_cb = QCheckBox("Show Y Grid")
        self._grid_opacity_sld = QSlider(Qt.Horizontal)
        self._grid_opacity_sld.setMinimum(0)
        self._grid_opacity_sld.setMaximum(255)
        self._grid_opacity_sld.setValue(160)
        self._grid_opacity_sld.setSingleStep(1)

        self._log_x_cb = QCheckBox("Log X")
        self._log_y_cb = QCheckBox("Log Y")

        self._menu = None
        self._enable_transform = enable_transform
        self._enable_meter = enable_meter
        self._show_meter = False

        self._layout = QGraphicsGridLayout()

        self.initUI()
        self.initConnections()

    def initUI(self):
        """Place meter, title, ViewBox and axes in the grid layout."""
        layout = self._layout

        layout.setContentsMargins(1, 1, 1, 1)
        layout.setHorizontalSpacing(0)
        layout.setVerticalSpacing(0)

        layout.addItem(self._meter, self._METER_ROW, 1)
        layout.addItem(self._title, self._TITLE_ROW, 1)
        layout.addItem(self._vb, 3, 1)

        for i in range(5):
            layout.setRowPreferredHeight(i, 0)
            layout.setRowMinimumHeight(i, 0)
            layout.setRowSpacing(i, 0)
            layout.setRowStretchFactor(i, 1)

        for i in range(3):
            layout.setColumnPreferredWidth(i, 0)
            layout.setColumnMinimumWidth(i, 0)
            layout.setColumnSpacing(i, 0)
            layout.setColumnStretchFactor(i, 1)

        # NOTE(review): the stretch goes to row 2 (the top-axis row) while
        # the ViewBox sits in row 3 -- confirm this is intentional.
        layout.setRowStretchFactor(2, 100)
        layout.setColumnStretchFactor(1, 100)

        self.setLayout(layout)
        self._initAxisItems()
        self.setTitle()
        self.showMeter(self._show_meter)
        self._initContextMenu()

    def initConnections(self):
        """Connect the context-menu widgets to their handlers."""
        self._show_cross_cb.toggled.connect(self._onShowCrossChanged)

        self._show_x_grid_cb.toggled.connect(self._onShowGridChanged)
        self._show_y_grid_cb.toggled.connect(self._onShowGridChanged)
        self._grid_opacity_sld.sliderReleased.connect(self._onShowGridChanged)

        self._log_x_cb.toggled.connect(self._onLogXChanged)
        self._log_y_cb.toggled.connect(self._onLogYChanged)

    def _initContextMenu(self):
        """Build the Meter / Grid / Transform context sub-menus."""
        self._menu = [
            QMenu("Meter"),
            QMenu("Grid"),
            QMenu("Transform")
        ]

        meter_menu = self._menu[0]

        cross_act = QWidgetAction(meter_menu)
        cross_act.setDefaultWidget(self._show_cross_cb)
        meter_menu.addAction(cross_act)

        grid_menu = self._menu[1]

        show_x_act = QWidgetAction(grid_menu)
        show_x_act.setDefaultWidget(self._show_x_grid_cb)
        grid_menu.addAction(show_x_act)
        show_y_act = QWidgetAction(grid_menu)
        show_y_act.setDefaultWidget(self._show_y_grid_cb)
        grid_menu.addAction(show_y_act)
        opacity_act = QWidgetAction(grid_menu)
        widget = QWidget()
        layout = QHBoxLayout()
        layout.addWidget(QLabel("Opacity"))
        layout.addWidget(self._grid_opacity_sld)
        widget.setLayout(layout)
        opacity_act.setDefaultWidget(widget)
        grid_menu.addAction(opacity_act)

        transform_menu = self._menu[2]

        log_x_act = QWidgetAction(transform_menu)
        log_x_act.setDefaultWidget(self._log_x_cb)
        transform_menu.addAction(log_x_act)
        log_y_act = QWidgetAction(transform_menu)
        log_y_act.setDefaultWidget(self._log_y_cb)
        transform_menu.addAction(log_y_act)

    def _initAxisItems(self):
        """Create the four AxisItems; only 'left' and 'bottom' start visible."""
        for orient, pos in (('top', (2, 1)),
                            ('bottom', (4, 1)),
                            ('left', (3, 0)),
                            ('right', (3, 2))):
            axis = pg.AxisItem(orientation=orient, parent=self)

            axis.linkToView(self._vb)
            self._axes[orient] = {'item': axis, 'pos': pos}
            self._layout.addItem(axis, *pos)
            axis.setZValue(-1000)
            axis.setFlag(axis.ItemNegativeZStacksBehindParent)

            self.showAxis(orient, orient in ['left', 'bottom'])

    def getViewBox(self):
        """Return the main ViewBox."""
        return self._vb

    def clearAllPlotItems(self):
        """Clear data on all the plot items."""
        for item in chain(self._plot_items, self._plot_items2):
            item.setData([], [])

    @pyqtSlot(bool)
    def _onShowCrossChanged(self, state):
        self.showMeter(state)
        self.cross_toggled_sgn.emit(state)

    @pyqtSlot()
    def _onShowGridChanged(self):
        # AxisItem.setGrid accepts either an alpha value or False.
        alpha = self._grid_opacity_sld.value()
        x = alpha if self._show_x_grid_cb.isChecked() else False
        y = alpha if self._show_y_grid_cb.isChecked() else False
        self.getAxis('bottom').setGrid(x)
        self.getAxis('left').setGrid(y)

    @pyqtSlot(bool)
    def _onLogXChanged(self, state):
        # X is shared by both y axes, so both item sets are updated.
        for item in chain(self._plot_items, self._plot_items2):
            item.setLogX(state)
        self.getAxis("bottom").setLogMode(state)
        self._vb.autoRange(disableAutoRange=False)

    @pyqtSlot(bool)
    def _onLogYChanged(self, state):
        # Only the items on the main (left) y axis are affected.
        for item in self._plot_items:
            item.setLogY(state)
        self.getAxis("left").setLogMode(state)
        self._vb.autoRange(disableAutoRange=False)

    def addItem(self, item, ignore_bounds=False, y2=False):
        """Add a graphics item to ViewBox."""
        if item in self._items:
            warnings.warn(f'Item {item} already added to PlotItem, ignoring.')
            return

        self._items.add(item)

        if isinstance(item, pg.PlotItem):
            if y2:
                if self._log_x_cb.isChecked():
                    item.setLogX(True)

                self._plot_items2.add(item)
            else:
                if self._log_x_cb.isChecked():
                    item.setLogX(True)

                if self._log_y_cb.isChecked():
                    item.setLogY(True)

                self._plot_items.add(item)

            name = item.name()
            if self._legend is not None and name:
                self._legend.addItem(item, name)

        if y2:
            # Lazily create the secondary ViewBox on first use and keep it
            # geometrically in sync with the main one.
            vb = self._vb2
            if vb is None:
                vb = pg.ViewBox()
                self.scene().addItem(vb)
                right_axis = self.getAxis('right')
                right_axis.linkToView(vb)
                right_axis.show()
                vb.setXLink(self._vb)
                self._vb2 = vb
                self._vb.sigResized.connect(self._updateY2View)
        else:
            vb = self._vb

        vb.addItem(item, ignoreBounds=ignore_bounds)

    def _updateY2View(self):
        # Keep the secondary ViewBox overlapping the main one on resize.
        self._vb2.setGeometry(self._vb.sceneBoundingRect())
        # not sure this is required
        # vb.linkedViewChanged(self._plot_area.vb, vb.XAxis)

    def removeItem(self, item):
        """Remove a graphics item from the ViewBox."""
        if item not in self._items:
            return

        if item in self._annotation_items:
            # it is tricky to update n_vis_annotation_items
            raise RuntimeError("Annotation item is not allowed to be removed "
                               "using 'removeItem' method!")

        self._items.remove(item)

        if item in self._plot_items2:
            self._plot_items2.remove(item)
            if self._legend is not None and item.name():
                self._legend.removeItem(item)
            self._vb2.removeItem(item)
            return

        if item in self._plot_items:
            self._plot_items.remove(item)
            if self._legend is not None and item.name():
                self._legend.removeItem(item)

        self._vb.removeItem(item)

    def removeAllItems(self):
        """Remove all graphics items from the ViewBox."""
        for item in self._items:
            if item in self._plot_items2:
                self._vb2.removeItem(item)
            else:
                self._vb.removeItem(item)

        if self._legend is not None:
            self._legend.clear()

        self._plot_items.clear()
        self._plot_items2.clear()
        self._annotation_items.clear()
        self._n_vis_annotation_items = 0
        self._items.clear()

    def getContextMenus(self, event):
        """Override."""
        # Trim the [Meter, Grid, Transform] list according to the enabled
        # features: Meter is first, Transform last.
        start = 0
        end = len(self._menu)
        if not self._enable_transform:
            end -= 1
        if not self._enable_meter:
            start += 1
        return self._menu[start:end]

    def getAxis(self, axis):
        """Return the specified AxisItem.

        :param str axis: one of 'left', 'bottom', 'right', or 'top'.
        """
        return self._axes[axis]['item']

    def showAxis(self, axis, show=True):
        """Show or hide the given axis.

        :param str axis: one of 'left', 'bottom', 'right', or 'top'.
        :param bool show: whether to show the axis.
        """
        s = self.getAxis(axis)
        if show:
            s.show()
        else:
            s.hide()

    def addLegend(self, offset=(30, 30), **kwargs):
        """Add a LegendItem if it does not exist."""
        if self._legend is None:
            self._legend = pg.LegendItem(offset=offset, pen='k', **kwargs)
            self._legend.setParentItem(self._vb)

            # Register the items that were added before the legend existed.
            for item in chain(self._plot_items, self._plot_items2):
                name = item.name()
                if name:
                    self._legend.addItem(item, name)

        return self._legend

    def showLegend(self, show=True):
        """Show or hide the legend.

        NOTE(review): raises AttributeError if addLegend() was never called.

        :param bool show: whether to show the legend.
        """
        if show:
            self._legend.show()
        else:
            self._legend.hide()

    def setLabel(self, axis, text=None, units=None, **args):
        """Set the label for an axis. Basic HTML formatting is allowed.

        :param str axis: one of 'left', 'bottom', 'right', or 'top'.
        :param str text: text to display along the axis. HTML allowed.
        """
        self.getAxis(axis).setLabel(text=text, units=units, **args)
        self.showAxis(axis)

    def showLabel(self, axis, show=True):
        """Show or hide one of the axis labels.

        :param str axis: one of 'left', 'bottom', 'right', or 'top'.
        :param bool show: whether to show the label.
        """
        self.getAxis(axis).showLabel(show)

    def showMeter(self, show=True):
        """Show or hide the meter bar.

        :param bool show: whether to show the meter bar.
        """
        row = self._METER_ROW
        if not show:
            self._meter.setMaximumHeight(0)
            self._layout.setRowFixedHeight(row, 0)
            self._meter.setVisible(False)
        else:
            self._meter.setMaximumHeight(30)
            self._layout.setRowFixedHeight(row, 30)
            self._meter.setVisible(True)
        self._show_meter = show

    def setMeter(self, pos):
        """Set the meter of the plot.

        :param pos: (x, y) coordinate pair, or None to clear the readout.
        """
        if not self._show_meter:
            return

        if pos is None:
            self._meter.setText("")
        else:
            x, y = pos
            self._meter.setText(f"x = {x}, y = {y}")

    def setAnnotationList(self, x, y, values=None):
        """Set a list of annotation items.

        :param list-like x: x coordinate of the annotated point.
        :param list-like y: y coordinate of the annotated point.
        :param list-like values: a list of annotation text.
        """
        # Don't waste time to check the list lengths.
        a_items = self._annotation_items

        if values is None:
            values = x
        # Cap the number of annotations (and therefore TextItems created).
        values = values[:self._MAX_ANNOTATION_ITEMS]

        n_pts = len(values)
        n_items = len(a_items)
        # Create any missing TextItems; they are kept for reuse.
        if n_items < n_pts:
            for i in range(n_pts - n_items):
                item = pg.TextItem(color=FColor.mkColor('b'), anchor=(0.5, 2))
                self.addItem(item)
                a_items.append(item)

        # Show/hide items so exactly n_pts of them are visible.
        n_vis = self._n_vis_annotation_items
        if n_vis < n_pts:
            for i in range(n_vis, n_pts):
                a_items[i].show()
        elif n_vis > n_pts:
            for i in range(n_pts, n_vis):
                a_items[i].hide()
        self._n_vis_annotation_items = n_pts

        for i in range(n_pts):
            a_items[i].setPos(x[i], y[i])
            a_items[i].setText(f"{values[i]:.4f}")

    def setTitle(self, *args, **kwargs):
        """Set the title of the plot.

        Called without arguments (or with None) the title row is collapsed.
        """
        row = self._TITLE_ROW
        title = None if len(args) == 0 else args[0]

        if title is None:
            self._title.setMaximumHeight(0)
            self._layout.setRowFixedHeight(row, 0)
            self._title.setVisible(False)
        else:
            self._title.setMaximumHeight(30)
            self._layout.setRowFixedHeight(row, 30)
            self._title.setText(title, **kwargs)
            self._title.setVisible(True)

    def setAspectLocked(self, *args, **kwargs):
        """Delegate to the main ViewBox."""
        self._vb.setAspectLocked(*args, **kwargs)

    def invertX(self, *args, **kwargs):
        """Delegate to the main ViewBox."""
        self._vb.invertX(*args, **kwargs)

    def invertY(self, *args, **kwargs):
        """Delegate to the main ViewBox."""
        self._vb.invertY(*args, **kwargs)

    def autoRange(self, *args, **kwargs):
        """Delegate to the main ViewBox."""
        self._vb.autoRange(*args, **kwargs)

    def mapSceneToView(self, *args, **kwargs):
        """Delegate to the main ViewBox."""
        return self._vb.mapSceneToView(*args, **kwargs)
| 32.278135 | 78 | 0.603128 |
22be8a018a8d6ecaad942508e30d55e2c249bc6f | 3,435 | py | Python | manual_requeue.py | davidh83110/aws_sqs-dead-letter-queue-lambda-requeue | 8bbe41908be726345bb4236c7d41f38656770459 | [
"MIT"
] | null | null | null | manual_requeue.py | davidh83110/aws_sqs-dead-letter-queue-lambda-requeue | 8bbe41908be726345bb4236c7d41f38656770459 | [
"MIT"
] | null | null | null | manual_requeue.py | davidh83110/aws_sqs-dead-letter-queue-lambda-requeue | 8bbe41908be726345bb4236c7d41f38656770459 | [
"MIT"
] | 1 | 2021-04-27T08:23:30.000Z | 2021-04-27T08:23:30.000Z | import boto3
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class DLQ:
def __init__(self, queue_name):
self.sqs = boto3.resource('sqs')
self.dead_letter_queue_name = queue_name
def get_queue(self, queue_name):
queue = self.sqs.get_queue_by_name(
QueueName=queue_name
)
return queue
def receive_messages_from_dlq(self):
messages = self.get_queue(self.dead_letter_queue_name).receive_messages(
MaxNumberOfMessages=10,
WaitTimeSeconds=20
)
logger.info('Message Length: %s', str(len(messages)))
return messages
def send_messages_to_source_queue(self, retry_count, message_id, message_body):
source_queue_name = self.dead_letter_queue_name.replace('DLQ-', '')
logger.info('Source Queue: %s', source_queue_name)
retry_count += 1
self.get_queue(source_queue_name).send_messages(Entries=[
{
'Id': message_id,
'MessageBody': message_body,
'MessageAttributes': {
'retryCount': {
'StringValue': str(retry_count),
'DataType': 'String'
}
}
}
]
)
def delete_message_from_dlq(self, message_id, message_receipt_handle):
self.get_queue(self.dead_letter_queue_name).delete_messages(Entries=[
{
'Id': message_id,
'ReceiptHandle': message_receipt_handle
}
]
)
def requeue_all(self, retry_count):
total_moved_job = 0
while True:
messages = self.receive_messages_from_dlq()
if len(messages) == 0:
break
else:
for i, msg in enumerate(messages):
logger.info('Index:[%s], Message ID/Body: %s / %s', str(i), msg.message_id, str(msg.body))
logger.info('Index:[%s], Sending message back to source queue...', str(i))
self.send_messages_to_source_queue(retry_count, msg.message_id, msg.body)
logger.info('Index:[%s], Deleteing message on DLQ...', str(i))
self.delete_message_from_dlq(msg.message_id, msg.receipt_handle)
total_moved_job += len(messages)
if total_moved_job > 60:
break
logger.info('Total Moved Job: %s', total_moved_job)
def lambda_handler(context, event):
logger.info('Event Body: \n' + str(event))
dlq_name = event['Records'][0]['eventSourceARN'].split(':')[5]
# dlq_name = 'dlq-demo-queue' #Test queue name when without Lambda Trigger
try:
retry_count = event['Records'][0]['messageAttributes']['retryCount']['stringValue']
except Exception as e:
logger.warning('KeyError: %s, assign retry_count = 1', e)
retry_count = 1
if retry_count > 3:
logger.error('This task is retrying over 3 time. function end.')
return 200
DLQ(dlq_name).requeue_all(retry_count)
return 200
if __name__ == '__main__':
    # Local smoke test only; context/event are stubbed with empty strings
    # (note: an empty event will fail the Records lookup -- intended for
    # quick manual experimentation, not a working invocation).
    lambda_handler(context='', event='')
55c3298d05d61422bd5aae5d56a9127b5d1703e6 | 5,254 | py | Python | skyportal/handlers/api/internal/standards.py | bparazin/skyportal | c160610ca0cc28eef9f36c2d11cc15bd9bcbfe56 | [
"BSD-3-Clause"
] | 52 | 2018-11-02T00:53:21.000Z | 2022-03-08T16:03:52.000Z | skyportal/handlers/api/internal/standards.py | bparazin/skyportal | c160610ca0cc28eef9f36c2d11cc15bd9bcbfe56 | [
"BSD-3-Clause"
] | 1,944 | 2017-04-27T18:51:20.000Z | 2022-03-31T20:17:44.000Z | skyportal/handlers/api/internal/standards.py | bparazin/skyportal | c160610ca0cc28eef9f36c2d11cc15bd9bcbfe56 | [
"BSD-3-Clause"
] | 63 | 2017-05-13T01:40:47.000Z | 2022-03-12T11:32:11.000Z | import ast
from baselayer.app.access import auth_or_token
from baselayer.app.env import load_env
from ...base import BaseHandler
from ....utils.offset import get_formatted_standards_list
_, cfg = load_env()
class StandardsHandler(BaseHandler):
@auth_or_token
def get(self):
"""
---
description: Get standard stars with specified formatting
parameters:
- in: query
name: facility
nullable: true
required: false
schema:
type: string
enum: [Keck, Shane, P200]
description: Which facility to generate the starlist for
- in: query
name: standard_type
required: false
schema:
type: string
description: |
Origin of the standard stars, defined in config.yaml
- in: query
name: dec_filter_range
nullable: True
required: false
schema:
type: list
description: |
lowest and highest dec to return, e.g. "(-10,30)"
- in: query
name: ra_filter_range
required: false
nullable: True
schema:
type: list
description: |
lowest and highest ra to return (or wrapped range)
e.g. "(125,320)" or "(300,10)"
- in: query
name: show_first_line
required: false
schema:
type: boolean
description: |
In the returned list, include the first formatting line
if it is otherwise demanded by the format.
responses:
200:
content:
application/json:
schema:
- $ref: '#/components/schemas/Success'
- type: object
properties:
data:
type: object
properties:
success:
type: boolean
description: did we get back a starlist as we expect?
starlist_info:
type: array
description: |
list of source and offset star information
items:
type: object
properties:
str:
type: string
description: single-line starlist format per object
400:
content:
application/json:
schema: Error
"""
starlist_type = self.get_query_argument('facility', 'Keck')
standard_type = self.get_query_argument('standard_type', 'ESO')
dec_filter_range_str = self.get_query_argument('dec_filter_range', "[-90, 90]")
ra_filter_range_str = self.get_query_argument('ra_filter_range', "[0, 360]")
show_first_line = self.get_query_argument('show_first_line', False)
if standard_type not in cfg["standard_stars"]:
return self.error(
f'Invalid `standard_type`. Should be in {list(cfg["standard_stars"].keys())}'
)
if starlist_type not in ["Keck", "Shane", "P200"]:
return self.error(
'Invalid `starlist_type`. Should be in [Keck, Shane, P200]'
)
try:
show_first_line = bool(show_first_line)
except TypeError:
return self.error('Invalid argument for `show_first_line`')
dec_filter_range = ast.literal_eval(dec_filter_range_str)
if not (
isinstance(dec_filter_range, (list, tuple)) and len(dec_filter_range) == 2
):
return self.error('Invalid argument for `dec_filter_range`')
if not (
isinstance(dec_filter_range[0], (float, int))
and isinstance(dec_filter_range[1], (float, int))
):
return self.error('Invalid arguments in `dec_filter_range`')
if not all(map(lambda x: x >= -90 and x <= 90, dec_filter_range)):
return self.error('Elements out of range in `dec_filter_range`')
ra_filter_range = ast.literal_eval(ra_filter_range_str)
if not (
isinstance(ra_filter_range, (list, tuple)) and len(ra_filter_range) == 2
):
return self.error('Invalid argument for `ra_filter_range`')
if not (
isinstance(ra_filter_range[0], (float, int))
and isinstance(ra_filter_range[1], (float, int))
):
return self.error('Invalid arguments in `ra_filter_range`')
if not all(map(lambda x: x >= 0 and x <= 360, ra_filter_range)):
return self.error('Elements out of range in `ra_filter_range`')
data = get_formatted_standards_list(
starlist_type=starlist_type,
standard_type=standard_type,
dec_filter_range=tuple(dec_filter_range),
ra_filter_range=tuple(ra_filter_range),
show_first_line=show_first_line,
)
self.verify_and_commit()
return self.success(data=data)
| 35.986301 | 93 | 0.538257 |
fe93174a3b3cc44579de174ceb0e98adba3d4a6a | 813 | py | Python | cherrymusicclient/client.py | endlesscoil/cherrymusicclient | 07b0ef3ede11b7e26f41f944ee923760ab32ff5b | [
"MIT"
] | null | null | null | cherrymusicclient/client.py | endlesscoil/cherrymusicclient | 07b0ef3ede11b7e26f41f944ee923760ab32ff5b | [
"MIT"
] | null | null | null | cherrymusicclient/client.py | endlesscoil/cherrymusicclient | 07b0ef3ede11b7e26f41f944ee923760ab32ff5b | [
"MIT"
] | null | null | null | import logging
from .api import api
from .library import Playlist
class CherryMusicClient(object):
    """Thin client wrapper around the CherryMusic web API.

    Keeps the fetched playlists and tracks which one is currently selected.
    """

    def __init__(self):
        self.log = logging.getLogger(self.__class__.__name__)
        self.playlists = []
        self.current_playlist = None

    def login(self, username, password):
        """Authenticate against the server; returns the API's result."""
        return api.login(username, password)

    def logout(self):
        """Terminate the server session."""
        api.logout()

    def load_playlists(self):
        """Fetch all playlists from the server and wrap them as Playlist objects."""
        self.playlists.extend(
            Playlist(raw) for raw in api.show_playlists())

    def select_playlist(self, name):
        """Make the playlist titled *name* current; return it (None if absent)."""
        match = None
        for candidate in self.playlists:
            if candidate.title == name:
                match = candidate
        self.current_playlist = match
        return match

    @property
    def url(self):
        """Base URL of the CherryMusic server."""
        return self._url

    @url.setter
    def url(self, value):
        # Keep the API module's endpoint in sync with this client.
        self._url = value
        api.url = value
2b8bfa9d640f6aebc8b1000a8698a617a06e557e | 6,520 | py | Python | src/programy/clients/polling/telegram/client.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | null | null | null | src/programy/clients/polling/telegram/client.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | null | null | null | src/programy/clients/polling/telegram/client.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 4 | 2019-04-01T15:42:23.000Z | 2020-11-05T08:14:27.000Z | """
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import MessageHandler, Filters
from programy.clients.polling.client import PollingBotClient
from programy.clients.polling.telegram.config import TelegramConfiguration
def start(telegram_bot, update):
    """Module-level /start hook forwarding to the singleton client."""
    client = TelegramBotClient.TELEGRAM_CLIENT
    if client is None:
        raise Exception("Please initialise Telegram Client first")
    client.start(telegram_bot, update)
def message(telegram_bot, update):
    """Module-level text-message hook forwarding to the singleton client."""
    client = TelegramBotClient.TELEGRAM_CLIENT
    if client is None:
        raise Exception("Please initialise Telegram Client first")
    client.message(telegram_bot, update)
def unknown(telegram_bot, update):
    """Module-level unknown-command hook forwarding to the singleton client."""
    client = TelegramBotClient.TELEGRAM_CLIENT
    if client is None:
        raise Exception("Please initialise Telegram Client first")
    client.unknown(telegram_bot, update)
class TelegramBotClient(PollingBotClient):
    """Polling client bridging ProgramY to the Telegram Bot API.

    The module-level handler functions dispatch through the singleton
    stored in TELEGRAM_CLIENT, so it must be assigned before polling
    starts.
    """

    TELEGRAM_CLIENT = None

    def __init__(self, argument_parser=None):
        self._updater = None
        PollingBotClient.__init__(self, "telegram", argument_parser)

    def get_client_configuration(self):
        """Return the configuration object for this client type."""
        return TelegramConfiguration()

    def get_license_keys(self):
        """Load the Telegram bot token from the license key store."""
        self._telegram_token = self.license_keys.get_key("TELEGRAM_TOKEN")

    def create_updater(self, telegram_token):
        """Create the telegram Updater using the bot token."""
        self._updater = Updater(token=telegram_token)

    def register_handlers(self):
        """Register the /start, text-message and unknown-command handlers."""
        start_handler = CommandHandler('start', start)
        message_handler = MessageHandler(Filters.text, message)
        unknown_handler = MessageHandler(Filters.command, unknown)

        self._updater.dispatcher.add_handler(start_handler)
        self._updater.dispatcher.add_handler(message_handler)
        # Add unknown last, so genuine commands are matched first.
        self._updater.dispatcher.add_handler(unknown_handler)

    def get_initial_question(self, update):
        """Return the bot's post-processed greeting for this chat."""
        client_context = self.create_client_context(update.message.chat_id)
        initial_question = client_context.bot.get_initial_question(client_context)
        processed_question = client_context.bot.post_process_response(client_context,
                                                                      initial_question)
        return processed_question

    def ask_question(self, userid, question):
        """Ask the bot *question* on behalf of *userid* and return the answer."""
        self._questions += 1
        client_context = self.create_client_context(userid)
        return client_context.bot.ask_question(client_context, question, responselogger=self)

    def start(self, telegram_bot, update):
        """Handle /start by sending the configured initial greeting."""
        try:
            initial_question = self.get_initial_question(update)
            if initial_question:
                telegram_bot.send_message(chat_id=update.message.chat_id, text=initial_question)
            else:
                YLogger.error(self, "Not initial question to return in start()")
        except Exception as e:
            YLogger.exception(self, "Failed to start", e)

    def message(self, telegram_bot, update):
        """Answer an ordinary text message."""
        try:
            response = self.ask_question(update.message.chat_id, update.message.text)
            if response:
                telegram_bot.send_message(chat_id=update.message.chat_id, text=response)
            else:
                YLogger.error(self, "Not response to return in message()")
        except Exception as e:
            YLogger.exception(self, "Failed to handle message", e)

    def get_unknown_response(self, userid):
        """Ask the bot using the configured unknown-command SRAI."""
        return self.ask_question(userid, self.configuration.client_configuration.unknown_command_srai)

    def get_unknown_command(self, userid):
        """Return the reply for an unrecognised command.

        Falls back to the static configured text when no SRAI is configured
        or the SRAI produces no answer.
        """
        if self.configuration.client_configuration.unknown_command_srai is None:
            unknown_response = self.configuration.client_configuration.unknown_command
        else:
            unknown_response = self.get_unknown_response(userid)

        if unknown_response is None or unknown_response == "":
            unknown_response = self.configuration.client_configuration.unknown_command

        return unknown_response

    def unknown(self, telegram_bot, update):
        """Reply to an unrecognised command."""
        try:
            unknown_response = self.get_unknown_command(update.message.chat_id)
            if unknown_response:
                telegram_bot.send_message(chat_id=update.message.chat_id, text=unknown_response)
            else:
                # Bug fix: this error was previously logged unconditionally,
                # even when a reply had just been sent. Mirror start()/message().
                YLogger.error(self, "No response to return in unknown()")
        except Exception as e:
            YLogger.exception(self, "Failed to handle unknown", e)

    def display_connected_message(self):
        print("Telegram Bot connected and running...")

    def connect(self):
        """Create the updater and register handlers; always returns True."""
        self.create_updater(self._telegram_token)
        self.register_handlers()
        return True

    def poll_and_answer(self):
        """Run one polling session; return False when the client should stop."""
        running = True
        try:
            self._updater.start_polling()
            # Without this the system goes into 100% CPU utilisation
            self._updater.idle()

        except KeyboardInterrupt:
            print("Telegram client stopping....")
            running = False
            self._updater.stop()

        except Exception as excep:
            YLogger.exception(self, "Failed to poll and answer", excep)

        return running
if __name__ == '__main__':

    print("Initiating Telegram Client...")

    # Assign the singleton before run() so the module-level handlers
    # (start/message/unknown) can dispatch to it.
    TelegramBotClient.TELEGRAM_CLIENT = TelegramBotClient()
    TelegramBotClient.TELEGRAM_CLIENT.run()
574e0c3168f73cf449f339edebba4a6383b10a85 | 372 | py | Python | 40/40.py | xiaomiwujiecao/effectivePythonNote | 772e81864b171dcc19b6bcb1b31fc61bcfa1b9fe | [
"MIT"
] | null | null | null | 40/40.py | xiaomiwujiecao/effectivePythonNote | 772e81864b171dcc19b6bcb1b31fc61bcfa1b9fe | [
"MIT"
] | null | null | null | 40/40.py | xiaomiwujiecao/effectivePythonNote | 772e81864b171dcc19b6bcb1b31fc61bcfa1b9fe | [
"MIT"
] | null | null | null | # encoding=utf-8
def my_coroutine():
while True:
received = yield
print('Received:',received)
it = my_coroutine()
next(it)
it.send('First')
it.send('Second')
def minimize():
current = yield
while True:
value = yield current
current = min(value,current)
it = minimize()
next(it)
print(it.send(10))
print(it.send(4))
print(it.send(22))
print(it.send(-1))
| 13.285714 | 30 | 0.672043 |
433ae9442e060be775bf195be63f071cd7e20a65 | 33,475 | py | Python | jsl/jsengine/parser/__init__.py | thodges314/angular2 | 8600ef4193e5f6d84f391bfd29008653c7ffab38 | [
"MIT"
] | null | null | null | jsl/jsengine/parser/__init__.py | thodges314/angular2 | 8600ef4193e5f6d84f391bfd29008653c7ffab38 | [
"MIT"
] | null | null | null | jsl/jsengine/parser/__init__.py | thodges314/angular2 | 8600ef4193e5f6d84f391bfd29008653c7ffab38 | [
"MIT"
] | null | null | null | # vim: sw=4 ts=4 et
import unittest
from jsengine.tokenizer import tok
from jsengine import tokenizer
from jsengine import JSSyntaxError
from _constants_kind import kind
from _constants_op import op
from jsengine.structs import *
_VERSIONS = [
"default",
"1.0",
"1.1",
"1.2",
"1.3",
"1.4",
"1.5",
"1.6",
"1.7",
]
def _auto_semicolon(t, kind_, op_, start_offset, end_offset, atom, kids):
nosemi = False
if t.peek_sameline().tok not in (tok.EOF, tok.EOL, tok.RBRACE):
x = t.advance()
if x.tok != tok.SEMI:
raise JSSyntaxError(x.start_offset, 'semi_before_stmnt')
end_offset = x.end_offset
else:
nosemi = True
return ParseNode(kind_, op_, start_offset, end_offset, atom, kids, nosemi)
def _function_arglist(t):
fn_args = []
if t.peek().tok != tok.RPAREN:
while True:
x = t.expect(tok.NAME)
fn_args.append(ParseNode(kind.NAME, op.ARGNAME,
x.start_offset,
x.end_offset, x.atom, []))
if t.peek().tok == tok.COMMA:
t.advance()
else:
break
return fn_args
def _primary_expression(t):
x = t.next_withregexp()
if x.tok == tok.THIS:
return ParseNode(kind.PRIMARY, op.THIS, x.start_offset, x.end_offset, None, [])
elif x.tok == tok.NAME:
return ParseNode(kind.NAME, op.NAME, x.start_offset, x.end_offset, x.atom, [None])
elif x.tok == tok.NULL:
return ParseNode(kind.PRIMARY, op.NULL, x.start_offset, x.end_offset, None, [])
elif x.tok == tok.TRUE:
return ParseNode(kind.PRIMARY, op.TRUE, x.start_offset, x.end_offset, None, [])
elif x.tok == tok.FALSE:
return ParseNode(kind.PRIMARY, op.FALSE, x.start_offset, x.end_offset, None, [])
elif x.tok == tok.STRING:
return ParseNode(kind.STRING, op.STRING, x.start_offset, x.end_offset, x.atom, [])
elif x.tok == tok.REGEXP:
return ParseNode(kind.OBJECT, op.REGEXP, x.start_offset, x.end_offset, None, [])
elif x.tok == tok.NUMBER:
return ParseNode(kind.NUMBER, None, x.start_offset, x.end_offset, x.atom, [])
elif x.tok == tok.LBRACKET:
start_offset = x.start_offset
items = []
end_comma = None
if t.peek().tok != tok.RBRACKET:
while True:
# Conditionally add a value. If it isn't followed by a comma,
# quit in order to force an RBRACKET.
if t.peek().tok == tok.COMMA:
items.append(None)
else:
items.append(_assignment_expression(t, True))
if not t.peek().tok == tok.COMMA:
break
# Expect a comma and use it if the value was missing.
x = t.expect(tok.COMMA)
comma = ParseNode(kind.COMMA, None,
x.start_offset, x.end_offset, None, [])
items[-1] = items[-1] or comma
# Check for the end.
if t.peek().tok == tok.RBRACKET:
end_comma = comma
break
end_offset = t.expect(tok.RBRACKET).end_offset
return ParseNode(kind.RB, None, start_offset, end_offset, None, items,
end_comma=end_comma)
elif x.tok == tok.LBRACE:
start_offset = x.start_offset
kids = []
# TODO: get/set
end_comma = None
while True:
x = t.peek()
if x.tok == tok.RBRACE:
break
elif x.tok == tok.STRING:
t.expect(tok.STRING)
key = ParseNode(kind.STRING, None, x.start_offset,
x.end_offset, x.atom, [])
elif x.tok == tok.NUMBER:
t.expect(tok.NUMBER)
key = ParseNode(kind.NUMBER, None, x.start_offset,
x.end_offset, x.atom, [])
else:
x = t.expect_identifiername()
key = ParseNode(kind.NAME, None, x.start_offset, x.end_offset,
x.atom, [])
t.expect(tok.COLON)
value = _assignment_expression(t, True)
kids.append(ParseNode(kind.COLON, None, key.start_offset,
value.end_offset, None, [key, value]))
if t.peek().tok == tok.COMMA:
x = t.advance()
end_comma = ParseNode(kind.COMMA, None,
x.start_offset, x.end_offset, None, [])
else:
end_comma = None
break
end_offset = t.expect(tok.RBRACE).end_offset
return ParseNode(kind.RC, None, start_offset, end_offset, None, kids,
end_comma=end_comma)
elif x.tok == tok.LPAREN:
start_offset = x.start_offset
kid = _expression(t, True)
end_offset = t.expect(tok.RPAREN).end_offset
return ParseNode(kind.RP, None, start_offset, end_offset, None, [kid])
else:
raise JSSyntaxError(x.start_offset, 'syntax_error')
def _function_declaration(t, named_opcode):
    """Parse a function at statement level.

    An anonymous function in statement position is really an expression,
    so it gets wrapped in an (auto-semicolon) expression statement.
    """
    node = _function_expression(t, named_opcode)
    if node.opcode != op.ANONFUNOBJ:
        return node
    return _auto_semicolon(t, kind.SEMI, None, node.start_offset,
                           node.end_offset, None, [node])
def _function_expression(t, named_opcode):
    """Parse `function [name](args) { body }` into a FUNCTION node.

    Named functions get `named_opcode`; anonymous ones get op.ANONFUNOBJ.
    """
    start = t.expect(tok.FUNCTION).start_offset
    has_name = t.peek().tok == tok.NAME
    name = t.expect(tok.NAME).atom if has_name else None
    opcode = named_opcode if has_name else op.ANONFUNOBJ
    t.expect(tok.LPAREN)
    args = _function_arglist(t)
    t.expect(tok.RPAREN)
    body_start = t.expect(tok.LBRACE).start_offset
    stmts = _sourceelements(t, tok.RBRACE)
    body_end = t.expect(tok.RBRACE).end_offset
    body = ParseNode(kind.LC, None, body_start, body_end, None, stmts)
    return ParseNode(kind.FUNCTION, opcode, start, body.end_offset,
                     name, [body], fn_args=args)
def _argument_list(t):
    """Parse a (possibly empty) comma-separated argument list.

    The caller is responsible for consuming the surrounding parentheses.
    """
    if t.peek().tok == tok.RPAREN:
        return []
    args = [_assignment_expression(t, True)]
    while t.peek().tok == tok.COMMA:
        t.advance()
        args.append(_assignment_expression(t, True))
    return args
def _new_expression(t):
    """Parse `new Ctor` or `new Ctor(args)` into a NEW node."""
    start = t.expect(tok.NEW).start_offset
    ctor = _member_expression(t)
    args = []
    end = ctor.end_offset
    # Without parentheses this is the argument-less NewExpression variant.
    if t.peek().tok == tok.LPAREN:
        t.expect(tok.LPAREN)
        args = _argument_list(t)
        end = t.expect(tok.RPAREN).end_offset
    return ParseNode(kind.NEW, op.NEW, start, end, None, [ctor] + args)
def _member_expression(t, _recurse=True):
    """Parse a MemberExpression: a primary/new/function expression followed
    by any number of `[index]` and `.name` accessors.

    `_recurse` is unused but kept for interface compatibility.
    """
    head = t.peek()
    if head.tok == tok.NEW:
        node = _new_expression(t)
    elif head.tok == tok.FUNCTION:
        node = _function_expression(t, op.NAMEDFUNOBJ)
    else:
        node = _primary_expression(t)
    while True:
        nxt = t.peek().tok
        if nxt == tok.LBRACKET:
            t.advance()
            index = _expression(t, True)
            end = t.expect(tok.RBRACKET).end_offset
            node = ParseNode(kind.LB, op.GETELEM, node.start_offset, end,
                             None, [node, index])
        elif nxt == tok.DOT:
            t.advance()
            prop = t.expect_identifiername()
            node = ParseNode(kind.DOT, op.GETPROP, node.start_offset,
                             prop.end_offset, prop.atom, [node])
        else:
            return node
def _call_expression(t):
    """Parse a CallExpression: a member expression followed by any mix of
    `(args)`, `[index]` and `.name` suffixes.

    If the member expression is not immediately called, it is returned
    unchanged (the accessor loop only runs once a call has been seen).
    """
    expr = _member_expression(t)
    if t.peek().tok != tok.LPAREN:
        # No call at all: plain member expression.
        return expr
    while True:
        x = t.peek()
        if x.tok == tok.LPAREN:
            t.expect(tok.LPAREN)
            args = _argument_list(t)
            end_offset = t.expect(tok.RPAREN).end_offset
            expr = ParseNode(kind.LP, op.CALL, expr.start_offset,
                             end_offset, None, [expr] + args)
        elif x.tok == tok.LBRACKET:
            t.expect(tok.LBRACKET)
            lookup = _expression(t, True)
            end_offset = t.expect(tok.RBRACKET).end_offset
            expr = ParseNode(kind.LB, op.GETELEM,
                             expr.start_offset, end_offset,
                             None, [expr, lookup])
        elif x.tok == tok.DOT:
            t.expect(tok.DOT)
            lookup = t.expect_identifiername()
            expr = ParseNode(kind.DOT, op.GETPROP,
                             expr.start_offset, lookup.end_offset,
                             lookup.atom, [expr])
        else:
            return expr
def _lefthandside_expression(t):
    """Parse a LeftHandSideExpression and tag the node so later assignment
    validation can recognize it."""
    node = _call_expression(t)
    node._lefthandside = True
    return node
def _postfix_expression(t):
    """Parse postfix `++`/`--`; per ASI rules the operator must be on the
    same line as its operand."""
    node = _lefthandside_expression(t)
    nxt = t.peek_sameline().tok
    if nxt == tok.INC:
        end = t.expect(tok.INC).end_offset
        if node.kind == kind.DOT and node.opcode == op.GETPROP:
            opcode = op.PROPINC
        else:
            opcode = op.NAMEINC
        return ParseNode(kind.INC, opcode,
                         node.start_offset, end, None, [node])
    if nxt == tok.DEC:
        end = t.expect(tok.DEC).end_offset
        return ParseNode(kind.DEC, op.NAMEDEC,
                         node.start_offset, end, None, [node])
    return node
# Maps a prefix-unary operator token to the (parse-node kind, opcode) it
# produces.  DELETE gets a dedicated node kind; prefix ++/-- use INC/DEC
# with the *NAME opcodes; the rest share kind.UNARYOP.
_UNARY = {
    tok.DELETE: (kind.DELETE, None),
    tok.VOID: (kind.UNARYOP, op.VOID),
    tok.TYPEOF: (kind.UNARYOP, op.TYPEOF),
    tok.INC: (kind.INC, op.INCNAME),
    tok.DEC: (kind.DEC, op.DECNAME),
    tok.ADD: (kind.UNARYOP, op.POS),
    tok.SUB: (kind.UNARYOP, op.NEG),
    tok.BIT_NOT: (kind.UNARYOP, op.BITNOT),
    tok.LOGICAL_NOT: (kind.UNARYOP, op.NOT),
}
def _unary_expression(t):
    """Parse any run of prefix unary operators (recursively), falling
    through to a postfix expression."""
    head = t.peek()
    if head.tok not in _UNARY:
        return _postfix_expression(t)
    kind_, op_ = _UNARY[head.tok]
    start = t.advance().start_offset
    operand = _unary_expression(t)
    return ParseNode(kind_, op_, start, operand.end_offset, None, [operand])
def _binary_expression(t, dict_, child_expr_callback):
    """Generic left-to-right binary-operator parser.

    `dict_` maps operator tokens to (node kind, opcode); `child_expr_callback`
    parses the next-higher-precedence expression.  Consecutive uses of the
    *same* operator token are flattened into a single n-ary node.
    """
    expr = child_expr_callback(t)
    while True:
        x = t.peek()
        try:
            kind_, op_ = dict_[x.tok]
        except KeyError:
            return expr
        kids = [expr]
        # Flatten runs of the same operator (a + b + c) into one node.
        while t.peek().tok == x.tok:
            t.advance()
            kids.append(child_expr_callback(t))
        # BUG FIX: the node must extend through the *last* operand.  The
        # previous code used kids[1].end_offset, which reported a too-short
        # span whenever three or more operands were chained.
        expr = ParseNode(kind_, op_,
                         kids[0].start_offset, kids[-1].end_offset,
                         None, kids)
# Token -> (node kind, opcode) for `*`, `/`, `%`.
_MULTIPLICATIVE = {
    tok.MUL: (kind.STAR, op.MUL),
    tok.DIV: (kind.DIVOP, op.DIV),
    tok.MOD: (kind.DIVOP, op.MOD),
}
def _multiplicative_expression(t):
    """Parse `*`, `/`, `%` chains (binds tighter than additive operators)."""
    return _binary_expression(t, _MULTIPLICATIVE, _unary_expression)
# Token -> (node kind, opcode) for `+`, `-`.
_ADDITIVE = {
    tok.ADD: (kind.PLUS, op.ADD),
    tok.SUB: (kind.MINUS, op.SUB),
}
def _additive_expression(t):
    """Parse `+`/`-` chains."""
    return _binary_expression(t, _ADDITIVE,
                              _multiplicative_expression)
# Token -> (node kind, opcode) for `<<`, `>>`, `>>>`.
_SHIFT = {
    tok.LSHIFT: (kind.SHOP, op.LSH),
    tok.RSHIFT: (kind.SHOP, op.RSH),
    tok.URSHIFT: (kind.SHOP, op.URSH),
}
def _shift_expression(t):
    """Parse shift-operator chains."""
    return _binary_expression(t, _SHIFT,
                              _additive_expression)
# Token -> (node kind, opcode) for relational operators.  The `in` operator
# is excluded from the NOIN table so it can be disallowed where the grammar's
# "NoIn" restriction applies (for-statement initializers).
_RELATIONAL_NOIN = {
    tok.LT: (kind.RELOP, op.LT),
    tok.GT: (kind.RELOP, op.GT),
    tok.LE: (kind.RELOP, op.LE),
    tok.GE: (kind.RELOP, op.GE),
    tok.INSTANCEOF: (kind.INSTANCEOF, op.INSTANCEOF),
}
_RELATIONAL_IN = dict(_RELATIONAL_NOIN)
_RELATIONAL_IN.update({
    tok.IN: (kind.IN, op.IN),
})
def _relational_expression(t, allowin):
    """Parse `<`, `>`, `<=`, `>=`, `instanceof` (plus `in` when allowed)."""
    return _binary_expression(t, _RELATIONAL_IN if allowin else _RELATIONAL_NOIN,
                              _shift_expression)
# Token -> (node kind, opcode) for `==`, `!=`, `===`, `!==`.  The strict
# forms use the SpiderMonkey-style NEW_EQ / NEW_NE opcode names.
_EQUALITY = {
    tok.EQ: (kind.EQOP, op.EQ),
    tok.NE: (kind.EQOP, op.NE),
    tok.EQ_STRICT: (kind.EQOP, op.NEW_EQ),
    tok.NE_STRICT: (kind.EQOP, op.NEW_NE),
}
def _equality_expression(t, allowin):
    """Parse equality-operator chains, threading the `allowin` flag down."""
    return _binary_expression(t, _EQUALITY,
                              lambda t: _relational_expression(t, allowin))
def _bitwise_and_expression(t, allowin):
    """Parse `&` chains, left-associative."""
    node = _equality_expression(t, allowin)
    while t.peek().tok == tok.BIT_AND:
        t.advance()
        rhs = _equality_expression(t, allowin)
        node = ParseNode(kind.BITAND, op.BITAND,
                         node.start_offset, rhs.end_offset,
                         None, [node, rhs])
    return node
def _bitwise_xor_expression(t, allowin):
    """Parse `^` chains, left-associative."""
    node = _bitwise_and_expression(t, allowin)
    while t.peek().tok == tok.BIT_XOR:
        t.advance()
        rhs = _bitwise_and_expression(t, allowin)
        node = ParseNode(kind.BITXOR, op.BITXOR,
                         node.start_offset, rhs.end_offset,
                         None, [node, rhs])
    return node
def _bitwise_or_expression(t, allowin):
    """Parse `|` chains, left-associative."""
    node = _bitwise_xor_expression(t, allowin)
    while t.peek().tok == tok.BIT_OR:
        t.advance()
        rhs = _bitwise_xor_expression(t, allowin)
        node = ParseNode(kind.BITOR, op.BITOR,
                         node.start_offset, rhs.end_offset,
                         None, [node, rhs])
    return node
def _logical_and_expression(t, allowin):
    """Parse `&&` chains.  Nodes are nested to the *right*:
    `a && b && c` becomes AND(a, AND(b, c))."""
    terms = [_bitwise_or_expression(t, allowin)]
    while t.peek().tok == tok.LOGICAL_AND:
        t.expect(tok.LOGICAL_AND)
        terms.append(_bitwise_or_expression(t, allowin))
    # Fold from the right so the tree shape matches the original parser.
    node = terms.pop()
    while terms:
        left = terms.pop()
        node = ParseNode(kind.AND, op.AND,
                         left.start_offset, node.end_offset,
                         None, [left, node])
    return node
def _logical_or_expression(t, allowin):
    """Parse `||` chains.  Nodes are nested to the *right*:
    `a || b || c` becomes OR(a, OR(b, c))."""
    terms = [_logical_and_expression(t, allowin)]
    while t.peek().tok == tok.LOGICAL_OR:
        t.expect(tok.LOGICAL_OR)
        terms.append(_logical_and_expression(t, allowin))
    # Fold from the right so the tree shape matches the original parser.
    node = terms.pop()
    while terms:
        left = terms.pop()
        node = ParseNode(kind.OR, op.OR,
                         left.start_offset, node.end_offset,
                         None, [left, node])
    return node
def _conditional_expression(t, allowin):
    """Parse `cond ? then : else` (HOOK node) or pass straight through.

    Note the `then` branch always allows `in`; only the `else` branch
    inherits the caller's restriction, per the ECMAScript grammar.
    """
    cond = _logical_or_expression(t, allowin)
    if t.peek().tok != tok.QUESTION:
        return cond
    t.expect(tok.QUESTION)
    then_expr = _assignment_expression(t, True)
    t.expect(tok.COLON)
    else_expr = _assignment_expression(t, allowin)
    return ParseNode(kind.HOOK, None,
                     cond.start_offset, else_expr.end_offset,
                     None, [cond, then_expr, else_expr])
# Maps an assignment-operator token to (node kind, compound opcode).
# Plain `=` carries no opcode; compound forms record the underlying
# arithmetic/bitwise operation.
_ASSIGNS = {
    tok.ASSIGN: (kind.ASSIGN, None),
    tok.ASSIGN_URSHIFT: (kind.ASSIGN, op.URSH),
    tok.ASSIGN_LSHIFT: (kind.ASSIGN, op.LSH),
    tok.ASSIGN_RSHIFT: (kind.ASSIGN, op.RSH),
    tok.ASSIGN_ADD: (kind.ASSIGN, op.ADD),
    tok.ASSIGN_SUB: (kind.ASSIGN, op.SUB),
    tok.ASSIGN_MUL: (kind.ASSIGN, op.MUL),
    tok.ASSIGN_MOD: (kind.ASSIGN, op.MOD),
    tok.ASSIGN_BIT_AND: (kind.ASSIGN, op.BITAND),
    tok.ASSIGN_BIT_OR: (kind.ASSIGN, op.BITOR),
    tok.ASSIGN_BIT_XOR: (kind.ASSIGN, op.BITXOR),
    tok.ASSIGN_DIV: (kind.ASSIGN, op.DIV),
}
def _assignment_expression(t, allowin):
    """Parse an AssignmentExpression.

    If the parsed expression is followed by an assignment operator, the
    target's opcode is rewritten in place from a "get" to a "set" form
    (NAME->SETNAME, GETPROP->SETPROP, GETELEM->SETELEM, CALL->SETCALL);
    anything else is rejected as an invalid assignment target.
    """
    left = _conditional_expression(t, allowin)
    if t.peek().tok in _ASSIGNS:
        # Unwrap parenthesized targets: `(a) = 1` assigns to `a`.
        kid = left
        while kid.kind == kind.RP:
            kid, = kid.kids
        if kid.kind == kind.NAME:
            assert kid.opcode == op.NAME
            kid.opcode = op.SETNAME
        elif kid.kind == kind.DOT:
            # NOTE(review): the assert message references `left.op`, but
            # ParseNode elsewhere exposes `.opcode` — verify `.op` exists
            # (this only evaluates if the assert actually fails).
            assert kid.opcode == op.GETPROP, left.op
            kid.opcode = op.SETPROP
        elif kid.kind == kind.LB:
            assert kid.opcode == op.GETELEM
            kid.opcode = op.SETELEM
        elif kid.kind == kind.LP:
            assert kid.opcode == op.CALL
            kid.opcode = op.SETCALL
        else:
            raise JSSyntaxError(left.start_offset, 'invalid_assign')
        kind_, op_ = _ASSIGNS[t.peek().tok]
        t.advance()
        # Assignment is right-associative: recurse for the right side.
        right = _assignment_expression(t, allowin)
        return ParseNode(kind_, op_,
                         left.start_offset, right.end_offset, None, [left, right])
    else:
        return left
def _expression(t, allowin):
    """Parse a comma expression; a single item is returned unwrapped."""
    items = [_assignment_expression(t, allowin)]
    while t.peek().tok == tok.COMMA:
        t.advance()
        items.append(_assignment_expression(t, allowin))
    if len(items) == 1:
        return items[0]
    return ParseNode(kind.COMMA, None, items[0].start_offset,
                     items[-1].end_offset, None, items)
def _variable_declaration(t, allowin):
    """Parse one or more `name [= expr]` declarators.

    Returns a list of NAME nodes; each node's single child is the
    initializer expression (or None).
    """
    nodes = []
    while True:
        name_tok = t.expect(tok.NAME)
        init = None
        if t.peek().tok == tok.ASSIGN:
            t.advance()
            init = _assignment_expression(t, allowin)
        end = init.end_offset if init else name_tok.end_offset
        opcode = op.SETNAME if init else op.NAME
        nodes.append(ParseNode(kind.NAME, opcode, name_tok.start_offset,
                               end, name_tok.atom, [init]))
        if t.peek().tok != tok.COMMA:
            return nodes
        t.advance()
def _block_statement(t):
    """Parse `{ statement* }` into an LC node."""
    start = t.expect(tok.LBRACE).start_offset
    stmts = []
    while t.peek().tok != tok.RBRACE:
        stmts.append(_statement(t))
    end = t.expect(tok.RBRACE).end_offset
    return ParseNode(kind.LC, None, start, end, None, stmts)
def _empty_statement(t):
    """Parse a lone `;` into a SEMI node with a single null child."""
    semi = t.expect(tok.SEMI)
    return ParseNode(kind.SEMI, None, semi.start_offset, semi.end_offset,
                     None, [None])
def _var_statement(t):
    """Parse `var declarator, ... ;` (semicolon may be auto-inserted)."""
    start = t.expect(tok.VAR).start_offset
    decls = _variable_declaration(t, True)
    return _auto_semicolon(t, kind.VAR, op.DEFVAR,
                           start, decls[-1].end_offset, None, decls)
def _if_statement(t):
    """Parse `if (cond) stmt [else stmt]`; the else child may be None."""
    start = t.expect(tok.IF).start_offset
    t.expect(tok.LPAREN)
    cond = _expression(t, True)
    t.expect(tok.RPAREN)
    then_body = _statement(t)
    else_body = None
    if t.peek().tok == tok.ELSE:
        t.advance()
        else_body = _statement(t)
    end = else_body.end_offset if else_body else then_body.end_offset
    return ParseNode(kind.IF, None, start, end, None,
                     [cond, then_body, else_body])
def _do_statement(t):
    """Parse `do stmt while (cond)`; the trailing semicolon is optional."""
    start = t.expect(tok.DO).start_offset
    body = _statement(t)
    t.expect(tok.WHILE)
    t.expect(tok.LPAREN)
    cond = _expression(t, True)
    rparen = t.expect(tok.RPAREN)
    return _auto_semicolon(t, kind.DO, None,
                           start, rparen.end_offset, None, [body, cond])
def _while_statement(t):
    """Parse `while (cond) stmt`."""
    start = t.expect(tok.WHILE).start_offset
    t.expect(tok.LPAREN)
    cond = _expression(t, True)
    t.expect(tok.RPAREN)
    body = _statement(t)
    return ParseNode(kind.WHILE, None,
                     start, body.end_offset, None, [cond, body])
def _for_statement(t):
    """Parse both `for (init; cond; update)` and `for ([var] x in obj)`.

    The resulting FOR node has two children: a condition node (an IN node
    for for-in loops, otherwise a RESERVED node holding the three header
    expressions, any of which may be None) and the loop body.  `in` is
    disallowed inside the init expression (the grammar's NoIn restriction)
    so the two forms can be disambiguated.
    """
    for_start_offset = t.expect(tok.FOR).start_offset
    t.expect(tok.LPAREN)
    for_exprs = []
    if t.peek().tok == tok.VAR:
        var_start_offset = t.advance().start_offset
        kids = _variable_declaration(t, False)
        vars = ParseNode(kind.VAR, op.DEFVAR, var_start_offset,
                         kids[-1].end_offset, None, kids)
        if t.peek().tok == tok.IN:
            t.advance()
            in_ = _expression(t, True)
            for_exprs = [vars, in_]
        else:
            for_exprs = [vars, None, None]
    else:
        if t.peek().tok != tok.SEMI:
            expr = _expression(t, False)
        else:
            expr = None
        if t.peek().tok == tok.IN:
            t.advance()
            vars = expr
            in_ = _expression(t, True)
            for_exprs = [vars, in_]
        else:
            for_exprs = [expr, None, None]
    # A two-element list marks the for-in form.
    if len(for_exprs) == 2:
        condition = ParseNode(kind.IN, None, for_exprs[0].start_offset,
                              for_exprs[-1].end_offset, None, for_exprs)
    else:
        # FIX: the expect() result was previously bound to an unused local.
        t.expect(tok.SEMI)
        if t.peek().tok != tok.SEMI:
            for_exprs[1] = _expression(t, True)
        t.expect(tok.SEMI)
        if t.peek().tok != tok.RPAREN:
            for_exprs[2] = _expression(t, True)
        condition = ParseNode(kind.RESERVED, None, None, None,
                              None, for_exprs)
    t.expect(tok.RPAREN)
    body = _statement(t)
    return ParseNode(kind.FOR,
                     op.FORIN if condition.kind == kind.IN else None,
                     for_start_offset, body.end_offset,
                     None, [condition, body])
def _continue_statement(t):
    """Parse `continue [label]`; a label must be on the same line (ASI)."""
    first = t.expect(tok.CONTINUE)
    last = first
    label = None
    if t.peek_sameline().tok == tok.NAME:
        last = t.expect(tok.NAME)
        label = last.atom
    # TODO: Validate Scope Labels
    return _auto_semicolon(t, kind.CONTINUE, None,
                           first.start_offset, last.end_offset, label, [])
def _break_statement(t):
    """Parse `break [label]`; a label must be on the same line (ASI)."""
    first = t.expect(tok.BREAK)
    last = first
    label = None
    if t.peek_sameline().tok == tok.NAME:
        last = t.expect(tok.NAME)
        label = last.atom
    # TODO: Validate Scope Labels
    return _auto_semicolon(t, kind.BREAK, None,
                           first.start_offset, last.end_offset, label, [])
def _return_statement(t):
    """Parse `return [expr]`; the value must start on the same line (ASI)."""
    ret = t.expect(tok.RETURN)
    expr = None
    last = ret
    if t.peek_sameline().tok not in (tok.EOF, tok.EOL, tok.SEMI, tok.RBRACE):
        expr = _expression(t, True)
        last = expr
    # TODO: Validate Scope Labels
    return _auto_semicolon(t, kind.RETURN, None,
                           ret.start_offset, last.end_offset,
                           None, [expr])
def _with_statement(t):
    """Parse `with (expr) stmt`."""
    start = t.expect(tok.WITH).start_offset
    t.expect(tok.LPAREN)
    scope_expr = _expression(t, True)
    t.expect(tok.RPAREN)
    body = _statement(t)
    return ParseNode(kind.WITH, None, start, body.end_offset, None,
                     [scope_expr, body])
def _switch_statement(t):
    """Parse a switch statement.

    The SWITCH node's children are the discriminant expression and an LC
    node containing one CASE/DEFAULT node per clause; each clause holds
    [case_expr, LC-of-statements] (case_expr is None for `default`).
    """
    switch_start_offset = t.expect(tok.SWITCH).start_offset
    t.expect(tok.LPAREN)
    expr = _expression(t, True)
    t.expect(tok.RPAREN)
    lc_start_offset = t.expect(tok.LBRACE).start_offset
    cases = []
    while t.peek().tok != tok.RBRACE:
        case_kind = None
        case_expr = None
        if t.peek().tok == tok.CASE:
            case_start_offset = t.advance().start_offset
            case_kind = kind.CASE
            case_expr = _expression(t, True)
        elif t.peek().tok == tok.DEFAULT:
            case_start_offset = t.advance().start_offset
            case_kind = kind.DEFAULT
        else:
            raise JSSyntaxError(t.peek().start_offset, 'invalid_case')
        case_end_offset = t.expect(tok.COLON).end_offset
        statements = []
        # Statements run until the next case/default clause or the brace.
        while t.peek().tok not in (tok.DEFAULT, tok.CASE, tok.RBRACE):
            statements.append(_statement(t))
        if statements:
            statements_start_offset = statements[0].start_offset
            statements_end_offset = statements[-1].end_offset
            case_end_offset = statements[-1].end_offset
        else:
            # Empty clause: collapse the statement list onto the colon.
            statements_start_offset = case_end_offset
            statements_end_offset = case_end_offset
        cases.append(ParseNode(case_kind, None, case_start_offset,
                               case_end_offset, None, [
            case_expr,
            ParseNode(kind.LC, None, statements_start_offset,
                      statements_end_offset, None, statements)
        ]))
    rc_end_offset = t.expect(tok.RBRACE).end_offset
    return ParseNode(kind.SWITCH, None, switch_start_offset, rc_end_offset,
                     None, [expr,
                            ParseNode(kind.LC, None, lc_start_offset,
                                      rc_end_offset, None, cases)])
def _throw_statement(t):
    """Parse `throw expr`; the expression must begin on the same line."""
    # TODO: Validate Scope
    start = t.expect(tok.THROW).start_offset
    if t.peek_sameline().tok == tok.EOL:
        raise JSSyntaxError(t.peek_sameline().start_offset,
                            'expected_statement')
    expr = _expression(t, True)
    return _auto_semicolon(t, kind.THROW, op.THROW,
                           start, expr.end_offset, None, [expr])
def _try_statement(t):
    """Parse try/catch/finally.

    The TRY node's children are [try-block, catch-wrapper-or-None,
    finally-block-or-None].  The catch clause is wrapped in
    RESERVED -> LEXICALSCOPE -> CATCH to mirror SpiderMonkey's tree shape.
    At least one of catch/finally must be present.
    """
    try_start_offset = t.expect(tok.TRY).start_offset
    try_node = _block_statement(t)
    catch_node = None
    finally_node = None
    try_end_offset = None
    if t.peek().tok == tok.CATCH:
        catch_start_offset = t.advance().start_offset
        t.expect(tok.LPAREN)
        x = t.expect(tok.NAME)
        catch_expr = ParseNode(kind.NAME, None, x.start_offset, x.end_offset,
                               x.atom, [None])
        t.expect(tok.RPAREN)
        catch_block = _block_statement(t)
        catch_end_offset = catch_block.end_offset
        catch_node = \
            ParseNode(kind.RESERVED, None, None, None, None, [
                ParseNode(kind.LEXICALSCOPE, op.LEAVEBLOCK,
                          catch_start_offset, catch_end_offset, None, [
                    ParseNode(kind.CATCH, None, catch_start_offset,
                              catch_end_offset, None,
                              [catch_expr, None, catch_block])
                ])
            ])
        try_end_offset = catch_end_offset
    if t.peek().tok == tok.FINALLY:
        t.advance()
        finally_node = _block_statement(t)
        try_end_offset = finally_node.end_offset
    if not catch_node and not finally_node:
        raise JSSyntaxError(try_end_offset, 'invalid_catch')
    return ParseNode(kind.TRY, None, try_start_offset, try_end_offset,
                     None,
                     [try_node, catch_node, finally_node])
def _statement(t):
    """Parse a single statement, dispatching on the next token.

    Anything that is not a recognized statement keyword is treated as an
    expression statement — or, if the expression is a bare NAME followed
    by `:`, a labelled statement (COLON node).
    """
    # TODO: Labelled Statement
    x = t.peek()
    if x.tok == tok.LBRACE:
        return _block_statement(t)
    elif x.tok == tok.SEMI:
        return _empty_statement(t)
    elif x.tok == tok.VAR:
        return _var_statement(t)
    elif x.tok == tok.IF:
        return _if_statement(t)
    elif x.tok == tok.DO:
        return _do_statement(t)
    elif x.tok == tok.WHILE:
        return _while_statement(t)
    elif x.tok == tok.FOR:
        return _for_statement(t)
    elif x.tok == tok.CONTINUE:
        return _continue_statement(t)
    elif x.tok == tok.BREAK:
        return _break_statement(t)
    elif x.tok == tok.RETURN:
        return _return_statement(t)
    elif x.tok == tok.WITH:
        return _with_statement(t)
    elif x.tok == tok.SWITCH:
        return _switch_statement(t)
    elif x.tok == tok.THROW:
        return _throw_statement(t)
    elif x.tok == tok.TRY:
        return _try_statement(t)
    elif x.tok == tok.EOF:
        raise JSSyntaxError(x.start_offset, 'unexpected_eof')
    elif x.tok == tok.FUNCTION:
        return _function_declaration(t, op.CLOSURE) #TODO: warn, since this is not reliable
    # FIX: the old guard `elif x.tok not in (tok.LBRACE, tok.FUNCTION)` was
    # always true here (both tokens already returned above), making the
    # trailing `else: raise` unreachable dead code — removed.
    expr = _expression(t, True)
    if expr.kind == kind.NAME and t.peek().tok == tok.COLON:
        t.expect(tok.COLON)
        stmt = _statement(t)
        return ParseNode(kind.COLON, op.NAME, expr.start_offset,
                         stmt.end_offset, expr.atom, [stmt])
    return _auto_semicolon(t, kind.SEMI, None, expr.start_offset,
                           expr.end_offset, None, [expr])
def _sourceelements(t, end_tok):
    """Parse top-level (or function-body) elements until `end_tok`.

    `end_tok` itself is not consumed.
    """
    nodes = []
    while t.peek().tok != end_tok:
        if t.peek().tok == tok.FUNCTION:
            nodes.append(_function_declaration(t, None))
        else:
            nodes.append(_statement(t))
    return nodes
def parsestring(s, start_offset=0):
    """Parse JavaScript source `s` into a validated ParseNode tree.

    The root is an LC node whose children are the top-level elements;
    offsets are relative to `start_offset`.
    """
    assert start_offset is not None
    t = tokenizer.Tokenizer(tokenizer.TokenStream(s, start_offset))
    nodes = _sourceelements(t, tok.EOF)
    end = t.expect(tok.EOF).end_offset
    start = nodes[-1].start_offset if nodes else end
    root = ParseNode(kind.LC, None, start, end, None, nodes)
    _validate(root)
    return root
def is_valid_version(version):
    """Return True if `version` is one of the supported JS version names."""
    return version in _VERSIONS
def _validate(node, depth=0):
for kid in node.kids:
if kid:
assert kid.parent is node
_validate(kid, depth+1)
def parse(script, jsversion, start_offset):
    """Parse `script` and return the root ParseNode.

    `jsversion` is validated but otherwise ignored for now.
    """
    # TODO: respect version
    assert is_valid_version(jsversion)
    return parsestring(script, start_offset)
def is_compilable_unit(script, jsversion):
    """Return False only when `script` is an *incomplete* construct.

    A script that fails to parse for any other reason still counts as a
    compilable unit; callers use this to decide whether to keep waiting
    for more input.
    """
    # TODO: respect version
    assert is_valid_version(jsversion)
    try:
        parsestring(script)
    except JSSyntaxError as error:
        return error.msg not in ('unexpected_eof', 'unterminated_comment')
    return True
class TestParser(unittest.TestCase):
    """Unit tests for the parser entry points.

    Modernized to use the non-deprecated unittest APIs (assertTrue/
    assertFalse/assertEqual/fail instead of assert_/assertEquals).
    """

    def testCompilableUnit(self):
        self.assertTrue(is_compilable_unit('', 'default'))
        self.assertTrue(is_compilable_unit('/**/', 'default'))
        self.assertFalse(is_compilable_unit('/*', 'default'))

    def testRegExpLineBreak(self):
        # A line break inside a regexp literal is a syntax error at the EOL.
        try:
            parsestring('re = /[\n')
        except JSSyntaxError as error:
            self.assertEqual(error.offset, 5)
        else:
            self.fail()

    def testUnterminatedComment(self):
        try:
            parsestring('/*')
        except JSSyntaxError as error:
            self.assertEqual(error.offset, 1)
        else:
            self.fail()

    def testObjectEndComma(self):
        root = parsestring('a={a:1,}')
        node, = root.kids
        self.assertEqual(node.kind, kind.SEMI)
        node, = node.kids
        self.assertEqual(node.kind, kind.ASSIGN)
        left, right = node.kids
        self.assertEqual(left.atom, 'a')
        self.assertEqual(right.kind, kind.RC)
        node = right.end_comma
        self.assertEqual(node.kind, kind.COMMA)
        self.assertEqual(node.start_offset, 6)
        self.assertEqual(node.end_offset, 6)

    def _testArrayEndComma(self, script, col):
        root = parsestring(script)
        node, = root.kids
        self.assertEqual(node.kind, kind.SEMI)
        node, = node.kids
        self.assertEqual(node.kind, kind.ASSIGN)
        left, right = node.kids
        self.assertEqual(left.atom, 'a')
        self.assertEqual(right.kind, kind.RB)
        node = right.end_comma
        self.assertEqual(node is None, col is None)
        if col is None:
            self.assertIsNone(node)
        else:
            self.assertEqual(node.kind, kind.COMMA)
            self.assertEqual(node.start_offset, col)
            self.assertEqual(node.end_offset, col)

    def testArrayEndComma(self):
        self._testArrayEndComma('a=[,]', 3)
        self._testArrayEndComma('a=[a,]', 4)
        self._testArrayEndComma('a=[a,b,c]', None)

    def _testArrayCommas(self, script, items, end_comma):
        root = parsestring(script)
        node, = root.kids
        self.assertEqual(node.kind, kind.SEMI)
        node, = node.kids
        self.assertEqual(node.kind, kind.ASSIGN)
        left, right = node.kids
        self.assertEqual(left.atom, 'a')
        self.assertEqual(right.kind, kind.RB)
        node = right
        self.assertEqual(len(node.kids), len(items))
        for kid, item in zip(node.kids, items):
            self.assertEqual(kid.atom, item)
        self.assertEqual(bool(node.end_comma), end_comma)

    def testArrayCommas(self):
        self._testArrayCommas('a=[]', [], False)
        self._testArrayCommas('a=[,]', [None], True)
        self._testArrayCommas('a=[,,]', [None, None], True)
        self._testArrayCommas('a=[,1]', [None, '1'], False)
        self._testArrayCommas('a=[,,1]', [None, None, '1'], False)
        self._testArrayCommas('a=[1,,1]', ['1', None, '1'], False)
        self._testArrayCommas('a=[,1,]', [None, '1'], True)

    def testParseArray(self):
        try:
            parsestring('a=[1 1]')
        except JSSyntaxError:
            # Missing comma between elements must be rejected.
            pass
        else:
            self.fail()
| 36.150108 | 96 | 0.569709 |
dd8d9e83c2d6068fa8faa82e5cac75deb4e7d5d4 | 19,291 | py | Python | homeassistant/components/roon/media_player.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 11 | 2018-02-16T15:35:47.000Z | 2020-01-14T15:20:00.000Z | homeassistant/components/roon/media_player.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 79 | 2020-07-23T07:13:37.000Z | 2022-03-22T06:02:37.000Z | homeassistant/components/roon/media_player.py | Vaarlion/core | f3de8b9f28de01abf72c0f5bb0b457eb1841f201 | [
"Apache-2.0"
] | 11 | 2020-12-16T13:48:14.000Z | 2022-02-01T00:28:05.000Z | """MediaPlayer platform for Roon integration."""
import logging
from roonapi import split_media_path
import voluptuous as vol
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_BROWSE_MEDIA,
SUPPORT_GROUPING,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
DEVICE_DEFAULT_NAME,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.util import convert
from homeassistant.util.dt import utcnow
from .const import DOMAIN
from .media_browser import browse_media
# Bitmask of media-player features this integration reports to Home Assistant.
SUPPORT_ROON = (
    SUPPORT_BROWSE_MEDIA
    | SUPPORT_GROUPING
    | SUPPORT_PAUSE
    | SUPPORT_VOLUME_SET
    | SUPPORT_STOP
    | SUPPORT_PREVIOUS_TRACK
    | SUPPORT_NEXT_TRACK
    | SUPPORT_SHUFFLE_SET
    | SUPPORT_SEEK
    | SUPPORT_TURN_ON
    | SUPPORT_TURN_OFF
    | SUPPORT_VOLUME_MUTE
    | SUPPORT_PLAY
    | SUPPORT_PLAY_MEDIA
    | SUPPORT_VOLUME_STEP
)
_LOGGER = logging.getLogger(__name__)
# Entity service that transfers playback from one Roon zone to another.
SERVICE_TRANSFER = "transfer"
ATTR_TRANSFER = "transfer_id"
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Roon MediaPlayer from Config Entry."""
    roon_server = hass.data[DOMAIN][config_entry.entry_id]
    # dev_ids of players already added, so updates are routed, not re-added.
    media_players = set()
    # Register entity services
    platform = entity_platform.async_get_current_platform()
    platform.async_register_entity_service(
        SERVICE_TRANSFER,
        {vol.Required(ATTR_TRANSFER): cv.entity_id},
        "async_transfer",
    )
    @callback
    def async_update_media_player(player_data):
        """Add or update Roon MediaPlayer."""
        dev_id = player_data["dev_id"]
        if dev_id not in media_players:
            # new player!
            media_player = RoonDevice(roon_server, player_data)
            media_players.add(dev_id)
            async_add_entities([media_player])
        else:
            # update existing player
            # NOTE(review): the "room_" prefix looks like a typo for "roon_",
            # but it matches the listener in RoonDevice.async_added_to_hass,
            # so it must not be changed in isolation.
            async_dispatcher_send(
                hass, f"room_media_player_update_{dev_id}", player_data
            )
    # start listening for players to be added or changed by the server component
    async_dispatcher_connect(hass, "roon_media_player", async_update_media_player)
class RoonDevice(MediaPlayerEntity):
"""Representation of an Roon device."""
    def __init__(self, server, player_data):
        """Initialize Roon device object."""
        self._remove_signal_status = None
        self._server = server
        self._available = True
        # Timestamp of the last position update (None until playback starts).
        self._last_position_update = None
        self._supports_standby = False
        self._state = STATE_IDLE
        # Identity fields filled in by update_state() from player_data.
        self._unique_id = None
        self._zone_id = None
        self._output_id = None
        self._name = DEVICE_DEFAULT_NAME
        # Cached now-playing metadata.
        self._media_title = None
        self._media_album_name = None
        self._media_artist = None
        self._media_position = 0
        self._media_duration = 0
        # Cached volume state.
        self._is_volume_muted = False
        self._volume_step = 0
        self._shuffle = False
        self._media_image_url = None
        self._volume_level = 0
        # Populate all of the above from the initial player_data payload.
        self.update_data(player_data)
    async def async_added_to_hass(self):
        """Register callback."""
        # Listen for per-device updates dispatched by async_setup_entry
        # (the "room_" prefix matches the sender's signal name).
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                f"room_media_player_update_{self.unique_id}",
                self.async_update_callback,
            )
        )
        # Let the server component map entity_id <-> Roon player name.
        self._server.add_player_id(self.entity_id, self.name)
    @callback
    def async_update_callback(self, player_data):
        """Handle device updates."""
        # Refresh cached state from the new payload, then push it to HA.
        self.update_data(player_data)
        self.async_write_ha_state()
    @property
    def available(self):
        """Return True if entity is available."""
        return self._available

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_ROON

    @property
    def group_members(self):
        """Return the grouped players."""
        # Map Roon zone names back to HA entity ids via the server component.
        roon_names = self._server.roonapi.grouped_zone_names(self._output_id)
        return [self._server.entity_id(roon_name) for roon_name in roon_names]

    @property
    def device_info(self):
        """Return the device info."""
        dev_model = "player"
        # Use the first source control's display name as the model, if any.
        if self.player_data.get("source_controls"):
            dev_model = self.player_data["source_controls"][0].get("display_name")
        return {
            "identifiers": {(DOMAIN, self.unique_id)},
            "name": self.name,
            "manufacturer": "RoonLabs",
            "model": dev_model,
            "via_device": (DOMAIN, self._server.roon_id),
        }
    def update_data(self, player_data=None):
        """Update session object."""
        if player_data:
            self.player_data = player_data
        if not self.player_data["is_available"]:
            # this player was removed
            self._available = False
            self._state = STATE_OFF
        else:
            self._available = True
            # determine player state
            self.update_state()
            if self.state == STATE_PLAYING:
                # Anchor media_position so HA can extrapolate progress.
                self._last_position_update = utcnow()
    @classmethod
    def _parse_volume(cls, player_data):
        """Parse volume data to determine volume levels and mute state.

        Returns a dict with "level" (0.0-1.0), "step" and "muted"; the
        defaults below are kept whenever the payload lacks volume keys.
        """
        volume = {
            "level": 0,
            "step": 0,
            "muted": False,
        }
        try:
            volume_data = player_data["volume"]
            volume_muted = volume_data["is_muted"]
            volume_step = convert(volume_data["step"], int, 0)
            if volume_data["type"] == "db":
                # Map the dB scale (apparently -80..0) onto 0..100.
                level = convert(volume_data["value"], float, 0.0) / 80 * 100 + 100
            else:
                level = convert(volume_data["value"], float, 0.0)
            volume_level = convert(level, int, 0) / 100
        except KeyError:
            # catch KeyError: payload had no usable volume info; keep defaults.
            pass
        else:
            volume["muted"] = volume_muted
            volume["step"] = volume_step
            volume["level"] = volume_level
        return volume
    def _parse_now_playing(self, player_data):
        """Parse now playing data to determine title, artist, position, duration and artwork.

        Returns a dict with all-None/zero defaults when the payload has no
        usable "now_playing" section.
        """
        now_playing = {
            "title": None,
            "artist": None,
            "album": None,
            "position": 0,
            "duration": 0,
            "image": None,
        }
        now_playing_data = None
        try:
            now_playing_data = player_data["now_playing"]
            # Roon's "three_line" display: title / artist / album.
            media_title = now_playing_data["three_line"]["line1"]
            media_artist = now_playing_data["three_line"]["line2"]
            media_album_name = now_playing_data["three_line"]["line3"]
            media_position = convert(now_playing_data["seek_position"], int, 0)
            media_duration = convert(now_playing_data.get("length"), int, 0)
            image_id = now_playing_data.get("image_key")
        except KeyError:
            # catch KeyError: nothing playing or incomplete payload; keep defaults.
            pass
        else:
            now_playing["title"] = media_title
            now_playing["artist"] = media_artist
            now_playing["album"] = media_album_name
            now_playing["position"] = media_position
            now_playing["duration"] = media_duration
            if image_id:
                # Resolve the Roon image key to an artwork URL.
                now_playing["image"] = self._server.roonapi.get_image(image_id)
        return now_playing
def update_state(self):
"""Update the power state and player state."""
new_state = ""
# power state from source control (if supported)
if "source_controls" in self.player_data:
for source in self.player_data["source_controls"]:
if source["supports_standby"] and source["status"] != "indeterminate":
self._supports_standby = True
if source["status"] in ["standby", "deselected"]:
new_state = STATE_OFF
break
# determine player state
if not new_state:
if self.player_data["state"] == "playing":
new_state = STATE_PLAYING
elif self.player_data["state"] == "loading":
new_state = STATE_PLAYING
elif self.player_data["state"] == "stopped":
new_state = STATE_IDLE
elif self.player_data["state"] == "paused":
new_state = STATE_PAUSED
else:
new_state = STATE_IDLE
self._state = new_state
self._unique_id = self.player_data["dev_id"]
self._zone_id = self.player_data["zone_id"]
self._output_id = self.player_data["output_id"]
self._shuffle = self.player_data["settings"]["shuffle"]
self._name = self.player_data["display_name"]
volume = RoonDevice._parse_volume(self.player_data)
self._is_volume_muted = volume["muted"]
self._volume_step = volume["step"]
self._is_volume_muted = volume["muted"]
self._volume_level = volume["level"]
now_playing = self._parse_now_playing(self.player_data)
self._media_title = now_playing["title"]
self._media_artist = now_playing["artist"]
self._media_album_name = now_playing["album"]
self._media_position = now_playing["position"]
self._media_duration = now_playing["duration"]
self._media_image_url = now_playing["image"]
    # --- read-only accessors backed by fields cached in update_state() ---

    @property
    def media_position_updated_at(self):
        """When was the position of the current playing media valid."""
        # Returns value from homeassistant.util.dt.utcnow().
        return self._last_position_update

    @property
    def unique_id(self):
        """Return the id of this roon client."""
        return self._unique_id

    @property
    def should_poll(self):
        """Return True if entity has to be polled for state."""
        # Updates arrive via the dispatcher; no polling needed.
        return False

    @property
    def zone_id(self):
        """Return current session Id."""
        return self._zone_id

    @property
    def output_id(self):
        """Return current session Id."""
        return self._output_id

    @property
    def name(self):
        """Return device name."""
        return self._name

    @property
    def media_title(self):
        """Return title currently playing."""
        return self._media_title

    @property
    def media_album_name(self):
        """Album name of current playing media (Music track only)."""
        return self._media_album_name

    @property
    def media_artist(self):
        """Artist of current playing media (Music track only)."""
        return self._media_artist

    @property
    def media_album_artist(self):
        """Album artist of current playing media (Music track only)."""
        return self._media_artist

    @property
    def media_image_url(self):
        """Image url of current playing media."""
        return self._media_image_url

    @property
    def media_position(self):
        """Return position currently playing."""
        return self._media_position

    @property
    def media_duration(self):
        """Return total runtime length."""
        return self._media_duration

    @property
    def volume_level(self):
        """Return current volume level."""
        return self._volume_level

    @property
    def is_volume_muted(self):
        """Return mute state."""
        return self._is_volume_muted

    @property
    def volume_step(self):
        """Return volume step size."""
        return self._volume_step

    @property
    def supports_standby(self):
        """Return power state of source controls."""
        return self._supports_standby

    @property
    def state(self):
        """Return current playstate of the device."""
        return self._state

    @property
    def shuffle(self):
        """Boolean if shuffle is enabled."""
        return self._shuffle
    def media_play(self) -> None:
        """Send the play command to the device."""
        self._server.roonapi.playback_control(self.output_id, "play")
    def media_pause(self) -> None:
        """Send the pause command to the device."""
        self._server.roonapi.playback_control(self.output_id, "pause")
    def media_play_pause(self) -> None:
        """Toggle between play and pause on the device."""
        self._server.roonapi.playback_control(self.output_id, "playpause")
    def media_stop(self) -> None:
        """Send the stop command to the device."""
        self._server.roonapi.playback_control(self.output_id, "stop")
    def media_next_track(self) -> None:
        """Send the next-track command to the device."""
        self._server.roonapi.playback_control(self.output_id, "next")
    def media_previous_track(self) -> None:
        """Send the previous-track command to the device."""
        self._server.roonapi.playback_control(self.output_id, "previous")
    def media_seek(self, position) -> None:
        """Send a seek command to the device.

        position: target playback position, passed straight to the Roon API.
        """
        self._server.roonapi.seek(self.output_id, position)
        # Seek doesn't cause an async update - so force one
        self._media_position = position
        self.schedule_update_ha_state()
    def set_volume_level(self, volume) -> None:
        """Send a new volume level to the device.

        volume: fraction in [0, 1]; converted to the 0-100 scale Roon expects.
        """
        volume = int(volume * 100)
        self._server.roonapi.change_volume(self.output_id, volume)
    def mute_volume(self, mute=True) -> None:
        """Send mute (True) or unmute (False) to the device."""
        self._server.roonapi.mute(self.output_id, mute)
    def volume_up(self) -> None:
        """Raise the volume by 3 (relative step)."""
        self._server.roonapi.change_volume(self.output_id, 3, "relative")
    def volume_down(self) -> None:
        """Lower the volume by 3 (relative step)."""
        self._server.roonapi.change_volume(self.output_id, -3, "relative")
    def turn_on(self) -> None:
        """Turn on the device (if supported).

        Falls back to simply starting playback when the player has no
        standby-capable source controls; otherwise wakes the first source
        control that supports standby and has a determinate status.
        """
        if not (self.supports_standby and "source_controls" in self.player_data):
            self.media_play()
            return
        for source in self.player_data["source_controls"]:
            if source["supports_standby"] and source["status"] != "indeterminate":
                self._server.roonapi.convenience_switch(
                    self.output_id, source["control_key"]
                )
                return
    def turn_off(self) -> None:
        """Turn off the device (if supported).

        Falls back to stopping playback when the player has no
        standby-capable source controls; otherwise puts the first eligible
        source control into standby.
        """
        if not (self.supports_standby and "source_controls" in self.player_data):
            self.media_stop()
            return
        for source in self.player_data["source_controls"]:
            if source["supports_standby"] and source["status"] != "indeterminate":
                self._server.roonapi.standby(self.output_id, source["control_key"])
                return
    def set_shuffle(self, shuffle) -> None:
        """Enable/disable shuffle on the device."""
        self._server.roonapi.shuffle(self.output_id, shuffle)
    def play_media(self, media_type, media_id, **kwargs) -> None:
        """Send the play_media command to the media player.

        For media_type "library"/"track", media_id is a Roon browser id
        and is played directly; for anything else, media_id is interpreted
        as a path through the Roon menu hierarchy.
        """
        _LOGGER.debug("Playback request for %s / %s", media_type, media_id)
        if media_type in ("library", "track"):
            # media_id is a roon browser id
            self._server.roonapi.play_id(self.zone_id, media_id)
        else:
            # media_id is a path matching the Roon menu structure
            path_list = split_media_path(media_id)
            if not self._server.roonapi.play_media(self.zone_id, path_list):
                _LOGGER.error(
                    "Playback request for %s / %s / %s was unsuccessful",
                    media_type,
                    media_id,
                    path_list,
                )
    def join_players(self, group_members) -> None:
        """Join `group_members` as a player group with the current player.

        group_members: iterable of Home Assistant entity ids. All of them
        must map to Roon players that this output can be grouped with,
        otherwise the whole request is rejected with an error log.
        """
        zone_data = self._server.roonapi.zone_by_output_id(self._output_id)
        if zone_data is None:
            _LOGGER.error("No zone data for %s", self.name)
            return
        # Build display-name -> output_id for every other zone this output
        # is allowed to group with (first matching output per zone wins).
        sync_available = {}
        for zone in self._server.zones.values():
            for output in zone["outputs"]:
                if (
                    zone["display_name"] != self.name
                    and output["output_id"]
                    in self.player_data["can_group_with_output_ids"]
                    and zone["display_name"] not in sync_available
                ):
                    sync_available[zone["display_name"]] = output["output_id"]
        # Validate every requested member before touching the group;
        # any unknown/ungroupable member aborts the whole operation.
        names = []
        for entity_id in group_members:
            name = self._server.roon_name(entity_id)
            if name is None:
                _LOGGER.error("No roon player found for %s", entity_id)
                return
            if name not in sync_available:
                _LOGGER.error(
                    "Can't join player %s with %s because it's not in the join available list %s",
                    name,
                    self.name,
                    list(sync_available),
                )
                return
            names.append(name)
        _LOGGER.debug("Joining %s to %s", names, self.name)
        self._server.roonapi.group_outputs(
            [self._output_id] + [sync_available[name] for name in names]
        )
def unjoin_player(self):
"""Remove this player from any group."""
if not self._server.roonapi.is_grouped(self._output_id):
_LOGGER.error(
"Can't unjoin player %s because it's not in a group",
self.name,
)
return
self._server.roonapi.ungroup_outputs([self._output_id])
async def async_transfer(self, transfer_id):
"""Transfer playback from this roon player to another."""
name = self._server.roon_name(transfer_id)
if name is None:
_LOGGER.error("No roon player found for %s", transfer_id)
return
zone_ids = {
output["display_name"]: output["zone_id"]
for output in self._server.zones.values()
if output["display_name"] != self.name
}
transfer_id = zone_ids.get(name)
if transfer_id is None:
_LOGGER.error(
"Can't transfer from %s to %s because destination is not known %s",
self.name,
transfer_id,
list(zone_ids),
)
_LOGGER.debug("Transferring from %s to %s", self.name, name)
await self.hass.async_add_executor_job(
self._server.roonapi.transfer_zone, self._zone_id, transfer_id
)
    async def async_browse_media(self, media_content_type=None, media_content_id=None):
        """Implement the websocket media browsing helper.

        Delegates to the blocking browse_media helper in an executor so the
        event loop is never blocked by the Roon API call.
        """
        return await self.hass.async_add_executor_job(
            browse_media,
            self.zone_id,
            self._server,
            media_content_type,
            media_content_id,
        )
| 33.375433 | 98 | 0.610958 |
7ffd976dc565f47b9c9880aa852c49064f174a73 | 1,129 | py | Python | lpot/experimental/data/dataloaders/dataloader.py | deb-intel/LPOTtest | f7b7524c733e581668d15192b69f9d9a7ca5222d | [
"Apache-2.0"
] | null | null | null | lpot/experimental/data/dataloaders/dataloader.py | deb-intel/LPOTtest | f7b7524c733e581668d15192b69f9d9a7ca5222d | [
"Apache-2.0"
] | null | null | null | lpot/experimental/data/dataloaders/dataloader.py | deb-intel/LPOTtest | f7b7524c733e581668d15192b69f9d9a7ca5222d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .tensorflow_dataloader import TensorflowDataLoader
from .mxnet_dataloader import MXNetDataLoader
from .pytorch_dataloader import PyTorchDataLoader
from .onnxrt_dataloader import ONNXRTDataLoader
# Maps framework identifier strings to their DataLoader implementations.
# Note: "pytorch" and "pytorch_ipex" share PyTorchDataLoader, and both
# ONNX Runtime op flavors share ONNXRTDataLoader.
DATALOADERS = {"tensorflow": TensorflowDataLoader,
               "mxnet": MXNetDataLoader,
               "pytorch": PyTorchDataLoader,
               "pytorch_ipex": PyTorchDataLoader,
               "onnxrt_qlinearops": ONNXRTDataLoader,
               "onnxrt_integerops": ONNXRTDataLoader}
| 37.633333 | 74 | 0.73605 |
8e3579bb5678a733a058296f692bad4d7f15938b | 13,235 | py | Python | t61codec.py | exhuma/t61codec | 544a3d476817420576b97a6e35e92a0affff15ac | [
"MIT"
] | null | null | null | t61codec.py | exhuma/t61codec | 544a3d476817420576b97a6e35e92a0affff15ac | [
"MIT"
] | null | null | null | t61codec.py | exhuma/t61codec | 544a3d476817420576b97a6e35e92a0affff15ac | [
"MIT"
] | null | null | null | """
Python Character Mapping Codec for T61
See https://en.wikipedia.org/wiki/ITU_T.61
"""
# pylint: disable=invalid-name, no-member, redefined-builtin
import codecs
from typing import Tuple
try:
import importlib.metadata as imlib
except ImportError:
import importlib_metadata as imlib # type: ignore
__version__ = imlib.Distribution.from_name("t61codec").version
class Codec(codecs.Codec):
    """
    Main implementation for the T.61 codec, based on
    :py:func:`codecs.charmap_encode` and :py:func:`codecs.charmap_decode`
    """
    def encode(self, input: str, errors: str = "strict") -> Tuple[bytes, int]:
        """Encode *input* to T.61 bytes; returns (encoded bytes, chars consumed)."""
        return codecs.charmap_encode(input, errors, ENCODING_TABLE)  # type: ignore
    def decode(self, input: bytes, errors: str = "strict") -> Tuple[str, int]:
        """Decode T.61 *input* bytes to text; returns (text, bytes consumed)."""
        # Annotation fixed: decode receives bytes, not str.
        return codecs.charmap_decode(input, errors, DECODING_TABLE)  # type: ignore
class IncrementalEncoder(codecs.IncrementalEncoder):
    """
    See :py:class:`codecs.IncrementalEncoder`
    """
    def encode(self, input: str, final: bool = False) -> bytes:
        # The charmap codec is stateless, so `final` is ignored.
        return codecs.charmap_encode(  # type: ignore
            input, self.errors, ENCODING_TABLE
        )[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """
    See :py:class:`codecs.IncrementalDecoder`
    """
    def decode(self, input: bytes, final: bool = False) -> str:
        # One byte always maps to one character, so `final` is ignored.
        return codecs.charmap_decode(  # type: ignore
            input, self.errors, DECODING_TABLE
        )[0]
class StreamWriter(Codec, codecs.StreamWriter):
    """
    T.61 stream writer; combines :class:`Codec` encoding with the
    standard :py:class:`codecs.StreamWriter` machinery.
    """
class StreamReader(Codec, codecs.StreamReader):
    """
    T.61 stream reader; combines :class:`Codec` decoding with the
    standard :py:class:`codecs.StreamReader` machinery.
    """
def getregentry() -> codecs.CodecInfo:
    """
    Build the :py:class:`codecs.CodecInfo` record describing the T.61 codec
    for use in the codec registry.
    """
    codec = Codec()  # stateless; one instance serves both directions
    return codecs.CodecInfo(
        name="t.61",
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
DECODING_TABLE = (
"\x00" # 0x00 -> NULL
"\x01" # 0x01 -> START OF HEADING
"\x02" # 0x02 -> START OF TEXT
"\x03" # 0x03 -> END OF TEXT
"\x04" # 0x04 -> END OF TRANSMISSION
"\x05" # 0x05 -> ENQUIRY
"\x06" # 0x06 -> ACKNOWLEDGE
"\x07" # 0x07 -> BELL
"\x08" # 0x08 -> BACKSPACE
"\t" # 0x09 -> HORIZONTAL TABULATION
"\n" # 0x0A -> LINE FEED
"\x0b" # 0x0B -> VERTICAL TABULATION
"\x0c" # 0x0C -> FORM FEED
"\r" # 0x0D -> CARRIAGE RETURN
"\x0e" # 0x0E -> SHIFT OUT
"\x0f" # 0x0F -> SHIFT IN
"\x10" # 0x10 -> DATA LINK ESCAPE
"\x11" # 0x11 -> DEVICE CONTROL ONE
"\x12" # 0x12 -> DEVICE CONTROL TWO
"\x13" # 0x13 -> DEVICE CONTROL THREE
"\x14" # 0x14 -> DEVICE CONTROL FOUR
"\x15" # 0x15 -> NEGATIVE ACKNOWLEDGE
"\x16" # 0x16 -> SYNCHRONOUS IDLE
"\x17" # 0x17 -> END OF TRANSMISSION BLOCK
"\x18" # 0x18 -> CANCEL
"\x19" # 0x19 -> END OF MEDIUM
"\x1a" # 0x1A -> SUBSTITUTE
"\x1b" # 0x1B -> ESCAPE
"\x1c" # 0x1C -> FILE SEPARATOR
"\x1d" # 0x1D -> GROUP SEPARATOR
"\x1e" # 0x1E -> RECORD SEPARATOR
"\x1f" # 0x1F -> UNIT SEPARATOR
" " # 0x20 -> SPACE
"!" # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
"\ufffe" # 0x23 -> *unmapped*
"\ufffe" # 0x24 -> *unmapped*
"%" # 0x25 -> PERCENT SIGN
"&" # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
"(" # 0x28 -> LEFT PARENTHESIS
")" # 0x29 -> RIGHT PARENTHESIS
"*" # 0x2A -> ASTERISK
"+" # 0x2B -> PLUS SIGN
"," # 0x2C -> COMMA
"-" # 0x2D -> HYPHEN-MINUS
"." # 0x2E -> FULL STOP
"/" # 0x2F -> SOLIDUS
"0" # 0x30 -> DIGIT ZERO
"1" # 0x31 -> DIGIT ONE
"2" # 0x32 -> DIGIT TWO
"3" # 0x33 -> DIGIT THREE
"4" # 0x34 -> DIGIT FOUR
"5" # 0x35 -> DIGIT FIVE
"6" # 0x36 -> DIGIT SIX
"7" # 0x37 -> DIGIT SEVEN
"8" # 0x38 -> DIGIT EIGHT
"9" # 0x39 -> DIGIT NINE
":" # 0x3A -> COLON
";" # 0x3B -> SEMICOLON
"<" # 0x3C -> LESS-THAN SIGN
"=" # 0x3D -> EQUALS SIGN
">" # 0x3E -> GREATER-THAN SIGN
"?" # 0x3F -> QUESTION MARK
"@" # 0x40 -> COMMERCIAL AT
"A" # 0x41 -> LATIN CAPITAL LETTER A
"B" # 0x42 -> LATIN CAPITAL LETTER B
"C" # 0x43 -> LATIN CAPITAL LETTER C
"D" # 0x44 -> LATIN CAPITAL LETTER D
"E" # 0x45 -> LATIN CAPITAL LETTER E
"F" # 0x46 -> LATIN CAPITAL LETTER F
"G" # 0x47 -> LATIN CAPITAL LETTER G
"H" # 0x48 -> LATIN CAPITAL LETTER H
"I" # 0x49 -> LATIN CAPITAL LETTER I
"J" # 0x4A -> LATIN CAPITAL LETTER J
"K" # 0x4B -> LATIN CAPITAL LETTER K
"L" # 0x4C -> LATIN CAPITAL LETTER L
"M" # 0x4D -> LATIN CAPITAL LETTER M
"N" # 0x4E -> LATIN CAPITAL LETTER N
"O" # 0x4F -> LATIN CAPITAL LETTER O
"P" # 0x50 -> LATIN CAPITAL LETTER P
"Q" # 0x51 -> LATIN CAPITAL LETTER Q
"R" # 0x52 -> LATIN CAPITAL LETTER R
"S" # 0x53 -> LATIN CAPITAL LETTER S
"T" # 0x54 -> LATIN CAPITAL LETTER T
"U" # 0x55 -> LATIN CAPITAL LETTER U
"V" # 0x56 -> LATIN CAPITAL LETTER V
"W" # 0x57 -> LATIN CAPITAL LETTER W
"X" # 0x58 -> LATIN CAPITAL LETTER X
"Y" # 0x59 -> LATIN CAPITAL LETTER Y
"Z" # 0x5A -> LATIN CAPITAL LETTER Z
"[" # 0x5B -> LEFT SQUARE BRACKET
"\ufffe" # 0x5C -> *unmapped*
"]" # 0x5D -> RIGHT SQUARE BRACKET
"\ufffe" # 0x5E -> *unmapped*
"_" # 0x5F -> LOW LINE
"\ufffe" # 0x60 -> *unmapped*
"a" # 0x61 -> LATIN SMALL LETTER A
"b" # 0x62 -> LATIN SMALL LETTER B
"c" # 0x63 -> LATIN SMALL LETTER C
"d" # 0x64 -> LATIN SMALL LETTER D
"e" # 0x65 -> LATIN SMALL LETTER E
"f" # 0x66 -> LATIN SMALL LETTER F
"g" # 0x67 -> LATIN SMALL LETTER G
"h" # 0x68 -> LATIN SMALL LETTER H
"i" # 0x69 -> LATIN SMALL LETTER I
"j" # 0x6A -> LATIN SMALL LETTER J
"k" # 0x6B -> LATIN SMALL LETTER K
"l" # 0x6C -> LATIN SMALL LETTER L
"m" # 0x6D -> LATIN SMALL LETTER M
"n" # 0x6E -> LATIN SMALL LETTER N
"o" # 0x6F -> LATIN SMALL LETTER O
"p" # 0x70 -> LATIN SMALL LETTER P
"q" # 0x71 -> LATIN SMALL LETTER Q
"r" # 0x72 -> LATIN SMALL LETTER R
"s" # 0x73 -> LATIN SMALL LETTER S
"t" # 0x74 -> LATIN SMALL LETTER T
"u" # 0x75 -> LATIN SMALL LETTER U
"v" # 0x76 -> LATIN SMALL LETTER V
"w" # 0x77 -> LATIN SMALL LETTER W
"x" # 0x78 -> LATIN SMALL LETTER X
"y" # 0x79 -> LATIN SMALL LETTER Y
"z" # 0x7A -> LATIN SMALL LETTER Z
"\ufffe" # 0x7B -> *unmapped*
"|" # 0x7C -> VERTICAL LINE
"\ufffe" # 0x7D -> *unmapped*
"\ufffe" # 0x7E -> *unmapped*
"\x7f" # 0x7F -> DELETE
"\x80" # 0x80 -> PADDING CHARACTER
"\x81" # 0x81 -> HIGH OCTET PRESET
"\x82" # 0x82 -> BREAK PERMITTED HERE (BPH)
"\x83" # 0x83 -> NO BREAK HERE (NBH)
"\x84" # 0x84 -> INDEX (IND)
"\x85" # 0x85 -> NEXT LINE (NEL)
"\x86" # 0x86 -> START OF SELECTED AREA (SSA)
"\x87" # 0x87 -> END OF SELECTED AREA (ESA)
"\x88" # 0x88 -> CHARACTER TABULATION SET (HTS)
"\x89" # 0x89 -> CHARACTER TABULATION WITH JUSTIFICATION (HTJ)
"\x8a" # 0x8a -> LINE TABULATION SET (VTS)
"\x8b" # 0x8b -> PARTIAL LINE FORWARD (PLD)
"\x8c" # 0x8c -> PARTIAL LINE BACKWARD (PLU)
"\x8d" # 0x8d -> REVERSE LINE FEED (RI)
"\x8e" # 0x8e -> SINGLE-SHIFT TWO (SS2)
"\x8f" # 0x8f -> SINGLE-SHIFT THREE (SS3)
"\x90" # 0x90 -> DEVICE CONTROL STRING (DCS)
"\x91" # 0x91 -> PRIVATE USE ONE (PU1)
"\x92" # 0x92 -> PRIVATE USE TWO (PU2)
"\x93" # 0x93 -> SET TRANSMIT STATE (STS)
"\x94" # 0x94 -> CANCEL CHARACTER (CCH)
"\x95" # 0x95 -> MESSAGE WAITING (MW)
"\x96" # 0x96 -> START OF GUARDED AREA (SPA)
"\x97" # 0x97 -> END OF GUARDED AREA (EPA)
"\x98" # 0x98 -> START OF STRING (SOS)
"\x99" # 0x99 -> SINGLE GRAPHIC CHARACTER INTRODUCER (SGCI)
"\x9a" # 0x9a -> SINGLE CHARACTER INTRODUCER (SCI)
"\x9b" # 0x9b -> CONTROL SEQUENCE INTRODUCER (CSI)
"\x9c" # 0x9c -> STRING TERMINATOR (ST)
"\x9d" # 0x9d -> OPERATING SYSTEM COMMAND (OSC)
"\x9e" # 0x9e -> PRIVACY MESSAGE (PM)
"\x9f" # 0x9f -> APPLICATION PROGRAM COMMAND (APC)
"\xa0" # 0xA0 -> NO-BREAK SPACE
"\xa1" # 0xA1 -> INVERTED EXCLAMATION MARK
"\xa2" # 0xA2 -> CENT SIGN
"\xa3" # 0xA3 -> POUND SIGN
"$" # 0xA4 -> DOLLAR SIGN
"\xa5" # 0xA5 -> YEN SIGN
"#" # 0xA6 -> NUMBER SIGN
"\xa7" # 0xA7 -> SECTION SIGN
"\xa4" # 0xA8 -> CURRENCY SIGN
"\ufffe" # 0xA9 -> *unmapped*
"\ufffe" # 0xAA -> *unmapped*
"\xab" # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
"\ufffe" # 0xAC -> *unmapped*
"\ufffe" # 0xAD -> *unmapped*
"\ufffe" # 0xAE -> *unmapped*
"\ufffe" # 0xAF -> *unmapped*
"\xb0" # 0xB0 -> DEGREE SIGN
"\xb1" # 0xB1 -> PLUS-MINUS SIGN
"\xb2" # 0xB2 -> SUPERSCRIPT TWO
"\xb3" # 0xB3 -> SUPERSCRIPT THREE
"\xd7" # 0xD7 -> MULTIPLICATION SIGN
"\xb5" # 0xB5 -> MICRO SIGN
"\xb6" # 0xB6 -> PILCROW SIGN
"\xb7" # 0xB7 -> MIDDLE DOT
"\xf7" # 0xF7 -> DIVISION SIGN
"\ufffe" # 0xF8 -> *unmapped*
"\ufffe" # 0xF9 -> *unmapped*
"\xbb" # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
"\xbc" # 0xBC -> VULGAR FRACTION ONE QUARTER
"\xbd" # 0xBD -> VULGAR FRACTION ONE HALF
"\xbe" # 0xBE -> VULGAR FRACTION THREE QUARTERS
"\xbf" # 0xBF -> INVERTED QUESTION MARK
"\ufffe" # 0xC0 -> *unmapped*
"\u0300" # 0xC1 -> COMBINING GRAVE ACCENT
"\u0301" # 0xC2 -> COMBINING ACUTE ACCENT
"\u0302" # 0xC3 -> COMBINING CIRCUMFLEX ACCENT
"\u0303" # 0xC4 -> COMBINING TILDE
"\u0304" # 0xC5 -> COMBINING MACRON
"\u0306" # 0xC6 -> COMBINING BREVE
"\u0307" # 0xC7 -> COMBINING DOT ABOVE
"\u0308" # 0xC8 -> COMBINING DIAERESIS
"\ufffe" # 0xC9 -> *unmapped*
"\u030a" # 0xCA -> COMBINING RING ABOVE
"\u0327" # 0xCB -> COMBINING CEDILLA
"\u0332" # 0xCC -> COMBINING LOW LINE
"\u030b" # 0xCD -> COMBINING DOUBLE ACUTE ACCENT
"\u032b" # 0xCE -> COMBINING INVERTED DOUBLE ARCH BELOW
"\u030c" # 0xCF -> COMBINING CARON
"\ufffe" # 0xD0 -> *unmapped*
"\ufffe" # 0xD1 -> *unmapped*
"\ufffe" # 0xD2 -> *unmapped*
"\ufffe" # 0xD3 -> *unmapped*
"\ufffe" # 0xD4 -> *unmapped*
"\ufffe" # 0xD5 -> *unmapped*
"\ufffe" # 0xD6 -> *unmapped*
"\ufffe" # 0xD7 -> *unmapped*
"\ufffe" # 0xD8 -> *unmapped*
"\ufffe" # 0xD9 -> *unmapped*
"\ufffe" # 0xDA -> *unmapped*
"\ufffe" # 0xDB -> *unmapped*
"\ufffe" # 0xDC -> *unmapped*
"\ufffe" # 0xDD -> *unmapped*
"\ufffe" # 0xDE -> *unmapped*
"\ufffe" # 0xDF -> *unmapped*
"\u2126" # 0xE0 -> OHM SIGN
"\u00c6" # 0xE1 -> LATIN CAPITAL LETTER AE
"\u00d0" # 0xE2 -> LATIN CAPITAL LETTER ETH
"\u00aa" # 0xE3 -> FEMININE ORDINAL INDICATOR
"\u0126" # 0xE4 -> LATIN CAPITAL LETTER H WITH STROKE
"\ufffe" # 0xE5 -> *unmapped*
"\u0132" # 0xE6 -> LATIN CAPITAL LIGATURE IJ
"\u013f" # 0xE7 -> LATIN CAPITAL LETTER L WITH MIDDLE DOT
"\u0141" # 0xE8 -> LATIN CAPITAL LETTER L WITH STROKE
"\u00d8" # 0xE9 -> LATIN CAPITAL LETTER O WITH STROKE
"\u0152" # 0xEA -> LATIN CAPITAL LIGATURE OE
"\u00ba" # 0xEB -> MASCULINE ORDINAL INDICATOR
"\u00de" # 0xEC -> LATIN CAPITAL LETTER THORN
"\u0166" # 0xED -> LATIN CAPITAL LETTER T WITH STROKE
"\u014a" # 0xEE -> LATIN CAPITAL LETTER ENG
"\u0149" # 0xEF -> LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
"\u0138" # 0xF0 -> LATIN SMALL LETTER KRA
"\u00e6" # 0xF1 -> LATIN SMALL LETTER AE
"\u0111" # 0xF2 -> LATIN SMALL LETTER D WITH STROKE
"\u00f0" # 0xF3 -> LATIN SMALL LETTER ETH
"\u0127" # 0xF4 -> LATIN SMALL LETTER H WITH STROKE
"\u0131" # 0xF5 -> LATIN SMALL LETTER DOTLESS I
"\u0133" # 0xF6 -> LATIN SMALL LIGATURE IJ
"\u0140" # 0xF7 -> LATIN SMALL LETTER L WITH MIDDLE DOT
"\u0142" # 0xF8 -> LATIN SMALL LETTER L WITH STROKE
"\u00f8" # 0xF9 -> LATIN SMALL LETTER O WITH STROKE
"\u0153" # 0xFA -> LATIN SMALL LIGATURE OE
"\u00df" # 0xFB -> LATIN SMALL LETTER SHARP S
"\u00fe" # 0xFC -> LATIN SMALL LETTER THORN
"\u0167" # 0xFD -> LATIN SMALL LETTER T WITH STROKE
"\u014b" # 0xFE -> LATIN SMALL LETTER ENG
"\ufffe" # 0xFF -> *unmapped*
)
# Encoding table
# Built by inverting DECODING_TABLE: maps each decodable character back to
# its single T.61 byte (unmapped \ufffe slots are excluded automatically).
ENCODING_TABLE = codecs.charmap_build(DECODING_TABLE)  # type: ignore
def search_function(encoding: str):
    """
    A search function which can be used with :py:func:`codecs.register`.

    Returns the T.61 :py:class:`codecs.CodecInfo` for the names
    ``"t61"``/``"t.61"`` and ``None`` for any other encoding, as the
    :py:func:`codecs.register` contract requires. (The previous
    implementation delegated unknown names to :py:func:`codecs.lookup`,
    which re-invokes every registered search function -- including this
    one -- and therefore recursed forever on unknown encodings.)

    As a convenience, there is also :func:`~.register` in this module.
    """
    if encoding.lower() in ("t61", "t.61"):
        return getregentry()
    return None
def register() -> None:
    """
    Convenience function which registers a new default Python search function

    After calling this, ``"t61"`` / ``"t.61"`` can be used directly with
    :py:meth:`bytes.decode` and :py:meth:`str.encode`.

    Example:
        >>> import t61codec
        >>> t61codec.register()
        >>> b'Hello T.61: \\xe0'.decode('t.61')
        'Hello T.61: Ω'
    """
    codecs.register(search_function)
| 35.867209 | 83 | 0.575595 |
7f9ac49e7f23db27c2501d747fe413da99b30d19 | 4,521 | py | Python | exps/example/yolox_voc/yolox_voc_s.py | newsun-boki/yolox-openvino-video-infer | caef7ab1ef881be9dcb4091685e8e4afd0a9b05a | [
"Apache-2.0"
] | null | null | null | exps/example/yolox_voc/yolox_voc_s.py | newsun-boki/yolox-openvino-video-infer | caef7ab1ef881be9dcb4091685e8e4afd0a9b05a | [
"Apache-2.0"
] | null | null | null | exps/example/yolox_voc/yolox_voc_s.py | newsun-boki/yolox-openvino-video-infer | caef7ab1ef881be9dcb4091685e8e4afd0a9b05a | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
import os
import torch
import torch.distributed as dist
from yolox.data import get_yolox_datadir
from yolox.exp import Exp as MyExp
class Exp(MyExp):
    """YOLOX-S experiment configured for Pascal-VOC style training (5 classes)."""
    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 5
        self.depth = 0.33   # YOLOX-S depth multiplier
        self.width = 0.50   # YOLOX-S width multiplier
        self.warmup_epochs = 1
        self.input_size = (640,640)
        # ---------- transform config ------------ #
        self.mosaic_prob = 1.0
        self.mixup_prob = 1.0
        self.hsv_prob = 1.0
        self.flip_prob = 0.5
        # Experiment name derived from this file's basename (without extension).
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
    def get_data_loader(self, batch_size, is_distributed, no_aug=False, cache_img=False):
        """Build the training DataLoader (VOC 2007 trainval + mosaic/mixup aug)."""
        from yolox.data import (
            VOCDetection,
            TrainTransform,
            YoloBatchSampler,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
            worker_init_reset_seed,
        )
        from yolox.utils import (
            wait_for_the_master,
            get_local_rank,
        )
        local_rank = get_local_rank()
        # Only one process per node downloads/caches the dataset at a time.
        with wait_for_the_master(local_rank):
            dataset = VOCDetection(
                data_dir=os.path.join(get_yolox_datadir(), "VOCdevkit"),
                image_sets=[('2007', 'trainval')],
                img_size=self.input_size,
                preproc=TrainTransform(
                    max_labels=50,
                    flip_prob=self.flip_prob,
                    hsv_prob=self.hsv_prob),
                cache=cache_img,
            )
        # Wrap with mosaic/mixup augmentation (disabled when no_aug is True).
        dataset = MosaicDetection(
            dataset,
            mosaic=not no_aug,
            img_size=self.input_size,
            preproc=TrainTransform(
                max_labels=120,
                flip_prob=self.flip_prob,
                hsv_prob=self.hsv_prob),
            degrees=self.degrees,
            translate=self.translate,
            mosaic_scale=self.mosaic_scale,
            mixup_scale=self.mixup_scale,
            shear=self.shear,
            perspective=self.perspective,
            enable_mixup=self.enable_mixup,
            mosaic_prob=self.mosaic_prob,
            mixup_prob=self.mixup_prob,
        )
        self.dataset = dataset
        if is_distributed:
            # Split the global batch across workers.
            batch_size = batch_size // dist.get_world_size()
        sampler = InfiniteSampler(
            len(self.dataset), seed=self.seed if self.seed else 0
        )
        batch_sampler = YoloBatchSampler(
            sampler=sampler,
            batch_size=batch_size,
            drop_last=False,
            mosaic=not no_aug,
        )
        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
        dataloader_kwargs["batch_sampler"] = batch_sampler
        # Make sure each process has different random seed, especially for 'fork' method
        dataloader_kwargs["worker_init_fn"] = worker_init_reset_seed
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)
        return train_loader
    def get_eval_loader(self, batch_size, is_distributed, testdev=False, legacy=False):
        """Build the evaluation DataLoader over the VOC 2007 test split."""
        from yolox.data import VOCDetection, ValTransform
        valdataset = VOCDetection(
            data_dir=os.path.join(get_yolox_datadir(), "VOCdevkit"),
            image_sets=[('2007', 'test')],
            img_size=self.test_size,
            preproc=ValTransform(legacy=legacy),
        )
        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
            sampler = torch.utils.data.distributed.DistributedSampler(
                valdataset, shuffle=False
            )
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)
        dataloader_kwargs = {
            "num_workers": self.data_num_workers,
            "pin_memory": True,
            "sampler": sampler,
        }
        dataloader_kwargs["batch_size"] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)
        return val_loader
    def get_evaluator(self, batch_size, is_distributed, testdev=False, legacy=False):
        """Build a VOCEvaluator wired to the eval loader above."""
        from yolox.evaluators import VOCEvaluator
        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev, legacy)
        evaluator = VOCEvaluator(
            dataloader=val_loader,
            img_size=self.test_size,
            confthre=self.test_conf,
            nmsthre=self.nmsthre,
            num_classes=self.num_classes,
        )
        return evaluator
17809cc3cc66049e718ecbcd34960bfec211e0f3 | 1,956 | py | Python | araig_calculators/src/comparators/edge_detector.py | ipa320/araig_test_stack | c704a4a4ac55f4113ff7ccd72aede0695e78a709 | [
"Apache-2.0"
] | null | null | null | araig_calculators/src/comparators/edge_detector.py | ipa320/araig_test_stack | c704a4a4ac55f4113ff7ccd72aede0695e78a709 | [
"Apache-2.0"
] | 52 | 2021-01-14T11:02:14.000Z | 2022-01-19T17:26:36.000Z | araig_calculators/src/comparators/edge_detector.py | ipa320/araig_test_stack | c704a4a4ac55f4113ff7ccd72aede0695e78a709 | [
"Apache-2.0"
] | 1 | 2021-01-27T14:33:00.000Z | 2021-01-27T14:33:00.000Z | #!/usr/bin/env python
import rospy
import threading
from std_msgs.msg import Float64
from araig_msgs.msg import BoolStamped
from base_classes.base_calculator import BaseCalculator
"""Compare bool from a topic, publish and latch a level shift
pub_list = {"out_high": "BoolStamped", "out_low": "BoolStamped"}
sub_list = {"in_bool": "BoolStamped"}
rosparam
inherit Base, only modify compare function"""
class edgeDetector(BaseCalculator):
    """Detect level shifts (edges) on a BoolStamped topic.

    Subscribes to /in_bool; whenever the boolean value changes, publishes
    complementary latched BoolStamped messages: a rising edge publishes
    out_low=False then out_high=True, a falling edge publishes
    out_high=False then out_low=True. The first message received is always
    treated as an edge (pre_state starts as None).
    """
    _pub_topic_high = "/out_high"
    _pub_topic_low = "/out_low"
    _sub_topic = "/in_bool"
    def __init__(self,
        sub_dict = {_sub_topic: BoolStamped},
        pub_dict = {_pub_topic_high: BoolStamped,
                    _pub_topic_low: BoolStamped},
        rate = None):
        # Last observed input value; None until the first message arrives.
        self.pre_state = None
        self.msg_high = BoolStamped()
        self.msg_low = BoolStamped()
        super(edgeDetector, self).__init__(
            sub_dict = sub_dict,
            pub_dict = pub_dict,
            rate = rate)
    def calculate(self):
        """Publish latched high/low messages when the input value changes."""
        with BaseCalculator.LOCK[self._sub_topic]:
            current_val = BaseCalculator.MSG[self._sub_topic]
            if current_val is not None:  # fixed: was `!= None`
                if current_val.data != self.pre_state:
                    # Stamp both outputs with the triggering message's header.
                    self.msg_high.header = current_val.header
                    self.msg_low.header = current_val.header
                    if current_val.data:
                        # Rising edge: clear low first, then latch high.
                        self.msg_low.data = False
                        self.PubDiag[self._pub_topic_low].publish(self.msg_low)
                        self.msg_high.data = True
                        self.PubDiag[self._pub_topic_high].publish(self.msg_high)
                    else:
                        # Falling edge: clear high first, then latch low.
                        self.msg_high.data = False
                        self.PubDiag[self._pub_topic_high].publish(self.msg_high)
                        self.msg_low.data = True
                        self.PubDiag[self._pub_topic_low].publish(self.msg_low)
                    # Consolidates the duplicated True/False assignments.
                    self.pre_state = current_val.data
d11235935f448b11e5ae5edad174a18684b9dd9f | 83,166 | py | Python | qnarre/models/perceiver.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | qnarre/models/perceiver.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | qnarre/models/perceiver.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | # Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import abc
import math
import numpy as np
import torch
import torch.utils.checkpoint
from dataclasses import dataclass
from functools import reduce
from operator import __add__
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import forward as qf
from ..core import output as qo
from ..core import attention as qa
from ..core.embed import Embeds
from ..core.ffnet import Classifier, FFNet, Masker, Pool
from ..prep.config.perceiver import PreTrained
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...pytorch_utils import (
apply_chunking_to_forward,
)
log = logging.get_logger(__name__)
LIST = [
"deepmind/language-perceiver",
]
@dataclass
class PerceiverModelOutput(ModelOutput):
    """Output container for the Perceiver model.

    Fields carry type annotations so that @dataclass (and the ModelOutput
    machinery built on dataclass fields) actually registers them; without
    annotations the previous version produced a dataclass with zero fields,
    so keyword construction (e.g. PerceiverModelOutput(logits=...)) failed.
    """
    logits: torch.FloatTensor = None
    y: torch.FloatTensor = None
    hiddens: tuple = None
    attns: tuple = None
    crosses: tuple = None
@dataclass
class PerceiverDecoderOutput(ModelOutput):
    """Output container for the Perceiver decoder.

    Fields annotated so @dataclass/ModelOutput register them (see
    PerceiverModelOutput for the rationale).
    """
    logits: torch.FloatTensor = None
    crosses: tuple = None
class PerceiverEmbeddings(qc.Module):
    """Holds the learned latent array of the Perceiver."""
    def __init__(self, config):
        super().__init__()
        # Learned (num_latents, d_latents) latent array, randomly initialized.
        self.latents = nn.Parameter(torch.randn(config.num_latents, config.d_latents))
    def forward(self, batch_size):
        # expand() broadcasts the latents over the batch without copying memory.
        return self.latents.expand(batch_size, -1, -1)  # Thanks, Phil Wang
class PerceiverSelfAttention(qc.Module):
    """Multi-head (self or cross) attention for the Perceiver.

    When `is_cross_attention` is True, queries come from `hiddens` (the
    latents) and keys/values from `inputs`; otherwise all three come from
    `hiddens`. qk_channels / v_channels default to the query dimension and
    must be divisible by n_heads.
    """
    def __init__(
        self,
        config,
        is_cross_attention=False,
        qk_channels=None,
        v_channels=None,
        n_heads=1,
        q_dim=None,
        kv_dim=None,
    ):
        super().__init__()
        self.n_heads = n_heads
        # Default channel counts: qk from the query dim, v from qk.
        if qk_channels is None:
            qk_channels = q_dim
        if v_channels is None:
            v_channels = qk_channels
        if qk_channels % n_heads != 0:
            raise ValueError(
                f"qk_channels ({qk_channels}) must be divisible by n_heads ({n_heads})."
            )
        if v_channels % n_heads != 0:
            raise ValueError(f"v_channels ({v_channels}) must be divisible by n_heads ({n_heads}).")
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.qk_channels_per_head = self.qk_channels // n_heads
        self.v_channels_per_head = self.v_channels // n_heads
        # Layer normalization
        self.layernorm1 = qc.LayerNorm(q_dim)
        # Identity for self-attention: layernorm2(None) must pass None through.
        self.layernorm2 = qc.LayerNorm(kv_dim) if is_cross_attention else nn.Identity()
        # Projection matrices
        self.query = qc.Linear(q_dim, qk_channels)
        self.key = qc.Linear(kv_dim, qk_channels)
        self.value = qc.Linear(kv_dim, v_channels)
        self.drop = qc.Dropout(config.drop_attn)
    def transpose_for_scores(self, x, channels_per_head):
        # (batch, seq, channels) -> (batch, heads, seq, channels_per_head)
        new_x_shape = x.size()[:-1] + (self.n_heads, channels_per_head)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        inputs=None,
        inputs_mask=None,
        output_attentions=False,
    ):
        hiddens = self.layernorm1(hiddens)
        inputs = self.layernorm2(inputs)
        # Cross-attention is signalled by `inputs` being provided.
        is_cross_attention = inputs is not None
        queries = self.query(hiddens)
        if is_cross_attention:
            keys = self.key(inputs)
            values = self.value(inputs)
            attention_mask = inputs_mask
        else:
            keys = self.key(hiddens)
            values = self.value(hiddens)
        queries = self.transpose_for_scores(queries, self.qk_channels_per_head)
        keys = self.transpose_for_scores(keys, self.qk_channels_per_head)
        values = self.transpose_for_scores(values, self.v_channels_per_head)
        # Take the dot product between the queries and keys to get the raw attention scores.
        attention_scores = torch.matmul(queries, keys.transpose(-1, -2))
        batch_size, n_heads, seq_len, q_head_dim = queries.shape
        _, _, _, v_head_dim = values.shape
        # NOTE: `hiddens` is reused here as the flattened output channel count.
        hiddens = self.n_heads * v_head_dim
        # Scaled dot-product attention.
        attention_scores = attention_scores / math.sqrt(q_head_dim)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in PerceiverModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.drop(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, values)
        # (batch, heads, seq, head_dim) -> (batch, seq, heads * head_dim)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (hiddens,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
class PerceiverSelfOutput(qc.Module):
    """Linear projection applied to the attention context output."""
    def __init__(self, config, input_channels, output_channels):
        super().__init__()
        self.dense = qc.Linear(input_channels, output_channels)
    def forward(self, hiddens):
        # Single dense projection; no activation, dropout or residual here.
        return self.dense(hiddens)
class Attention(qc.Module):
    """Attention block: PerceiverSelfAttention + output projection.

    Handles the Perceiver-specific channel bookkeeping for cross- vs
    self-attention and the optional residual connection on the queries.
    """
    def __init__(
        self,
        config,
        is_cross_attention=False,
        qk_channels=None,
        v_channels=None,
        n_heads=1,
        q_dim=None,
        kv_dim=None,
        use_query_residual=True,
    ):
        super().__init__()
        # MultiHead attention
        if is_cross_attention and qk_channels is None:
            # Cross-attention: choose qk width from either the query or kv side.
            if config.cross_attention_shape_for_attention == "q":
                qk_channels = q_dim
            elif config.cross_attention_shape_for_attention == "kv":
                qk_channels = kv_dim
            else:
                raise ValueError(
                    f"Unknown value {config.cross_attention_shape_for_attention} for "
                    "cross_attention_shape_for_attention."
                )
        else:
            if qk_channels is None:
                qk_channels = q_dim
            if v_channels is None:
                v_channels = qk_channels
        self.self = PerceiverSelfAttention(
            config,
            is_cross_attention=is_cross_attention,
            qk_channels=qk_channels,
            v_channels=v_channels,
            n_heads=n_heads,
            q_dim=q_dim,
            kv_dim=kv_dim,
        )
        # dense block
        # Output projection width: q_dim for cross-attention, v_channels otherwise.
        output_channels = None
        if is_cross_attention:
            output_channels = q_dim
        else:
            if output_channels is None:  # always True here (kept from upstream)
                output_channels = v_channels
        self.output = PerceiverSelfOutput(
            config, input_channels=self.self.v_channels, output_channels=output_channels
        )
        self.use_query_residual = use_query_residual
    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        inputs=None,
        inputs_mask=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hiddens,
            attention_mask,
            head_mask,
            inputs,
            inputs_mask,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0])
        # Optionally include a residual to the original queries.
        if self.use_query_residual:
            attention_output = attention_output + hiddens
        # Append attention probabilities when output_attentions was requested.
        outputs = (attention_output,) + self_outputs[1:]
        return outputs
class PerceiverMLP(qc.Module):
    """Position-wise feed-forward block.

    Linear expansion by `widening_factor`, activation from `cfg.act`,
    then linear contraction back to `input_size`.
    """

    def __init__(self, cfg, input_size, widening_factor):
        super().__init__()
        hidden_size = widening_factor * input_size
        self.dense1 = qc.Linear(input_size, hidden_size)
        self.act = qu.activation(cfg.act)
        self.dense2 = qc.Linear(hidden_size, input_size)

    def forward(self, x):
        # expand -> nonlinearity -> contract
        return self.dense2(self.act(self.dense1(x)))
class Layer(qc.Module):
    """One Perceiver layer: (cross- or self-) attention plus a layernorm+MLP residual branch."""

    def __init__(
        self,
        config,
        is_cross_attention=False,
        qk_channels=None,
        v_channels=None,
        n_heads=1,
        q_dim=None,
        kv_dim=None,
        widening_factor=4,
        use_query_residual=True,
    ):
        super().__init__()
        # Feed-forward chunking parameters consumed by apply_chunking_to_forward.
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = Attention(
            config,
            is_cross_attention=is_cross_attention,
            qk_channels=qk_channels,
            v_channels=v_channels,
            n_heads=n_heads,
            q_dim=q_dim,
            kv_dim=kv_dim,
            use_query_residual=use_query_residual,
        )
        self.layernorm = qc.LayerNorm(q_dim)
        self.mlp = PerceiverMLP(config, input_size=q_dim, widening_factor=widening_factor)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        inputs=None,
        inputs_mask=None,
        output_attentions=False,
    ):
        """Return (layer_output, [attention_probs if output_attentions])."""
        attention_outputs = self.attention(
            hiddens,
            attention_mask,
            head_mask,
            inputs,
            inputs_mask,
            output_attentions,
        )
        attention_output = attention_outputs[0]
        outputs = attention_outputs[1:]  # add attns if we output attention weights
        # Pre-norm MLP, evaluated in chunks along the sequence dim to bound memory.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk,
            self.chunk_size_feed_forward,
            self.seq_len_dim,
            attention_output,
        )
        layer_output = layer_output + attention_output  # residual connection
        outputs = (layer_output,) + outputs
        return outputs

    def feed_forward_chunk(self, attention_output):
        # LayerNorm then MLP on one chunk of the attention output.
        layer_output = self.layernorm(attention_output)
        layer_output = self.mlp(layer_output)
        return layer_output
class Encoder(qc.Module):
    """Perceiver encoder.

    One cross-attention layer (latents attend to inputs) followed by a block of
    self-attention layers over the latents; the block is applied
    `config.num_blocks` times with shared weights.
    """

    def __init__(self, config, kv_dim=None):
        super().__init__()
        self.config = config
        # The latent width must divide evenly across heads for both attention kinds.
        if config.d_latents % config.num_self_attention_heads != 0:
            raise ValueError(
                f"num_z_channels ({config.d_latents}) must be divisible by"
                f" num_self_attend_heads ({config.num_self_attention_heads})."
            )
        if config.d_latents % config.num_cross_attention_heads != 0:
            raise ValueError(
                f"num_z_channels ({config.d_latents}) must be divisible by"
                f" num_cross_attend_heads ({config.num_cross_attention_heads})."
            )
        # Construct the cross attention layer.
        self.cross_attention = Layer(
            config,
            is_cross_attention=True,
            qk_channels=config.qk_channels,
            v_channels=config.v_channels,
            n_heads=config.num_cross_attention_heads,
            q_dim=config.d_latents,
            kv_dim=kv_dim,
            widening_factor=config.cross_attention_widening_factor,
            use_query_residual=config.use_query_residual,
        )
        # Construct a single block of self-attention layers.
        # We get deeper architectures by applying this block more than once.
        self_attention_layers = []
        for _ in range(config.num_self_attends_per_block):
            layer = Layer(
                config,
                is_cross_attention=False,
                qk_channels=config.qk_channels,
                v_channels=config.v_channels,
                n_heads=config.num_self_attention_heads,
                q_dim=config.d_latents,
                kv_dim=config.d_latents,
                widening_factor=config.self_attention_widening_factor,
            )
            self_attention_layers.append(layer)
        self.self_attends = nn.ModuleList(self_attention_layers)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        inputs=None,
        inputs_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Encode: one cross-attention pass, then num_blocks repetitions of the self-attention block."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions else None
        # Apply the cross-attention between the latents (hiddens) and inputs:
        layer_outputs = self.cross_attention(
            hiddens,
            attention_mask=attention_mask,
            head_mask=None,
            inputs=inputs,
            inputs_mask=inputs_mask,
            output_attentions=output_attentions,
        )
        hiddens = layer_outputs[0]
        if output_attentions:
            all_cross_attentions = all_cross_attentions + (layer_outputs[1],)
        # Apply the block of self-attention layers more than once:
        # (same modules re-used every block, i.e. weight sharing across blocks)
        for _ in range(self.config.num_blocks):
            for i, layer_module in enumerate(self.self_attends):
                if output_hidden_states:
                    all_hidden_states = all_hidden_states + (hiddens,)
                layer_head_mask = head_mask[i] if head_mask is not None else None
                layer_outputs = layer_module(
                    hiddens,
                    attention_mask=attention_mask,
                    head_mask=layer_head_mask,
                    output_attentions=output_attentions,
                )
                hiddens = layer_outputs[0]
                if output_attentions:
                    all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hiddens,)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hiddens,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return qo.BaseWithCrossAttentions(
            y=hiddens,
            hiddens=all_hidden_states,
            attns=all_self_attentions,
            crosses=all_cross_attentions,
        )
class Model(PreTrained):
    """Base Perceiver model.

    Pipeline: (optional) input preprocessing -> latent embeddings cross-attend
    to the inputs in the encoder -> (optional) decoder builds queries and
    decodes the latents -> (optional) postprocessing of the decoded logits.
    """

    def __init__(
        self,
        config,
        decoder=None,
        input_preprocessor=None,
        output_postprocessor=None,
    ):
        super().__init__(config)
        self.config = config
        self.input_preprocessor = input_preprocessor
        self.output_postprocessor = output_postprocessor
        # Learned latent array; serves as queries of the first cross-attention.
        self.embeddings = PerceiverEmbeddings(config)
        self.encoder = Encoder(
            config,
            # kv_dim must match the channel count the preprocessor emits
            # (or config.d_model for raw inputs).
            kv_dim=input_preprocessor.num_channels
            if input_preprocessor is not None
            else config.d_model,
        )
        self.decoder = decoder

    def forward(
        self,
        inputs,
        attention_mask=None,
        subsampled_output_points=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Run the Perceiver; returns PerceiverModelOutput, or a tuple when return_dict=False."""
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if self.input_preprocessor is not None:
            inputs, modality_sizes, inputs_without_pos = self.input_preprocessor(inputs)
        else:
            modality_sizes = None
            inputs_without_pos = None
            # Raw inputs must already have the channel width the encoder expects.
            if inputs.size()[-1] != self.config.d_model:
                raise ValueError(
                    f"Last dimension of the inputs: {inputs.size()[-1]} doesn't correspond to config.d_model: {self.config.d_model}. "
                    "Make sure to set config.d_model appropriately."
                )
        batch_size, seq_length, _ = inputs.size()
        device = inputs.device
        # If no attention mask is provided, make them all ones
        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length)), device=device)
        # Make the attention mask broadcastable to [batch_size, n_heads, seq_length, seq_length]
        extended_attention_mask = self.invert_attention_mask(attention_mask)
        head_mask = self.get_head_mask(
            head_mask, self.config.num_blocks * self.config.num_self_attends_per_block
        )
        embedding_output = self.embeddings(batch_size=batch_size)
        # The input mask is applied as the cross-attention's inputs_mask; the
        # latents themselves are never masked, hence attention_mask=None here.
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=None,
            head_mask=head_mask,
            inputs=inputs,
            inputs_mask=extended_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        logits = None
        # BUGFIX: initialize before the decoder branch. Previously this name was
        # bound only inside `if self.decoder:`, so the postprocessor call below
        # raised NameError when a postprocessor was configured without a decoder.
        output_modality_sizes = None
        if self.decoder:
            if subsampled_output_points is not None:
                # Only a subset of output points is decoded (multimodal autoencoding).
                output_modality_sizes = {
                    "audio": subsampled_output_points["audio"].shape[0],
                    "image": subsampled_output_points["image"].shape[0],
                    "label": 1,
                }
            decoder_query = self.decoder.decoder_query(
                inputs,
                modality_sizes,
                inputs_without_pos,
                subsampled_points=subsampled_output_points,
            )
            decoder_outputs = self.decoder(
                decoder_query,
                z=sequence_output,
                query_mask=extended_attention_mask,
                output_attentions=output_attentions,
            )
            logits = decoder_outputs.logits
            # add cross-attns of decoder
            if output_attentions and decoder_outputs.crosses is not None:
                if return_dict:
                    encoder_outputs.crosses = encoder_outputs.crosses + decoder_outputs.crosses
                else:
                    encoder_outputs = encoder_outputs + decoder_outputs.crosses
        if self.output_postprocessor:
            logits = self.output_postprocessor(logits, modality_sizes=output_modality_sizes)
        if not return_dict:
            if logits is not None:
                return (logits, sequence_output) + encoder_outputs[1:]
            else:
                return (sequence_output,) + encoder_outputs[1:]
        return PerceiverModelOutput(
            logits=logits,
            y=sequence_output,
            hiddens=encoder_outputs.hiddens,
            attns=encoder_outputs.attns,
            crosses=encoder_outputs.crosses,
        )
class ForMasked(PreTrained):
    """Perceiver for masked language modeling (text preprocessor + basic decoder + embedding decoder)."""

    def __init__(self, config):
        super().__init__(config)
        text_preprocessor = PerceiverTextPreprocessor(config)
        trainable_position_encoding_kwargs_decoder = dict(
            num_channels=text_preprocessor.num_channels, index_dims=config.n_pos
        )
        self.perceiver = Model(
            config,
            input_preprocessor=text_preprocessor,
            decoder=PerceiverBasicDecoder(
                config,
                output_num_channels=config.d_latents,
                output_index_dims=config.n_pos,  # we need to define the seq_len of the inputs beforehand
                num_channels=text_preprocessor.num_channels,
                qk_channels=8 * 32,
                v_channels=text_preprocessor.num_channels,
                n_heads=8,
                use_query_residual=False,
                final_project=False,
                trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
            ),
        )
        # Projects decoded outputs to vocabulary logits via the input embedding matrix.
        self.embedding_decoder = PerceiverEmbeddingDecoder(config)
        self.post_init()

    def forward(
        self,
        inputs=None,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
        input_ids=None,
    ):
        """MLM forward pass; `input_ids` is accepted as an alias of `inputs` (never both)."""
        if inputs is not None and input_ids is not None:
            raise ValueError("You cannot use both `inputs` and `input_ids`")
        elif inputs is None and input_ids is not None:
            inputs = input_ids
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.perceiver(
            inputs=inputs,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Tie decoding to the input embeddings (weight sharing with the preprocessor).
        logits = self.embedding_decoder(
            outputs.logits if return_dict else outputs[0],
            embedding_layer=self.perceiver.input_preprocessor.embeddings,
        )
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(logits.view(-1, self.config.s_vocab), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return qo.LossCrosses(
            loss=masked_lm_loss,
            logits=logits,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
            crosses=outputs.crosses,
        )
class ForSeqClassifier(PreTrained):
    """Perceiver sequence classifier: text preprocessing, classification decoding, projection head."""

    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        pos_enc_kw = dict(num_channels=cfg.d_latents, index_dims=1)
        # NOTE(review): Model takes `config` positionally; it is assumed to
        # arrive through **kw here — confirm callers pass it.
        self.model = Model(
            input_preprocessor=PerceiverTextPreprocessor(cfg),
            decoder=PerceiverClassificationDecoder(
                cfg,
                num_channels=cfg.d_latents,
                trainable_position_encoding_kwargs=pos_enc_kw,
                use_query_residual=True,
            ),
            **kw,
        )
        self.proj = Classifier(**kw)

    forward = qf.forward_seq
class PerceiverForImageClassificationLearned(PreTrained):
    """Perceiver image classifier using a conv1x1 preprocessor with learned (trainable) position encodings."""

    def __init__(self, config):
        super().__init__(config)
        trainable_position_encoding_kwargs_preprocessor = dict(
            num_channels=256, index_dims=config.image_size**2
        )
        trainable_position_encoding_kwargs_decoder = dict(
            num_channels=config.d_latents, index_dims=1
        )
        self.n_labels = config.n_labels
        self.perceiver = Model(
            config,
            input_preprocessor=PerceiverImagePreprocessor(
                config,
                prep_type="conv1x1",
                spatial_downsample=1,
                out_channels=256,
                position_encoding_type="trainable",
                concat_or_add_pos="concat",
                project_pos_dim=256,
                trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_preprocessor,
            ),
            decoder=PerceiverClassificationDecoder(
                config,
                num_channels=config.d_latents,
                trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
                use_query_residual=True,
            ),
        )
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        inputs=None,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
        pixel_values=None,
    ):
        """Classify images; `pixel_values` is an alias of `inputs` (never both)."""
        if inputs is not None and pixel_values is not None:
            raise ValueError("You cannot use both `inputs` and `pixel_values`")
        elif inputs is None and pixel_values is not None:
            inputs = pixel_values
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.perceiver(
            inputs=inputs,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits if return_dict else outputs[0]
        loss = None
        if labels is not None:
            # Infer problem_type on first use from n_labels / label dtype,
            # then cache it on the config for subsequent calls.
            if self.config.problem_type is None:
                if self.n_labels == 1:
                    self.config.problem_type = "regression"
                elif self.n_labels > 1 and (
                    labels.dtype == torch.long or labels.dtype == torch.int
                ):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.n_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.n_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return qo.LossCrosses(
            loss=loss,
            logits=logits,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
            crosses=outputs.crosses,
        )
class PerceiverForImageClassificationFourier(PreTrained):
    """Perceiver image classifier on raw pixels with Fourier position encodings (224x224 resolution)."""

    def __init__(self, config):
        super().__init__(config)
        fourier_position_encoding_kwargs_preprocessor = dict(
            concat_pos=True, max_resolution=(224, 224), num_bands=64, sine_only=False
        )
        trainable_position_encoding_kwargs_decoder = dict(
            num_channels=config.d_latents, index_dims=1
        )
        self.n_labels = config.n_labels
        self.perceiver = Model(
            config,
            input_preprocessor=PerceiverImagePreprocessor(
                config,
                prep_type="pixels",
                spatial_downsample=1,
                fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
            ),
            decoder=PerceiverClassificationDecoder(
                config,
                num_channels=config.d_latents,
                trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
                use_query_residual=True,
            ),
        )
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        inputs=None,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
        pixel_values=None,
    ):
        """Classify images; `pixel_values` is an alias of `inputs` (never both)."""
        if inputs is not None and pixel_values is not None:
            raise ValueError("You cannot use both `inputs` and `pixel_values`")
        elif inputs is None and pixel_values is not None:
            inputs = pixel_values
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.perceiver(
            inputs=inputs,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits if return_dict else outputs[0]
        loss = None
        if labels is not None:
            # Infer problem_type on first use from n_labels / label dtype,
            # then cache it on the config for subsequent calls.
            if self.config.problem_type is None:
                if self.n_labels == 1:
                    self.config.problem_type = "regression"
                elif self.n_labels > 1 and (
                    labels.dtype == torch.long or labels.dtype == torch.int
                ):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.n_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.n_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return qo.LossCrosses(
            loss=loss,
            logits=logits,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
            crosses=outputs.crosses,
        )
class PerceiverForImageClassificationConvProcessing(PreTrained):
    """Perceiver image classifier with a convolutional preprocessor and Fourier position encodings (56x56 grid)."""

    def __init__(self, config):
        super().__init__(config)
        fourier_position_encoding_kwargs_preprocessor = dict(
            concat_pos=True, max_resolution=(56, 56), num_bands=64, sine_only=False
        )
        trainable_position_encoding_kwargs_decoder = dict(
            num_channels=config.d_latents, index_dims=1
        )
        self.n_labels = config.n_labels
        self.perceiver = Model(
            config,
            input_preprocessor=PerceiverImagePreprocessor(
                config,
                prep_type="conv",
                spatial_downsample=1,
                position_encoding_type="fourier",
                fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
            ),
            decoder=PerceiverClassificationDecoder(
                config,
                num_channels=config.d_latents,
                trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
                use_query_residual=True,
            ),
        )
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        inputs=None,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
        pixel_values=None,
    ):
        """Classify images; `pixel_values` is an alias of `inputs` (never both)."""
        if inputs is not None and pixel_values is not None:
            raise ValueError("You cannot use both `inputs` and `pixel_values`")
        elif inputs is None and pixel_values is not None:
            inputs = pixel_values
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.perceiver(
            inputs=inputs,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits if return_dict else outputs[0]
        loss = None
        if labels is not None:
            # Infer problem_type on first use from n_labels / label dtype,
            # then cache it on the config for subsequent calls.
            if self.config.problem_type is None:
                if self.n_labels == 1:
                    self.config.problem_type = "regression"
                elif self.n_labels > 1 and (
                    labels.dtype == torch.long or labels.dtype == torch.int
                ):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.n_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.n_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return qo.LossCrosses(
            loss=loss,
            logits=logits,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
            crosses=outputs.crosses,
        )
class PerceiverForOpticalFlow(PreTrained):
    """Perceiver for optical flow estimation (inference only; training raises NotImplementedError)."""

    def __init__(self, config):
        super().__init__(config)
        fourier_position_encoding_kwargs_preprocessor = dict(
            num_bands=64,
            max_resolution=config.train_size,
            sine_only=False,
            concat_pos=True,
        )
        fourier_position_encoding_kwargs_decoder = dict(
            concat_pos=True, max_resolution=config.train_size, num_bands=64, sine_only=False
        )
        image_preprocessor = PerceiverImagePreprocessor(
            config,
            prep_type="patches",
            spatial_downsample=1,
            conv_after_patching=True,
            conv_after_patching_in_channels=54,
            temporal_downsample=2,
            position_encoding_type="fourier",
            # position_encoding_kwargs
            fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
        )
        self.perceiver = Model(
            config,
            input_preprocessor=image_preprocessor,
            decoder=PerceiverOpticalFlowDecoder(
                config,
                num_channels=image_preprocessor.num_channels,
                output_image_shape=config.train_size,
                rescale_factor=100.0,
                # decoder kw
                use_query_residual=False,
                output_num_channels=2,
                # We query the decoder using the first frame features
                # rather than a standard decoder position encoding.
                position_encoding_type="fourier",
                fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_decoder,
            ),
        )
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        inputs=None,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        """Predict flow logits; supplying `labels` raises NotImplementedError."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.perceiver(
            inputs=inputs,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits if return_dict else outputs[0]
        loss = None
        if labels is not None:
            raise NotImplementedError("Optical flow training is not yet supported")
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return qo.LossCrosses(
            loss=loss,
            logits=logits,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
            crosses=outputs.crosses,
        )
class PerceiverForMultimodalAutoencoding(PreTrained):
    """Perceiver for multimodal (audio + video + label) autoencoding.

    Each modality gets its own preprocessor, query-building decoder, and
    postprocessor; decoding of all modalities is unified in a single
    multimodal decoder. Training raises NotImplementedError.
    """

    def __init__(self, config):
        super().__init__(config)
        n_audio_samples = config.num_frames * config.audio_samples_per_frame
        input_preprocessor = PerceiverMultimodalPreprocessor(
            min_padding_size=4,
            modalities={
                "audio": PerceiverAudioPreprocessor(
                    config,
                    position_encoding_type="fourier",
                    fourier_position_encoding_kwargs=dict(
                        num_bands=192,
                        max_resolution=(n_audio_samples,),
                        sine_only=False,
                        concat_pos=True,
                    ),
                    prep_type="patches",
                    samples_per_patch=config.samples_per_patch,
                ),
                "image": PerceiverImagePreprocessor(
                    config,
                    position_encoding_type="fourier",
                    fourier_position_encoding_kwargs=dict(
                        num_bands=32,
                        max_resolution=(config.num_frames, config.image_size, config.image_size),
                        sine_only=False,
                        concat_pos=True,
                    ),
                    prep_type="patches",
                    spatial_downsample=4,
                    temporal_downsample=1,
                ),
                "label": PerceiverOneHotPreprocessor(config),
            },
            # The label modality is always masked out of the inputs (it is reconstructed).
            mask_probs={"image": 0.0, "audio": 0.0, "label": 1.0},
        )
        image_decoder = PerceiverBasicVideoAutoencodingDecoder(
            config,
            # Autoencoding, don't pass inputs to the queries.
            concat_preprocessed_input=False,
            output_shape=config.output_shape,
            output_num_channels=512,
            use_query_residual=False,
            position_encoding_only=True,
            position_encoding_type="fourier",
            fourier_position_encoding_kwargs=dict(
                num_bands=32,
                max_resolution=(config.num_frames, config.image_size, config.image_size),
                sine_only=False,
                concat_pos=True,
            ),
        )
        decoder = PerceiverMultimodalDecoder(
            config,
            # Autoencoding, don't pass inputs to the queries.
            concat_preprocessed_input=False,
            # Modality specific decoders are used ONLY to generate queries.
            # All modalties are decoded together using a unified decoder.
            modalities={
                "audio": PerceiverBasicDecoder(
                    config,
                    # Autoencoding, don't pass inputs to the queries.
                    concat_preprocessed_input=False,
                    output_index_dims=(n_audio_samples // config.samples_per_patch,),
                    output_num_channels=512,
                    use_query_residual=False,
                    position_encoding_only=True,
                    position_encoding_type="fourier",
                    fourier_position_encoding_kwargs=dict(
                        num_bands=192,
                        max_resolution=(n_audio_samples,),
                        sine_only=False,
                        concat_pos=True,
                    ),
                ),
                "image": image_decoder,
                "label": PerceiverClassificationDecoder(
                    config,
                    # Autoencoding, don't pass inputs to the queries.
                    concat_preprocessed_input=False,
                    use_query_residual=False,
                    position_encoding_only=True,
                    position_encoding_type="trainable",
                    trainable_position_encoding_kwargs=dict(
                        num_channels=1024,
                        index_dims=1,
                    ),
                ),
            },
            num_outputs=None,
            output_num_channels=512,
            use_query_residual=False,
        )
        # Per-modality postprocessors map the unified 512-channel decodings back
        # to each modality's output space.
        output_postprocessor = PerceiverMultimodalPostprocessor(
            modalities={
                "audio": PerceiverAudioPostprocessor(config, in_channels=512),
                "image": PerceiverProjectionPostprocessor(in_channels=512, out_channels=3),
                "label": PerceiverClassificationPostprocessor(config, in_channels=512),
            }
        )
        self.perceiver = Model(
            config,
            input_preprocessor=input_preprocessor,
            decoder=decoder,
            output_postprocessor=output_postprocessor,
        )
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        inputs=None,
        attention_mask=None,
        subsampled_output_points=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        """Autoencode multimodal inputs; `subsampled_output_points` restricts which output points are decoded."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.perceiver(
            inputs=inputs,
            attention_mask=attention_mask,
            subsampled_output_points=subsampled_output_points,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits if return_dict else outputs[0]
        loss = None
        if labels is not None:
            raise NotImplementedError("Multimodal autoencoding training is not yet supported")
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return qo.LossCrosses(
            loss=loss,
            logits=logits,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
            crosses=outputs.crosses,
        )
def build_position_encoding(
    position_encoding_type,
    out_channels=None,
    project_pos_dim=-1,
    trainable_position_encoding_kwargs=None,
    fourier_position_encoding_kwargs=None,
):
    """Build a position-encoding module and an optional channel projection.

    Returns a (position_encoding, projection) pair; the projection is a
    Linear(out_channels -> project_pos_dim) when project_pos_dim is positive,
    otherwise an Identity.
    """
    if position_encoding_type == "trainable":
        if not trainable_position_encoding_kwargs:
            raise ValueError("Make sure to pass trainable_position_encoding_kwargs")
        encoding = PerceiverTrainablePositionEncoding(**trainable_position_encoding_kwargs)
    elif position_encoding_type == "fourier":
        # index_dims is only known at forward time, so it is not accepted here.
        if not fourier_position_encoding_kwargs:
            raise ValueError("Make sure to pass fourier_position_encoding_kwargs")
        encoding = PerceiverFourierPositionEncoding(**fourier_position_encoding_kwargs)
    else:
        raise ValueError(f"Unknown position encoding type: {position_encoding_type}.")
    if project_pos_dim > 0:
        projection = qc.Linear(out_channels, project_pos_dim)
    else:
        projection = nn.Identity()
    return encoding, projection
class PerceiverAbstractDecoder(qc.Module, metaclass=abc.ABCMeta):
    """Abstract Perceiver decoder interface: build queries, then cross-attend them to the latents."""

    @abc.abstractmethod
    def decoder_query(
        self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None
    ):
        # Build the query tensor used to cross-attend to the latent array `z`.
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def num_query_channels(self):
        # Channel dimension of the queries produced by decoder_query.
        raise NotImplementedError

    @abc.abstractmethod
    def forward(self, query, z, query_mask=None):
        # Decode latents `z` given `query` (and an optional query mask).
        raise NotImplementedError
class PerceiverProjectionDecoder(PerceiverAbstractDecoder):
    """Decoder that mean-pools the latents over the index dimension and applies a linear classifier."""

    def __init__(self, config):
        super().__init__()
        self.classifier = qc.Linear(config.d_latents, config.n_labels)

    def decoder_query(
        self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None
    ):
        # No queries are required — decoding only pools the latents.
        return None

    def forward(self, query, z, query_mask=None):
        pooled = torch.mean(z, dim=1)
        return self.classifier(pooled)
class PerceiverBasicDecoder(PerceiverAbstractDecoder):
    """Cross-attention decoder: builds position-encoded queries and decodes the latents `z`.

    With `position_encoding_only=True`, only the query-construction machinery
    is built (used by the multimodal decoder, which decodes all modalities
    with one unified cross-attention).
    """

    def __init__(
        self,
        config,
        output_num_channels,
        position_encoding_type="trainable",
        # The following 2 arguments are ignored if position_encoding_type == 'none':
        output_index_dims=None,
        num_channels=128,
        subsampled_index_dims=None,
        qk_channels=None,
        v_channels=None,
        n_heads=1,
        widening_factor=1,
        use_query_residual=False,
        concat_preprocessed_input=False,
        final_project=True,
        position_encoding_only=False,
        **position_encoding_kwargs,
    ):
        super().__init__()
        self.output_num_channels = output_num_channels
        # If `none`, the decoder will not construct any position encodings.
        # You should construct your own when quering the decoder.
        self.output_position_encodings = None
        self.position_encoding_type = position_encoding_type
        self.position_encoding_kwargs = position_encoding_kwargs
        if position_encoding_type != "none":
            self.output_position_encodings, self.positions_projection = build_position_encoding(
                position_encoding_type=position_encoding_type, **position_encoding_kwargs
            )
        self.output_index_dims = output_index_dims
        self.num_channels = num_channels
        if subsampled_index_dims is None:
            subsampled_index_dims = output_index_dims
        self.subsampled_index_dims = subsampled_index_dims
        self.concat_preprocessed_input = concat_preprocessed_input
        self.final_project = final_project
        self.position_encoding_only = position_encoding_only
        # for multimodal autoencoding, we don't need the decoder cross-attention and final layer
        # so then we will set position_encoding_only to True
        if not self.position_encoding_only:
            self.decoding_cross_attention = Layer(
                config,
                is_cross_attention=True,
                qk_channels=qk_channels,
                v_channels=v_channels,
                n_heads=n_heads,
                q_dim=num_channels,
                kv_dim=config.d_latents,
                widening_factor=widening_factor,
                use_query_residual=use_query_residual,
            )
        self.final_layer = (
            qc.Linear(num_channels, output_num_channels) if final_project else nn.Identity()
        )

    @property
    def num_query_channels(self):
        """Channel width of the queries this decoder produces/expects."""
        if self.position_encoding_type == "none":  # Queries come from elsewhere
            raise ValueError(
                "You cannot calculate number of decoder query channels when position_encoding_type is set to none"
            )
        if self.position_encoding_only:
            if "project_pos_dim" in self.position_encoding_kwargs:
                return self.position_encoding_kwargs["project_pos_dim"]
            return self.output_position_encodings.output_size()
        if self.final_project:
            return self.output_num_channels
        return self.num_channels

    def decoder_query(
        self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None
    ):
        """Build the decoder queries (position encodings, optionally subsampled and/or concatenated with inputs)."""
        if self.position_encoding_type == "none":  # Queries come from elsewhere
            raise ValueError(
                "You cannot construct decoder queries when position_encoding_type is set to none"
            )
        if subsampled_points is not None:
            # subsampled_points are flat indices; unravel to per-dimension coordinates.
            indices = list(
                torch.from_numpy(x)
                for x in np.unravel_index(subsampled_points.cpu(), self.output_index_dims)
            )
            pos = torch.stack(indices, dim=1)
            batch_size = inputs.shape[0]
            # Map these coordinates to [-1, 1]
            pos = -1 + 2 * pos / torch.tensor(self.output_index_dims)[None, :]
            pos = torch.broadcast_to(pos[None], [batch_size, pos.shape[0], pos.shape[1]])
            # Construct the position encoding.
            if self.position_encoding_type == "trainable":
                pos_emb = self.output_position_encodings(batch_size)
            elif self.position_encoding_type == "fourier":
                pos_emb = self.output_position_encodings(
                    self.output_index_dims, batch_size=batch_size, device=inputs.device, pos=pos
                )
            pos_emb = self.positions_projection(pos_emb)
            pos_emb = torch.reshape(pos_emb, [pos_emb.shape[0], -1, pos_emb.shape[-1]])
        else:
            batch_size = inputs.shape[0]
            index_dims = inputs.shape[2:]
            # Construct the position encoding.
            if self.position_encoding_type == "trainable":
                pos_emb = self.output_position_encodings(batch_size)
            elif self.position_encoding_type == "fourier":
                pos_emb = self.output_position_encodings(
                    index_dims, batch_size, device=inputs.device
                )
            # BUGFIX: the projection was nested under the "fourier" branch only,
            # silently skipping it for trainable encodings; the subsampled branch
            # above projects both encoding types, so mirror that here.
            pos_emb = self.positions_projection(pos_emb)
        if self.concat_preprocessed_input:
            if inputs_without_pos is None:
                raise ValueError(
                    "Value is required for inputs_without_pos if concat_preprocessed_input is True"
                )
            # BUGFIX: torch.cat takes `dim`, not `div` (the old kwarg raised TypeError).
            pos_emb = torch.cat([inputs_without_pos, pos_emb], dim=-1)
        return pos_emb

    def forward(self, query, z, query_mask=None, output_attentions=False):
        """Cross-attend `query` to latents `z`; returns PerceiverDecoderOutput(logits, crosses)."""
        crosses = () if output_attentions else None
        layer_outputs = self.decoding_cross_attention(
            query,
            attention_mask=query_mask,
            head_mask=None,
            inputs=z,
            inputs_mask=None,
            output_attentions=output_attentions,
        )
        output = layer_outputs[0]
        if output_attentions:
            crosses = crosses + (layer_outputs[1],)
        logits = self.final_layer(output)
        return PerceiverDecoderOutput(logits=logits, crosses=crosses)
class PerceiverClassificationDecoder(PerceiverAbstractDecoder):
    """Classification decoder: a basic decoder emitting a single n_labels-wide logit row per example."""

    def __init__(self, config, **decoder_kwargs):
        super().__init__()
        self.n_labels = config.n_labels
        self.decoder = PerceiverBasicDecoder(
            config,
            output_num_channels=self.n_labels,
            output_index_dims=1,  # a single logit array is predicted
            **decoder_kwargs,
        )

    @property
    def num_query_channels(self):
        return self.decoder.num_query_channels

    def decoder_query(
        self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None
    ):
        # Queries are delegated entirely to the wrapped basic decoder.
        return self.decoder.decoder_query(
            inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_points
        )

    def forward(self, query, z, query_mask=None, output_attentions=False):
        decoded = self.decoder(query, z, output_attentions=output_attentions)
        # Keep only the single query position -> (batch, n_labels).
        return PerceiverDecoderOutput(logits=decoded.logits[:, 0, :], crosses=decoded.crosses)
class PerceiverOpticalFlowDecoder(PerceiverAbstractDecoder):
    """Decoder for optical flow: decodes latents to per-pixel flow predictions,
    rescaled by `rescale_factor` and reshaped to `output_image_shape`."""
    def __init__(
        self,
        config,
        output_image_shape,
        output_num_channels=2,
        rescale_factor=100.0,
        **decoder_kwargs,
    ):
        super().__init__()
        self.output_image_shape = output_image_shape
        self.output_num_channels = output_num_channels
        self.rescale_factor = rescale_factor
        self.decoder = PerceiverBasicDecoder(
            config, output_num_channels=output_num_channels, **decoder_kwargs
        )
    @property
    def num_query_channels(self):
        return self.decoder.num_query_channels
    def decoder_query(
        self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None
    ):
        """For flow the preprocessed inputs themselves serve as the query."""
        if subsampled_points is not None:
            raise ValueError("FlowDecoder doesn't support subsampling yet.")
        return inputs
    def forward(self, query, z, query_mask=None, output_attentions=False):
        decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)
        preds = decoder_outputs.logits
        # Output flow and rescale.
        # NOTE: in-place division mutates the tensor held by decoder_outputs.logits.
        preds /= self.rescale_factor
        preds = preds.reshape([preds.shape[0]] + list(self.output_image_shape) + [preds.shape[-1]])
        return PerceiverDecoderOutput(logits=preds, crosses=decoder_outputs.crosses)
class PerceiverBasicVideoAutoencodingDecoder(PerceiverAbstractDecoder):
    """Decoder for video autoencoding: decodes latents back to a rank-4
    (batch, time, height, width) video volume plus a channel dim."""
    def __init__(self, config, output_shape, position_encoding_type, **decoder_kwargs):
        super().__init__()
        if len(output_shape) != 4:  # B, T, H, W
            raise ValueError(f"Expected rank 4 output_shape, got {output_shape}.")
        # Build the decoder components:
        # NOTE: `output_shape` must be a list — it is concatenated with `+` in forward().
        self.output_shape = output_shape
        self.output_num_channels = decoder_kwargs["output_num_channels"]
        self.decoder = PerceiverBasicDecoder(
            config,
            output_index_dims=self.output_shape[1:4],  # T*H*W
            position_encoding_type=position_encoding_type,
            **decoder_kwargs,
        )
    @property
    def num_query_channels(self):
        return self.decoder.num_query_channels
    def decoder_query(
        self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None
    ):
        """Delegate query construction to the wrapped basic decoder."""
        return self.decoder.decoder_query(
            inputs,
            modality_sizes=modality_sizes,
            inputs_without_pos=inputs_without_pos,
            subsampled_points=subsampled_points,
        )
    def forward(self, query, z, query_mask=None):
        decoder_outputs = self.decoder(query, z)
        logits = decoder_outputs.logits
        # Restore the flattened index dim back to (B, T, H, W, C).
        logits = torch.reshape(logits, self.output_shape + [logits.shape[-1]])
        return PerceiverDecoderOutput(logits=logits, crosses=decoder_outputs.crosses)
def restructure(modality_sizes, inputs):
    """Partition a flat, concatenated input tensor back into per-modality chunks.

    Modalities are visited in sorted-name order, which must match the
    (also sorted) order used when the inputs were concatenated.

    Args:
        modality_sizes: dict mapping modality name -> number of columns it owns.
        inputs: tensor of shape (batch, sum(sizes), ...).

    Returns:
        dict mapping modality name -> its slice of `inputs`.
    """
    per_modality = {}
    offset = 0
    for name in sorted(modality_sizes):
        chunk_len = modality_sizes[name]
        per_modality[name] = inputs[:, offset : offset + chunk_len]
        offset += chunk_len
    return per_modality
class PerceiverMultimodalDecoder(PerceiverAbstractDecoder):
    """Decoder over multiple modalities: builds per-modality queries, pads each
    to a common channel width with trainable embeddings, concatenates them in
    sorted-modality order, and decodes with a single basic decoder."""
    def __init__(
        self,
        config,
        modalities,
        num_outputs,
        output_num_channels,
        min_padding_size=2,
        subsampled_index_dims=None,
        **decoder_kwargs,
    ):
        super().__init__()
        self.modalities = nn.ModuleDict(modalities)
        self.subsampled_index_dims = subsampled_index_dims
        self.min_padding_size = min_padding_size
        self.output_num_channels = output_num_channels
        self.num_outputs = num_outputs
        self.decoder = PerceiverBasicDecoder(
            config,
            output_index_dims=(num_outputs,),
            output_num_channels=output_num_channels,
            position_encoding_type="none",
            num_channels=self.num_query_channels,
            **decoder_kwargs,
        )
        # One trainable padding vector per modality, sized to bring that
        # modality's query channels up to the common num_query_channels.
        self.padding = nn.ParameterDict(
            {
                modality: nn.Parameter(
                    torch.randn(1, self.num_query_channels - decoder.num_query_channels)
                )
                for modality, decoder in modalities.items()
            }
        )
    @property
    def num_query_channels(self):
        # Common width: widest modality query plus a minimum padding margin.
        max_channel_size = max(decoder.num_query_channels for _, decoder in self.modalities.items())
        common_channel_size = max_channel_size + self.min_padding_size
        return common_channel_size
    def decoder_query(
        self, inputs, modality_sizes, inputs_without_pos=None, subsampled_points=None
    ):
        """Build a single concatenated query covering every modality."""
        # Partition the flat inputs among the different modalities
        inputs = restructure(modality_sizes, inputs)
        # Obtain modality-specific decoders' queries
        subsampled_points = subsampled_points or dict()
        decoder_queries = dict()
        for modality, decoder in self.modalities.items():
            # Get input_without_pos for this modality if it exists.
            input_without_pos = None
            if inputs_without_pos is not None:
                input_without_pos = inputs_without_pos.get(modality, None)
            query = decoder.decoder_query(
                inputs=inputs[modality],
                modality_sizes=None,
                inputs_without_pos=input_without_pos,
                subsampled_points=subsampled_points.get(modality, None),
            )
            decoder_queries[modality] = query
        # Pad all queries with trainable position encodings to make them have the same channels
        def embed(modality, x):
            # Flatten all index dims to one, then right-pad channels.
            x = torch.reshape(x, [x.shape[0], np.prod(x.shape[1:-1]), x.shape[-1]])
            pos = self.padding[modality]
            pos = torch.broadcast_to(
                pos, [x.shape[0], x.shape[1], self.num_query_channels - x.shape[2]]
            )
            return torch.cat([x, pos], dim=2)
        # Apply a predictable ordering to the modalities
        return torch.cat(
            [
                embed(modality, decoder_queries[modality])
                for modality in sorted(self.modalities.keys())
            ],
            dim=1,
        )
    def forward(self, query, z, query_mask=None, output_attentions=False):
        """Run the shared basic decoder on the concatenated multimodal query."""
        decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)
        return decoder_outputs
# Below: IO pre- and post-processor classes for Perceiver.
def space_to_depth(frames, temporal_block_size=1, spatial_block_size=1):
    """Space-to-depth: fold spatial (and, for video, temporal) blocks into channels.

    Rank-4 input (batch, channels, H, W) becomes
    (batch, H//bs, W//bs, bs*bs*C); rank-5 input (batch, T, channels, H, W)
    becomes (batch, T//ts, H//bs, W//bs, ts*bs*bs*C). Note the output is
    channels-LAST while the input is channels-first.

    Raises:
        ValueError: if `frames` is not rank 4 or rank 5.
    """
    if len(frames.shape) == 4:
        batch_size, num_channels, height, width = frames.shape
        # split up dimensions (height by spatial_block_size, width by spatial_block_size)
        frames = frames.view(
            batch_size,
            num_channels,
            height // spatial_block_size,
            spatial_block_size,
            width // spatial_block_size,
            spatial_block_size,
        )
        # move blocks to last dimension: (batch_size, H//bs, W//bs, bs, bs, C)
        frames = frames.permute(0, 2, 4, 3, 5, 1).contiguous()
        # concatenate blocks along channel dimension: (batch_size, H//bs, W//bs, bs*bs*C)
        frames = frames.view(
            batch_size,
            height // spatial_block_size,
            width // spatial_block_size,
            (spatial_block_size**2) * num_channels,
        )
        return frames
    elif len(frames.shape) == 5:
        batch_size, time, num_channels, height, width = frames.shape
        # split up dimensions (time by temporal_block_size, height by spatial_block_size, width by spatial_block_size)
        frames = frames.view(
            batch_size,
            time // temporal_block_size,
            temporal_block_size,
            num_channels,
            height // spatial_block_size,
            spatial_block_size,
            width // spatial_block_size,
            spatial_block_size,
        )
        # move blocks to last dimension: (batch_size, T//ts, H//bs, W//bs, ts, bs, bs, C)
        frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
        # concatenate blocks along channel dimension: (batch_size, T//ts, H//bs, W//bs, ts*bs*bs*C)
        frames = frames.view(
            batch_size,
            time // temporal_block_size,
            height // spatial_block_size,
            width // spatial_block_size,
            temporal_block_size * (spatial_block_size**2) * num_channels,
        )
        return frames
    else:
        raise ValueError(
            "Frames should be of rank 4 (batch, channels, height, width)"
            " or rank 5 (batch, time, channels, height, width)"
        )
class Conv2dSamePadding(nn.Conv2d):
    """`nn.Conv2d` with TensorFlow-style "SAME" zero padding: for stride 1 the
    spatial output size equals the input size."""

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Build the (left, right, top, bottom) padding amounts from the kernel
        # size. For each kernel dim k the leading pad is k//2 + (k % 2) - 1 and
        # the trailing pad is k//2; kernel dims are reversed because ZeroPad2d
        # expects width padding first.
        pad = []
        for k in self.kernel_size[::-1]:
            pad.append(k // 2 + (k - 2 * (k // 2)) - 1)
            pad.append(k // 2)
        self.zero_pad_2d = nn.ZeroPad2d(tuple(pad))

    def forward(self, input):
        padded = self.zero_pad_2d(input)
        return self._conv_forward(padded, self.weight, self.bias)
class Conv2DDownsample(qc.Module):
    """Conv -> (optional) BatchNorm -> ReLU -> MaxPool block that downsamples
    spatially by 4x (stride-2 conv followed by stride-2 max pool)."""
    def __init__(
        self,
        n_lays=1,
        in_channels=3,
        out_channels=64,
        use_batchnorm=True,
    ):
        super().__init__()
        # NOTE(review): `n_lays` is accepted but never used — only a single
        # conv layer is built regardless of its value.
        self.conv = Conv2dSamePadding(
            in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, bias=False
        )
        self.batchnorm = (
            nn.BatchNorm2d(num_features=out_channels) if use_batchnorm else nn.Identity()
        )
        self.relu = nn.ReLU()
        self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2)
    def forward(self, inputs):
        out = self.conv(inputs)
        out = self.batchnorm(out)
        out = self.relu(out)
        out = self.max_pool(out)
        return out
def generate_fourier_features(
    pos, num_bands, max_resolution=(224, 224), concat_pos=True, sine_only=False
):
    """Generate Fourier positional features for positions `pos`.

    Args:
        pos: position tensor of shape (batch, n, d). Only `pos[0]` is used to
            compute the features, which are then expanded across the batch —
            i.e. positions are assumed identical for every batch element.
        num_bands: number of frequency bands per spatial dimension.
        max_resolution: per-dimension resolution; the top frequency per dim is
            the Nyquist frequency `res / 2`.
        concat_pos: if True, append the raw positions to the features.
        sine_only: if True, emit only sine features (no cosine).

    Returns:
        Tensor of shape (batch, n, F) where F depends on the flags above.
    """
    batch_size = pos.shape[0]
    min_freq = 1.0
    # Nyquist frequency at the target resolution:
    freq_bands = torch.stack(
        [torch.linspace(start=min_freq, end=res / 2, steps=num_bands) for res in max_resolution],
        dim=0,
    )
    # Get frequency bands for each spatial dimension.
    # Output is size [n, d * num_bands]
    per_pos_features = pos[0, :, :][:, :, None] * freq_bands[None, :, :]
    per_pos_features = torch.reshape(per_pos_features, [-1, np.prod(per_pos_features.shape[1:])])
    if sine_only:
        # Output is size [n, d * num_bands]
        per_pos_features = torch.sin(np.pi * (per_pos_features))
    else:
        # Output is size [n, 2 * d * num_bands]
        per_pos_features = torch.cat(
            [torch.sin(np.pi * per_pos_features), torch.cos(np.pi * per_pos_features)], dim=-1
        )
    # Concatenate the raw input positions.
    if concat_pos:
        # Adds d bands to the encoding.
        per_pos_features = torch.cat([pos, per_pos_features.expand(batch_size, -1, -1)], dim=-1)
    return per_pos_features
def build_linear_positions(index_dims, output_range=(-1.0, 1.0)):
    """Build a grid of linearly spaced coordinates over `index_dims`.

    Args:
        index_dims: sequence of ints — number of positions along each dimension.
        output_range: (min, max) coordinate value along every dimension.

    Returns:
        Float32 tensor of shape `tuple(index_dims) + (len(index_dims),)` holding,
        at each grid location, that location's coordinate vector.
    """
    def _linspace(n_xels_per_dim):
        return torch.linspace(
            start=output_range[0], end=output_range[1], steps=n_xels_per_dim, dtype=torch.float32
        )
    dim_ranges = [_linspace(n_xels_per_dim) for n_xels_per_dim in index_dims]
    # `indexing="ij"` makes the previously-implicit matrix indexing explicit and
    # silences the torch.meshgrid deprecation warning; the values are unchanged.
    array_index_grid = torch.meshgrid(*dim_ranges, indexing="ij")
    return torch.stack(array_index_grid, dim=-1)
class PerceiverAbstractPositionEncoding(qc.Module, metaclass=abc.ABCMeta):
    """Abstract base for Perceiver position encodings."""
    @property
    @abc.abstractmethod
    def num_dimensions(self):
        # Number of spatial/index dimensions this encoding covers.
        raise NotImplementedError
    @abc.abstractmethod
    def output_size(self, *args, **kw):
        # Size of the encoding's last (channel) dimension.
        raise NotImplementedError
    @abc.abstractmethod
    def forward(self, batch_size, pos):
        raise NotImplementedError
class PerceiverTrainablePositionEncoding(PerceiverAbstractPositionEncoding):
    """Fully learned position encoding: one trainable embedding per index."""
    def __init__(self, index_dims, num_channels=128):
        super().__init__()
        self._num_channels = num_channels
        self._index_dims = index_dims
        # Total number of positions across all index dims.
        index_dim = np.prod(index_dims)
        self.position_embeddings = nn.Parameter(torch.randn(index_dim, num_channels))
    @property
    def num_dimensions(self):
        if isinstance(self._index_dims, int):
            return 1
        return len(self._index_dims)
    def output_size(self, *args, **kw):
        return self._num_channels
    def forward(self, batch_size):
        """Return the embeddings, expanded to (batch_size, n, channels) if requested."""
        position_embeddings = self.position_embeddings
        if batch_size is not None:
            position_embeddings = position_embeddings.expand(batch_size, -1, -1)
        return position_embeddings
def _check_or_build_spatial_positions(pos, index_dims, batch_size):
if pos is None:
pos = build_linear_positions(index_dims)
pos = torch.broadcast_to(pos[None], (batch_size,) + pos.shape)
pos = torch.reshape(pos, [batch_size, np.prod(index_dims), -1])
else:
if pos.shape[-1] != len(index_dims):
raise ValueError("Spatial features have the wrong number of dimensions.")
return pos
class PerceiverFourierPositionEncoding(PerceiverAbstractPositionEncoding):
    """Fourier (sinusoidal) position encoding over a spatial grid."""
    def __init__(self, num_bands, max_resolution, concat_pos=True, sine_only=False):
        super().__init__()
        self.num_bands = num_bands
        self.max_resolution = max_resolution
        self.concat_pos = concat_pos
        self.sine_only = sine_only
    @property
    def num_dimensions(self):
        return len(self.max_resolution)
    def output_size(self):
        """Returns size of positional encodings last dimension."""
        num_dims = len(self.max_resolution)
        encoding_size = self.num_bands * num_dims
        if not self.sine_only:
            # sine + cosine doubles the feature count.
            encoding_size *= 2
        if self.concat_pos:
            # Raw positions contribute one extra channel per dimension.
            encoding_size += self.num_dimensions
        return encoding_size
    def forward(self, index_dims, batch_size, device, pos=None):
        """Build (or validate) positions and return their Fourier features on `device`."""
        pos = _check_or_build_spatial_positions(pos, index_dims, batch_size)
        fourier_pos_enc = generate_fourier_features(
            pos,
            num_bands=self.num_bands,
            max_resolution=self.max_resolution,
            concat_pos=self.concat_pos,
            sine_only=self.sine_only,
        ).to(device)
        return fourier_pos_enc
class AbstractPreprocessor(qc.Module):
    """Base class for Perceiver input preprocessors."""
    @property
    def num_channels(self):
        # Number of channels the preprocessor's output carries.
        raise NotImplementedError()
class PerceiverTextPreprocessor(AbstractPreprocessor):
    """Text preprocessor: token embeddings plus learned absolute position embeddings."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embeddings = qc.Embed(num_embeddings=config.s_vocab, embedding_dim=config.d_model)
        self.position_embeddings = qc.Embed(config.n_pos, config.d_model)
    @property
    def num_channels(self):
        return self.config.d_model
    def forward(self, inputs):
        """Embed token ids and add position embeddings.

        Returns (embeddings, modality_sizes=None, inputs_without_pos=None) to
        match the common preprocessor interface.
        """
        embeddings = self.embeddings(inputs)
        seq_length = inputs.shape[1]
        position_ids = torch.arange(0, seq_length, device=inputs.device)
        embeddings = embeddings + self.position_embeddings(position_ids)
        return embeddings, None, None
class PerceiverEmbeddingDecoder(qc.Module):
    """Tied-weight language-model head: projects hidden states onto the
    (transposed) token embedding matrix plus a learned bias."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.s_vocab = config.s_vocab
        self.bias = nn.Parameter(torch.zeros(self.s_vocab))
    def forward(self, hiddens, embedding_layer):
        """Return vocabulary logits of shape (batch, seq_len, s_vocab)."""
        batch_size, seq_len, d_model = hiddens.shape
        output = torch.matmul(
            hiddens.reshape([-1, d_model]), embedding_layer.weight.T
        )  # Flatten batch dim
        output = output + self.bias
        return output.reshape([batch_size, seq_len, self.s_vocab])
class PerceiverMultimodalPostprocessor(qc.Module):
    """Dispatches decoder outputs to one postprocessor per modality."""
    def __init__(self, modalities, input_is_dict=False):
        super().__init__()
        self.modalities = nn.ModuleDict(modalities)
        # When False, `inputs` is a flat tensor that must first be split by
        # `modality_sizes`; when True it is already a per-modality dict.
        self.input_is_dict = input_is_dict
    def forward(self, inputs, pos=None, modality_sizes=None):
        """Return a dict mapping modality name -> postprocessed output."""
        if not self.input_is_dict:
            # Slice up modalities by their sizes.
            if modality_sizes is None:
                raise ValueError("Modality sizes should be specified if input is not a dictionary.")
            inputs = restructure(modality_sizes=modality_sizes, inputs=inputs)
        outputs = {
            modality: postprocessor(inputs[modality], pos=pos, modality_sizes=None)
            for modality, postprocessor in self.modalities.items()
        }
        return outputs
class PerceiverClassificationPostprocessor(qc.Module):
    """Postprocessor that maps decoder outputs to class logits, keeping only
    the first index position."""
    def __init__(self, config, in_channels):
        super().__init__()
        self.classifier = qc.Linear(in_channels, config.n_labels)
    def forward(self, inputs, pos=None, modality_sizes=None):
        logits = self.classifier(inputs)
        # Keep the single classification token: (batch, n_labels).
        return logits[:, 0, :]
class PerceiverAudioPostprocessor(qc.Module):
    """Postprocessor that maps decoder outputs back to flat audio samples."""
    def __init__(self, config, in_channels, postproc_type="patches"):
        super().__init__()
        if postproc_type not in ("patches",):  # to be supported: 'conv', 'patches', 'pixels'
            raise ValueError("Invalid postproc_type!")
        # Architecture parameters:
        self.classifier = qc.Linear(in_channels, config.samples_per_patch)
    def forward(self, inputs, pos=None, modality_sizes=None):
        logits = self.classifier(inputs)
        # Flatten patches back into a single sample axis: (batch, n * samples_per_patch).
        return torch.reshape(logits, [inputs.shape[0], -1])
class PerceiverProjectionPostprocessor(qc.Module):
    """Postprocessor that linearly projects decoder outputs to `out_channels`."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Single linear map from decoder channels to the desired output width.
        self.classifier = qc.Linear(in_channels, out_channels)

    def forward(self, inputs, pos=None, modality_sizes=None):
        # `pos` and `modality_sizes` are unused; kept for a uniform postprocessor API.
        return self.classifier(inputs)
class PerceiverImagePreprocessor(AbstractPreprocessor):
    """Image preprocessor supporting four featurization modes ("conv",
    "conv1x1", "pixels", "patches"), followed by position encodings that are
    either concatenated to or added onto the features."""
    def __init__(
        self,
        config,
        prep_type="conv",
        spatial_downsample=4,
        temporal_downsample=1,
        position_encoding_type="fourier",
        in_channels=3,
        out_channels=64,
        conv_after_patching=False,
        conv_after_patching_in_channels=54,  # only relevant when conv_after_patching = True
        conv2d_use_batchnorm=True,
        concat_or_add_pos="concat",
        project_pos_dim=-1,
        **position_encoding_kwargs,
    ):
        super().__init__()
        self.config = config
        if prep_type not in ("conv", "patches", "pixels", "conv1x1"):
            raise ValueError(f"Prep_type {prep_type} is invalid")
        if concat_or_add_pos not in ["concat", "add"]:
            raise ValueError(f"Invalid value {concat_or_add_pos} for concat_or_add_pos.")
        self.in_channels = in_channels
        self.prep_type = prep_type
        self.spatial_downsample = spatial_downsample
        self.temporal_downsample = temporal_downsample
        self.position_encoding_type = position_encoding_type
        self.concat_or_add_pos = concat_or_add_pos
        self.conv_after_patching = conv_after_patching
        self.out_channels = out_channels
        if self.prep_type == "conv":
            # Downsampling with conv is currently restricted
            convnet_num_layers = math.log(spatial_downsample, 4)
            convnet_num_layers_is_int = convnet_num_layers == np.round(convnet_num_layers)
            if not convnet_num_layers_is_int or temporal_downsample != 1:
                raise ValueError(
                    "Only powers of 4 expected for spatial and 1 expected for temporal downsampling with conv."
                )
            self.convnet = Conv2DDownsample(
                in_channels=in_channels,
                n_lays=int(convnet_num_layers),
                out_channels=out_channels,
                use_batchnorm=conv2d_use_batchnorm,
            )
        elif self.prep_type == "conv1x1":
            if temporal_downsample != 1:
                raise ValueError("Conv1x1 does not downsample in time.")
            self.convnet_1x1 = nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=(1, 1),
                # spatial_downsample is unconstrained for 1x1 convolutions.
                stride=(spatial_downsample, spatial_downsample),
            )
        # Position embeddings
        self.project_pos_dim = project_pos_dim
        self.position_embeddings, self.positions_projection = build_position_encoding(
            position_encoding_type=position_encoding_type,
            out_channels=out_channels,
            project_pos_dim=project_pos_dim,
            **position_encoding_kwargs,
        )
        self.conv_after_patches = (
            qc.Linear(conv_after_patching_in_channels, self.out_channels)
            if conv_after_patching
            else nn.Identity()
        )
    @property
    def num_channels(self):
        """Channels of the preprocessor output, combining feature and position dims."""
        is_temporal = self.position_embeddings.num_dimensions > 2
        # position embedding
        if self.project_pos_dim > 0:
            pos_dim = self.project_pos_dim
        else:
            pos_dim = self.position_embeddings.output_size()
        if self.concat_or_add_pos == "add":
            # When adding, the output width equals the position-encoding width.
            return pos_dim
        # inputs
        if self.conv_after_patching or self.prep_type in ("conv1x1", "conv"):
            inp_dim = self.out_channels
        elif self.prep_type == "pixels":
            inp_dim = self.in_channels
            if not is_temporal:
                inp_dim = math.ceil(inp_dim / self.spatial_downsample)
        elif self.prep_type == "patches":
            if self.conv_after_patching:
                inp_dim = self.out_channels
            else:
                inp_dim = self.in_channels * self.spatial_downsample**2
                if is_temporal:
                    inp_dim *= self.temporal_downsample
        return inp_dim + pos_dim
    def _build_network_inputs(self, inputs, pos, network_input_is_1d=True):
        """Flatten features (optionally) and combine them with position encodings.

        Returns (inputs_with_pos, inputs) where the second element is the
        feature tensor before position information was attached.
        """
        batch_size = inputs.shape[0]
        index_dims = inputs.shape[1:-1]
        indices = np.prod(index_dims)
        # Flatten input features to a 1D index dimension if necessary.
        if len(inputs.shape) > 3 and network_input_is_1d:
            inputs = torch.reshape(inputs, [batch_size, indices, -1])
        # Construct the position encoding.
        if self.position_encoding_type == "trainable":
            pos_enc = self.position_embeddings(batch_size)
        elif self.position_encoding_type == "fourier":
            pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device)
        pos_enc = self.positions_projection(pos_enc)
        if not network_input_is_1d:
            # Reshape pos to match the input feature shape
            # if the network takes non-1D inputs
            sh = inputs.shape
            pos_enc = torch.reshape(pos_enc, list(sh)[:-1] + [-1])
        if self.concat_or_add_pos == "concat":
            inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1)
        elif self.concat_or_add_pos == "add":
            inputs_with_pos = inputs + pos_enc
        return inputs_with_pos, inputs
    def forward(
        self,
        inputs,
        pos=None,
        network_input_is_1d=True,
    ):
        """Featurize `inputs` per `prep_type`, then attach position encodings."""
        if self.prep_type == "conv":
            # Convnet image featurization.
            # Downsamples spatially by a factor of 4
            inputs = self.convnet(inputs)
        elif self.prep_type == "conv1x1":
            # map inputs to self.out_channels
            inputs = self.convnet_1x1(inputs)
        elif self.prep_type == "pixels":
            # if requested, downsamples in the crudest way
            if inputs.ndim == 4:
                inputs = inputs[:: self.spatial_downsample, :: self.spatial_downsample]
            elif inputs.ndim == 5:
                inputs = inputs[
                    :,
                    :: self.temporal_downsample,
                    :,
                    :: self.spatial_downsample,
                    :: self.spatial_downsample,
                ]
            else:
                raise ValueError("Unsupported data format for pixels.")
        elif self.prep_type == "patches":
            # Space2depth featurization.
            # Video: B x T x C x H x W
            inputs = space_to_depth(
                inputs,
                temporal_block_size=self.temporal_downsample,
                spatial_block_size=self.spatial_downsample,
            )
            if inputs.ndim == 5 and inputs.shape[1] == 1:
                # for flow
                inputs = inputs.squeeze(dim=1)
            inputs = self.conv_after_patches(inputs)
        if self.prep_type != "patches":
            # move channels to last dimension, as the _build_network_inputs method below expects this
            if inputs.ndim == 4:
                inputs = torch.moveaxis(inputs, 1, -1)
            elif inputs.ndim == 5:
                inputs = torch.moveaxis(inputs, 2, -1)
            else:
                raise ValueError("Unsupported data format for conv1x1.")
        inputs, inputs_without_pos = self._build_network_inputs(inputs, pos, network_input_is_1d)
        modality_sizes = None  # Size for each modality, only needed for multimodal
        return inputs, modality_sizes, inputs_without_pos
class PerceiverOneHotPreprocessor(AbstractPreprocessor):
    """Preprocessor for one-hot label inputs: adds a singleton index dimension."""

    def __init__(self, config):
        super().__init__()
        self.config = config

    @property
    def num_channels(self):
        # One channel per label class.
        return self.config.n_labels

    def forward(
        self,
        inputs,
        pos=None,
        network_input_is_1d=True,
    ):
        # Insert a dummy index dimension: (batch, channels) -> (batch, 1, channels).
        with_index_dim = inputs[:, None, :]
        # No modality sizes; the "inputs without position" are the inputs themselves.
        return with_index_dim, None, with_index_dim
class PerceiverAudioPreprocessor(AbstractPreprocessor):
    """Audio preprocessor: reshapes raw samples into fixed-size patches and
    attaches position encodings (concatenated or added)."""
    def __init__(
        self,
        config,
        prep_type="patches",
        samples_per_patch=96,
        position_encoding_type="fourier",
        concat_or_add_pos="concat",
        out_channels=64,
        project_pos_dim=-1,
        **position_encoding_kwargs,
    ):
        super().__init__()
        self.config = config
        if prep_type not in ("patches",):
            raise ValueError(f"Prep_type {prep_type} is invalid, can only be 'patches'.")
        if concat_or_add_pos not in ["concat", "add"]:
            raise ValueError(
                f"Concat_or_pos {concat_or_add_pos} is invalid, can only be 'concat' or 'add'."
            )
        self.samples_per_patch = samples_per_patch
        self.position_encoding_type = position_encoding_type
        self.concat_or_add_pos = concat_or_add_pos
        self.project_pos_dim = project_pos_dim
        # Position embeddings
        self.position_embeddings, self.positions_projection = build_position_encoding(
            position_encoding_type=position_encoding_type,
            out_channels=out_channels,
            project_pos_dim=project_pos_dim,
            **position_encoding_kwargs,
        )
    @property
    def num_channels(self):
        """Output channels: patch width plus (when concatenating) the position width."""
        # position embedding
        if self.project_pos_dim > 0:
            pos_dim = self.project_pos_dim
        else:
            pos_dim = self.position_embeddings.output_size()
        if self.concat_or_add_pos == "add":
            return pos_dim
        return self.samples_per_patch + pos_dim
    def _build_network_inputs(self, inputs, pos):
        """Attach position encodings to the patched audio features."""
        batch_size = inputs.shape[0]
        index_dims = inputs.shape[1:-1]
        # Construct the position encoding.
        if self.position_encoding_type == "trainable":
            pos_enc = self.position_embeddings(batch_size)
        elif self.position_encoding_type == "fourier":
            pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device)
        pos_enc = self.positions_projection(pos_enc)
        if self.concat_or_add_pos == "concat":
            inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1)
        elif self.concat_or_add_pos == "add":
            inputs_with_pos = inputs + pos_enc
        return inputs_with_pos, inputs
    def forward(
        self,
        inputs,
        pos=None,
        network_input_is_1d=True,
    ):
        # Group raw samples into patches of `samples_per_patch`.
        inputs = torch.reshape(inputs, [inputs.shape[0], -1, self.samples_per_patch])
        inputs, inputs_without_pos = self._build_network_inputs(inputs, pos)
        modality_sizes = None  # Size for each modality, only needed for multimodal
        return inputs, modality_sizes, inputs_without_pos
class PerceiverMultimodalPreprocessor(AbstractPreprocessor):
    """Runs one preprocessor per modality, pads every modality's output to a
    common channel width with trainable embeddings, optionally masks it, and
    concatenates all modalities along the time (index) dimension."""
    def __init__(
        self,
        modalities,
        mask_probs=None,
        min_padding_size=2,
    ):
        super().__init__()
        self.modalities = modalities
        self.min_padding_size = min_padding_size
        self.mask_probs = mask_probs if mask_probs is not None else dict()
        # Trainable channel padding, sized per modality to reach num_channels.
        self.padding = nn.ParameterDict(
            {
                modality: nn.Parameter(
                    torch.randn(1, self.num_channels - preprocessor.num_channels)
                )
                for modality, preprocessor in modalities.items()
            }
        )
        # Trainable mask token per modality that has a masking probability.
        self.mask = nn.ParameterDict(
            {
                modality: nn.Parameter(torch.randn(1, self.num_channels))
                for modality, _ in self.mask_probs.items()
            }
        )
    @property
    def num_channels(self):
        # Common width: widest modality output plus a minimum padding margin.
        max_channel_size = max(processor.num_channels for _, processor in self.modalities.items())
        common_channel_size = max_channel_size + self.min_padding_size
        return common_channel_size
    def forward(
        self,
        inputs,
        pos=None,
        network_input_is_1d=True,
    ):
        """Preprocess, pad, (optionally) mask and concatenate all modalities.

        Returns (final_inputs, modality_sizes, inputs_without_pos) where
        `modality_sizes` records each modality's length along dim 1.
        """
        padded = {}
        modality_sizes = {}
        inputs_without_pos = {}
        for modality, preprocessor in self.modalities.items():
            # preprocess each modality using the respective preprocessor.
            output, _, inputs_without_pos[modality] = preprocessor(
                inputs[modality], pos=pos, network_input_is_1d=network_input_is_1d
            )
            # pad to the same common_channel_size.
            batch_size, num_samples, num_channels = output.shape
            pos_enc = self.padding[modality].expand(batch_size, -1, -1)
            padding = torch.broadcast_to(
                pos_enc,
                [batch_size, num_samples, self.num_channels - num_channels],
            )
            output_padded = torch.cat([output, padding], dim=2)
            # mask if required
            if modality in self.mask_probs:
                msk = self.mask[modality].expand(batch_size, -1, -1)
                mask_prob = self.mask_probs[modality]
                # Bernoulli mask drawn on CPU, then moved to the mask token's device.
                mask = torch.bernoulli(torch.full([batch_size, num_samples], mask_prob))
                mask = torch.unsqueeze(mask, dim=2).to(msk.device)
                output_padded = (1 - mask) * output_padded + mask * msk
            padded[modality] = output_padded
            modality_sizes[modality] = output_padded.shape[1]
        # Apply a predictable ordering to the modalities
        padded_ls = [padded[k] for k in sorted(padded.keys())]
        # Finally, concatenate along the time dimension
        final_inputs = torch.cat(padded_ls, dim=1)
        return final_inputs, modality_sizes, inputs_without_pos
| 35.754944 | 134 | 0.615564 |
5bff93a0dbeafa219f3fca6a16660be815dcc395 | 3,434 | py | Python | mail_fetch.py | Edison-Hsu/12306Calendar | e4303db3777508c6be90fdbcf372f2437121e570 | [
"MIT"
] | 6 | 2020-11-02T10:08:51.000Z | 2022-01-18T06:41:34.000Z | mail_fetch.py | Edison-Hsu/12306Calendar | e4303db3777508c6be90fdbcf372f2437121e570 | [
"MIT"
] | null | null | null | mail_fetch.py | Edison-Hsu/12306Calendar | e4303db3777508c6be90fdbcf372f2437121e570 | [
"MIT"
] | 4 | 2020-11-18T14:53:41.000Z | 2022-01-18T06:41:36.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from calendar_generate import CalendarGenerate
from calendar_resovle import CalendarResovle
from mail_resovle import MailResovle
import poplib, os, re
class MailFetch:
    """Fetches mails from a POP3 mailbox and resolves them into mail models.

    The POP3 password is the mailbox's POP3 *service* password, which may
    differ from the account login password.
    """
    def __init__(self, user_email, password, pop3_server):
        # Derive the POP3 host from the e-mail domain when none is given.
        if len(pop3_server) == 0:
            pop3_server = 'pop.' + re.split('@',user_email)[-1]
        self.user_email = user_email
        self.password = password
        self.pop3_server = pop3_server
        self.server = None
    def get_mails(self) -> list:
        """Fetch, parse and de-duplicate all mails; closes the connection when done."""
        if self.server == None:
            return []
        mail_model_list = self.resovle_all_mails()
        mail_model_list = self.filter_validate_mail(mail_model_list)
        self.stop_server()
        return mail_model_list
    # Already sorted: oldest -> newest
    def resovle_all_mails(self) -> list:
        """Download every message and parse it into a mail model dict."""
        if self.server == None:
            return []
        server = self.server
        _, mails, _ = server.list()  # list() returns the ids of all messages
        if len(mails) == 0:
            return []
        mail_model_list = []
        for index in range(1, len(mails) + 1):  # note: POP3 message indices start at 1
            _, lines, _ = server.retr(index)  # lines holds each raw line of the message
            if len(lines) == 0:
                continue
            msg_content = b'\r\n'.join(lines).decode('utf-8')
            mail_model = MailResovle().resovle_to_mail(msg_content)  # parse into a mail model
            if len(mail_model):
                mail_model_list.append(mail_model)
        return mail_model_list
    def login(self):
        """Connect and authenticate over POP3-over-SSL; returns True on success."""
        try:
            poplib._MAXLINE=20480
            # connect to the POP3 server:
            server = poplib.POP3_SSL(self.pop3_server, 995)
            server.user(self.user_email)
            server.pass_(self.password)
            self.server = server
            return True
        except Exception:
            server = None
            return False
    def stop_server(self):
        """Close the POP3 connection if one is open."""
        if self.server == None:
            return
        self.server.quit()  # close the connection
    def filter_validate_mail(self, mail_model_list):
        """Keep only the latest mail per order_id, dropping 'delete'-type orders.

        A later mail with the same order_id replaces the earlier one; if the
        later mail is a deletion, the order is removed entirely.
        """
        target_list = []
        for mail in mail_model_list:
            need_remove_mail = []
            for target in target_list:
                if target['order_id'] == mail['order_id']:
                    need_remove_mail.append(target)
                    continue
            if len(need_remove_mail) > 0:
                target_list.remove(need_remove_mail[0])
            if mail["order_type"] == "delete":
                continue
            target_list.append(mail)
        return target_list
if __name__ == '__main__':
    # Demo: fetch ticket mails and write them into an ICS calendar file.
    user_email = ''
    password = ''  # this is the POP3 service password, not the mailbox login password
    pop3_server = 'pop.qq.com'
    mail_fetch = MailFetch(user_email, password, pop3_server)
    alarm_before_hour = 2
    if mail_fetch.login():
        print("登录成功")
        mail_model_list = mail_fetch.get_mails()
        DATABASE_DIR_PATH = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.path.sep + "./database")
        calendarHelper = CalendarGenerate('12306', DATABASE_DIR_PATH + '/'+ user_email +'.ics')
        for mail in mail_model_list:
            event_id = mail['order_id']
            event_title, event_start, event_description = CalendarResovle().generate_calendar_model(mail)
            calendarHelper.add_event(event_id, event_title, event_start, event_start, event_description, alarm_before_hour)
        calendarHelper.save_ics()
else:
print("登录失败") | 36.147368 | 123 | 0.598719 |
9234b02d346326610ecb07d024d57dd6febf877b | 80 | py | Python | openpay/__init__.py | openpaygithub/PythonSDK | 13fe155fe9c9f72c3ba575e6ee65e3bba16037b7 | [
"MIT"
] | null | null | null | openpay/__init__.py | openpaygithub/PythonSDK | 13fe155fe9c9f72c3ba575e6ee65e3bba16037b7 | [
"MIT"
] | 3 | 2021-03-31T18:59:15.000Z | 2021-12-13T19:54:07.000Z | openpay/__init__.py | openpaygithub/PythonSDK | 13fe155fe9c9f72c3ba575e6ee65e3bba16037b7 | [
"MIT"
] | 1 | 2019-01-22T13:37:58.000Z | 2019-01-22T13:37:58.000Z | from .new_checkout import (Client, Merchant)
__all__ = ['Client', 'Merchant']
| 16 | 44 | 0.7125 |
557f782d9278ed5990bdd031ed182cea881bd3ca | 5,371 | py | Python | tests/test_magicgraph.py | dihuang0220/magic-graph-py3 | 391abb36491a1b8e3160a27da55cce0e2eaa098e | [
"BSD-3-Clause"
] | 2 | 2019-07-15T20:16:22.000Z | 2019-07-16T02:56:01.000Z | tests/test_magicgraph.py | dihuang0220/magic-graph-py3 | 391abb36491a1b8e3160a27da55cce0e2eaa098e | [
"BSD-3-Clause"
] | null | null | null | tests/test_magicgraph.py | dihuang0220/magic-graph-py3 | 391abb36491a1b8e3160a27da55cce0e2eaa098e | [
"BSD-3-Clause"
] | null | null | null | import unittest
import magicgraph
import random
from magicgraph.generators import clique
class TestDiGraph(unittest.TestCase):
    """Unit tests for the basic (unweighted) graph API on small cliques."""
    def test_nodes(self):
        network = clique(5)
        self.assertEqual(set(range(1, 6)).difference(network.nodes()), set())
    def test_adjacency_iter(self):
        network = clique(3)
        self.assertEqual([x for x in network.adjacency_iter()], [(1, [2, 3]), (2, [1, 3]), (3,[1, 2])])
    def test_subgraph(self):
        network = clique(5)
        self.assertEqual(network.subgraph({1, 2}), {1: [2], 2: [1]})
    def test_make_undirected(self):
        # Dropping node 4's edges and re-symmetrizing must restore the clique.
        network = clique(5)
        network[4] = []
        network.make_undirected()
        self.assertTrue(network == {1: [2, 3, 4, 5], 2: [1, 3, 4, 5], 3: [1, 2, 4, 5], 4: [1, 2, 3, 5], 5: [1, 2, 3, 4]})
    def test_make_consistent(self):
        # Duplicated edges must be removed by make_consistent().
        network = clique(5)
        network[5].extend([1, 2, 3, 4])
        self.assertNotEqual(network[5], [1, 2, 3, 4])
        network.make_consistent()
        self.assertEqual(network[5], [1, 2, 3, 4])
    def test_remove_self_loops(self):
        network = clique(5)
        network[5].append(5)
        network.remove_self_loops()
        self.assertEqual(network[5], [1, 2, 3, 4])
    def test_check_self_loops(self):
        network = clique(5)
        self.assertFalse(network.check_self_loops())
        network[5].append(5)
        self.assertTrue(network.check_self_loops())
    def test_has_edge(self):
        network = clique(5)
        self.assertTrue(network.has_edge(1, 5))
        self.assertFalse(network.has_edge(1, 6))
        self.assertFalse(network.has_edge(6, 1))
    def test_degree(self):
        network = clique(5)
        self.assertEqual(network.degree(1), 4)
        self.assertEqual(network.degree([1, 2, 3]), {1: 4, 2: 4, 3: 4})
    def test_order(self):
        network = clique(5)
        self.assertEqual(network.order(), 5)
    def test_number_of_edges(self):
        network = clique(5)
        self.assertEqual(network.number_of_edges(), 4 + 3 + 2 + 1)
        network = clique(4)
        self.assertEqual(network.number_of_edges(), 3 + 2 + 1)
    def test_number_of_nodes(self):
        network = clique(5)
        self.assertEqual(network.number_of_nodes(), 5)
class TestWeightedNode(unittest.TestCase):
    """Unit tests for WeightedNode: a neighbor list with parallel edge weights."""
    def test_append(self):
        node = magicgraph.WeightedNode()
        self.assertEqual(node.weights, [])
        node.append(1, 1.0)
        self.assertEqual(node, [1])
        self.assertEqual(node.weight(0), 1.0)
    def test_extend(self):
        node = magicgraph.WeightedNode()
        self.assertEqual(node.weights, [])
        node.extend([1, 2, 3, 4], [1., 0.5, 0.25, 0.125])
        self.assertEqual(node, [1, 2, 3, 4])
        self.assertEqual(node.weights, [1., 0.5, 0.25, 0.125])
    def test_pop(self):
        # pop() must remove and return both the destination and its weight.
        node = magicgraph.WeightedNode()
        node.extend([1, 2, 3, 4], [1., 0.5, 0.25, 0.125])
        dst_removed, weight_removed = node.pop(2)
        self.assertEqual(dst_removed, 3)
        self.assertEqual(weight_removed, 0.25)
        self.assertEqual(node, [1, 2, 4])
        self.assertEqual(node.weights, [1., 0.5, 0.125])
    def test_remove(self):
        node = magicgraph.WeightedNode()
        node.extend([1, 2, 3, 4], [1., 0.5, 0.25, 0.125])
        node.remove(3)
        self.assertEqual(node, [1, 2, 4])
        self.assertEqual(node.weights, [1., 0.5, 0.125])
    def test_choice(self):
        # Weighted sampling: higher-weight neighbors must be chosen more often.
        node = magicgraph.WeightedNode()
        node.extend([1, 2, 3, 4], [1., 0.5, 0.25, 0.125])
        rand = random.Random(0)
        times_chose = {x: 0 for x in node}
        for x in range(0, 100):
            times_chose[node.choice(rand)] += 1
        self.assertLess(times_chose[2], times_chose[1])
        self.assertLess(times_chose[3], times_chose[2])
        self.assertLess(times_chose[4], times_chose[3])
class TestWeightedDiGraph(unittest.TestCase):
    """Unit tests for magicgraph.WeightedDiGraph."""

    def test_random_walk(self):
        """Low-weight nodes are visited rarely, but eventually."""
        graph = magicgraph.WeightedDiGraph()
        graph[1].extend([2, 3, 4, 5], [1., 1., 1., 0.1])
        graph[2].extend([1, 3, 4, 5], [1., 1., 1., 0.1])
        graph[3].extend([1, 2, 4, 5], [1., 1., 1., 0.1])
        graph[4].extend([1, 2, 3, 5], [1., 1., 1., 0.1])
        graph[5].extend([1, 2, 3, 4], [0.1, 0.1, 0.1, 0.1])
        short_walk = graph.random_walk(10, start=1)
        self.assertTrue(5 not in short_walk)
        long_walk = graph.random_walk(1000, start=1)
        self.assertTrue(5 in long_walk)
        visits = {n: 0 for n in graph}
        for n in long_walk:
            visits[n] += 1
        # Node 5 carries low weight, so it should be the least visited.
        self.assertLess(visits[5], visits[1])
        self.assertLess(visits[5], visits[2])
        self.assertLess(visits[5], visits[3])
        self.assertLess(visits[5], visits[4])

    def test_make_consistent(self):
        """make_consistent() removes duplicated out-edges."""
        graph = magicgraph.WeightedDiGraph()
        graph[1].extend([2, 3, 4, 5], [1., 2., 3., 4])
        graph[1].extend([2, 5], [1., 4])  # duplicate two of the edges
        graph[2].extend([3, 4, 5], [1., 1., 1.])
        assert graph[1] == [2, 3, 4, 5, 2, 5]
        graph.make_consistent()
        assert graph[1] == [2, 3, 4, 5]

    def test_make_undirected(self):
        """make_undirected() mirrors every edge and carries its weight over."""
        graph = magicgraph.WeightedDiGraph()
        graph[1].extend([2, 3, 4, 5], [1., 2., 3., 4])
        assert graph[1] == [2, 3, 4, 5]
        graph.make_undirected()
        assert graph[1] == [2, 3, 4, 5]
        for neighbour, expected_weight in zip([2, 3, 4, 5], [1, 2, 3, 4]):
            assert graph[neighbour] == [1]
            assert graph[neighbour].weight(0) == expected_weight
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 27.685567 | 117 | 0.62372 |
d5ee8accb3ceb5651bbda2a61b3db10f7d81f641 | 39 | py | Python | my_plugin/tests/__init__.py | FragmentedPacket/test-cookiecutter | 61b8b723a3081a5981ff63b56628f07b0d9fbc7b | [
"Apache-2.0"
] | null | null | null | my_plugin/tests/__init__.py | FragmentedPacket/test-cookiecutter | 61b8b723a3081a5981ff63b56628f07b0d9fbc7b | [
"Apache-2.0"
] | null | null | null | my_plugin/tests/__init__.py | FragmentedPacket/test-cookiecutter | 61b8b723a3081a5981ff63b56628f07b0d9fbc7b | [
"Apache-2.0"
] | null | null | null | """Unit tests for my_plugin plugin."""
| 19.5 | 38 | 0.692308 |
88632c3cc304ecf70bbdf83fc62bd1ee2b3012d7 | 4,826 | py | Python | pytorch_toolkit/person_reidentification/main.py | AnastasiaaSenina/openvino_training_extensions | 267425d64372dff5b9083dc0ca6abfc305a71449 | [
"Apache-2.0"
] | 1 | 2020-02-09T15:50:49.000Z | 2020-02-09T15:50:49.000Z | pytorch_toolkit/person_reidentification/main.py | AnastasiaaSenina/openvino_training_extensions | 267425d64372dff5b9083dc0ca6abfc305a71449 | [
"Apache-2.0"
] | null | null | null | pytorch_toolkit/person_reidentification/main.py | AnastasiaaSenina/openvino_training_extensions | 267425d64372dff5b9083dc0ca6abfc305a71449 | [
"Apache-2.0"
] | null | null | null | """
MIT License
Copyright (c) 2018 Kaiyang Zhou
"""
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
import os.path as osp
import time
import argparse
import torch
import torch.nn as nn
from config.default_config import (
get_default_config, imagedata_kwargs, videodata_kwargs,
optimizer_kwargs, lr_scheduler_kwargs, engine_run_kwargs
)
import torchreid
from torchreid.utils import (
Logger, set_random_seed, check_isfile, resume_from_checkpoint,
load_pretrained_weights, compute_model_complexity, collect_env_info
)
from data.datamanager import ImageDataManagerWithTransforms
from engine.builder import build_engine
from engine.schedulers.lr_scheduler import build_lr_scheduler
from models.builder import build_model
from models.openvino_wrapper import OpenVINOModel
def build_datamanager(cfg):
    """Create the image or video data manager selected by ``cfg.data.type``."""
    if cfg.data.type != 'image':
        return torchreid.data.VideoDataManager(**videodata_kwargs(cfg))
    return ImageDataManagerWithTransforms(**imagedata_kwargs(cfg))
def reset_config(cfg, args):
    """Copy dataset overrides given on the command line onto the config.

    Only truthy values on ``args`` overwrite the corresponding
    ``cfg.data`` attribute; empty/None arguments leave the config as-is.
    """
    for option in ('root', 'sources', 'targets'):
        value = getattr(args, option)
        if value:
            setattr(cfg.data, option, value)
def main():
    """CLI entry point: build config, data and model, then train or test."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', type=str, default='', help='path to config file')
    parser.add_argument('-s', '--sources', type=str, nargs='+', help='source datasets (delimited by space)')
    parser.add_argument('-t', '--targets', type=str, nargs='+', help='target datasets (delimited by space)')
    parser.add_argument('--root', type=str, default='', help='path to data root')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    # Merge priority: defaults < YAML config file < dataset CLI flags < opts.
    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    set_random_seed(cfg.train.seed)

    # Mirror stdout into a timestamped log file under the save directory.
    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if cfg.use_gpu:
        # Let cuDNN pick the fastest kernels for the (fixed) input size.
        torch.backends.cudnn.benchmark = True

    datamanager = build_datamanager(cfg)

    print('Building model: {}'.format(cfg.model.name))
    model = build_model(
        name=cfg.model.name,
        num_classes=datamanager.num_train_pids,
        loss=cfg.loss.name,
        pretrained=cfg.model.pretrained,
        use_gpu=cfg.use_gpu,
        dropout_prob=cfg.model.dropout_prob,
        feature_dim=cfg.model.feature_dim,
        fpn=cfg.model.fpn,
        fpn_dim=cfg.model.fpn_dim,
        gap_as_conv=cfg.model.gap_as_conv,
        input_size=(cfg.data.height, cfg.data.width),
        IN_first=cfg.model.IN_first
    )
    num_params, flops = compute_model_complexity(model, (1, 3, cfg.data.height, cfg.data.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        load_pretrained_weights(model, cfg.model.load_weights)

    if cfg.use_gpu:
        model = nn.DataParallel(model).cuda()

    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = build_lr_scheduler(optimizer, **lr_scheduler_kwargs(cfg))

    if cfg.model.resume and check_isfile(cfg.model.resume):
        # NOTE(review): mutates args after parsing; presumably the engine
        # reads start_epoch elsewhere — confirm against engine.run().
        args.start_epoch = resume_from_checkpoint(cfg.model.resume, model, optimizer=optimizer)

    # Optional OpenVINO-accelerated model (used only when a name is set).
    if len(cfg.model.openvino.name):
        openvino_model = OpenVINOModel(cfg.model.openvino.name, cfg.model.openvino.cpu_extension)
    else:
        openvino_model = None

    print('Building {}-engine for {}-reid'.format(cfg.loss.name, cfg.data.type))
    engine = build_engine(cfg, datamanager, model, optimizer, scheduler, openvino_model=openvino_model)
    engine.run(**engine_run_kwargs(cfg))
# Script entry point.
if __name__ == '__main__':
    main()
| 34.719424 | 108 | 0.717157 |
1573d001d141534c722978352c920b2749e72a3d | 7,114 | py | Python | mmdet/core/data_structures/instance_data.py | mrzhuzhe/mmdetection | c04ca2c2a65500bc248a5d2ab6ace5b15f00064d | [
"Apache-2.0"
] | null | null | null | mmdet/core/data_structures/instance_data.py | mrzhuzhe/mmdetection | c04ca2c2a65500bc248a5d2ab6ace5b15f00064d | [
"Apache-2.0"
] | null | null | null | mmdet/core/data_structures/instance_data.py | mrzhuzhe/mmdetection | c04ca2c2a65500bc248a5d2ab6ace5b15f00064d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import itertools
import numpy as np
import torch
from .general_data import GeneralData
class InstanceData(GeneralData):
    """Data structure for instance-level annotations or predictions.

    Subclass of :class:`GeneralData`. All value in `data_fields`
    should have the same length. This design refer to
    https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/instances.py # noqa E501

    Examples:
        >>> from mmdet.core import InstanceData
        >>> import numpy as np
        >>> img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3))
        >>> results = InstanceData(img_meta)
        >>> 'img_shape' in results
        True
        >>> results.det_labels = torch.LongTensor([0, 1, 2, 3])
        >>> results["det_scores"] = torch.Tensor([0.01, 0.7, 0.6, 0.3])
        >>> results["det_masks"] = np.ndarray(4, 2, 2)
        >>> len(results)
        4
        >>> print(results)
        <InstanceData(

            META INFORMATION
        pad_shape: (800, 1216, 3)
        img_shape: (800, 1196, 3)

            PREDICTIONS
        shape of det_labels: torch.Size([4])
        shape of det_masks: (4, 2, 2)
        shape of det_scores: torch.Size([4])

        ) at 0x7fe26b5ca990>
        >>> sorted_results = results[results.det_scores.sort().indices]
        >>> sorted_results.det_scores
        tensor([0.0100, 0.3000, 0.6000, 0.7000])
        >>> sorted_results.det_labels
        tensor([0, 3, 2, 1])
        >>> print(results[results.scores > 0.5])
        <InstanceData(

            META INFORMATION
        pad_shape: (800, 1216, 3)
        img_shape: (800, 1196, 3)

            PREDICTIONS
        shape of det_labels: torch.Size([2])
        shape of det_masks: (2, 2, 2)
        shape of det_scores: torch.Size([2])

        ) at 0x7fe26b6d7790>
        >>> results[results.det_scores > 0.5].det_labels
        tensor([1, 2])
        >>> results[results.det_scores > 0.5].det_scores
        tensor([0.7000, 0.6000])
    """

    def __setattr__(self, name, value):
        """Set a data field, enforcing type and length consistency.

        The two bookkeeping containers may be created once, after which
        they are immutable from the outside; every other attribute is
        treated as an instance-level data field.
        """
        if name in ('_meta_info_fields', '_data_fields'):
            if not hasattr(self, name):
                super().__setattr__(name, value)
            else:
                raise AttributeError(
                    f'{name} has been used as a '
                    f'private attribute, which is immutable. ')
        else:
            # Data fields must be sized containers of instances.
            assert isinstance(value, (torch.Tensor, np.ndarray, list)), \
                f'Can set {type(value)}, only support' \
                f' {(torch.Tensor, np.ndarray, list)}'

            if self._data_fields:
                # All fields describe the same instances, so every new
                # field must match the established length.
                assert len(value) == len(self), f'the length of ' \
                                                f'values {len(value)} is ' \
                                                f'not consistent with' \
                                                f' the length ' \
                                                f'of this :obj:`InstanceData` ' \
                                                f'{len(self)} '
            super().__setattr__(name, value)

    def __getitem__(self, item):
        """
        Args:
            item (str, obj:`slice`,
                obj`torch.LongTensor`, obj:`torch.BoolTensor`):
                get the corresponding values according to item.

        Returns:
            obj:`InstanceData`: Corresponding values.
        """
        assert len(self), ' This is a empty instance'

        assert isinstance(
            item, (str, slice, int, torch.LongTensor, torch.BoolTensor))

        if isinstance(item, str):
            # String indexing is plain attribute access by field name.
            return getattr(self, item)

        if type(item) == int:
            if item >= len(self) or item < -len(self):
                raise IndexError(f'Index {item} out of range!')
            else:
                # keep the dimension
                item = slice(item, None, len(self))

        new_data = self.new()
        if isinstance(item, (torch.Tensor)):
            assert item.dim() == 1, 'Only support to get the' \
                                    ' values along the first dimension.'
            if isinstance(item, torch.BoolTensor):
                assert len(item) == len(self), f'The shape of the' \
                                               f' input(BoolTensor)) ' \
                                               f'{len(item)} ' \
                                               f' does not match the shape ' \
                                               f'of the indexed tensor ' \
                                               f'in results_filed ' \
                                               f'{len(self)} at ' \
                                               f'first dimension. '

            for k, v in self.items():
                if isinstance(v, torch.Tensor):
                    new_data[k] = v[item]
                elif isinstance(v, np.ndarray):
                    # numpy cannot consume tensors directly; move the
                    # index to CPU first.
                    new_data[k] = v[item.cpu().numpy()]
                elif isinstance(v, list):
                    r_list = []
                    # convert to indexes from boolTensor
                    if isinstance(item, torch.BoolTensor):
                        indexes = torch.nonzero(item).view(-1)
                    else:
                        indexes = item
                    for index in indexes:
                        r_list.append(v[index])
                    new_data[k] = r_list
        else:
            # item is a slice
            for k, v in self.items():
                new_data[k] = v[item]
        return new_data

    @staticmethod
    def cat(instances_list):
        """Concat the predictions of all :obj:`InstanceData` in the list.

        Args:
            instances_list (list[:obj:`InstanceData`]): A list
                of :obj:`InstanceData`.

        Returns:
            obj:`InstanceData`
        """
        assert all(
            isinstance(results, InstanceData) for results in instances_list)
        assert len(instances_list) > 0
        if len(instances_list) == 1:
            return instances_list[0]

        # Field names are taken from the first element; each value type
        # is concatenated with its own mechanism.
        new_data = instances_list[0].new()
        for k in instances_list[0]._data_fields:
            values = [results[k] for results in instances_list]
            v0 = values[0]
            if isinstance(v0, torch.Tensor):
                values = torch.cat(values, dim=0)
            elif isinstance(v0, np.ndarray):
                values = np.concatenate(values, axis=0)
            elif isinstance(v0, list):
                values = list(itertools.chain(*values))
            else:
                raise ValueError(
                    f'Can not concat the {k} which is a {type(v0)}')
            new_data[k] = values
        return new_data

    def __len__(self):
        # All data fields share one length, so any field's length is the
        # number of instances.
        if len(self._data_fields):
            for v in self.values():
                return len(v)
        else:
            raise AssertionError('This is an empty `InstanceData`.')
| 37.640212 | 110 | 0.480461 |
40ed75d540a6a729d708798eb27aee454687e0e7 | 1,710 | py | Python | sdk/core/azure-core/azure/core/__init__.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 10 | 2021-05-31T07:18:08.000Z | 2022-03-19T09:20:11.000Z | sdk/core/azure-core/azure/core/__init__.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/core/azure-core/azure/core/__init__.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from ._version import VERSION

# Public package version, sourced from the single canonical _version module.
__version__ = VERSION

from ._pipeline_client import PipelineClient
from ._match_conditions import MatchConditions

__all__ = [
    "PipelineClient",
    "MatchConditions"
]

try:
    # The async client uses async/await syntax, which raises SyntaxError
    # at import time on interpreters that predate it.
    from ._pipeline_client_async import AsyncPipelineClient #pylint: disable=unused-import
    __all__.extend(["AsyncPipelineClient"])
except (ImportError, SyntaxError): # Python <= 3.5
    pass
| 38.863636 | 90 | 0.702339 |
c63cff1ef99441b5def867eaff65a6fc9ab6b093 | 1,850 | py | Python | main.py | emppu-dev/encrypt-decrypt | 84800b51d9033603d57b8f7455ee96b91be03e8b | [
"MIT"
] | null | null | null | main.py | emppu-dev/encrypt-decrypt | 84800b51d9033603d57b8f7455ee96b91be03e8b | [
"MIT"
] | null | null | null | main.py | emppu-dev/encrypt-decrypt | 84800b51d9033603d57b8f7455ee96b91be03e8b | [
"MIT"
] | null | null | null | from cryptography.fernet import Fernet
import json
import os
print("[1] - Encrypt")
print("[2] - Decrypt")
print("[3] - Key options")
valinta = str(input("emppu.cc @> "))
with open('config.json') as f:
config = json.load(f)
key = config.get('key')
if valinta == "1":
os.system("cls")
print("Encrypt")
tiedosto = str(input("File: "))
f = Fernet(key)
with open(tiedosto, 'rb') as original_file:
original = original_file.read()
encrypted = f.encrypt(original)
tiedosto2 = str('enc_')+str(tiedosto)
with open(tiedosto2,'wb') as encrypted_file:
encrypted_file.write(encrypted)
elif valinta == "2":
os.system("cls")
print("Decrypt")
tiedosto = str(input("File: "))
f = Fernet(key)
with open(tiedosto, 'rb') as encrypted_file:
encrypted = encrypted_file.read()
try:
decrypted = f.decrypt(encrypted)
if tiedosto[:4] == "enc_":
tiedosto = tiedosto[4:]
tiedosto2 = str('dec_')+str(tiedosto)
with open(tiedosto2, 'wb') as decrypted_file:
decrypted_file.write(decrypted)
except:
print("[-] Wrong key")
elif valinta == "3":
os.system("cls")
print("[1] - Generate a new key")
print("[2] - Change the key manually")
valinta2 = input("emppu.cc @> ")
if valinta2 == "1":
os.system("cls")
print("Generate a new key")
key = Fernet.generate_key()
key = str(key,'utf-8')
config["key"] = f"{key}"
with open('config.json', 'w') as f:
json.dump(config, f)
elif valinta2 == "2":
os.system("cls")
print("Change the key manually")
key = input("Key: ")
config["key"] = f"{key}"
with open('config.json', 'w') as f:
json.dump(config, f)
print("[+] Done")
| 23.125 | 53 | 0.558919 |
6f404c73e58333c647cf2860c473e5931bda9de0 | 734 | py | Python | consoleproxy/setup.py | mchtech/zstack-utility | 18c7947b083b70b2b23f7f21992ca0ef09ac7e75 | [
"Apache-2.0"
] | null | null | null | consoleproxy/setup.py | mchtech/zstack-utility | 18c7947b083b70b2b23f7f21992ca0ef09ac7e75 | [
"Apache-2.0"
] | null | null | null | consoleproxy/setup.py | mchtech/zstack-utility | 18c7947b083b70b2b23f7f21992ca0ef09ac7e75 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
import sys, os
version = '3.0.0'
setup(name='consoleproxy',
version=version,
description="zstack console proxy agent",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='zstack console proxy',
author='Frank Zhang',
author_email='xing5820@gmail.com',
url='http://zstack.org',
license='Apache License 2',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
install_requires=[
'websockify',
],
entry_points="""
# -*- Entry points: -*-
""",
)
| 27.185185 | 95 | 0.619891 |
011e5d2af6936449eaca4f5f863b78187b91981b | 865 | py | Python | archives/histograma01.py | LBarros77/Python | 283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af | [
"MIT"
] | null | null | null | archives/histograma01.py | LBarros77/Python | 283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af | [
"MIT"
] | null | null | null | archives/histograma01.py | LBarros77/Python | 283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af | [
"MIT"
] | null | null | null | '''
Pasos:
Pida al usuario el nombre del archivo de entrada.
Lea el archivo (si es posible) y cuente todas las letras latinas (las letras mayúsculas y minúsculas se tratan como iguales).
Imprima un histograma simple en orden alfabético (solo se deben presentar recuentos distintos de cero).
Questión:
Crea un archivo de prueba para tu código y verifica si tu histograma contiene resultados válidos.
'''
from sys import path
try:
fl = f"{path[0]}/" + input("Escribe el nobre del archvo: ")
with open(fl, "w") as f:
f.write("aBca")
except Exception as e:
print(e)
dic = {}
try:
f = open(fl, "r")
data = f.read()
f.close()
for k in data:
if k not in dic.keys():
dic[k] = 1
else:
dic[k] += 1
for k, v in dic.items():
print(f"{k} -> {v}")
except Exception as e:
print(e) | 27.03125 | 125 | 0.623121 |
2454b7c04d349f13c69b131e9e7993585ea817a8 | 6,795 | py | Python | repodata_tools/tokens.py | conda-forge/repodata-tools | 6792fa3cc9bb52cbaf6a7ac5c63539c859cab19c | [
"BSD-3-Clause"
] | 1 | 2021-06-16T19:11:30.000Z | 2021-06-16T19:11:30.000Z | repodata_tools/tokens.py | conda-forge/repodata-tools | 6792fa3cc9bb52cbaf6a7ac5c63539c859cab19c | [
"BSD-3-Clause"
] | 3 | 2021-03-23T13:32:15.000Z | 2022-03-01T10:49:51.000Z | repodata_tools/tokens.py | conda-forge/repodata-tools | 6792fa3cc9bb52cbaf6a7ac5c63539c859cab19c | [
"BSD-3-Clause"
] | 1 | 2021-07-02T13:08:32.000Z | 2021-07-02T13:08:32.000Z | import time
import base64
import os
import io
import sys
from contextlib import redirect_stdout, redirect_stderr
import github
import click
import jwt
import requests
from cryptography.hazmat.backends import default_backend
from nacl import encoding, public
def _encrypt_github_secret(public_key, secret_value):
    """Seal ``secret_value`` for GitHub using the repo's Base64 public key.

    Returns the libsodium sealed-box ciphertext, Base64-encoded as text,
    which is the format the GitHub secrets API expects.
    """
    repo_key = public.PublicKey(
        public_key.encode("utf-8"), encoding.Base64Encoder())
    ciphertext = public.SealedBox(repo_key).encrypt(
        secret_value.encode("utf-8"))
    return base64.b64encode(ciphertext).decode("utf-8")
def generate_app_token(app_id, raw_pem):
    """Get an app token.

    Exchanges a GitHub App private key for a short-lived installation
    access token: sign a JWT with the key, list the app installations,
    then request an access token for the first installation.

    Parameters
    ----------
    app_id : str
        The github app ID.
    raw_pem : bytes
        An app private key as bytes.

    Returns
    -------
    gh_token : str
        The github token. May return None if there is an error.
    """
    try:
        # Accept either a raw PEM ("-----BEGIN...") or a base64-encoded PEM.
        if raw_pem[0:1] != b'-':
            raw_pem = base64.b64decode(raw_pem)

        # Swallow any output from the crypto/HTTP calls so secret
        # material never reaches stdout/stderr.
        f = io.StringIO()
        with redirect_stdout(f), redirect_stderr(f):
            private_key = default_backend().load_pem_private_key(raw_pem, None)

            ti = int(time.time())
            # App-level JWT valid for 10 minutes, signed with RS256.
            token = jwt.encode(
                {
                    'iat': ti,
                    'exp': ti + 60*10,
                    'iss': app_id,
                },
                private_key,
                algorithm='RS256',
            )

        if (
            "GITHUB_ACTIONS" in os.environ
            and os.environ["GITHUB_ACTIONS"] == "true"
        ):
            sys.stdout.flush()
            print("masking JWT token for github actions", flush=True)
            # '::add-mask::' tells GitHub Actions to redact this value
            # from all further log output.
            print("::add-mask::%s" % token, flush=True)

        with redirect_stdout(f), redirect_stderr(f):
            r = requests.get(
                "https://api.github.com/app/installations",
                headers={
                    'Authorization': 'Bearer %s' % token,
                    'Accept': 'application/vnd.github.machine-man-preview+json',
                },
            )
            r.raise_for_status()

            # NOTE(review): only the first installation is used — assumes
            # the app is installed exactly once; confirm.
            r = requests.post(
                "https://api.github.com/app/installations/"
                "%s/access_tokens" % r.json()[0]["id"],
                headers={
                    'Authorization': 'Bearer %s' % token,
                    'Accept': 'application/vnd.github.machine-man-preview+json',
                },
            )
            r.raise_for_status()
            gh_token = r.json()["token"]

        if (
            "GITHUB_ACTIONS" in os.environ
            and os.environ["GITHUB_ACTIONS"] == "true"
        ):
            sys.stdout.flush()
            print("masking GITHUB token for github actions", flush=True)
            print("::add-mask::%s" % gh_token, flush=True)
    except Exception:
        # Deliberately best-effort: any failure yields None instead of
        # raising (and avoids printing a traceback that could leak data).
        gh_token = None

    return gh_token
def get_github_client_with_app_token(app_id_env, private_key_env):
    """Get a github client with an app token.

    Parameters
    ----------
    app_id_env : str
        The name of the environment variable with the app id.
    private_key_env : str
        The name of the environment variable with the private key.

    Returns
    -------
    gh : github.Github
        The github client object. May return None if there is an error.
    """
    gh = None
    try:
        token = generate_app_token(
            os.environ[app_id_env],
            os.environ[private_key_env].encode(),
        )
        if token is not None:
            gh = github.Github(token)
    except Exception:
        # Mirror generate_app_token's best-effort contract: never raise.
        gh = None
    return gh
@click.command()
@click.argument("app_id", type=str)
@click.option(
    "--pem",
    type=str,
    default=None,
    help="Path to private key in PEM format."
)
@click.option(
    "--env-var",
    type=str,
    default=None,
    help="Name of environment variable with base64 encoded PEM."
)
def main_gen(app_id, pem, env_var):
    """Generate a GitHub token using app APP_ID from a private key.

    NOTE: The token is printed to stdout, so make sure not to use this command
    in public CI infrastructure.
    """
    if pem is None and env_var is None:
        raise RuntimeError("One of --pem or --env-var must be given!")

    try:
        # Suppress intermediate output; only the token itself is printed.
        f = io.StringIO()
        with redirect_stdout(f), redirect_stderr(f):
            if pem is not None:
                with open(pem, "r") as fp:
                    raw_pem = fp.read().encode()
            else:
                raw_pem = base64.b64decode(os.environ[env_var])

            token = generate_app_token(app_id, raw_pem)

        print(token)
    except Exception:
        # Best-effort: print nothing on failure rather than leak details.
        pass
@click.command()
@click.argument("app_id", type=str)
@click.argument("target_repo", type=str)
@click.argument("secret_name", type=str)
@click.option(
    "--pem",
    type=str,
    default=None,
    help="Path to private key in PEM format."
)
@click.option(
    "--env-var",
    type=str,
    default=None,
    help="Name of environment variable with base64 encoded PEM."
)
def main_push(app_id, target_repo, secret_name, pem, env_var):
    """Generate a GitHub token using app APP_ID from a private key.

    Then push this token to the TARGET_REPO's secrets w/ SECRET_NAME."""
    if pem is None and env_var is None:
        raise RuntimeError("One of --pem or --env-var must be given!")

    try:
        # Suppress all output so the token is never echoed anywhere.
        f = io.StringIO()
        with redirect_stdout(f), redirect_stderr(f):
            if pem is not None:
                with open(pem, "r") as fp:
                    raw_pem = fp.read().encode()
            else:
                raw_pem = base64.b64decode(os.environ[env_var])

            token = generate_app_token(app_id, raw_pem)

            # Fetch the repository public key used to seal secrets.
            rkey = requests.get(
                "https://api.github.com/repos/"
                "%s/actions/secrets/public-key" % target_repo,
                headers={
                    "Authorization": "Bearer %s" % os.environ["GITHUB_TOKEN"],
                    "Accept": "application/vnd.github.v3+json",
                }
            )
            rkey.raise_for_status()

            # Secrets must be encrypted client-side before upload.
            encoded_token = _encrypt_github_secret(rkey.json()["key"], token)

            requests.put(
                "https://api.github.com/repos/%s/actions/secrets/%s" % (
                    target_repo,
                    secret_name,
                ),
                headers={
                    "Authorization": "Bearer %s" % os.environ["GITHUB_TOKEN"],
                    "Accept": "application/vnd.github.v3+json",
                },
                json={
                    "encrypted_value": encoded_token,
                    "key_id": rkey.json()["key_id"],
                }
            ).raise_for_status()
    except Exception:
        # Best-effort: swallow errors so tokens/tracebacks are not printed.
        pass
| 28.792373 | 87 | 0.552759 |
7c93a5a47180b92c73f50e5ae838d567ca54f54f | 1,718 | py | Python | optimization.py | morenoh149/deeplearning | 4460f97ca2e75a9060bd947fac20986c62f25272 | [
"MIT"
] | 122 | 2016-10-25T14:13:08.000Z | 2021-10-10T08:28:44.000Z | optimization.py | Andijenny/deeplearning | 24d2dc9f9710bc74a1b7a9300c59fad2d63767d3 | [
"MIT"
] | null | null | null | optimization.py | Andijenny/deeplearning | 24d2dc9f9710bc74a1b7a9300c59fad2d63767d3 | [
"MIT"
] | 31 | 2016-11-11T00:53:47.000Z | 2021-05-25T15:42:07.000Z | import itertools
import math
import numpy as np
def run_iterations(iterator, max_iterations, abs_tol=1e-20):
    """Run an iterative optimization method such as gradient descent.

    Consumes at most ``max_iterations`` ``(x, cost)`` pairs from
    ``iterator`` and stops early once two consecutive costs agree
    within ``abs_tol``.

    Parameters
    ----------
    iterator : iterator yielding (x, cost) pairs, e.g. ``gradient_descent(...)``.
    max_iterations : maximum number of pairs to consume.
    abs_tol : absolute tolerance used to declare the cost converged.

    Returns
    -------
    (i, x, cost) : index of the last iteration performed, the final
    iterate and its cost.

    Raises
    ------
    ValueError
        If the iterator yields no pairs at all (the previous version
        failed with an obscure ``NameError`` in that case).
    """
    previous_cost = None
    i = x = cost = None
    for i, (x, cost) in enumerate(itertools.islice(iterator, max_iterations)):
        if previous_cost is not None and math.isclose(cost, previous_cost, abs_tol=abs_tol):
            break
        previous_cost = cost
    if i is None:
        raise ValueError('iterator produced no (x, cost) pairs')
    return i, x, cost
def gradient_descent(cost, gradient, initial_value, step_size):
    """Yield (x, cost(x)) pairs produced by fixed-step gradient descent.

    The update is applied with ``-=``, so a mutable array passed as
    ``initial_value`` is updated in place; each yielded iterate is a
    defensive copy.
    """
    point = initial_value
    while True:
        point -= step_size * gradient(point)
        yield np.copy(point), cost(point)
def newtons_method(cost, gradient, hessian, initial_value, step_size):
    """Yield (x, cost(x)) pairs produced by damped Newton's method.

    Each step moves x along the Newton direction H(x)^-1 g(x) scaled by
    ``step_size``; scalar and vector iterates are both supported.
    """
    x = initial_value
    while True:
        if np.isscalar(x):
            x -= step_size * gradient(x) / hessian(x)
        else:
            # More efficient would be np.linalg.solve instead of the
            # explicit inverse; kept equivalent on purpose.
            x -= step_size * np.linalg.inv(hessian(x)) @ gradient(x)
        yield x, cost(x)
def linear_least_square(A, b):
    """Solve min_x ||Ax - b||^2 / 2 by gradient descent from the origin."""
    def cost(x):
        return 0.5 * np.linalg.norm(A @ x - b) ** 2

    def gradient(x):
        return A.T @ (A @ x - b)

    _, x, _ = run_iterations(
        gradient_descent(cost, gradient, np.zeros(A.shape[1]), 1.0), 10000)
    return x
def constrained_linear_least_square(A, b):
    """Solve ``A x = b`` subject to ``||x||_2 <= 1``.

    If the unconstrained solution already lies inside the unit ball it
    is returned directly; otherwise the Lagrange multiplier of the norm
    constraint is found by dual gradient ascent.
    """
    x = np.linalg.solve(A, b)
    if x.T @ x <= 1.0:
        return x
    multiplier = 0.0
    step = 0.1
    for _ in range(10000):
        # Ascend on the dual: grow the multiplier while ||x|| exceeds 1.
        multiplier += step * (x.T @ x - 1.0)
        x = np.linalg.inv(
            A.T @ A + 2.0 * multiplier * np.identity(A.shape[0])) @ A.T @ b
    return x
| 26.430769 | 106 | 0.604191 |
18f1ad66ed3a7dd04dd3702999e2a6633538085d | 940 | py | Python | mqueue/tests/test_admin.py | synw/django-mqueue | 02d68baba29ab33d71d4238b850d44a637fec04a | [
"MIT"
] | 29 | 2016-04-24T11:57:52.000Z | 2022-03-22T17:12:36.000Z | mqueue/tests/test_admin.py | synw/django-mqueue | 02d68baba29ab33d71d4238b850d44a637fec04a | [
"MIT"
] | 4 | 2016-11-02T13:23:03.000Z | 2021-07-23T12:42:03.000Z | mqueue/tests/test_admin.py | synw/django-mqueue | 02d68baba29ab33d71d4238b850d44a637fec04a | [
"MIT"
] | 5 | 2016-05-22T12:34:48.000Z | 2020-03-13T10:54:46.000Z | from django.contrib import admin
from .base import MqueueBaseTest
from mqueue.models import MEvent
from mqueue.admin import link_to_object, link_to_object_admin, MEventAdmin
class MqueueTestAdmin(MqueueBaseTest):
    """Tests for the MEvent admin link helpers and admin class."""

    def test_admin(self):
        """Link helpers render anchors; notes/request are read-only."""
        event, _ = MEvent.objects.get_or_create(
            name="Event name", url="http://url", admin_url="http://admin_url"
        )
        expected_url_link = '<a href="http://url" target="_blank">http://url</a>'
        self.assertEqual(expected_url_link, link_to_object(event))
        expected_admin_link = (
            '<a href="http://admin_url" target="_blank">http://admin_url</a>'
        )
        self.assertEqual(expected_admin_link, link_to_object_admin(event))
        request = self.factory.get("/")

        class TestAdminSite(admin.AdminSite):
            pass

        model_admin = MEventAdmin(MEvent, TestAdminSite)
        self.assertEqual(
            model_admin.get_readonly_fields(request), ("notes", "request"))
| 36.153846 | 80 | 0.656383 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.