hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf7dd03f96fcb34eed73138408ff9e3dd6ebaa4 | 468 | py | Python | stylemuzeapp/migrations/0003_user_full_name.py | Abhisheksoni1/stylemuze | 385ec1ed799914c91bbccfca9086c6eafa7e1b1b | [
"MIT"
] | null | null | null | stylemuzeapp/migrations/0003_user_full_name.py | Abhisheksoni1/stylemuze | 385ec1ed799914c91bbccfca9086c6eafa7e1b1b | [
"MIT"
] | null | null | null | stylemuzeapp/migrations/0003_user_full_name.py | Abhisheksoni1/stylemuze | 385ec1ed799914c91bbccfca9086c6eafa7e1b1b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-19 10:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable ``full_name`` CharField (max 100 chars) to the ``user`` model."""
    # Must run after the migration that added Comment.time_created.
    dependencies = [
        ('stylemuzeapp', '0002_comment_time_created'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='full_name',
            # null=True so existing rows need no backfill when the column is added.
            field=models.CharField(max_length=100, null=True),
        ),
    ]
| 22.285714 | 62 | 0.623932 |
acf7dd14de0a0209288e56595dd1bf9016ef79b9 | 930 | py | Python | tests/cisco/test_cisco_auto_enabled_switch.py | freedge/fake-switches | 939bf12f2d0f673eb2caf19aa7e99aebbaf68ff3 | [
"Apache-2.0"
] | 42 | 2016-06-29T00:29:55.000Z | 2022-02-09T17:55:42.000Z | tests/cisco/test_cisco_auto_enabled_switch.py | freedge/fake-switches | 939bf12f2d0f673eb2caf19aa7e99aebbaf68ff3 | [
"Apache-2.0"
] | 89 | 2015-12-15T15:42:49.000Z | 2021-05-27T16:48:41.000Z | tests/cisco/test_cisco_auto_enabled_switch.py | freedge/fake-switches | 939bf12f2d0f673eb2caf19aa7e99aebbaf68ff3 | [
"Apache-2.0"
] | 40 | 2016-02-04T15:27:19.000Z | 2022-02-22T13:58:38.000Z | from tests.util.protocol_util import SshTester, TelnetTester, with_protocol, ProtocolTest
class TestCiscoAutoEnabledSwitchProtocol(ProtocolTest):
    """Protocol tests against the "cisco-auto-enabled" fake switch model.

    Abstract base: ``__test__ = False`` keeps pytest from collecting it
    directly; the SSH/Telnet subclasses below enable it with a concrete
    tester_class.
    """
    __test__ = False
    test_switch = "cisco-auto-enabled"
    @with_protocol
    def test_enable_command_requires_a_password(self, t):
        # NOTE(review): despite the name, this verifies that `enable` drops
        # straight to the privileged prompt WITHOUT a password prompt on the
        # auto-enabled switch model, and that config mode is then reachable.
        t.write("enable")
        t.read("my_switch#")
        t.write("terminal length 0")
        t.read("my_switch#")
        t.write("terminal width 0")
        t.read("my_switch#")
        t.write("configure terminal")
        t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
        t.read("my_switch(config)#")
        t.write("exit")
        t.read("my_switch#")
class TestCiscoSwitchProtocolSSH(TestCiscoAutoEnabledSwitchProtocol):
    """Run the auto-enabled switch protocol tests over SSH."""
    __test__ = True
    tester_class = SshTester
class TestCiscoSwitchProtocolTelnet(TestCiscoAutoEnabledSwitchProtocol):
    """Run the auto-enabled switch protocol tests over Telnet."""
    __test__ = True
    tester_class = TelnetTester
| 30 | 89 | 0.696774 |
acf7dd26cb27fbd10a20a97a07ec857376dc30e1 | 3,363 | py | Python | commands/notify.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | commands/notify.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | commands/notify.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Notification utility
# ----------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import print_function
# NOC modules
from noc.core.management.base import BaseCommand
from noc.main.models.template import Template
from noc.main.models.notificationgroup import NotificationGroup
class Command(BaseCommand):
    """CLI command that sends a notification to one or more notification groups.

    The message is taken either from --subject/--body (or --body-file), or
    rendered from a named Template with --var key=value substitutions.
    """
    def add_arguments(self, parser):
        """Register CLI options on the argparse parser."""
        parser.add_argument("--dry-run", action="store_true", help="Dry Run (Do not send message)")
        parser.add_argument(
            "--notification-group",
            action="append",
            dest="notification_group",
            help="Notification group name",
            required=True,
        )
        parser.add_argument("--template", action="store", dest="template", help="Template name")
        parser.add_argument("--subject", action="store", dest="subject", help="Message subject")
        parser.add_argument("--body", action="store", dest="body", help="Message body")
        parser.add_argument(
            "--body-file", action="store", dest="body_file", help="Message body file"
        )
        parser.add_argument(
            "--var", action="append", dest="var", help="Template variable in key=value form"
        )
    def handle(
        self,
        notification_group=None,
        template=None,
        subject=None,
        body=None,
        body_file=None,
        var=None,
        debug=False,
        dry_run=False,
        *args,
        **kwargs
    ):
        """Resolve groups, build subject/body, and dispatch the notification.

        Terminates via self.die() on any invalid group/template name or when
        neither a template nor an explicit subject+body is supplied.
        """
        # Resolve every --notification-group name; abort on the first unknown one.
        groups = []
        for ng in notification_group:
            g = NotificationGroup.get_by_name(ng)
            if not g:
                self.die("Invalid notification group '%s'" % ng)
            groups += [g]
        if subject and (body or body_file):
            # Get message from command line
            if body_file:
                with open(body_file) as f:
                    body = f.read()
        elif template:
            # Get message from template
            t = Template.get_by_name(template)
            if not t:
                self.die("Invalid template name '%s'" % template)
            # Convert variables
            # Each --var is "key=value"; entries without '=' are silently skipped.
            var = var or []
            ctx = {}
            for x in var:
                if "=" not in x:
                    continue
                k, v = x.split("=", 1)
                ctx[k.strip()] = v.strip()
            subject = t.render_subject(**ctx)
            body = t.render_body(**ctx)
        else:
            self.die("Either '--template' or '--subject' + '--body' parameters must be set")
        if not subject:
            self.die("Subject is empty")
        if not body:
            self.die("Body is empty")
        if self.is_debug:
            self.print("Subject: %s" % subject)
            self.print("---[Body]---------")
            self.print(body)
            self.print("---[End]----------")
        # --dry-run still resolves groups and renders the message, but skips sending.
        for g in groups:
            if self.is_debug:
                self.print("Sending message to group: %s" % g.name)
            if not dry_run:
                g.notify(subject=subject, body=body)
if __name__ == "__main__":
Command().run()
| 34.670103 | 99 | 0.502825 |
acf7ddb61aacfdb50606dcf271822eaec5caffc0 | 918 | py | Python | awacs/groundtruthlabeling.py | michael-k/awacs | ed3dc822d268f10b0cd83feb90fd279277e54ed4 | [
"BSD-2-Clause"
] | 358 | 2015-01-01T05:11:05.000Z | 2022-03-20T14:11:39.000Z | awacs/groundtruthlabeling.py | cloudtools/awacs | f66550a812073f4e3ebd545279a5a1e6856cf39d | [
"BSD-2-Clause"
] | 171 | 2015-01-17T00:32:48.000Z | 2022-03-28T02:02:57.000Z | awacs/groundtruthlabeling.py | michael-k/awacs | ed3dc822d268f10b0cd83feb90fd279277e54ed4 | [
"BSD-2-Clause"
] | 100 | 2015-01-04T16:34:34.000Z | 2022-02-21T06:17:17.000Z | # Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon GroundTruth Labeling"
prefix = "groundtruthlabeling"
class Action(BaseAction):
    """IAM policy action scoped to the ``groundtruthlabeling`` service prefix."""
    def __init__(self, action: str = None) -> None:
        super().__init__(prefix, action)
class ARN(BaseARN):
    """ARN builder for ``groundtruthlabeling`` resources."""
    def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
        super().__init__(
            service=prefix, resource=resource, region=region, account=account
        )
# Individual GroundTruth Labeling API actions exposed by this module.
AssociatePatchToManifestJob = Action("AssociatePatchToManifestJob")
DescribeConsoleJob = Action("DescribeConsoleJob")
ListDatasetObjects = Action("ListDatasetObjects")
RunFilterOrSampleDatasetJob = Action("RunFilterOrSampleDatasetJob")
RunGenerateManifestByCrawlingJob = Action("RunGenerateManifestByCrawlingJob")
acf7de6dbaf7f7c40915680c7b4f9b10bbb9f747 | 685 | py | Python | backend/migrations/versions/105458005ed1_adding_group_to_employers.py | Healthcare-NOW/poppwatch | b8545f40699ef521f4df2a2f2f4edcf3c5b1cc04 | [
"MIT"
] | 1 | 2020-09-09T23:06:36.000Z | 2020-09-09T23:06:36.000Z | backend/migrations/versions/105458005ed1_adding_group_to_employers.py | Healthcare-NOW/fec-watch | b8545f40699ef521f4df2a2f2f4edcf3c5b1cc04 | [
"MIT"
] | 9 | 2020-07-18T15:09:59.000Z | 2022-02-27T00:53:47.000Z | backend/migrations/versions/105458005ed1_adding_group_to_employers.py | Healthcare-NOW/pop-donation-tracker | b8545f40699ef521f4df2a2f2f4edcf3c5b1cc04 | [
"MIT"
] | null | null | null | """Adding category to employers.
Revision ID: 105458005ed1
Revises: 25f676d30c10
Create Date: 2020-03-21 15:52:13.031238
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '105458005ed1'
down_revision = '25f676d30c10'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add the nullable ``group`` column to ``flagged_employer``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('flagged_employer', sa.Column('group', sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the ``group`` column from ``flagged_employer``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('flagged_employer', 'group')
    # ### end Alembic commands ###
| 23.62069 | 85 | 0.70073 |
acf7dfc5ac5cb29d811fa431ac3a0c8fdbd25e8e | 1,439 | py | Python | src/fastx/head.py | sjin09/fastx | 2a4717eb64eff6655b716862b45b6b50f6bb0bc7 | [
"MIT"
] | null | null | null | src/fastx/head.py | sjin09/fastx | 2a4717eb64eff6655b716862b45b6b50f6bb0bc7 | [
"MIT"
] | null | null | null | src/fastx/head.py | sjin09/fastx | 2a4717eb64eff6655b716862b45b6b50f6bb0bc7 | [
"MIT"
] | null | null | null | ## modules
import gzip
from Bio import SeqIO
from fastx.util import chunkstring
def fasta_head(infile, number, outfile):
    """Write the first `number` FASTA records from `infile` to `outfile`.

    Plain ``.fa``/``.fasta`` paths are opened as text; anything else is
    assumed to be gzip-compressed and opened via ``gzip.open(..., "rt")``.
    Sequence lines are re-wrapped with ``chunkstring``.

    Fix over the original: the input handle is now closed (the original
    leaked the open gzip/text handle).
    """
    counter = 0
    handle = (
        open(infile)
        if infile.endswith((".fa", ".fasta"))
        else gzip.open(infile, "rt")
    )
    with handle:  # ensure the handle is always closed
        for seq in SeqIO.parse(handle, "fasta"):
            counter += 1
            outfile.write(">{}\n".format(seq.id))
            for chunk in chunkstring(seq.seq):
                outfile.write("{}\n".format(chunk))
            if counter == number:
                break
def fastq_head(infile, threshold, outfile):
    """Write the first `threshold` FASTQ records from `infile` to `outfile`.

    Fixes over the original:
    * Plain ``.fq``/``.fastq`` files are now opened in binary mode. The
      original used text-mode ``open()``, so every line was ``str`` and the
      unconditional ``.decode("utf-8")`` below raised AttributeError.
    * The input handle is closed via ``with`` (the original leaked it).
    """
    counter = 0
    # Both branches yield bytes lines, decoded uniformly below.
    seqfile = (
        open(infile, "rb")
        if infile.endswith((".fq", ".fastq"))
        else gzip.open(infile)
    )
    with seqfile:
        for i, j in enumerate(seqfile):
            k = i % 4  # position within the 4-line FASTQ record
            if k == 0:  # header line (@read_id)
                seq_id = j.strip().decode("utf-8")
            elif k == 1:  # sequence line
                seq = j.strip().decode("utf-8")
            elif k == 2:  # separator line ("+")
                continue
            elif k == 3:  # base-quality line: record complete, emit it
                seq_bq = j.strip().decode("utf-8")
                outfile.write("{}\n{}\n+\n{}\n".format(seq_id, seq, seq_bq))
                counter += 1
                if counter == threshold:
                    break
def seq_head(infile, number, outfile):
    """Dispatch to fasta_head or fastq_head based on the file extension.

    Fix over the original: a path with an unrecognized extension now raises
    ``ValueError`` instead of silently doing nothing.

    Raises:
        ValueError: if `infile` has no recognized FASTA/FASTQ extension.
    """
    if infile.endswith((".fa", ".fa.gz", ".fasta", ".fasta.gz")):
        fasta_head(infile, number, outfile)
    elif infile.endswith((".fq", ".fq.gz", ".fastq", ".fastq.gz")):
        fastq_head(infile, number, outfile)
    else:
        raise ValueError(
            "unrecognized sequence file extension: {}".format(infile)
        )
acf7e0d073b3bdd36e4d6b71151ed2aec34ee341 | 52,029 | py | Python | tests/unit/gapic/vision_v1p3beta1/test_image_annotator.py | TeoCrack/python-vision | 190f3ebee303ad124ebe42322f07d0d2d473e005 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/vision_v1p3beta1/test_image_annotator.py | TeoCrack/python-vision | 190f3ebee303ad124ebe42322f07d0d2d473e005 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/vision_v1p3beta1/test_image_annotator.py | TeoCrack/python-vision | 190f3ebee303ad124ebe42322f07d0d2d473e005 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google import auth
from google.api_core import client_options
from google.api_core import exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.vision_v1p3beta1.services.image_annotator import (
ImageAnnotatorAsyncClient,
)
from google.cloud.vision_v1p3beta1.services.image_annotator import ImageAnnotatorClient
from google.cloud.vision_v1p3beta1.services.image_annotator import transports
from google.cloud.vision_v1p3beta1.types import geometry
from google.cloud.vision_v1p3beta1.types import image_annotator
from google.cloud.vision_v1p3beta1.types import product_search
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.type import latlng_pb2 as latlng # type: ignore
def client_cert_source_callback():
    """Stand-in client certificate source: return dummy (cert, key) bytes."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a distinct endpoint when the client's default is a localhost
    address (so mTLS endpoint derivation can be exercised in tests);
    otherwise return the client's default endpoint unchanged."""
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their
    mtls.* counterparts, passes through already-mtls and non-Google hosts,
    and returns None for None."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"
    assert ImageAnnotatorClient._get_default_mtls_endpoint(None) is None
    assert (
        ImageAnnotatorClient._get_default_mtls_endpoint(api_endpoint)
        == api_mtls_endpoint
    )
    # Already-mtls endpoints are idempotent.
    assert (
        ImageAnnotatorClient._get_default_mtls_endpoint(api_mtls_endpoint)
        == api_mtls_endpoint
    )
    assert (
        ImageAnnotatorClient._get_default_mtls_endpoint(sandbox_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        ImageAnnotatorClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
        == sandbox_mtls_endpoint
    )
    # Non-Google hosts are returned unchanged.
    assert (
        ImageAnnotatorClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
    )
@pytest.mark.parametrize(
"client_class", [ImageAnnotatorClient, ImageAnnotatorAsyncClient,]
)
def test_image_annotator_client_from_service_account_info(client_class):
    """from_service_account_info builds a client whose transport carries the
    factory-produced credentials and the default vision host."""
    creds = credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "vision.googleapis.com:443"
@pytest.mark.parametrize(
"client_class", [ImageAnnotatorClient, ImageAnnotatorAsyncClient,]
)
def test_image_annotator_client_from_service_account_file(client_class):
    """from_service_account_file and its from_service_account_json alias both
    yield clients using the factory-produced credentials."""
    creds = credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        # The *_json variant is an alias for *_file.
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "vision.googleapis.com:443"
def test_image_annotator_client_get_transport_class():
    """get_transport_class returns the default (gRPC) transport, and the
    explicit "grpc" name resolves to the same class."""
    transport = ImageAnnotatorClient.get_transport_class()
    available_transports = [
        transports.ImageAnnotatorGrpcTransport,
    ]
    assert transport in available_transports
    transport = ImageAnnotatorClient.get_transport_class("grpc")
    assert transport == transports.ImageAnnotatorGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport, "grpc"),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
ImageAnnotatorClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorClient),
)
@mock.patch.object(
ImageAnnotatorAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorAsyncClient),
)
def test_image_annotator_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(ImageAnnotatorClient, "get_transport_class") as gtc:
transport = transport_class(credentials=credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(ImageAnnotatorClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport, "grpc", "true"),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport, "grpc", "false"),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
ImageAnnotatorClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorClient),
)
@mock.patch.object(
ImageAnnotatorAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ImageAnnotatorAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_image_annotator_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport, "grpc"),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_image_annotator_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(ImageAnnotatorClient, transports.ImageAnnotatorGrpcTransport, "grpc"),
(
ImageAnnotatorAsyncClient,
transports.ImageAnnotatorGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_image_annotator_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_image_annotator_client_client_options_from_dict():
    """client_options passed as a plain dict (not ClientOptions) is honored:
    the custom api_endpoint reaches the transport constructor."""
    with mock.patch(
        "google.cloud.vision_v1p3beta1.services.image_annotator.transports.ImageAnnotatorGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = ImageAnnotatorClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_batch_annotate_images(
    transport: str = "grpc", request_type=image_annotator.BatchAnnotateImagesRequest
):
    """batch_annotate_images forwards the request to the gRPC stub and returns
    the stub's BatchAnnotateImagesResponse."""
    client = ImageAnnotatorClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_annotate_images), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = image_annotator.BatchAnnotateImagesResponse()
        response = client.batch_annotate_images(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == image_annotator.BatchAnnotateImagesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, image_annotator.BatchAnnotateImagesResponse)
def test_batch_annotate_images_from_dict():
    """Exercise batch_annotate_images with a dict-typed request."""
    test_batch_annotate_images(request_type=dict)
def test_batch_annotate_images_empty_call():
    """Calling batch_annotate_images with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = ImageAnnotatorClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_annotate_images), "__call__"
    ) as call:
        client.batch_annotate_images()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == image_annotator.BatchAnnotateImagesRequest()
@pytest.mark.asyncio
async def test_batch_annotate_images_async(
transport: str = "grpc_asyncio",
request_type=image_annotator.BatchAnnotateImagesRequest,
):
client = ImageAnnotatorAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
image_annotator.BatchAnnotateImagesResponse()
)
response = await client.batch_annotate_images(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == image_annotator.BatchAnnotateImagesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, image_annotator.BatchAnnotateImagesResponse)
@pytest.mark.asyncio
async def test_batch_annotate_images_async_from_dict():
await test_batch_annotate_images_async(request_type=dict)
def test_batch_annotate_images_flattened():
client = ImageAnnotatorClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = image_annotator.BatchAnnotateImagesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_annotate_images(
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].requests == [
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
]
def test_batch_annotate_images_flattened_error():
client = ImageAnnotatorClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_annotate_images(
image_annotator.BatchAnnotateImagesRequest(),
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
)
@pytest.mark.asyncio
async def test_batch_annotate_images_flattened_async():
client = ImageAnnotatorAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_annotate_images), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = image_annotator.BatchAnnotateImagesResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
image_annotator.BatchAnnotateImagesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_annotate_images(
requests=[
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].requests == [
image_annotator.AnnotateImageRequest(
image=image_annotator.Image(content=b"content_blob")
)
]
@pytest.mark.asyncio
async def test_batch_annotate_images_flattened_error_async():
    """Async variant: a request object plus flattened fields must raise."""
    client = ImageAnnotatorAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.batch_annotate_images(
            image_annotator.BatchAnnotateImagesRequest(),
            requests=[
                image_annotator.AnnotateImageRequest(
                    image=image_annotator.Image(content=b"content_blob")
                )
            ],
        )


def test_async_batch_annotate_files(
    transport: str = "grpc", request_type=image_annotator.AsyncBatchAnnotateFilesRequest
):
    """An empty request should reach the stub and come back wrapped as a Future."""
    client = ImageAnnotatorClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.async_batch_annotate_files), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.async_batch_annotate_files(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == image_annotator.AsyncBatchAnnotateFilesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


def test_async_batch_annotate_files_from_dict():
    """The same flow should accept a plain dict as the request."""
    test_async_batch_annotate_files(request_type=dict)


def test_async_batch_annotate_files_empty_call():
    """A call with no arguments at all should still send the default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = ImageAnnotatorClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.async_batch_annotate_files), "__call__"
    ) as call:
        client.async_batch_annotate_files()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == image_annotator.AsyncBatchAnnotateFilesRequest()
@pytest.mark.asyncio
async def test_async_batch_annotate_files_async(
    transport: str = "grpc_asyncio",
    request_type=image_annotator.AsyncBatchAnnotateFilesRequest,
):
    """Async client: an empty request should reach the stub and yield a Future."""
    client = ImageAnnotatorAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.async_batch_annotate_files), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.async_batch_annotate_files(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == image_annotator.AsyncBatchAnnotateFilesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


@pytest.mark.asyncio
async def test_async_batch_annotate_files_async_from_dict():
    """Async variant should also accept a plain dict request."""
    await test_async_batch_annotate_files_async(request_type=dict)
def test_async_batch_annotate_files_flattened():
    """The flattened `requests` kwarg should be packed into the request object."""
    client = ImageAnnotatorClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.async_batch_annotate_files), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.async_batch_annotate_files(
            requests=[
                image_annotator.AsyncAnnotateFileRequest(
                    input_config=image_annotator.InputConfig(
                        gcs_source=image_annotator.GcsSource(uri="uri_value")
                    )
                )
            ],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].requests == [
            image_annotator.AsyncAnnotateFileRequest(
                input_config=image_annotator.InputConfig(
                    gcs_source=image_annotator.GcsSource(uri="uri_value")
                )
            )
        ]


def test_async_batch_annotate_files_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = ImageAnnotatorClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.async_batch_annotate_files(
            image_annotator.AsyncBatchAnnotateFilesRequest(),
            requests=[
                image_annotator.AsyncAnnotateFileRequest(
                    input_config=image_annotator.InputConfig(
                        gcs_source=image_annotator.GcsSource(uri="uri_value")
                    )
                )
            ],
        )
@pytest.mark.asyncio
async def test_async_batch_annotate_files_flattened_async():
    """Flattened keyword args should be packed into the request object (async)."""
    client = ImageAnnotatorAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.async_batch_annotate_files), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (A plain Operation assignment here was dead code: it was
        # immediately overwritten by the awaitable fake below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.async_batch_annotate_files(
            requests=[
                image_annotator.AsyncAnnotateFileRequest(
                    input_config=image_annotator.InputConfig(
                        gcs_source=image_annotator.GcsSource(uri="uri_value")
                    )
                )
            ],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].requests == [
            image_annotator.AsyncAnnotateFileRequest(
                input_config=image_annotator.InputConfig(
                    gcs_source=image_annotator.GcsSource(uri="uri_value")
                )
            )
        ]
@pytest.mark.asyncio
async def test_async_batch_annotate_files_flattened_error_async():
    """Async variant: a request object plus flattened fields must raise."""
    client = ImageAnnotatorAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.async_batch_annotate_files(
            image_annotator.AsyncBatchAnnotateFilesRequest(),
            requests=[
                image_annotator.AsyncAnnotateFileRequest(
                    input_config=image_annotator.InputConfig(
                        gcs_source=image_annotator.GcsSource(uri="uri_value")
                    )
                )
            ],
        )


def test_credentials_transport_error():
    """Credentials, credentials_file, or scopes may not accompany a transport instance."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.ImageAnnotatorGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = ImageAnnotatorClient(
            credentials=credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.ImageAnnotatorGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = ImageAnnotatorClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.ImageAnnotatorGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = ImageAnnotatorClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A pre-built transport instance should be adopted by the client as-is."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.ImageAnnotatorGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    client = ImageAnnotatorClient(transport=transport)
    assert client.transport is transport


def test_transport_get_channel():
    """Both sync and async transports must expose their underlying gRPC channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.ImageAnnotatorGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.ImageAnnotatorGrpcAsyncIOTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.ImageAnnotatorGrpcTransport,
        transports.ImageAnnotatorGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports should fall back to application default credentials (ADC)."""
    # Test default credentials are used if not provided.
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()


def test_transport_grpc_default():
    """The sync client should default to the gRPC transport."""
    # A client should use the gRPC transport by default.
    client = ImageAnnotatorClient(credentials=credentials.AnonymousCredentials(),)
    assert isinstance(client.transport, transports.ImageAnnotatorGrpcTransport,)


def test_image_annotator_base_transport_error():
    """Supplying both credentials and credentials_file must raise."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(exceptions.DuplicateCredentialArgs):
        transport = transports.ImageAnnotatorTransport(
            credentials=credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_image_annotator_base_transport():
    """Every RPC on the abstract base transport must raise NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.vision_v1p3beta1.services.image_annotator.transports.ImageAnnotatorTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.ImageAnnotatorTransport(
            credentials=credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "batch_annotate_images",
        "async_batch_annotate_files",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client


def test_image_annotator_base_transport_with_credentials_file():
    """A credentials file should be loaded with the Vision scopes and quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        auth, "load_credentials_from_file"
    ) as load_creds, mock.patch(
        "google.cloud.vision_v1p3beta1.services.image_annotator.transports.ImageAnnotatorTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (credentials.AnonymousCredentials(), None)
        transport = transports.ImageAnnotatorTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-vision",
            ),
            quota_project_id="octopus",
        )


def test_image_annotator_base_transport_with_adc():
    """Without explicit credentials the base transport should resolve ADC."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(auth, "default") as adc, mock.patch(
        "google.cloud.vision_v1p3beta1.services.image_annotator.transports.ImageAnnotatorTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transport = transports.ImageAnnotatorTransport()
        adc.assert_called_once()
def test_image_annotator_auth_adc():
    """Client construction without credentials should request ADC with Vision scopes."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        ImageAnnotatorClient()
        adc.assert_called_once_with(
            scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-vision",
            ),
            quota_project_id=None,
        )


def test_image_annotator_transport_auth_adc():
    """Transport construction without credentials should request ADC, passing the quota project through."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transports.ImageAnnotatorGrpcTransport(
            host="squid.clam.whelk", quota_project_id="octopus"
        )
        adc.assert_called_once_with(
            scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-vision",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.ImageAnnotatorGrpcTransport,
        transports.ImageAnnotatorGrpcAsyncIOTransport,
    ],
)
def test_image_annotator_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS setup: explicit ssl_channel_credentials wins; otherwise the
    client_cert_source_for_mtls callback supplies the cert/key pair."""
    cred = credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-vision",
            ),
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_image_annotator_host_no_port():
    """An api_endpoint without a port should default to :443."""
    client = ImageAnnotatorClient(
        credentials=credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="vision.googleapis.com"
        ),
    )
    assert client.transport._host == "vision.googleapis.com:443"


def test_image_annotator_host_with_port():
    """An explicit port in api_endpoint should be preserved."""
    client = ImageAnnotatorClient(
        credentials=credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="vision.googleapis.com:8000"
        ),
    )
    assert client.transport._host == "vision.googleapis.com:8000"
def test_image_annotator_grpc_transport_channel():
    """A caller-supplied channel should be used verbatim by the gRPC transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.ImageAnnotatorGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8 (E711): comparison with None uses identity, not equality.
    assert transport._ssl_channel_credentials is None
def test_image_annotator_grpc_asyncio_transport_channel():
    """A caller-supplied channel should be used verbatim by the asyncio transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.ImageAnnotatorGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8 (E711): comparison with None uses identity, not equality.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.ImageAnnotatorGrpcTransport,
        transports.ImageAnnotatorGrpcAsyncIOTransport,
    ],
)
def test_image_annotator_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated api_mtls_endpoint + client_cert_source should warn and build
    an mTLS channel from the callback's cert/key pair."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=(
                    "https://www.googleapis.com/auth/cloud-platform",
                    "https://www.googleapis.com/auth/cloud-vision",
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.ImageAnnotatorGrpcTransport,
        transports.ImageAnnotatorGrpcAsyncIOTransport,
    ],
)
def test_image_annotator_transport_channel_mtls_with_adc(transport_class):
    """Deprecated api_mtls_endpoint without a cert source should warn and use
    ADC-derived SSL credentials for the mTLS channel."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=(
                    "https://www.googleapis.com/auth/cloud-platform",
                    "https://www.googleapis.com/auth/cloud-vision",
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_image_annotator_grpc_lro_client():
    """The gRPC transport should lazily build and cache an operations client."""
    client = ImageAnnotatorClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client


def test_image_annotator_grpc_lro_async_client():
    """The asyncio transport should lazily build and cache an async operations client."""
    client = ImageAnnotatorAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_product_path():
    """product_path should expand project/location/product into the resource name."""
    parts = {"project": "squid", "location": "clam", "product": "whelk"}
    template = "projects/{project}/locations/{location}/products/{product}"
    expected = template.format(**parts)
    actual = ImageAnnotatorClient.product_path(
        parts["project"], parts["location"], parts["product"]
    )
    assert expected == actual
def test_parse_product_path():
    """parse_product_path should invert product_path."""
    expected = {
        "project": "octopus",
        "location": "oyster",
        "product": "nudibranch",
    }
    path = ImageAnnotatorClient.product_path(**expected)
    # Check that the path construction is reversible.
    actual = ImageAnnotatorClient.parse_product_path(path)
    assert expected == actual


def test_product_set_path():
    """product_set_path should expand its components into the resource name."""
    project = "cuttlefish"
    location = "mussel"
    product_set = "winkle"
    expected = "projects/{project}/locations/{location}/productSets/{product_set}".format(
        project=project, location=location, product_set=product_set,
    )
    actual = ImageAnnotatorClient.product_set_path(project, location, product_set)
    assert expected == actual


def test_parse_product_set_path():
    """parse_product_set_path should invert product_set_path."""
    expected = {
        "project": "nautilus",
        "location": "scallop",
        "product_set": "abalone",
    }
    path = ImageAnnotatorClient.product_set_path(**expected)
    # Check that the path construction is reversible.
    actual = ImageAnnotatorClient.parse_product_set_path(path)
    assert expected == actual
def test_common_billing_account_path():
    """common_billing_account_path should expand into billingAccounts/{id}."""
    billing_account = "squid"
    expected = "billingAccounts/{billing_account}".format(
        billing_account=billing_account,
    )
    actual = ImageAnnotatorClient.common_billing_account_path(billing_account)
    assert expected == actual


def test_parse_common_billing_account_path():
    """parse_common_billing_account_path should invert the path helper."""
    expected = {
        "billing_account": "clam",
    }
    path = ImageAnnotatorClient.common_billing_account_path(**expected)
    # Check that the path construction is reversible.
    actual = ImageAnnotatorClient.parse_common_billing_account_path(path)
    assert expected == actual


def test_common_folder_path():
    """common_folder_path should expand into folders/{folder}."""
    folder = "whelk"
    expected = "folders/{folder}".format(folder=folder,)
    actual = ImageAnnotatorClient.common_folder_path(folder)
    assert expected == actual


def test_parse_common_folder_path():
    """parse_common_folder_path should invert the path helper."""
    expected = {
        "folder": "octopus",
    }
    path = ImageAnnotatorClient.common_folder_path(**expected)
    # Check that the path construction is reversible.
    actual = ImageAnnotatorClient.parse_common_folder_path(path)
    assert expected == actual


def test_common_organization_path():
    """common_organization_path should expand into organizations/{org}."""
    organization = "oyster"
    expected = "organizations/{organization}".format(organization=organization,)
    actual = ImageAnnotatorClient.common_organization_path(organization)
    assert expected == actual


def test_parse_common_organization_path():
    """parse_common_organization_path should invert the path helper."""
    expected = {
        "organization": "nudibranch",
    }
    path = ImageAnnotatorClient.common_organization_path(**expected)
    # Check that the path construction is reversible.
    actual = ImageAnnotatorClient.parse_common_organization_path(path)
    assert expected == actual


def test_common_project_path():
    """common_project_path should expand into projects/{project}."""
    project = "cuttlefish"
    expected = "projects/{project}".format(project=project,)
    actual = ImageAnnotatorClient.common_project_path(project)
    assert expected == actual


def test_parse_common_project_path():
    """parse_common_project_path should invert the path helper."""
    expected = {
        "project": "mussel",
    }
    path = ImageAnnotatorClient.common_project_path(**expected)
    # Check that the path construction is reversible.
    actual = ImageAnnotatorClient.parse_common_project_path(path)
    assert expected == actual


def test_common_location_path():
    """common_location_path should expand into projects/{project}/locations/{location}."""
    project = "winkle"
    location = "nautilus"
    expected = "projects/{project}/locations/{location}".format(
        project=project, location=location,
    )
    actual = ImageAnnotatorClient.common_location_path(project, location)
    assert expected == actual


def test_parse_common_location_path():
    """parse_common_location_path should invert the path helper."""
    expected = {
        "project": "scallop",
        "location": "abalone",
    }
    path = ImageAnnotatorClient.common_location_path(**expected)
    # Check that the path construction is reversible.
    actual = ImageAnnotatorClient.parse_common_location_path(path)
    assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
    """client_info should be forwarded to _prep_wrapped_messages for both
    client-level and transport-level construction."""
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.ImageAnnotatorTransport, "_prep_wrapped_messages"
    ) as prep:
        client = ImageAnnotatorClient(
            credentials=credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.ImageAnnotatorTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = ImageAnnotatorClient.get_transport_class()
        transport = transport_class(
            credentials=credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
| 36.691819 | 122 | 0.682735 |
acf7e1b6e9fc0596f8b95add27c24ae5569c7d15 | 251 | py | Python | codegram/users/apps.py | 9592/codegram | 8d9684977063820e4483560d2091f14875c8714a | [
"MIT"
] | null | null | null | codegram/users/apps.py | 9592/codegram | 8d9684977063820e4483560d2091f14875c8714a | [
"MIT"
] | 7 | 2020-09-04T18:48:13.000Z | 2022-02-26T10:13:19.000Z | codegram/users/apps.py | 9592/codegram | 8d9684977063820e4483560d2091f14875c8714a | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class UsersAppConfig(AppConfig):
    """Django app configuration for the ``codegram.users`` app."""

    name = "codegram.users"
    verbose_name = "Users"

    def ready(self):
        """Import signal handlers once the app registry is fully loaded."""
        try:
            # NOTE(review): the app is named "codegram.users" but this imports
            # top-level "users.signals" -- confirm the import path is correct.
            import users.signals  # noqa F401
        except ImportError:
            # Signals module is optional; its absence is not an error.
            pass
acf7e24f5a08f1fff702d418bee1e029e01c22ac | 4,543 | py | Python | Data/get_super_synth_loader.py | giussepi/cyto_CRLM | 4489d5d81c4270ec7b6048ceb2f2a02bfa699177 | [
"Apache-2.0"
] | null | null | null | Data/get_super_synth_loader.py | giussepi/cyto_CRLM | 4489d5d81c4270ec7b6048ceb2f2a02bfa699177 | [
"Apache-2.0"
] | 6 | 2020-03-24T18:11:41.000Z | 2022-03-12T00:16:18.000Z | Data/get_super_synth_loader.py | giussepi/cyto_CRLM | 4489d5d81c4270ec7b6048ceb2f2a02bfa699177 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function, division
import torch
import torch.nn as nn
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
from torch.utils.data import Dataset, DataLoader
import os
from PIL import Image
import pylab as plt
import cv2
from .UnetAugmentation import *
class SynthDataset(Dataset):
    """Synthetic segmentation dataset built from superpixel-shaped masks.

    Each sample is generated on the fly: a random artificial mask is loaded
    from ``mask_dir`` and every segment in it is filled with a random patch
    of a random tissue class drawn from ``root_dir``.
    """

    def __init__(self, root_dir, mask_dir, patch_size=448, transform=None):
        """
        Args:
            root_dir (string): Directory with all the class-labelled patches.
            mask_dir (string): Directory with artificial masks.
            patch_size (int): Side length of the generated square sample.
            transform (callable, optional): Transform applied to (image, mask).
        """
        self.root_dir = root_dir  # image root
        self.mask_dir = mask_dir  # mask directory
        self.transform = transform
        self.filelist = self.get_filelist()
        self.masklist = self.get_masklist()
        # Class codes; a patch file's first character encodes its class.
        self.label_name_list = ['H', 'N', 'F', 'T', 'I', 'M', 'B', 'D', 'C', 'G', 'Y']
        label_point = [(35981, 81083), (93094, 105493), (3888, 31980), (105494, 128220),
                       (81084, 82911), (82912, 93093), (0, 1030), (3617, 3887),
                       (1031, 3616), (31981, 35980), (128221, 128601)]
        self.label_dict = dict(zip(self.label_name_list, label_point))
        self.patch_size = patch_size
        # Hoisted out of get_rand_patch(): the per-class file lists never
        # change after construction, so build them once instead of per call.
        self._flist_sep = [[f for f in self.filelist if f[0] == cls]
                           for cls in self.label_name_list]

    def get_filelist(self):
        """Return the sorted list of patch file names under root_dir."""
        flist = os.listdir(self.root_dir)
        flist.sort()
        return flist

    def get_rand_patch(self, num_seg):
        """Pick ``num_seg`` random patches, one per mask segment.

        Returns:
            (images, labels): BGR images read with cv2 and their class
            indices into ``label_name_list``.
        """
        # Was hard-coded to 11; derive it from the class list instead.
        rand_patch = np.random.randint(0, len(self.label_name_list), size=num_seg)
        images = [
            cv2.imread(self.root_dir + '/' +
                       self._flist_sep[ti][np.random.randint(len(self._flist_sep[ti]))])
            for ti in rand_patch
        ]
        return images, rand_patch

    def get_masklist(self):
        """Return the sorted list of mask file names under mask_dir."""
        flist = os.listdir(self.mask_dir)
        flist.sort()
        return flist

    def __len__(self):
        return len(self.filelist)

    def __getitem__(self, idx):
        # NOTE: samples are generated randomly; ``idx`` only determines the
        # epoch length, not which sample is produced.
        sythetic_image = np.zeros((self.patch_size, self.patch_size, 3))
        # ``np.int`` is a deprecated alias of the builtin int; use int directly.
        sythetic_mask = np.zeros((self.patch_size, self.patch_size), dtype=int)
        mask = cv2.imread(self.mask_dir + self.masklist[np.random.randint(len(self.masklist))], 0)
        mask = mask - mask.min()
        num_seg = mask.max() + 1
        test_images, test_labels = self.get_rand_patch(num_seg)
        for i in range(0, num_seg):
            # Random brightness jitter in [0.8, 1.1) before pasting the patch.
            ttimage = np.array(test_images[i] * (np.random.random_sample() * 0.3 + 0.8),
                               dtype=np.uint8)
            sythetic_image[mask == i] = ttimage[mask == i]
            sythetic_mask[mask == i] = test_labels[i]
        # BUG FIX: previously the raw superpixel ``mask`` (segment indices)
        # was returned while the computed class-label ``sythetic_mask`` was
        # dead code; return the label mask the loop builds.
        return self.transform(sythetic_image, sythetic_mask)
# train_dataset = SynthDataset(root_dir=os.path.expanduser('~')+'/DATA_CRLM/Patches/Patches_Level0/Patches_448/All', mask_dir=os.path.expanduser('~')+'/DATA_CRLM/Patches/Patches_Segment/Patches_Syth_mask_448/', transform=None)
# syn_image, syn_mask = train_dataset.__getitem__(0)
def get_dataloader(batch_size = 1,\
        root_dir=os.path.expanduser('~')+'/DATA_CRLM/Patches/Patches_Level0/Patches_448/All/',\
        mask_dir=os.path.expanduser('~')+'/DATA_CRLM/Patches/Patches_Segment/Patches_Syth_mask_448/',\
        num_workers=4):
    """Build a shuffled DataLoader over SynthDataset with U-Net style augmentation.

    Args:
        batch_size (int): Samples per batch.
        root_dir (str): Directory holding class-labelled image patches.
        mask_dir (str): Directory holding synthetic superpixel masks.
        num_workers (int): Number of DataLoader worker processes.

    Returns:
        torch.utils.data.DataLoader yielding randomly synthesised (image, mask) pairs.
    """
    class UnetAugmentation(object):
        # Joint image/mask augmentation pipeline: photometric jitter, random
        # resized crop, mirror/flip, resize, tensor conversion, normalisation.
        # The ops come from `.UnetAugmentation` via the module's star import.
        def __init__(self, size=448, mean=(0.485,0.456,0.406),std=(0.5,0.5,0.5),scale=(0.64, 1)):
            self.augment = Compose([
                ConvertFromInts(),
                PhotometricDistort(),
                RandomResizedCrop(size=size,scale=scale),
                RandomMirror(),
                RandomFlip(),
                Resize(size),
                ToTensor(),
                Normalize(mean,std),
            ])
        def __call__(self, img, masks):
            return self.augment(img, masks)
    data_transforms = UnetAugmentation()
    train_dataset = SynthDataset(root_dir,mask_dir,transform=data_transforms)
    dataset_loader = torch.utils.data.DataLoader(train_dataset,batch_size=batch_size,\
        shuffle=True,num_workers=num_workers)
    return dataset_loader
| 41.678899 | 280 | 0.600264 |
acf7e2578d8dc727f622b04393e8fb18ab34bf1b | 21,723 | py | Python | sdk/python/pulumi_azure_native/network/v20190701/express_route_circuit_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20190701/express_route_circuit_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20190701/express_route_circuit_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ExpressRouteCircuitConnectionArgs', 'ExpressRouteCircuitConnection']
@pulumi.input_type
class ExpressRouteCircuitConnectionArgs:
def __init__(__self__, *,
circuit_name: pulumi.Input[str],
peering_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
address_prefix: Optional[pulumi.Input[str]] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
connection_name: Optional[pulumi.Input[str]] = None,
express_route_circuit_peering: Optional[pulumi.Input['SubResourceArgs']] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_express_route_circuit_peering: Optional[pulumi.Input['SubResourceArgs']] = None):
"""
The set of arguments for constructing a ExpressRouteCircuitConnection resource.
:param pulumi.Input[str] circuit_name: The name of the express route circuit.
:param pulumi.Input[str] peering_name: The name of the peering.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] address_prefix: /29 IP address space to carve out Customer addresses for tunnels.
:param pulumi.Input[str] authorization_key: The authorization key.
:param pulumi.Input[str] connection_name: The name of the express route circuit connection.
:param pulumi.Input['SubResourceArgs'] express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the circuit initiating connection.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input['SubResourceArgs'] peer_express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the peered circuit.
"""
pulumi.set(__self__, "circuit_name", circuit_name)
pulumi.set(__self__, "peering_name", peering_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if address_prefix is not None:
pulumi.set(__self__, "address_prefix", address_prefix)
if authorization_key is not None:
pulumi.set(__self__, "authorization_key", authorization_key)
if connection_name is not None:
pulumi.set(__self__, "connection_name", connection_name)
if express_route_circuit_peering is not None:
pulumi.set(__self__, "express_route_circuit_peering", express_route_circuit_peering)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if peer_express_route_circuit_peering is not None:
pulumi.set(__self__, "peer_express_route_circuit_peering", peer_express_route_circuit_peering)
@property
@pulumi.getter(name="circuitName")
def circuit_name(self) -> pulumi.Input[str]:
"""
The name of the express route circuit.
"""
return pulumi.get(self, "circuit_name")
@circuit_name.setter
def circuit_name(self, value: pulumi.Input[str]):
pulumi.set(self, "circuit_name", value)
@property
@pulumi.getter(name="peeringName")
def peering_name(self) -> pulumi.Input[str]:
"""
The name of the peering.
"""
return pulumi.get(self, "peering_name")
@peering_name.setter
def peering_name(self, value: pulumi.Input[str]):
pulumi.set(self, "peering_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> Optional[pulumi.Input[str]]:
"""
/29 IP address space to carve out Customer addresses for tunnels.
"""
return pulumi.get(self, "address_prefix")
@address_prefix.setter
def address_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "address_prefix", value)
@property
@pulumi.getter(name="authorizationKey")
def authorization_key(self) -> Optional[pulumi.Input[str]]:
"""
The authorization key.
"""
return pulumi.get(self, "authorization_key")
@authorization_key.setter
def authorization_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authorization_key", value)
@property
@pulumi.getter(name="connectionName")
def connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the express route circuit connection.
"""
return pulumi.get(self, "connection_name")
@connection_name.setter
def connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "connection_name", value)
@property
@pulumi.getter(name="expressRouteCircuitPeering")
def express_route_circuit_peering(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
Reference to Express Route Circuit Private Peering Resource of the circuit initiating connection.
"""
return pulumi.get(self, "express_route_circuit_peering")
@express_route_circuit_peering.setter
def express_route_circuit_peering(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "express_route_circuit_peering", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="peerExpressRouteCircuitPeering")
def peer_express_route_circuit_peering(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
Reference to Express Route Circuit Private Peering Resource of the peered circuit.
"""
return pulumi.get(self, "peer_express_route_circuit_peering")
@peer_express_route_circuit_peering.setter
def peer_express_route_circuit_peering(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "peer_express_route_circuit_peering", value)
class ExpressRouteCircuitConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_prefix: Optional[pulumi.Input[str]] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
connection_name: Optional[pulumi.Input[str]] = None,
express_route_circuit_peering: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_express_route_circuit_peering: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Express Route Circuit Connection in an ExpressRouteCircuitPeering resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address_prefix: /29 IP address space to carve out Customer addresses for tunnels.
:param pulumi.Input[str] authorization_key: The authorization key.
:param pulumi.Input[str] circuit_name: The name of the express route circuit.
:param pulumi.Input[str] connection_name: The name of the express route circuit connection.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the circuit initiating connection.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] peer_express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the peered circuit.
:param pulumi.Input[str] peering_name: The name of the peering.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ExpressRouteCircuitConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Express Route Circuit Connection in an ExpressRouteCircuitPeering resource.
:param str resource_name: The name of the resource.
:param ExpressRouteCircuitConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ExpressRouteCircuitConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_prefix: Optional[pulumi.Input[str]] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
connection_name: Optional[pulumi.Input[str]] = None,
express_route_circuit_peering: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_express_route_circuit_peering: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ExpressRouteCircuitConnectionArgs.__new__(ExpressRouteCircuitConnectionArgs)
__props__.__dict__["address_prefix"] = address_prefix
__props__.__dict__["authorization_key"] = authorization_key
if circuit_name is None and not opts.urn:
raise TypeError("Missing required property 'circuit_name'")
__props__.__dict__["circuit_name"] = circuit_name
__props__.__dict__["connection_name"] = connection_name
__props__.__dict__["express_route_circuit_peering"] = express_route_circuit_peering
__props__.__dict__["id"] = id
__props__.__dict__["name"] = name
__props__.__dict__["peer_express_route_circuit_peering"] = peer_express_route_circuit_peering
if peering_name is None and not opts.urn:
raise TypeError("Missing required property 'peering_name'")
__props__.__dict__["peering_name"] = peering_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["circuit_connection_status"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20181001:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20181101:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20181201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ExpressRouteCircuitConnection"), 
pulumi.Alias(type_="azure-native:network/v20190601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190901:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20191101:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20191201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200301:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200501:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20201101:ExpressRouteCircuitConnection"), 
pulumi.Alias(type_="azure-nextgen:network/v20201101:ExpressRouteCircuitConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRouteCircuitConnection, __self__).__init__(
'azure-native:network/v20190701:ExpressRouteCircuitConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRouteCircuitConnection':
"""
Get an existing ExpressRouteCircuitConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ExpressRouteCircuitConnectionArgs.__new__(ExpressRouteCircuitConnectionArgs)
__props__.__dict__["address_prefix"] = None
__props__.__dict__["authorization_key"] = None
__props__.__dict__["circuit_connection_status"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["express_route_circuit_peering"] = None
__props__.__dict__["name"] = None
__props__.__dict__["peer_express_route_circuit_peering"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
return ExpressRouteCircuitConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
/29 IP address space to carve out Customer addresses for tunnels.
"""
return pulumi.get(self, "address_prefix")
@property
@pulumi.getter(name="authorizationKey")
def authorization_key(self) -> pulumi.Output[Optional[str]]:
"""
The authorization key.
"""
return pulumi.get(self, "authorization_key")
@property
@pulumi.getter(name="circuitConnectionStatus")
def circuit_connection_status(self) -> pulumi.Output[str]:
"""
Express Route Circuit connection state.
"""
return pulumi.get(self, "circuit_connection_status")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="expressRouteCircuitPeering")
def express_route_circuit_peering(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
Reference to Express Route Circuit Private Peering Resource of the circuit initiating connection.
"""
return pulumi.get(self, "express_route_circuit_peering")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peerExpressRouteCircuitPeering")
def peer_express_route_circuit_peering(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
Reference to Express Route Circuit Private Peering Resource of the peered circuit.
"""
return pulumi.get(self, "peer_express_route_circuit_peering")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the express route circuit connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of the resource.
"""
return pulumi.get(self, "type")
| 56.423377 | 4,005 | 0.704185 |
acf7e25e17bf1db2ad4a9aa121d9ed45239677da | 5,115 | py | Python | tests/tests_internal_AnkiDeckNote.py | AnonymerNiklasistanonym/Md2Anki | 57bac3906d6a1f8594d0c974eb55f64e217ec954 | [
"MIT"
] | 3 | 2021-03-31T17:25:25.000Z | 2022-01-18T17:48:37.000Z | tests/tests_internal_AnkiDeckNote.py | AnonymerNiklasistanonym/Md2Anki | 57bac3906d6a1f8594d0c974eb55f64e217ec954 | [
"MIT"
] | 3 | 2021-04-26T10:41:15.000Z | 2021-10-21T08:47:31.000Z | tests/tests_internal_AnkiDeckNote.py | AnonymerNiklasistanonym/Md2Anki | 57bac3906d6a1f8594d0c974eb55f64e217ec954 | [
"MIT"
] | 1 | 2022-03-12T18:33:11.000Z | 2022-03-12T18:33:11.000Z | import sys
import re
sys.path.append("../src/md2anki/")
import md2anki
def test_regex_image_file():
def check_regex_image_file(
string="",
expected_alt_texts=[],
expected_source_paths=[],
expected_widths=[],
expected_heights=[],
):
"""Check if the recognized image file infos are as expected"""
matches = re.findall(md2anki.REGEX_MD_IMAGE_FILE, string)
assert len(matches) == len(
expected_alt_texts
), f"{len(matches)=}{len(expected_alt_texts)=}"
assert len(matches) == len(
expected_source_paths
), f"{len(matches)=}{len(expected_source_paths)=}"
assert len(matches) == len(
expected_widths
), f"{len(matches)=}{len(expected_widths)=}"
assert len(matches) == len(
expected_heights
), f"{len(matches)=}{len(expected_heights)=}"
for (
match,
expected_alt_text,
expected_source_path,
expected_width,
expected_height,
) in zip(
matches,
expected_alt_texts,
expected_source_paths,
expected_widths,
expected_heights,
):
assert match[0] == expected_alt_text, f"{match[0]=}{expected_alt_text=}"
assert (
match[1] == expected_source_path
), f"{match[1]=}{expected_source_path=}"
assert match[2] == expected_width, f"{match[2]=}{expected_width=}"
assert match[3] == expected_height, f"{match[3]=}{expected_height=}"
check_regex_image_file()
check_regex_image_file(
"{ width=100px, height=200px }",
expected_alt_texts=["alt text"],
expected_source_paths=["source path"],
expected_widths=["100px"],
expected_heights=["200px"],
)
check_regex_image_file(
"{}",
expected_alt_texts=["alt text"],
expected_source_paths=["source path"],
expected_widths=[""],
expected_heights=[""],
)
check_regex_image_file(
"",
expected_alt_texts=["alt text"],
expected_source_paths=["source path"],
expected_widths=[""],
expected_heights=[""],
)
check_regex_image_file(
"{ height=20px }",
expected_alt_texts=["alt"],
expected_source_paths=["./source.png"],
expected_widths=[""],
expected_heights=["20px"],
)
check_regex_image_file(
" ",
expected_alt_texts=["alt text", "alt text2"],
expected_source_paths=["source path", "source path2"],
expected_widths=["", ""],
expected_heights=["", ""],
)
def test_regex_tag():
def check_regex_tag(
string="",
expected_tag_list_strings=[],
):
"""Check if the recognized tag infos are as expected"""
matches = re.findall(md2anki.REGEX_MD_TAG, string)
assert len(matches) == len(
expected_tag_list_strings
), f"{len(matches)=}{len(expected_tag_list_strings)=}"
for match, expected_tag_list_string in zip(matches, expected_tag_list_strings):
assert (
match[0] == expected_tag_list_string
), f"{match[0]=}{expected_tag_list_string=}"
check_regex_tag()
check_regex_tag(
"`{=:tag list string:=}`",
expected_tag_list_strings=["tag string list"],
)
def test_get_used_files():
def check_used_files(
expected_used_files=set(),
question_string="",
answer_string="",
):
"""Check if the recognized used files are the expected ones"""
note = md2anki.AnkiDeckNote(
question=question_string,
answer=answer_string,
)
note_used_files = note.get_used_files()
assert (
len(note_used_files.difference(expected_used_files)) == 0
), f"{note_used_files=}{expected_used_files=}{note_used_files.difference(expected_used_files)=}"
check_used_files()
check_used_files(
question_string="abc",
)
check_used_files(
expected_used_files=set(["path1"]),
question_string="",
)
check_used_files(
expected_used_files=set(["path1"]),
question_string="hi\n",
)
check_used_files(
expected_used_files=set(["path1", "path2"]),
question_string="hi\n\n",
)
# Check if answer strings are also searched
check_used_files(
expected_used_files=set(["path1", "path2"]),
answer_string=" ",
)
# Check if URLs are ignored
check_used_files(
expected_used_files=set(["path1", "path2"]),
question_string="  ![https://www.google.com/image.png]",
)
if __name__ == "__main__":
test_regex_image_file()
test_get_used_files()
print("Everything passed [internal AnkiDeckNote]")
| 32.373418 | 104 | 0.592571 |
acf7e338ddbfe7ded57d36b67db54cbf4fa5cdd2 | 774 | py | Python | DjangoTemplate/polls/models.py | hsuyeemon/Testing | 3ff0e46baa9ce8db446d44cfc10b0cc8ef3a4ef0 | [
"Apache-2.0"
] | 1 | 2020-02-18T06:06:24.000Z | 2020-02-18T06:06:24.000Z | DjangoTemplate/polls/models.py | hsuyeemon/Testing | 3ff0e46baa9ce8db446d44cfc10b0cc8ef3a4ef0 | [
"Apache-2.0"
] | 4 | 2021-05-10T18:47:55.000Z | 2022-02-26T19:48:52.000Z | DjangoTemplate/polls/models.py | hsuyeemon/Testing | 3ff0e46baa9ce8db446d44cfc10b0cc8ef3a4ef0 | [
"Apache-2.0"
] | null | null | null | from django.db import models
# Create your models here.
#from neomodel import StructuredNode, StringProperty, DateProperty
from neomodel import (config, StructuredNode, StringProperty, IntegerProperty,
UniqueIdProperty, RelationshipTo)
#class Book(StructuredNode):
#title = StringProperty(unique_index=True)
#published = DateProperty()
class Venue(models.Model):
name = models.CharField('Venue Name', max_length=120)
address = models.CharField(max_length=300)
zip_code = models.CharField('Zip/Post Code', max_length=12)
phone = models.CharField('Contact Phone', max_length=20)
web = models.URLField('Web Address')
email_address = models.EmailField('Email Address')
def __str__(self):
return self.name | 36.857143 | 78 | 0.724806 |
acf7e445951b2739aa9fc214996af5d184117adf | 6,171 | py | Python | blockchain.py | 14cam31/Blockchain | f5e807d6b43e6e9c291ea461ec2d5de0bfd0ea5c | [
"MIT"
] | 1 | 2019-01-24T20:58:51.000Z | 2019-01-24T20:58:51.000Z | blockchain.py | 14cam31/Blockchain | f5e807d6b43e6e9c291ea461ec2d5de0bfd0ea5c | [
"MIT"
] | 2 | 2019-02-12T16:58:17.000Z | 2019-02-12T17:29:02.000Z | blockchain.py | 14cam31/Blockchain | f5e807d6b43e6e9c291ea461ec2d5de0bfd0ea5c | [
"MIT"
] | 1 | 2019-02-12T13:56:40.000Z | 2019-02-12T13:56:40.000Z | from time import time
from urllib.parse import urlparse
from uuid import uuid4
from flask import Flask, jsonify, request
import hashlib
import requests
import json
class Blockchain:
def __init__(self):
self.current_transactions = []
self.chain = []
self.nodes = set()
self.new_block(previous_hash=1, proof=100)
def new_block(self, previous_hash, proof):
block = {
"index": len(self.chain)+1,
"timestamp": time(),
"transactions": self.current_transactions,
"proof": proof,
"previous_hash": previous_hash
}
self.current_transactions = []
self.chain.append(block)
return block
def new_transaction(self, sender, reciever, amount):
transaction = {
"sender": sender,
"reciever": reciever,
"amount": amount
}
self.current_transactions.append(transaction)
return self.last_block["index"]+1
def register_node(self, address):
url = urlparse(address)
if url.net_loc:
self.nodes.add(url.net_loc)
elif url.path:
self.nodes.add(url.path)
else:
raise ValueError("invalid url or address")
def check_chain(self, chain):
last_block = chain[0]
current_index = 1
while current_index<len(chain):
current_block = chain[current_index]
if current_block["previous_hash"] != self.__hash__(last_block):
return False
if not self.check_proof(current_block["proof"], last_block["proof"], self.hash(last_block)):
return False
last_block = current_block
current_index += 1
return True
def resolve_conflicts(self):
min_length = len(self.chain)
correct_chain = None
for node in self.nodes:
response = requests.get(f'http://{node}/chain')
if response.status_code == 200:
if response.json()["length"] > min_length and self.check_chain(chain):
min_length = response.json()["length"]
correct_chain = response.json()["chain"]
if correct_chain:
self.chain = correct_chain
return True
return False
def proof_of_work(self, last_block):
proof = 0
while not self.valid_proof(last_block["proof"], proof, self.hash(last_block)):
proof += 1
return proof
def get_balance(self, person):
coins = 0
for block in self.chain
for transaction in block["transactions"]:
if transaction["sender"] == person:
coins -= transaction["amount"]
if transaction["reciever"] == person:
coins += transaction["amount"]
for transaction in self.current_transactions:
if transaction["sender"] == person:
coins -= transaction["amount"]
if transaction["reciever"] == person:
coins += transaction["amount"]
return coins
def check_transaction(self, sender, amount):
return get_balance(sender) >= amount
@staticmethod
def valid_proof(last_proof, proof, last_hash):
guess = f'{last_proof}{proof}{last_hash}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:4] == "0000"
@staticmethod
def hash(block):
block_string = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest()
@property
def last_block(self):
return self.chain[-1]
app = Flask(__name__)
node_identifier = str(uuid4()).replace("-", "")
blockchain = Blockchain()
@app.route("/mine", methods=["GET"])
def mine():
last_block = blockchain.last_block
proof = blockchain.proof_of_work(last_block)
previous_hash = blockchain.hash(last_block)
block = blockchain.new_block(proof, previous_hash)
blockchain.new_transaction(sender=0, reciever=node_identifier, amount=1)
response = {
"message": "New block added",
"index": block["index"],
"transactions": block["transactions"],
"proof": block["proof"],
"previous_hash": block["previous_hash"]
}
return jsonify(response), 200
@app.route("/transactions/new", methods=["POST"])
def new_transaction():
values = request.get_json()
required = ["sender", "reciever", "amount"]
if not all(k in values for k in required):
return "Missing a value", 400
index = blockchain.new_transaction(values["sender"], values["reciever"], values["amount"])
response = {
"message": f"Your transaction will be added to block number {index}"
}
return jsonify(response), 201
@app.route("/chain", methods=["GET"])
def get_chain():
response = {
"chain": blockchain.chain,
"length": len(blockchain.chain)
}
return jsonify(response), 200
@app.route("/nodes/register", methods=["POST"])
def register_nodes():
values = request.get_json()
nodes = values.get("nodes")
if nodes is None:
return "please supply a node or list of nodes", 400
for node in nodes:
blockchain.register_node(node)
response = {
"message": "node(s) added",
"num_nodes": list(blockchain.nodes)
}
return jsonify(response), 401
@app.route("/nodes/resolve", methods=["GET"])
def create_consesus():
replaced = blockchain.resolve_conflicts()
if replaced:
response = {
"message": "chain was replaced",
"new_chain": blockchain.chain
}
else:
response = {
"message": "chain was correct",
"chain": blockchain.chain
}
return jsonify(response), 200
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')
args = parser.parse_args()
port = args.port
app.run(host='0.0.0.0', port=port) | 24.883065 | 104 | 0.597472 |
acf7e4edc57bb3728da639cd6b9c7ba7d49b5c38 | 41,910 | py | Python | lldb/third_party/Python/module/unittest2/unittest2/test/test_case.py | medismailben/llvm-project | e334a839032fe500c3bba22bf976ab7af13ce1c1 | [
"Apache-2.0"
] | 2,338 | 2018-06-19T17:34:51.000Z | 2022-03-31T11:00:37.000Z | third_party/Python/module/unittest2/unittest2/test/test_case.py | DalavanCloud/lldb | e913eaf2468290fb94c767d474d611b41a84dd69 | [
"Apache-2.0"
] | 3,740 | 2019-01-23T15:36:48.000Z | 2022-03-31T22:01:13.000Z | third_party/Python/module/unittest2/unittest2/test/test_case.py | DalavanCloud/lldb | e913eaf2468290fb94c767d474d611b41a84dd69 | [
"Apache-2.0"
] | 500 | 2019-01-23T07:49:22.000Z | 2022-03-30T02:59:37.000Z | import difflib
import pprint
import re
import six
from copy import deepcopy
import unittest2
from unittest2.test.support import (
OldTestResult, EqualityMixin, HashingMixin, LoggingResult
)
class MyException(Exception):
pass
class Test(object):
"Keep these TestCase classes out of the main namespace"
class Foo(unittest2.TestCase):
def runTest(self): pass
def test1(self): pass
class Bar(Foo):
def test2(self): pass
class LoggingTestCase(unittest2.TestCase):
"""A test case which logs its calls."""
def __init__(self, events):
super(Test.LoggingTestCase, self).__init__('test')
self.events = events
def setUp(self):
self.events.append('setUp')
def test(self):
self.events.append('test')
def tearDown(self):
self.events.append('tearDown')
class TestCleanUp(unittest2.TestCase):
def testCleanUp(self):
class TestableTest(unittest2.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
self.assertEqual(test._cleanups, [])
cleanups = []
def cleanup1(*args, **kwargs):
cleanups.append((1, args, kwargs))
def cleanup2(*args, **kwargs):
cleanups.append((2, args, kwargs))
test.addCleanup(cleanup1, 1, 2, 3, four='hello', five='goodbye')
test.addCleanup(cleanup2)
self.assertEqual(
test._cleanups, [
(cleanup1, (1, 2, 3), dict(
four='hello', five='goodbye')), (cleanup2, (), {})])
result = test.doCleanups()
self.assertTrue(result)
self.assertEqual(
cleanups, [
(2, (), {}), (1, (1, 2, 3), dict(
four='hello', five='goodbye'))])
def testCleanUpWithErrors(self):
class TestableTest(unittest2.TestCase):
def testNothing(self):
pass
class MockResult(object):
errors = []
def addError(self, test, exc_info):
self.errors.append((test, exc_info))
result = MockResult()
test = TestableTest('testNothing')
test._resultForDoCleanups = result
exc1 = Exception('foo')
exc2 = Exception('bar')
def cleanup1():
raise exc1
def cleanup2():
raise exc2
test.addCleanup(cleanup1)
test.addCleanup(cleanup2)
self.assertFalse(test.doCleanups())
(test1, (Type1, instance1, _)), (test2,
(Type2, instance2, _)) = reversed(MockResult.errors)
self.assertEqual((test1, Type1, instance1), (test, Exception, exc1))
self.assertEqual((test2, Type2, instance2), (test, Exception, exc2))
def testCleanupInRun(self):
blowUp = False
ordering = []
class TestableTest(unittest2.TestCase):
def setUp(self):
ordering.append('setUp')
if blowUp:
raise Exception('foo')
def testNothing(self):
ordering.append('test')
def tearDown(self):
ordering.append('tearDown')
test = TestableTest('testNothing')
def cleanup1():
ordering.append('cleanup1')
def cleanup2():
ordering.append('cleanup2')
test.addCleanup(cleanup1)
test.addCleanup(cleanup2)
def success(some_test):
self.assertEqual(some_test, test)
ordering.append('success')
result = unittest2.TestResult()
result.addSuccess = success
test.run(result)
self.assertEqual(ordering, ['setUp', 'test', 'tearDown',
'cleanup2', 'cleanup1', 'success'])
blowUp = True
ordering = []
test = TestableTest('testNothing')
test.addCleanup(cleanup1)
test.run(result)
self.assertEqual(ordering, ['setUp', 'cleanup1'])
def testTestCaseDebugExecutesCleanups(self):
ordering = []
class TestableTest(unittest2.TestCase):
def setUp(self):
ordering.append('setUp')
self.addCleanup(cleanup1)
def testNothing(self):
ordering.append('test')
def tearDown(self):
ordering.append('tearDown')
test = TestableTest('testNothing')
def cleanup1():
ordering.append('cleanup1')
test.addCleanup(cleanup2)
def cleanup2():
ordering.append('cleanup2')
test.debug()
self.assertEqual(
ordering, [
'setUp', 'test', 'tearDown', 'cleanup1', 'cleanup2'])
class Test_TestCase(unittest2.TestCase, EqualityMixin, HashingMixin):
    """Behavioural tests for the unittest2.TestCase API.

    The quoted comments preceding most tests cite the TestCase
    documentation whose behaviour the test pins down.
    """

    # Set up attributes used by inherited tests
    ################################################################
    # Used by HashingMixin.test_hash and EqualityMixin.test_eq
    eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]

    # Used by EqualityMixin.test_ne
    ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
                (Test.Foo('test1'), Test.Bar('test1')),
                (Test.Foo('test1'), Test.Bar('test2'))]

    ################################################################
    # /Set up attributes used by inherited tests

    # "class TestCase([methodName])"
    # ...
    # "Each instance of TestCase will run a single test method: the
    # method named methodName."
    # ...
    # "methodName defaults to "runTest"."
    #
    # Make sure it really is optional, and that it defaults to the proper
    # thing.
    def test_init__no_test_name(self):
        class Test(unittest2.TestCase):
            def runTest(self): raise MyException()

            def test(self): pass

        self.assertEqual(Test().id()[-13:], '.Test.runTest')

    # "class TestCase([methodName])"
    # ...
    # "Each instance of TestCase will run a single test method: the
    # method named methodName."
    def test_init__test_name__valid(self):
        class Test(unittest2.TestCase):
            def runTest(self): raise MyException()

            def test(self): pass

        self.assertEqual(Test('test').id()[-10:], '.Test.test')

    # "class unittest2.TestCase([methodName])"
    # ...
    # "Each instance of TestCase will run a single test method: the
    # method named methodName."
    def test_init__test_name__invalid(self):
        class Test(unittest2.TestCase):
            def runTest(self): raise MyException()

            def test(self): pass

        try:
            Test('testfoo')
        except ValueError:
            pass
        else:
            self.fail("Failed to raise ValueError")

    # "Return the number of tests represented by the this test object. For
    # TestCase instances, this will always be 1"
    def test_countTestCases(self):
        class Foo(unittest2.TestCase):
            def test(self): pass

        self.assertEqual(Foo('test').countTestCases(), 1)

    # "Return the default type of test result object to be used to run this
    # test. For TestCase instances, this will always be
    # unittest2.TestResult; subclasses of TestCase should
    # override this as necessary."
    def test_defaultTestResult(self):
        class Foo(unittest2.TestCase):
            def runTest(self):
                pass

        result = Foo().defaultTestResult()
        self.assertEqual(type(result), unittest2.TestResult)

    # "When a setUp() method is defined, the test runner will run that method
    # prior to each test. Likewise, if a tearDown() method is defined, the
    # test runner will invoke that method after each test. In the example,
    # setUp() was used to create a fresh sequence for each test."
    #
    # Make sure the proper call order is maintained, even if setUp() raises
    # an exception.
    def test_run_call_order__error_in_setUp(self):
        events = []
        result = LoggingResult(events)

        class Foo(Test.LoggingTestCase):
            def setUp(self):
                super(Foo, self).setUp()
                raise RuntimeError('raised by Foo.setUp')

        Foo(events).run(result)
        expected = ['startTest', 'setUp', 'addError', 'stopTest']
        self.assertEqual(events, expected)

    # "With a temporary result stopTestRun is called when setUp errors.
    def test_run_call_order__error_in_setUp_default_result(self):
        events = []

        class Foo(Test.LoggingTestCase):
            def defaultTestResult(self):
                return LoggingResult(self.events)

            def setUp(self):
                super(Foo, self).setUp()
                raise RuntimeError('raised by Foo.setUp')

        Foo(events).run()
        expected = ['startTestRun', 'startTest', 'setUp', 'addError',
                    'stopTest', 'stopTestRun']
        self.assertEqual(events, expected)

    # "When a setUp() method is defined, the test runner will run that method
    # prior to each test. Likewise, if a tearDown() method is defined, the
    # test runner will invoke that method after each test. In the example,
    # setUp() was used to create a fresh sequence for each test."
    #
    # Make sure the proper call order is maintained, even if the test raises
    # an error (as opposed to a failure).
    def test_run_call_order__error_in_test(self):
        events = []
        result = LoggingResult(events)

        class Foo(Test.LoggingTestCase):
            def test(self):
                super(Foo, self).test()
                raise RuntimeError('raised by Foo.test')

        expected = ['startTest', 'setUp', 'test', 'addError', 'tearDown',
                    'stopTest']
        Foo(events).run(result)
        self.assertEqual(events, expected)

    # "With a default result, an error in the test still results in stopTestRun
    # being called."
    def test_run_call_order__error_in_test_default_result(self):
        events = []

        class Foo(Test.LoggingTestCase):
            def defaultTestResult(self):
                return LoggingResult(self.events)

            def test(self):
                super(Foo, self).test()
                raise RuntimeError('raised by Foo.test')

        expected = ['startTestRun', 'startTest', 'setUp', 'test', 'addError',
                    'tearDown', 'stopTest', 'stopTestRun']
        Foo(events).run()
        self.assertEqual(events, expected)

    # "When a setUp() method is defined, the test runner will run that method
    # prior to each test. Likewise, if a tearDown() method is defined, the
    # test runner will invoke that method after each test. In the example,
    # setUp() was used to create a fresh sequence for each test."
    #
    # Make sure the proper call order is maintained, even if the test signals
    # a failure (as opposed to an error).
    def test_run_call_order__failure_in_test(self):
        events = []
        result = LoggingResult(events)

        class Foo(Test.LoggingTestCase):
            def test(self):
                super(Foo, self).test()
                self.fail('raised by Foo.test')

        expected = ['startTest', 'setUp', 'test', 'addFailure', 'tearDown',
                    'stopTest']
        Foo(events).run(result)
        self.assertEqual(events, expected)

    # "When a test fails with a default result stopTestRun is still called."
    def test_run_call_order__failure_in_test_default_result(self):

        class Foo(Test.LoggingTestCase):
            def defaultTestResult(self):
                return LoggingResult(self.events)

            def test(self):
                super(Foo, self).test()
                self.fail('raised by Foo.test')

        expected = ['startTestRun', 'startTest', 'setUp', 'test', 'addFailure',
                    'tearDown', 'stopTest', 'stopTestRun']
        events = []
        Foo(events).run()
        self.assertEqual(events, expected)

    # "When a setUp() method is defined, the test runner will run that method
    # prior to each test. Likewise, if a tearDown() method is defined, the
    # test runner will invoke that method after each test. In the example,
    # setUp() was used to create a fresh sequence for each test."
    #
    # Make sure the proper call order is maintained, even if tearDown() raises
    # an exception.
    def test_run_call_order__error_in_tearDown(self):
        events = []
        result = LoggingResult(events)

        class Foo(Test.LoggingTestCase):
            def tearDown(self):
                super(Foo, self).tearDown()
                raise RuntimeError('raised by Foo.tearDown')

        Foo(events).run(result)
        expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
                    'stopTest']
        self.assertEqual(events, expected)

    # "When tearDown errors with a default result stopTestRun is still called."
    def test_run_call_order__error_in_tearDown_default_result(self):

        class Foo(Test.LoggingTestCase):
            def defaultTestResult(self):
                return LoggingResult(self.events)

            def tearDown(self):
                super(Foo, self).tearDown()
                raise RuntimeError('raised by Foo.tearDown')

        events = []
        Foo(events).run()
        expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
                    'addError', 'stopTest', 'stopTestRun']
        self.assertEqual(events, expected)

    # "TestCase.run() still works when the defaultTestResult is a TestResult
    # that does not support startTestRun and stopTestRun.
    def test_run_call_order_default_result(self):

        class Foo(unittest2.TestCase):
            def defaultTestResult(self):
                return OldTestResult()

            def test(self):
                pass

        Foo('test').run()

    # "This class attribute gives the exception raised by the test() method.
    # If a test framework needs to use a specialized exception, possibly to
    # carry additional information, it must subclass this exception in
    # order to ``play fair'' with the framework. The initial value of this
    # attribute is AssertionError"
    def test_failureException__default(self):
        class Foo(unittest2.TestCase):
            def test(self):
                pass

        self.assertTrue(Foo('test').failureException is AssertionError)

    # "This class attribute gives the exception raised by the test() method.
    # If a test framework needs to use a specialized exception, possibly to
    # carry additional information, it must subclass this exception in
    # order to ``play fair'' with the framework."
    #
    # Make sure TestCase.run() respects the designated failureException
    def test_failureException__subclassing__explicit_raise(self):
        events = []
        result = LoggingResult(events)

        class Foo(unittest2.TestCase):
            def test(self):
                raise RuntimeError()

            failureException = RuntimeError

        self.assertTrue(Foo('test').failureException is RuntimeError)

        Foo('test').run(result)
        expected = ['startTest', 'addFailure', 'stopTest']
        self.assertEqual(events, expected)

    # "This class attribute gives the exception raised by the test() method.
    # If a test framework needs to use a specialized exception, possibly to
    # carry additional information, it must subclass this exception in
    # order to ``play fair'' with the framework."
    #
    # Make sure TestCase.run() respects the designated failureException
    def test_failureException__subclassing__implicit_raise(self):
        events = []
        result = LoggingResult(events)

        class Foo(unittest2.TestCase):
            def test(self):
                self.fail("foo")

            failureException = RuntimeError

        self.assertTrue(Foo('test').failureException is RuntimeError)

        Foo('test').run(result)
        expected = ['startTest', 'addFailure', 'stopTest']
        self.assertEqual(events, expected)

    # "The default implementation does nothing."
    def test_setUp(self):
        class Foo(unittest2.TestCase):
            def runTest(self):
                pass

        # ... and nothing should happen
        Foo().setUp()

    # "The default implementation does nothing."
    def test_tearDown(self):
        class Foo(unittest2.TestCase):
            def runTest(self):
                pass

        # ... and nothing should happen
        Foo().tearDown()

    # "Return a string identifying the specific test case."
    #
    # Because of the vague nature of the docs, I'm not going to lock this
    # test down too much. Really all that can be asserted is that the id()
    # will be a string (either 8-byte or unicode -- again, because the docs
    # just say "string")
    def test_id(self):
        class Foo(unittest2.TestCase):
            def runTest(self):
                pass

        self.assertIsInstance(Foo().id(), six.string_types)

    # "If result is omitted or None, a temporary result object is created
    # and used, but is not made available to the caller. As TestCase owns the
    # temporary result startTestRun and stopTestRun are called.
    def test_run__uses_defaultTestResult(self):
        events = []

        class Foo(unittest2.TestCase):
            def test(self):
                events.append('test')

            def defaultTestResult(self):
                return LoggingResult(events)

        # Make run() find a result object on its own
        Foo('test').run()

        expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
                    'stopTest', 'stopTestRun']
        self.assertEqual(events, expected)

    def testShortDescriptionWithoutDocstring(self):
        self.assertIsNone(self.shortDescription())

    def testShortDescriptionWithOneLineDocstring(self):
        """Tests shortDescription() for a method with a docstring."""
        self.assertEqual(
            self.shortDescription(),
            'Tests shortDescription() for a method with a docstring.')

    def testShortDescriptionWithMultiLineDocstring(self):
        """Tests shortDescription() for a method with a longer docstring.

        This method ensures that only the first line of a docstring is
        returned used in the short description, no matter how long the
        whole thing is.
        """
        self.assertEqual(
            self.shortDescription(),
            'Tests shortDescription() for a method with a longer '
            'docstring.')

    def testAddTypeEqualityFunc(self):
        class SadSnake(object):
            """Dummy class for test_addTypeEqualityFunc."""
        s1, s2 = SadSnake(), SadSnake()
        self.assertNotEqual(s1, s2)

        def AllSnakesCreatedEqual(a, b, msg=None):
            return type(a) is type(b) is SadSnake
        self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
        self.assertEqual(s1, s2)
        # No this doesn't clean up and remove the SadSnake equality func
        # from this TestCase instance but since its a local nothing else
        # will ever notice that.

    def testAssertIs(self):
        thing = object()
        self.assertIs(thing, thing)
        self.assertRaises(
            self.failureException,
            self.assertIs,
            thing,
            object())

    def testAssertIsNot(self):
        thing = object()
        self.assertIsNot(thing, object())
        self.assertRaises(
            self.failureException,
            self.assertIsNot,
            thing,
            thing)

    def testAssertIsInstance(self):
        thing = []
        self.assertIsInstance(thing, list)
        self.assertRaises(self.failureException, self.assertIsInstance,
                          thing, dict)

    def testAssertNotIsInstance(self):
        thing = []
        self.assertNotIsInstance(thing, dict)
        self.assertRaises(self.failureException, self.assertNotIsInstance,
                          thing, list)

    def testAssertIn(self):
        animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}

        self.assertIn('a', 'abc')
        self.assertIn(2, [1, 2, 3])
        self.assertIn('monkey', animals)

        self.assertNotIn('d', 'abc')
        self.assertNotIn(0, [1, 2, 3])
        self.assertNotIn('otter', animals)

        self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
        self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
        self.assertRaises(self.failureException, self.assertIn, 'elephant',
                          animals)

        self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
        self.assertRaises(
            self.failureException, self.assertNotIn, 1, [
                1, 2, 3])
        self.assertRaises(self.failureException, self.assertNotIn, 'cow',
                          animals)

    def testAssertDictContainsSubset(self):
        self.assertDictContainsSubset({}, {})
        self.assertDictContainsSubset({}, {'a': 1})
        self.assertDictContainsSubset({'a': 1}, {'a': 1})
        self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
        self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})

        self.assertRaises(unittest2.TestCase.failureException,
                          self.assertDictContainsSubset, {'a': 2}, {'a': 1},
                          '.*Mismatched values:.*')

        self.assertRaises(unittest2.TestCase.failureException,
                          self.assertDictContainsSubset, {'c': 1}, {'a': 1},
                          '.*Missing:.*')

        self.assertRaises(unittest2.TestCase.failureException,
                          self.assertDictContainsSubset, {'a': 1, 'c': 1},
                          {'a': 1}, '.*Missing:.*')

        self.assertRaises(unittest2.TestCase.failureException,
                          self.assertDictContainsSubset, {'a': 1, 'c': 1},
                          {'a': 1}, '.*Missing:.*Mismatched values:.*')

        self.assertRaises(self.failureException,
                          self.assertDictContainsSubset, {1: "one"}, {})

    def testAssertEqual(self):
        equal_pairs = [
            ((), ()),
            ({}, {}),
            ([], []),
            (set(), set()),
            (frozenset(), frozenset())]
        for a, b in equal_pairs:
            # This mess of try excepts is to test the assertEqual behavior
            # itself.
            try:
                self.assertEqual(a, b)
            except self.failureException:
                self.fail('assertEqual(%r, %r) failed' % (a, b))
            try:
                self.assertEqual(a, b, msg='foo')
            except self.failureException:
                self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
            try:
                self.assertEqual(a, b, 'foo')
            except self.failureException:
                self.fail('assertEqual(%r, %r) with third parameter failed' %
                          (a, b))

        unequal_pairs = [
            ((), []),
            ({}, set()),
            (set([4, 1]), frozenset([4, 2])),
            (frozenset([4, 5]), set([2, 3])),
            (set([3, 4]), set([5, 4]))]
        for a, b in unequal_pairs:
            self.assertRaises(self.failureException, self.assertEqual, a, b)
            self.assertRaises(self.failureException, self.assertEqual, a, b,
                              'foo')
            self.assertRaises(self.failureException, self.assertEqual, a, b,
                              msg='foo')

    def testEquality(self):
        self.assertListEqual([], [])
        self.assertTupleEqual((), ())
        self.assertSequenceEqual([], ())

        a = [0, 'a', []]
        b = []
        self.assertRaises(unittest2.TestCase.failureException,
                          self.assertListEqual, a, b)
        self.assertRaises(unittest2.TestCase.failureException,
                          self.assertListEqual, tuple(a), tuple(b))
        self.assertRaises(unittest2.TestCase.failureException,
                          self.assertSequenceEqual, a, tuple(b))

        b.extend(a)
        self.assertListEqual(a, b)
        self.assertTupleEqual(tuple(a), tuple(b))
        self.assertSequenceEqual(a, tuple(b))
        self.assertSequenceEqual(tuple(a), b)

        self.assertRaises(self.failureException, self.assertListEqual,
                          a, tuple(b))
        self.assertRaises(self.failureException, self.assertTupleEqual,
                          tuple(a), b)
        self.assertRaises(self.failureException, self.assertListEqual, None, b)
        self.assertRaises(self.failureException, self.assertTupleEqual, None,
                          tuple(b))
        self.assertRaises(self.failureException, self.assertSequenceEqual,
                          None, tuple(b))
        self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
        self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
        self.assertRaises(self.failureException, self.assertSequenceEqual,
                          1, 1)

        self.assertDictEqual({}, {})

        c = {'x': 1}
        d = {}
        self.assertRaises(unittest2.TestCase.failureException,
                          self.assertDictEqual, c, d)

        d.update(c)
        self.assertDictEqual(c, d)

        d['x'] = 0
        self.assertRaises(unittest2.TestCase.failureException,
                          self.assertDictEqual, c, d, 'These are unequal')

        self.assertRaises(self.failureException, self.assertDictEqual, None, d)
        self.assertRaises(self.failureException, self.assertDictEqual, [], d)
        self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)

    def testAssertItemsEqual(self):
        self.assertItemsEqual([1, 2, 3], [3, 2, 1])
        self.assertItemsEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
        self.assertRaises(self.failureException, self.assertItemsEqual,
                          [10], [10, 11])
        self.assertRaises(self.failureException, self.assertItemsEqual,
                          [10, 11], [10])
        self.assertRaises(self.failureException, self.assertItemsEqual,
                          [10, 11, 10], [10, 11])

        # Test that sequences of unhashable objects can be tested for sameness:
        self.assertItemsEqual([[1, 2], [3, 4]], [[3, 4], [1, 2]])
        self.assertItemsEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
        self.assertRaises(self.failureException, self.assertItemsEqual,
                          [[1]], [[2]])

        # Test unsortable objects
        self.assertItemsEqual([2j, None], [None, 2j])
        self.assertRaises(self.failureException, self.assertItemsEqual,
                          [2j, None], [None, 3j])

    def testAssertSetEqual(self):
        set1 = set()
        set2 = set()
        self.assertSetEqual(set1, set2)

        self.assertRaises(
            self.failureException,
            self.assertSetEqual,
            None,
            set2)
        self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
        self.assertRaises(
            self.failureException,
            self.assertSetEqual,
            set1,
            None)
        self.assertRaises(self.failureException, self.assertSetEqual, set1, [])

        set1 = set(['a'])
        set2 = set()
        self.assertRaises(
            self.failureException,
            self.assertSetEqual,
            set1,
            set2)

        set1 = set(['a'])
        set2 = set(['a'])
        self.assertSetEqual(set1, set2)

        set1 = set(['a'])
        set2 = set(['a', 'b'])
        self.assertRaises(
            self.failureException,
            self.assertSetEqual,
            set1,
            set2)

        set1 = set(['a'])
        set2 = frozenset(['a', 'b'])
        self.assertRaises(
            self.failureException,
            self.assertSetEqual,
            set1,
            set2)

        set1 = set(['a', 'b'])
        set2 = frozenset(['a', 'b'])
        self.assertSetEqual(set1, set2)

        set1 = set()
        set2 = "foo"
        self.assertRaises(
            self.failureException,
            self.assertSetEqual,
            set1,
            set2)
        self.assertRaises(
            self.failureException,
            self.assertSetEqual,
            set2,
            set1)

        # make sure any string formatting is tuple-safe
        set1 = set([(0, 1), (2, 3)])
        set2 = set([(4, 5)])
        self.assertRaises(
            self.failureException,
            self.assertSetEqual,
            set1,
            set2)

    def testInequality(self):
        # Try ints
        self.assertGreater(2, 1)
        self.assertGreaterEqual(2, 1)
        self.assertGreaterEqual(1, 1)
        self.assertLess(1, 2)
        self.assertLessEqual(1, 2)
        self.assertLessEqual(1, 1)
        self.assertRaises(self.failureException, self.assertGreater, 1, 2)
        self.assertRaises(self.failureException, self.assertGreater, 1, 1)
        self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
        self.assertRaises(self.failureException, self.assertLess, 2, 1)
        self.assertRaises(self.failureException, self.assertLess, 1, 1)
        self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)

        # Try Floats
        self.assertGreater(1.1, 1.0)
        self.assertGreaterEqual(1.1, 1.0)
        self.assertGreaterEqual(1.0, 1.0)
        self.assertLess(1.0, 1.1)
        self.assertLessEqual(1.0, 1.1)
        self.assertLessEqual(1.0, 1.0)
        self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
        self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
        self.assertRaises(
            self.failureException,
            self.assertGreaterEqual,
            1.0,
            1.1)
        self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
        self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
        self.assertRaises(
            self.failureException,
            self.assertLessEqual,
            1.1,
            1.0)

        # Try Strings
        self.assertGreater('bug', 'ant')
        self.assertGreaterEqual('bug', 'ant')
        self.assertGreaterEqual('ant', 'ant')
        self.assertLess('ant', 'bug')
        self.assertLessEqual('ant', 'bug')
        self.assertLessEqual('ant', 'ant')
        self.assertRaises(
            self.failureException,
            self.assertGreater,
            'ant',
            'bug')
        self.assertRaises(
            self.failureException,
            self.assertGreater,
            'ant',
            'ant')
        self.assertRaises(
            self.failureException,
            self.assertGreaterEqual,
            'ant',
            'bug')
        self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
        self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
        self.assertRaises(
            self.failureException,
            self.assertLessEqual,
            'bug',
            'ant')

        # Try Unicode
        self.assertGreater(u'bug', u'ant')
        self.assertGreaterEqual(u'bug', u'ant')
        self.assertGreaterEqual(u'ant', u'ant')
        self.assertLess(u'ant', u'bug')
        self.assertLessEqual(u'ant', u'bug')
        self.assertLessEqual(u'ant', u'ant')
        self.assertRaises(
            self.failureException,
            self.assertGreater,
            u'ant',
            u'bug')
        self.assertRaises(
            self.failureException,
            self.assertGreater,
            u'ant',
            u'ant')
        self.assertRaises(
            self.failureException,
            self.assertGreaterEqual,
            u'ant',
            u'bug')
        self.assertRaises(
            self.failureException,
            self.assertLess,
            u'bug',
            u'ant')
        self.assertRaises(
            self.failureException,
            self.assertLess,
            u'ant',
            u'ant')
        self.assertRaises(
            self.failureException,
            self.assertLessEqual,
            u'bug',
            u'ant')

        # Try Mixed String/Unicode
        self.assertGreater('bug', u'ant')
        self.assertGreater(u'bug', 'ant')
        self.assertGreaterEqual('bug', u'ant')
        self.assertGreaterEqual(u'bug', 'ant')
        self.assertGreaterEqual('ant', u'ant')
        self.assertGreaterEqual(u'ant', 'ant')
        self.assertLess('ant', u'bug')
        self.assertLess(u'ant', 'bug')
        self.assertLessEqual('ant', u'bug')
        self.assertLessEqual(u'ant', 'bug')
        self.assertLessEqual('ant', u'ant')
        self.assertLessEqual(u'ant', 'ant')
        self.assertRaises(
            self.failureException,
            self.assertGreater,
            'ant',
            u'bug')
        self.assertRaises(
            self.failureException,
            self.assertGreater,
            u'ant',
            'bug')
        self.assertRaises(
            self.failureException,
            self.assertGreater,
            'ant',
            u'ant')
        self.assertRaises(
            self.failureException,
            self.assertGreater,
            u'ant',
            'ant')
        self.assertRaises(
            self.failureException,
            self.assertGreaterEqual,
            'ant',
            u'bug')
        self.assertRaises(
            self.failureException,
            self.assertGreaterEqual,
            u'ant',
            'bug')
        self.assertRaises(
            self.failureException,
            self.assertLess,
            'bug',
            u'ant')
        self.assertRaises(
            self.failureException,
            self.assertLess,
            u'bug',
            'ant')
        self.assertRaises(
            self.failureException,
            self.assertLess,
            'ant',
            u'ant')
        self.assertRaises(
            self.failureException,
            self.assertLess,
            u'ant',
            'ant')
        self.assertRaises(
            self.failureException,
            self.assertLessEqual,
            'bug',
            u'ant')
        self.assertRaises(
            self.failureException,
            self.assertLessEqual,
            u'bug',
            'ant')

    def testAssertMultiLineEqual(self):
        # NOTE(review): the exact whitespace inside these literals is
        # significant -- sample_text_error must match difflib.ndiff output.
        sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
    A test case is the smallest unit of testing. [...]
"""
        revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
    A test case is the smallest unit of testing. [...] You may provide your
    own implementation that does not subclass from TestCase, of course.
"""
        sample_text_error = """\
- http://www.python.org/doc/2.3/lib/module-unittest.html
?            ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
?            ^^^
  test case
-     A test case is the smallest unit of testing. [...]
+     A test case is the smallest unit of testing. [...] You may provide your
?                                                       +++++++++++++++++++++
+     own implementation that does not subclass from TestCase, of course.
"""
        self.maxDiff = None
        for type_changer in (lambda x: x, lambda x: x.decode('utf8')):
            try:
                self.assertMultiLineEqual(type_changer(sample_text),
                                          type_changer(revised_sample_text))
            except self.failureException as e:
                # need to remove the first line of the error message
                error = str(e).encode('utf8').split('\n', 1)[1]

                # assertMultiLineEqual is hooked up as the default for
                # unicode strings - so we can't use it for this check
                self.assertTrue(sample_text_error == error)

    def testAssertSequenceEqualMaxDiff(self):
        self.assertEqual(self.maxDiff, 80 * 8)
        seq1 = 'a' + 'x' * 80**2
        seq2 = 'b' + 'x' * 80**2
        diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
                                       pprint.pformat(seq2).splitlines()))
        # the +1 is the leading \n added by assertSequenceEqual
        omitted = unittest2.case.DIFF_OMITTED % (len(diff) + 1,)

        self.maxDiff = len(diff) // 2
        try:
            self.assertSequenceEqual(seq1, seq2)
        except self.failureException as e:
            msg = e.args[0]
        else:
            self.fail('assertSequenceEqual did not fail.')
        self.assertTrue(len(msg) < len(diff))
        self.assertIn(omitted, msg)

        self.maxDiff = len(diff) * 2
        try:
            self.assertSequenceEqual(seq1, seq2)
        except self.failureException as e:
            msg = e.args[0]
        else:
            self.fail('assertSequenceEqual did not fail.')
        self.assertTrue(len(msg) > len(diff))
        self.assertNotIn(omitted, msg)

        self.maxDiff = None
        try:
            self.assertSequenceEqual(seq1, seq2)
        except self.failureException as e:
            msg = e.args[0]
        else:
            self.fail('assertSequenceEqual did not fail.')
        self.assertTrue(len(msg) > len(diff))
        self.assertNotIn(omitted, msg)

    def testTruncateMessage(self):
        self.maxDiff = 1
        message = self._truncateMessage('foo', 'bar')
        omitted = unittest2.case.DIFF_OMITTED % len('bar')
        self.assertEqual(message, 'foo' + omitted)

        self.maxDiff = None
        message = self._truncateMessage('foo', 'bar')
        self.assertEqual(message, 'foobar')

        self.maxDiff = 4
        message = self._truncateMessage('foo', 'bar')
        self.assertEqual(message, 'foobar')

    def testAssertDictEqualTruncates(self):
        test = unittest2.TestCase('assertEqual')

        def truncate(msg, diff):
            return 'foo'
        test._truncateMessage = truncate
        try:
            test.assertDictEqual({}, {1: 0})
        except self.failureException as e:
            self.assertEqual(str(e), 'foo')
        else:
            self.fail('assertDictEqual did not fail')

    def testAssertMultiLineEqualTruncates(self):
        test = unittest2.TestCase('assertEqual')

        def truncate(msg, diff):
            return 'foo'
        test._truncateMessage = truncate
        try:
            test.assertMultiLineEqual('foo', 'bar')
        except self.failureException as e:
            self.assertEqual(str(e), 'foo')
        else:
            self.fail('assertMultiLineEqual did not fail')

    def testAssertIsNone(self):
        self.assertIsNone(None)
        self.assertRaises(self.failureException, self.assertIsNone, False)
        self.assertIsNotNone('DjZoPloGears on Rails')
        self.assertRaises(self.failureException, self.assertIsNotNone, None)

    def testAssertRegexpMatches(self):
        self.assertRegexpMatches('asdfabasdf', r'ab+')
        self.assertRaises(self.failureException, self.assertRegexpMatches,
                          'saaas', r'aaaa')

    def testAssertRaisesRegexp(self):
        class ExceptionMock(Exception):
            pass

        def Stub():
            raise ExceptionMock('We expect')

        self.assertRaisesRegexp(ExceptionMock, re.compile('expect$'), Stub)
        self.assertRaisesRegexp(ExceptionMock, 'expect$', Stub)
        self.assertRaisesRegexp(ExceptionMock, u'expect$', Stub)

    def testAssertNotRaisesRegexp(self):
        self.assertRaisesRegexp(
            self.failureException, '^Exception not raised$',
            self.assertRaisesRegexp, Exception, re.compile('x'),
            lambda: None)
        self.assertRaisesRegexp(
            self.failureException, '^Exception not raised$',
            self.assertRaisesRegexp, Exception, 'x',
            lambda: None)
        self.assertRaisesRegexp(
            self.failureException, '^Exception not raised$',
            self.assertRaisesRegexp, Exception, u'x',
            lambda: None)

    def testAssertRaisesRegexpMismatch(self):
        def Stub():
            raise Exception('Unexpected')

        self.assertRaisesRegexp(
            self.failureException,
            r'"\^Expected\$" does not match "Unexpected"',
            self.assertRaisesRegexp, Exception, '^Expected$',
            Stub)
        self.assertRaisesRegexp(
            self.failureException,
            r'"\^Expected\$" does not match "Unexpected"',
            self.assertRaisesRegexp, Exception, u'^Expected$',
            Stub)
        self.assertRaisesRegexp(
            self.failureException,
            r'"\^Expected\$" does not match "Unexpected"',
            self.assertRaisesRegexp, Exception,
            re.compile('^Expected$'), Stub)

    def testSynonymAssertMethodNames(self):
        """Test undocumented method name synonyms.

        Please do not use these methods names in your own code.

        This test confirms their continued existence and functionality
        in order to avoid breaking existing code.
        """
        self.assertNotEquals(3, 5)
        self.assertEquals(3, 3)
        self.assertAlmostEquals(2.0, 2.0)
        self.assertNotAlmostEquals(3.0, 5.0)
        self.assert_(True)

    def testDeepcopy(self):
        # Issue: 5660
        class TestableTest(unittest2.TestCase):
            def testNothing(self):
                pass

        test = TestableTest('testNothing')

        # This shouldn't blow up
        deepcopy(test)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest2.main()
| 33.635634 | 93 | 0.574087 |
acf7e6e693cf83c62167d078386447b55537cfb8 | 257 | py | Python | fairseq/examples/speech_recognition/data/__init__.py | skeshaw/LoReNMT | 32ffd83f38258dfffd324f811695a44ad33954f5 | [
"Apache-2.0"
] | null | null | null | fairseq/examples/speech_recognition/data/__init__.py | skeshaw/LoReNMT | 32ffd83f38258dfffd324f811695a44ad33954f5 | [
"Apache-2.0"
] | null | null | null | fairseq/examples/speech_recognition/data/__init__.py | skeshaw/LoReNMT | 32ffd83f38258dfffd324f811695a44ad33954f5 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .asr_dataset import AsrDataset
__all__ = [
'AsrDataset',
]
| 23.363636 | 66 | 0.708171 |
acf7e76982db80fc79869df0bc8b2e8bbb6bca66 | 11,953 | py | Python | tic-tac-toe.py | berrym/tic-tac-toe | 42a8f7a03affb9b7e0034a462bd7fc94ac99bba2 | [
"MIT"
] | null | null | null | tic-tac-toe.py | berrym/tic-tac-toe | 42a8f7a03affb9b7e0034a462bd7fc94ac99bba2 | [
"MIT"
] | null | null | null | tic-tac-toe.py | berrym/tic-tac-toe | 42a8f7a03affb9b7e0034a462bd7fc94ac99bba2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""tic-tac-toe.py
Tic-Tac-Toe like game.
Copyright (C) 2020 Michael Berry
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from copy import deepcopy
from math import inf as infinity
from random import choice
from time import sleep
from typing import Callable, List, Tuple, Union
class Player:
    """Base class for tic-tac-toe players.

    Concrete subclasses (human or AI) override get_move(); the base
    implementation is a do-nothing placeholder that returns None.
    """

    def __init__(self, symbol: str) -> None:
        """Remember the single-character board symbol for this player.

        :param symbol: character shown when str() or print() are called
        """
        self.symbol = symbol

    def __str__(self) -> str:
        """Return the player's board symbol."""
        return self.symbol

    def get_move(self, game):
        """Placeholder move generator; subclasses supply real logic."""
        pass
class HumanPlayer(Player):
    """Interactive player whose moves are read from standard input."""

    def __init__(self, symbol) -> None:
        """Create a human player identified by *symbol*.

        :param symbol: character that represents the player
        :return: None
        """
        super().__init__(symbol)
        self.ai = False

    def get_move(self, game) -> Union[bool, Tuple[int, int]]:
        """Prompt for a cell number and convert it to board coordinates.

        :param game: game state (used to translate/validate the entry)
        :return: an (x, y) board coordinate, or False for invalid input
        """
        number = input(f"\n{self.symbol}'s turn, Enter a number: ")
        move = game.translate_to_coord(number)
        if not move:
            return False
        return move
class MiniMax_AI_Player(Player):
    """Computer player that chooses moves with the minimax algorithm."""

    def __init__(self, symbol) -> None:
        """Initialize an AI player.

        :param symbol: character that represents the player.
        :return: None
        """
        super().__init__(symbol)
        self.ai = True

    def get_move(self, game) -> List[int]:
        """Generate an AI move.

        On an empty board every opening is equally good, so a random cell
        is chosen; otherwise the move is found by minimax search.

        Fix: the original returned a 2-item cell on an empty board but the
        full 3-item [x, y, score] minimax result otherwise; the return is
        now always the [x, y] coordinate pair.

        :param game: game state exposing ``board`` and ``empty_cells()``
        :return: an [x, y] board coordinate
        """
        if len(game.empty_cells()) == 9:
            x, y = choice(game.empty_cells())
        else:
            x, y, _score = self.minimax(game, len(game.empty_cells()),
                                        self.symbol)
        return [x, y]

    def evaluate(self, state) -> int:
        """Score the current board from this player's point of view.

        :param state: current game state
        :return: 1 if self has won, -1 if the other player has won, 0 otherwise
        """
        opponent = "o" if self.symbol == "x" else "x"
        if self.wins(state, self.symbol):
            return 1
        if self.wins(state, opponent):
            return -1
        return 0

    def wins(self, state, player) -> bool:
        """Check whether *player* holds a complete row, column, or diagonal.

        Fix: the return annotation said ``Union[None, List]`` but the method
        has always returned the bool result of the ``in`` test.

        :param state: current game state
        :param player: symbol to check for a win
        :return: True if *player* occupies any winning line
        """
        b = state.board
        win_state = [
            [b[0][0], b[0][1], b[0][2]],  # rows
            [b[1][0], b[1][1], b[1][2]],
            [b[2][0], b[2][1], b[2][2]],
            [b[0][0], b[1][0], b[2][0]],  # columns
            [b[0][1], b[1][1], b[2][1]],
            [b[0][2], b[1][2], b[2][2]],
            [b[0][0], b[1][1], b[2][2]],  # diagonals
            [b[2][0], b[1][1], b[0][2]],
        ]
        return [player, player, player] in win_state

    def minimax(self, state, depth, player) -> List:
        """Search the game tree and return the best move for *player*.

        :param state: current game state (board is mutated during the
            search and fully restored before returning)
        :param depth: remaining plies to search (number of empty cells)
        :param player: symbol whose turn is simulated at this node
        :return: [x, y, score] with score in {-1, 0, 1} from self's view
        """
        if player == self.symbol:
            best = [-1, -1, -infinity]  # maximizing node
        else:
            best = [-1, -1, +infinity]  # minimizing node

        if depth == 0 or self.wins(state, "x") or self.wins(state, "o"):
            return [-1, -1, self.evaluate(state)]

        for x, y in state.empty_cells():
            # Simulate the move, recurse, then undo just this one cell.
            # Equivalent to the original whole-board deepcopy per node,
            # but O(1) instead of O(board) per simulated move.
            saved = state.board[x][y]
            state.board[x][y] = player
            score = self.minimax(state, depth - 1,
                                 "o" if player == "x" else "x")
            state.board[x][y] = saved

            score[0], score[1] = x, y
            if player == self.symbol:
                if score[2] > best[2]:
                    best = score  # max value
            else:
                if score[2] < best[2]:
                    best = score  # min value

        return best
class PlayerSet:
    """Hold the two participants of a game and track whose turn it is."""

    def __init__(self, player_one, player_two) -> None:
        """Store both players; player One ("x") starts the game.

        :param player_one: the player using the "x" symbol
        :param player_two: the player using the "o" symbol
        """
        self.players = {"One": player_one, "Two": player_two}
        self.active = self.players["One"]

    def switch_player(self) -> None:
        """Hand the turn to the other player."""
        one = self.players["One"]
        two = self.players["Two"]
        self.active = two if self.active == one else one
class TicTacToe:
    """The tic-tac-toe board plus the pair of players acting on it."""

    def __init__(self) -> None:
        """Create the game board, represented as a 3x3 matrix.

        Unplayed cells hold their display digit ("1".."9"); played cells
        hold a player symbol.
        """
        self.board = [["1", "2", "3"], ["4", "5", "6"], ["7", "8", "9"]]
        self.players = game_config()

    def __str__(self) -> str:
        """Display the pretty ascii board."""
        return f"""
    {self.board[0][0]} | {self.board[0][1]} | {self.board[0][2]}
    ----------
    {self.board[1][0]} | {self.board[1][1]} | {self.board[1][2]}
    ----------
    {self.board[2][0]} | {self.board[2][1]} | {self.board[2][2]}"""

    def empty_cells(self) -> Union[None, List]:
        """List the coordinates of every unplayed cell.

        :return: a (possibly empty) list of [x, y] coordinates
        """
        return [
            [x, y]
            for x, row in enumerate(self.board)
            for y, cell in enumerate(row)
            if str(cell).isdigit()
        ]

    def valid_move(self, x, y) -> bool:
        """Check if a move is valid.

        :return: True if the (x, y) cell is still unplayed, else False
        """
        return [x, y] in self.empty_cells()

    def make_move(self, player: Player, move: Tuple[int, int]) -> bool:
        """Mark a move on the board as played.

        :param player: current player (its symbol is written to the cell)
        :param move: matrix index as (x, y)
        :return: True when the move was applied, False when invalid
        """
        x, y = move[0], move[1]
        if not self.valid_move(x, y):
            return False
        self.board[x][y] = str(player)
        return True

    def translate_to_coord(self, move: str) -> Union[bool, Tuple[int, int]]:
        """Check that a move is a valid play.

        :param move: digit string ("1".."9") naming a board cell
        :return: the (row, col) tuple for that cell, or False
        """
        # Convert move number to a valid coord on game board.
        coord_lookup = {
            "1": (0, 0),
            "2": (0, 1),
            "3": (0, 2),
            "4": (1, 0),
            "5": (1, 1),
            "6": (1, 2),
            "7": (2, 0),
            "8": (2, 1),
            "9": (2, 2),
        }
        return coord_lookup.get(move, False)

    def has_winner(self) -> bool:
        """Check for end of game.

        :return: True when any row, column, or diagonal holds one symbol
        """
        b = self.board
        lines = [
            # rows
            b[0], b[1], b[2],
            # columns
            [b[0][0], b[1][0], b[2][0]],
            [b[0][1], b[1][1], b[2][1]],
            [b[0][2], b[1][2], b[2][2]],
            # diagonals
            [b[0][0], b[1][1], b[2][2]],
            [b[0][2], b[1][1], b[2][0]],
        ]
        return any(line[0] == line[1] == line[2] for line in lines)
def catch_keyboard_interrupt(func) -> Callable:
    """Decorator that turns Ctrl-C into a clean exit instead of a traceback.

    :param func: function to wrap around
    :return: wrapper function
    """
    # Local import keeps this block self-contained; functools is stdlib.
    from functools import wraps

    # BUGFIX: without functools.wraps the wrapper clobbered the wrapped
    # function's __name__/__doc__. (Also dropped the wrapper's incorrect
    # "-> Callable" annotation: it returns func's result, not a callable.)
    @wraps(func)
    def wrapper(*args, **kwargs):
        """Call *func*, converting KeyboardInterrupt into exit(0).

        :param args: wrapped function arguments
        :param kwargs: wrapped function keyword arguments
        :return: whatever *func* returns
        """
        try:
            return func(*args, **kwargs)
        except KeyboardInterrupt:
            print("\nProcess terminated.")
            exit(0)

    return wrapper
def game_config() -> PlayerSet:
    """Configure a PlayerSet: human vs human, human vs computer, etc.

    Keeps prompting until a valid game type (1-4) is entered.

    :return: a configured PlayerSet
    """
    print("Tic-tac-toe\n")
    print("1) Human vs Human")
    print("2) Human vs Computer")
    print("3) Computer vs Human")
    print("4) Computer vs Computer\n")
    # Map each menu choice to the (player one, player two) classes.
    matchups = {
        1: (HumanPlayer, HumanPlayer),
        2: (HumanPlayer, MiniMax_AI_Player),
        3: (MiniMax_AI_Player, HumanPlayer),
        4: (MiniMax_AI_Player, MiniMax_AI_Player),
    }
    # BUGFIX: the original `while game_type := input(...)` loop exited when
    # the user entered an empty line and then crashed with an unbound
    # `players` at the return; loop forever and return only on a valid pick.
    while True:
        try:
            game_type = int(input("Game type [1, 2, 3, 4]: "))
        except ValueError:
            print("\nPlease enter 1, 2, 3, or 4\n")
            continue
        if game_type in matchups:
            player_one_cls, player_two_cls = matchups[game_type]
            return PlayerSet(player_one_cls("x"), player_two_cls("o"))
        print("\nPlease enter 1, 2, 3, or 4\n")
@catch_keyboard_interrupt
def main() -> None:
    """Run the interactive game loop until a win or a draw.

    :return: None
    """
    game = TicTacToe()  # creating the game also prompts for the matchup
    print(game)
    # Main loop
    while True:
        print(f"\n{game.players.active}'s turn")
        # Get a move; re-prompt when the input is not a valid cell.
        if not (move := game.players.active.get_move(game)):
            continue
        # Delay if player is AI so its moves are visible to a human watcher.
        if game.players.active.ai:
            sleep(0.8)
        # Update the game; make_move returns False for an occupied cell,
        # in which case the same player is prompted again.
        if game.make_move(game.players.active, move):
            print(game)
            # Check for a winner
            if game.has_winner():
                print(f"\nGame over! {game.players.active} wins.")
                exit(0)
            # Check for a draw
            if len(game.empty_cells()) == 0:
                print("\nGame over. Draw.")
                exit(0)
            # Swap whose turn it is
            game.players.switch_player()
# Program entry point.
if __name__ == "__main__":
    main()
| 32.045576 | 88 | 0.556262 |
acf7e8220a790cda668d22ad0400eea377a9d9ce | 11,192 | py | Python | aesrgan/models/aesrgan_model.py | aesrgan/A-ESRGAN | e1a71deb4a47e332cad6b3d6bbbbb21a56bdd9c6 | [
"BSD-3-Clause"
] | 58 | 2021-12-21T03:57:31.000Z | 2022-03-26T15:04:02.000Z | aesrgan/models/aesrgan_model.py | aesrgan/A-ESRGAN | e1a71deb4a47e332cad6b3d6bbbbb21a56bdd9c6 | [
"BSD-3-Clause"
] | 5 | 2021-12-24T07:11:50.000Z | 2022-02-10T01:20:27.000Z | aesrgan/models/aesrgan_model.py | aesrgan/A-ESRGAN | e1a71deb4a47e332cad6b3d6bbbbb21a56bdd9c6 | [
"BSD-3-Clause"
] | 4 | 2022-01-27T14:46:35.000Z | 2022-02-13T11:52:10.000Z | import numpy as np
import random
import torch
from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt
from basicsr.data.transforms import paired_random_crop
from basicsr.models.srgan_model import SRGANModel
from basicsr.utils import DiffJPEG, USMSharp
from basicsr.utils.img_process_util import filter2D
from basicsr.utils.registry import MODEL_REGISTRY
from collections import OrderedDict
from torch.nn import functional as F
@MODEL_REGISTRY.register()
class AESRGANModel(SRGANModel):
    """A-ESRGAN model (RealESRGAN-style training).

    Synthesizes low-quality training inputs on the fly with a two-stage
    degradation pipeline (blur, resize, noise, JPEG, sinc filter) and
    trains the generator against a discriminator that returns multiple
    predictions (iterated over in optimize_parameters).
    """

    def __init__(self, opt):
        """Build the model and its degradation helpers.

        :param opt: configuration dict; read keys include 'queue_size' and
            the degradation hyper-parameters used in feed_data.
        """
        super(AESRGANModel, self).__init__(opt)
        # Differentiable-JPEG simulator used to add compression artifacts.
        self.jpeger = DiffJPEG(differentiable=False).cuda()
        # Unsharp-mask operator used to sharpen ground-truth images.
        self.usm_sharpener = USMSharp().cuda()
        # Size of the training-pair pool (must be divisible by batch size).
        self.queue_size = opt.get('queue_size', 180)

    @torch.no_grad()
    def _dequeue_and_enqueue(self):
        """Maintain a pool of (lq, gt) training pairs to increase the
        degradation diversity seen within a single batch.

        Lazily allocates the pool on first call. When the pool is full,
        shuffles it and swaps the current batch against pool entries;
        otherwise appends the current batch to the pool.
        """
        # training pair pool
        # initialize
        b, c, h, w = self.lq.size()
        if not hasattr(self, 'queue_lr'):
            assert self.queue_size % b == 0, 'queue size should be divisible by batch size'
            self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda()
            _, c, h, w = self.gt.size()
            self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda()
            self.queue_ptr = 0
        if self.queue_ptr == self.queue_size:  # full
            # do dequeue and enqueue
            # shuffle
            idx = torch.randperm(self.queue_size)
            self.queue_lr = self.queue_lr[idx]
            self.queue_gt = self.queue_gt[idx]
            # get
            lq_dequeue = self.queue_lr[0:b, :, :, :].clone()
            gt_dequeue = self.queue_gt[0:b, :, :, :].clone()
            # update
            self.queue_lr[0:b, :, :, :] = self.lq.clone()
            self.queue_gt[0:b, :, :, :] = self.gt.clone()
            self.lq = lq_dequeue
            self.gt = gt_dequeue
        else:
            # only do enqueue
            self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone()
            self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone()
            self.queue_ptr = self.queue_ptr + b

    @torch.no_grad()
    def feed_data(self, data):
        """Accept a batch and, during training, synthesize the low-quality
        input from the ground truth with two degradation stages.

        :param data: dict from the dataloader. In training mode it must
            contain 'gt' and the pre-sampled blur kernels 'kernel1',
            'kernel2', 'sinc_kernel'; otherwise it contains 'lq' (and
            optionally 'gt'). Assumes image tensors are NCHW in [0, 1] —
            TODO confirm against the dataset class.
        """
        if self.is_train and self.opt.get('high_order_degradation', True):
            # training data synthesis
            self.gt = data['gt'].to(self.device)
            self.gt_usm = self.usm_sharpener(self.gt)

            self.kernel1 = data['kernel1'].to(self.device)
            self.kernel2 = data['kernel2'].to(self.device)
            self.sinc_kernel = data['sinc_kernel'].to(self.device)

            ori_h, ori_w = self.gt.size()[2:4]

            # ----------------------- The first degradation process ----------------------- #
            # blur
            out = filter2D(self.gt_usm, self.kernel1)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(out, scale_factor=scale, mode=mode)
            # noise: either Gaussian or Poisson, possibly grayscale
            gray_noise_prob = self.opt['gray_noise_prob']
            if np.random.uniform() < self.opt['gaussian_noise_prob']:
                out = random_add_gaussian_noise_pt(
                    out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob)
            else:
                out = random_add_poisson_noise_pt(
                    out,
                    scale_range=self.opt['poisson_scale_range'],
                    gray_prob=gray_noise_prob,
                    clip=True,
                    rounds=False)
            # JPEG compression (clamp first so the JPEG simulator gets [0, 1])
            jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range'])
            out = torch.clamp(out, 0, 1)
            out = self.jpeger(out, quality=jpeg_p)

            # ----------------------- The second degradation process ----------------------- #
            # blur
            if np.random.uniform() < self.opt['second_blur_prob']:
                out = filter2D(out, self.kernel2)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range2'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range2'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(
                out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode)
            # noise
            gray_noise_prob = self.opt['gray_noise_prob2']
            if np.random.uniform() < self.opt['gaussian_noise_prob2']:
                out = random_add_gaussian_noise_pt(
                    out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob)
            else:
                out = random_add_poisson_noise_pt(
                    out,
                    scale_range=self.opt['poisson_scale_range2'],
                    gray_prob=gray_noise_prob,
                    clip=True,
                    rounds=False)

            # JPEG compression + the final sinc filter
            # We also need to resize images to desired sizes. We group [resize back + sinc filter] together
            # as one operation.
            # We consider two orders:
            #   1. [resize back + sinc filter] + JPEG compression
            #   2. JPEG compression + [resize back + sinc filter]
            # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines.
            if np.random.uniform() < 0.5:
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)
                # JPEG compression
                jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
                out = torch.clamp(out, 0, 1)
                out = self.jpeger(out, quality=jpeg_p)
            else:
                # JPEG compression
                jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
                out = torch.clamp(out, 0, 1)
                out = self.jpeger(out, quality=jpeg_p)
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)

            # clamp and round (quantize to 8-bit levels, back to [0, 1])
            self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.

            # random crop
            gt_size = self.opt['gt_size']
            (self.gt, self.gt_usm), self.lq = paired_random_crop([self.gt, self.gt_usm], self.lq, gt_size,
                                                                 self.opt['scale'])

            # training pair pool
            self._dequeue_and_enqueue()
            # sharpen self.gt again, as we have changed the self.gt with self._dequeue_and_enqueue
            self.gt_usm = self.usm_sharpener(self.gt)
        else:
            self.lq = data['lq'].to(self.device)
            if 'gt' in data:
                self.gt = data['gt'].to(self.device)
                self.gt_usm = self.usm_sharpener(self.gt)

    def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
        """Run the parent's validation loop with degradation synthesis
        disabled (feed_data checks self.is_train)."""
        # do not use the synthetic process during validation
        self.is_train = False
        super(AESRGANModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img)
        self.is_train = True

    def optimize_parameters(self, current_iter):
        """One training step: update the generator (pixel + perceptual +
        GAN losses), then the discriminator (real/fake hinge-style losses
        summed over all discriminator outputs), then the EMA weights.

        :param current_iter: global iteration counter, used to gate
            generator updates via net_d_iters / net_d_init_iters.
        """
        # Choose sharpened or plain ground truth per loss, as configured.
        l1_gt = self.gt_usm
        percep_gt = self.gt_usm
        gan_gt = self.gt_usm
        if self.opt['l1_gt_usm'] is False:
            l1_gt = self.gt
        if self.opt['percep_gt_usm'] is False:
            percep_gt = self.gt
        if self.opt['gan_gt_usm'] is False:
            gan_gt = self.gt

        # optimize net_g (freeze the discriminator meanwhile)
        for p in self.net_d.parameters():
            p.requires_grad = False

        self.optimizer_g.zero_grad()
        self.output = self.net_g(self.lq)

        l_g_total = 0
        loss_dict = OrderedDict()
        if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters):
            # pixel loss
            if self.cri_pix:
                l_g_pix = self.cri_pix(self.output, l1_gt)
                l_g_total += l_g_pix
                loss_dict['l_g_pix'] = l_g_pix
            # perceptual loss
            if self.cri_perceptual:
                l_g_percep, l_g_style = self.cri_perceptual(self.output, percep_gt)
                if l_g_percep is not None:
                    l_g_total += l_g_percep
                    loss_dict['l_g_percep'] = l_g_percep
                if l_g_style is not None:
                    l_g_total += l_g_style
                    loss_dict['l_g_style'] = l_g_style
            # gan loss; net_d returns multiple predictions (one per head)
            fake_g_preds = self.net_d(self.output)
            loss_dict['l_g_gan'] = 0
            for fake_g_pred in fake_g_preds:
                l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False)
                l_g_total += l_g_gan
                loss_dict['l_g_gan'] += l_g_gan

            l_g_total.backward()
            self.optimizer_g.step()

        # optimize net_d
        for p in self.net_d.parameters():
            p.requires_grad = True

        self.optimizer_d.zero_grad()
        # real
        real_d_preds = self.net_d(gan_gt)
        loss_dict['l_d_real'] = 0
        loss_dict['out_d_real'] = 0
        l_d_real_tot = 0
        for real_d_pred in real_d_preds:
            l_d_real = self.cri_gan(real_d_pred, True, is_disc=True)
            l_d_real_tot += l_d_real
            loss_dict['l_d_real'] += l_d_real
            loss_dict['out_d_real'] += torch.mean(real_d_pred.detach())
        l_d_real_tot.backward()
        # fake (detach so gradients don't flow into the generator)
        loss_dict['l_d_fake'] = 0
        loss_dict['out_d_fake'] = 0
        l_d_fake_tot = 0
        fake_d_preds = self.net_d(self.output.detach().clone())  # clone for pt1.9
        for fake_d_pred in fake_d_preds:
            l_d_fake = self.cri_gan(fake_d_pred, False, is_disc=True)
            l_d_fake_tot += l_d_fake
            loss_dict['l_d_fake'] += l_d_fake
            loss_dict['out_d_fake'] += torch.mean(fake_d_pred.detach())
        l_d_fake_tot.backward()
        self.optimizer_d.step()

        # exponential moving average of generator weights
        if self.ema_decay > 0:
            self.model_ema(decay=self.ema_decay)

        self.log_dict = self.reduce_loss_dict(loss_dict)
| 43.71875 | 118 | 0.557362 |
acf7e8cc5c3a28d2812e31f278a5444d1f6f531d | 1,314 | py | Python | var/spack/repos/builtin/packages/rasdaemon/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/rasdaemon/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/rasdaemon/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Rasdaemon(AutotoolsPackage):
    """Rasdaemon is a RAS (Reliability, Availability and Serviceability)
    logging tool. It records memory errors, using the EDAC tracing events.
    EDAC is a Linux kernel subsystem with handles detection of ECC errors
    from memory controllers for most chipsets on i386 and x86_64
    architectures. EDAC drivers for other architectures like arm also
    exists."""

    homepage = "https://github.com/mchehab/rasdaemon"
    url = "https://github.com/mchehab/rasdaemon/archive/v0.6.6.tar.gz"

    version('0.6.6', sha256='eea5fefc68583cca2e6daec58508a554553056aeec5eeee0989417c89607eaba')
    version('0.6.5', sha256='1d85580778a0b7c0587b42e24dfe6c02f4c07c6ca9bbb80737d50b58ac830c92')
    version('0.6.4', sha256='c70e2dae1e15af496873b9e5a4d89847759fffd6cbf5ed1d74d28cd250c0771b')

    # Autotools toolchain needed to regenerate/run the build scripts.
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')

    def setup_run_environment(self, env):
        """Expose the daemon binaries: rasdaemon installs into sbin, which
        Spack does not add to PATH by default."""
        env.prepend_path('PATH', self.prefix.sbin)
| 42.387097 | 95 | 0.746575 |
acf7e8eb65b2a39155de71085319df1cb84c2260 | 2,592 | py | Python | scripts/hand_dapg_random.py | mihdalal/d4rl | 1dec41a455f4905b1a7b4838dd24fb7f7d19df2a | [
"Apache-2.0"
] | null | null | null | scripts/hand_dapg_random.py | mihdalal/d4rl | 1dec41a455f4905b1a7b4838dd24fb7f7d19df2a | [
"Apache-2.0"
] | null | null | null | scripts/hand_dapg_random.py | mihdalal/d4rl | 1dec41a455f4905b1a7b4838dd24fb7f7d19df2a | [
"Apache-2.0"
] | null | null | null | import brenvs
import click
import h5py
import os
import gym
import numpy as np
import pickle
from mjrl.utils.gym_env import GymEnv
# Help text displayed by the click command defined below.
DESC = """
Helper script to visualize policy (in mjrl format).\n
USAGE:\n
Visualizes policy on the env\n
$ python utils/visualize_policy --env_name relocate-v0 --policy policies/relocate-v0.pickle --mode evaluation\n
"""
# MAIN =========================================================
@click.command(help=DESC)
@click.option("--env_name", type=str, help="environment to load", required=True)
@click.option("--num_trajs", type=int, help="Num trajectories", default=5000)
def main(env_name, num_trajs):
    """Collect random-policy rollouts for *env_name* and dump them to HDF5."""
    # NOTE(review): this GymEnv instance is never used here — pol_playback
    # constructs its own; confirm whether this warm-up creation is needed.
    e = GymEnv(env_name)
    # render policy
    pol_playback(env_name, num_trajs)
def pol_playback(env_name, num_trajs=100):
    """Roll out a uniform-random policy and save the transitions to
    ``<env_name>_random.hdf5`` (D4RL-style layout).

    :param env_name: gym environment id to load via mjrl's GymEnv
    :param num_trajs: number of full-horizon episodes to collect
    """
    e = GymEnv(env_name)
    e.reset()

    obs_ = []
    act_ = []
    rew_ = []
    term_ = []
    info_qpos_ = []
    info_qvel_ = []
    ravg = []  # per-episode returns (kept for quick inspection/debugging)

    for n in range(num_trajs):
        e.reset()
        returns = 0
        for t in range(e._horizon):
            obs_.append(e.get_obs())
            # MuJoCo state (positions/velocities) stored alongside obs.
            info_qpos_.append(e.env.data.qpos.ravel().copy())
            info_qvel_.append(e.env.data.qvel.ravel().copy())
            action = e.action_space.sample()
            act_.append(action)
            _, rew, _, _ = e.step(action)
            returns += rew
            rew_.append(rew)
            # Under a random policy, an episode only ends at the horizon.
            term_.append(t == (e._horizon - 1))
        ravg.append(returns)

    # Convert the collected transitions to compact arrays.
    obs_ = np.array(obs_).astype(np.float32)
    act_ = np.array(act_).astype(np.float32)
    rew_ = np.array(rew_).astype(np.float32)
    term_ = np.array(term_).astype(np.bool_)
    info_qpos_ = np.array(info_qpos_).astype(np.float32)
    info_qvel_ = np.array(info_qvel_).astype(np.float32)

    # BUGFIX: open the HDF5 file via a context manager so it is flushed and
    # closed even on error (the original never closed the file handle).
    with h5py.File("%s_random.hdf5" % env_name, "w") as dataset:
        dataset.create_dataset("observations", data=obs_, compression="gzip")
        dataset.create_dataset("actions", data=act_, compression="gzip")
        dataset.create_dataset("rewards", data=rew_, compression="gzip")
        dataset.create_dataset("terminals", data=term_, compression="gzip")
        dataset.create_dataset("infos/qpos", data=info_qpos_, compression="gzip")
        dataset.create_dataset("infos/qvel", data=info_qvel_, compression="gzip")
# Program entry point.
# BUGFIX: the guard was duplicated; the second `main()` call was dead code
# (click's command invocation exits via SystemExit before reaching it).
if __name__ == "__main__":
    main()
| 29.454545 | 115 | 0.619599 |
acf7e9976b47785fdba0a023068b8fae1052f2c6 | 1,707 | py | Python | proglearn/tests/test_forest.py | PSSF23/progressive-learning | 894d25972636def6726e2c4219dc0dc99e1c5949 | [
"MIT"
] | 18 | 2020-10-13T00:43:06.000Z | 2022-03-28T09:03:52.000Z | proglearn/tests/test_forest.py | PSSF23/progressive-learning | 894d25972636def6726e2c4219dc0dc99e1c5949 | [
"MIT"
] | 234 | 2020-10-04T17:19:15.000Z | 2022-03-17T15:43:57.000Z | proglearn/tests/test_forest.py | PSSF23/progressive-learning | 894d25972636def6726e2c4219dc0dc99e1c5949 | [
"MIT"
] | 31 | 2020-10-05T07:43:50.000Z | 2022-03-03T21:45:57.000Z | import unittest
import pytest
import numpy as np
import random
from proglearn.forest import LifelongClassificationForest
from proglearn.transformers import TreeClassificationTransformer
from proglearn.voters import TreeClassificationVoter
from proglearn.deciders import SimpleArgmaxAverage
class TestLifelongClassificationForest:
    """Checks for LifelongClassificationForest's default configuration."""

    def test_initialize(self):
        """Constructing with default arguments should succeed."""
        l2f = LifelongClassificationForest()
        # BUGFIX: `assert True` was vacuous and could never fail;
        # assert on the constructed object instead.
        assert isinstance(l2f, LifelongClassificationForest)

    def test_correct_default_transformer(self):
        """Default transformer is the tree transformer."""
        l2f = LifelongClassificationForest()
        assert l2f.pl_.default_transformer_class == TreeClassificationTransformer

    def test_correct_default_voter(self):
        """Default voter is the tree voter."""
        l2f = LifelongClassificationForest()
        assert l2f.pl_.default_voter_class == TreeClassificationVoter

    def test_correct_default_decider(self):
        """Default decider is SimpleArgmaxAverage."""
        l2f = LifelongClassificationForest()
        assert l2f.pl_.default_decider_class == SimpleArgmaxAverage

    def test_correct_default_kwargs(self):
        """Defaults: empty transformer/decider kwargs, voter kappa=inf."""
        l2f = LifelongClassificationForest()

        # transformer
        assert l2f.pl_.default_transformer_kwargs == {}

        # voter
        assert len(l2f.pl_.default_voter_kwargs) == 1
        assert "kappa" in list(l2f.pl_.default_voter_kwargs.keys())
        assert l2f.pl_.default_voter_kwargs["kappa"] == np.inf

        # decider
        assert l2f.pl_.default_decider_kwargs == {}

    def test_correct_default_n_estimators(self):
        """Default forest size is 100 trees."""
        l2f = LifelongClassificationForest()
        assert l2f.default_n_estimators == 100

    def test_correct_true_initilization_finite_sample_correction(self):
        """An explicit default_kappa propagates into the voter kwargs."""
        l2f = LifelongClassificationForest(default_kappa=np.inf)
        assert l2f.pl_.default_voter_kwargs == {"kappa": np.inf}
acf7e9b417a509931f321b7071ac1bbb60c69024 | 1,340 | py | Python | Redis/redis_publisher.py | Dukecat0613/Big-Data | 1afd104ace0bf780beaf2a99ad0e575ccf8826f9 | [
"Apache-2.0"
] | 14 | 2017-02-15T01:10:30.000Z | 2021-02-07T13:11:56.000Z | Redis/redis_publisher.py | Dukecat0613/Big-Data | 1afd104ace0bf780beaf2a99ad0e575ccf8826f9 | [
"Apache-2.0"
] | null | null | null | Redis/redis_publisher.py | Dukecat0613/Big-Data | 1afd104ace0bf780beaf2a99ad0e575ccf8826f9 | [
"Apache-2.0"
] | 4 | 2017-03-24T09:15:42.000Z | 2021-06-02T08:09:45.000Z | # @Author: Hang Wu <Dukecat>
# @Date: 2017-02-11T22:04:34-05:00
# @Email: wuhang0613@gmail.com
# @Last modified by: Dukecat
# @Last modified time: 2017-02-11T22:12:41-05:00
# -read from kafka topic
# - publish to redis pub
from kafka import KafkaConsumer
import redis
import logging
import argparse
import atexit
# Root-logger setup at INFO so each forwarded record is visible.
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)

if __name__ == '__main__':
    # Command line: Kafka source (topic + broker) and Redis sink.
    parser = argparse.ArgumentParser()
    parser.add_argument("topic_name", help="the topic kafka consume from")
    parser.add_argument("kafka_broker")
    parser.add_argument("redis_channel", help='channel to publish to')
    parser.add_argument("redis_host")
    parser.add_argument("redis_port")
    args = parser.parse_args()

    # Kafka consumer subscribed to the requested topic.
    kafka_consumer = KafkaConsumer(args.topic_name,
                                   bootstrap_servers=args.kafka_broker)
    # Redis client acting as the publish side.
    redis_client = redis.StrictRedis(host=args.redis_host,
                                     port=args.redis_port)

    # Forward every Kafka record to the Redis pub/sub channel.
    for msg in kafka_consumer:
        logger.info("receive new data from kafka %s" % msg.value)
        redis_client.publish(args.redis_channel, msg.value)
acf7ead791c939e64fb2f496dfd386c7c601426b | 2,313 | py | Python | examples/adspygoogle/dfp/v201208/update_labels.py | krux/adspygoogle | 6505a71122f45fe3e675f27f2c29f67a1768069b | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | examples/adspygoogle/dfp/v201208/update_labels.py | krux/adspygoogle | 6505a71122f45fe3e675f27f2c29f67a1768069b | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | examples/adspygoogle/dfp/v201208/update_labels.py | krux/adspygoogle | 6505a71122f45fe3e675f27f2c29f67a1768069b | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2020-04-02T19:00:31.000Z | 2020-08-06T03:28:38.000Z | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates the description of all active labels, up to the
first 500. To determine which labels exist, run get_all_labels.py.
This feature is only available to DFP premium solution networks."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
label_service = client.GetService('LabelService', version='v201208')
# Create a statement to select only active labels.
values = [
{
'key': 'isActive',
'value': {
'xsi_type': 'BooleanValue',
'value': 'true'
}
}
]
filter_statement = {
'query': 'WHERE isActive = :isActive LIMIT 500',
'values': values}
# Get labels by filter.
response = label_service.GetLabelsByStatement(filter_statement)[0]
labels = []
if 'results' in response:
labels = response['results']
if labels:
# Update each local label object by changing the description.
for label in labels:
label['description'] = 'These labels are updated.'
# Update labels remotely.
labels = label_service.UpdateLabels(labels)
# Display results.
if labels:
for label in labels:
print ('Label with id \'%s\' and name \'%s\' was updated.'
% (label['id'], label['name']))
else:
print 'No labels were updated.'
else:
print 'No labels found to update.'
| 30.038961 | 80 | 0.693472 |
acf7ebeedc47d97a10a9fe3d1fee3df0335b9aad | 1,223 | py | Python | examples/pybullet/examples/biped2d_pybullet.py | aravindsiv/bullet3 | be5397576db9774d603aa6241958276d30fd162f | [
"Zlib"
] | 3 | 2021-11-12T13:18:46.000Z | 2022-03-31T08:20:30.000Z | examples/pybullet/examples/biped2d_pybullet.py | aravindsiv/bullet3 | be5397576db9774d603aa6241958276d30fd162f | [
"Zlib"
] | 11 | 2021-03-19T18:11:28.000Z | 2021-05-13T18:53:40.000Z | examples/pybullet/examples/biped2d_pybullet.py | aravindsiv/bullet3 | be5397576db9774d603aa6241958276d30fd162f | [
"Zlib"
] | 2 | 2018-07-18T05:35:59.000Z | 2022-03-06T07:04:46.000Z | import pybullet as p
import pybullet_data
import os
import time
GRAVITY = -9.8
dt = 1e-3
iters = 2000
import pybullet_data
p.setAdditionalSearchPath(pybullet_data.getDataPath())
physicsClient = p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.resetSimulation()
#p.setRealTimeSimulation(True)
p.setGravity(0, 0, GRAVITY)
p.setTimeStep(dt)
planeId = p.loadURDF("plane.urdf")
cubeStartPos = [0, 0, 1.13]
cubeStartOrientation = p.getQuaternionFromEuler([0., 0, 0])
botId = p.loadURDF("biped/biped2d_pybullet.urdf", cubeStartPos, cubeStartOrientation)
#disable the default velocity motors
#and set some position control with small force to emulate joint friction/return to a rest pose
jointFrictionForce = 1
for joint in range(p.getNumJoints(botId)):
p.setJointMotorControl2(botId, joint, p.POSITION_CONTROL, force=jointFrictionForce)
#for i in range(10000):
# p.setJointMotorControl2(botId, 1, p.TORQUE_CONTROL, force=1098.0)
# p.stepSimulation()
#import ipdb
#ipdb.set_trace()
import time
p.setRealTimeSimulation(1)
while (1):
#p.stepSimulation()
#p.setJointMotorControl2(botId, 1, p.TORQUE_CONTROL, force=1098.0)
p.setGravity(0, 0, GRAVITY)
time.sleep(1 / 240.)
time.sleep(1000)
| 29.829268 | 95 | 0.77269 |
acf7ed354bbeb70594312201356bff32a57173b4 | 26 | py | Python | makahiki/apps/widgets/status/rsvps/__init__.py | justinslee/Wai-Not-Makahiki | 4b7dd685012ec64758affe0ecee3103596d16aa7 | [
"MIT"
] | 1 | 2015-07-22T11:31:20.000Z | 2015-07-22T11:31:20.000Z | makahiki/apps/widgets/status/users/__init__.py | justinslee/Wai-Not-Makahiki | 4b7dd685012ec64758affe0ecee3103596d16aa7 | [
"MIT"
] | null | null | null | makahiki/apps/widgets/status/users/__init__.py | justinslee/Wai-Not-Makahiki | 4b7dd685012ec64758affe0ecee3103596d16aa7 | [
"MIT"
] | null | null | null | """
analytics package
"""
| 6.5 | 17 | 0.615385 |
acf7efba73e0dfcb8133b9faa1254f010eae6ff1 | 2,748 | py | Python | aliyun-python-sdk-unimkt/aliyunsdkunimkt/request/v20181212/CreateFlowRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-unimkt/aliyunsdkunimkt/request/v20181212/CreateFlowRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-unimkt/aliyunsdkunimkt/request/v20181212/CreateFlowRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkunimkt.endpoint import endpoint_data
class CreateFlowRequest(RpcRequest):
	"""RpcRequest wrapper for the UniMkt ``CreateFlow`` API (version 2018-12-12).

	Each ``get_X``/``set_X`` pair exposes one request parameter.  ``Flow``
	travels in the request body; every other parameter is sent as a query
	parameter.
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'UniMkt', '2018-12-12', 'CreateFlow', '1.0.0')
		self.set_method('POST')
		# Older SDK cores may not define the endpoint attributes, so only
		# populate them when they exist on this instance.
		if hasattr(self, "endpoint_map"):
			self.endpoint_map = endpoint_data.getEndpointMap()
		if hasattr(self, "endpoint_regional"):
			self.endpoint_regional = endpoint_data.getEndpointRegional()

	def get_Business(self):
		return self.get_query_params().get('Business')

	def set_Business(self, Business):
		self.add_query_param('Business', Business)

	def get_ClientToken(self):
		return self.get_query_params().get('ClientToken')

	def set_ClientToken(self, ClientToken):
		self.add_query_param('ClientToken', ClientToken)

	def get_UserId(self):
		return self.get_query_params().get('UserId')

	def set_UserId(self, UserId):
		self.add_query_param('UserId', UserId)

	def get_OriginSiteUserId(self):
		return self.get_query_params().get('OriginSiteUserId')

	def set_OriginSiteUserId(self, OriginSiteUserId):
		self.add_query_param('OriginSiteUserId', OriginSiteUserId)

	def get_Environment(self):
		return self.get_query_params().get('Environment')

	def set_Environment(self, Environment):
		self.add_query_param('Environment', Environment)

	def get_AppName(self):
		return self.get_query_params().get('AppName')

	def set_AppName(self, AppName):
		self.add_query_param('AppName', AppName)

	def get_TenantId(self):
		return self.get_query_params().get('TenantId')

	def set_TenantId(self, TenantId):
		self.add_query_param('TenantId', TenantId)

	def get_UserSite(self):
		return self.get_query_params().get('UserSite')

	def set_UserSite(self, UserSite):
		self.add_query_param('UserSite', UserSite)

	def get_Flow(self):
		return self.get_body_params().get('Flow')

	def set_Flow(self, Flow):
		self.add_body_params('Flow', Flow)
acf7efc2ddaebe636c69c64de17b1fb98879bfb7 | 900 | py | Python | 00_Code/01_LeetCode/201_BitwiseANDofNumbersRange.py | KartikKannapur/Data_Structures_and_Algorithms_Python | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | [
"MIT"
] | 1 | 2017-06-11T04:57:07.000Z | 2017-06-11T04:57:07.000Z | 00_Code/01_LeetCode/201_BitwiseANDofNumbersRange.py | KartikKannapur/Data_Structures_and_Algorithms_Python | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | [
"MIT"
] | null | null | null | 00_Code/01_LeetCode/201_BitwiseANDofNumbersRange.py | KartikKannapur/Data_Structures_and_Algorithms_Python | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | [
"MIT"
] | null | null | null | """
Given a range [m, n] where 0 <= m <= n <= 2147483647, return the bitwise AND of all numbers in this range, inclusive.
For example, given the range [5, 7], you should return 4.
"""
class Solution(object):
    def rangeBitwiseAnd(self, m, n):
        """Return the bitwise AND of every integer in [m, n] inclusive.

        The AND of the whole range equals the common high-order bit prefix
        of m and n: any bit position below the first differing bit takes
        both 0 and 1 somewhere in the range, so it ANDs to 0.  The original
        shipped an O(n - m) brute-force loop (TLE on large ranges) followed
        by an unreachable copy of this method; only the O(log n) version
        remains.

        :type m: int
        :type n: int
        :rtype: int
        """
        # Shift both bounds right until they agree, counting the shifts.
        shift = 0
        while m != n:
            m >>= 1
            n >>= 1
            shift += 1
        # Restore the shared prefix to its original bit positions.
        return n << shift
acf7f00bbf6da2eb9e3b94403560c549952be094 | 1,607 | py | Python | setup.py | IBM/redstone | 61cf72e60f631f2eb435b259838737c1f9faae95 | [
"Apache-2.0"
] | 2 | 2021-02-25T19:33:50.000Z | 2022-02-14T23:53:10.000Z | setup.py | IBM/redstone | 61cf72e60f631f2eb435b259838737c1f9faae95 | [
"Apache-2.0"
] | 10 | 2020-10-22T21:13:08.000Z | 2021-05-24T15:28:15.000Z | setup.py | IBM/redstone | 61cf72e60f631f2eb435b259838737c1f9faae95 | [
"Apache-2.0"
] | 2 | 2020-10-21T16:00:51.000Z | 2021-05-24T17:14:37.000Z | # Copyright 2021 Mathew Odden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
# Reuse the repository README as the PyPI long description so the project
# page renders the same content as the repo front page.
with open("README.md", "r", encoding="utf8") as fh:
    long_desc = fh.read()

setup(
    name="redstone",
    version="0.5.1",
    author="Mathew Odden",
    author_email="mathewrodden@gmail.com",
    url="https://github.com/IBM/redstone",
    description="A Pythonic IBM Cloud SDK",
    long_description=long_desc,
    long_description_content_type="text/markdown",
    packages=find_packages(),
    # requests[security] pulls in the TLS extras used for IBM Cloud calls.
    install_requires=["requests[security]", "cryptography"],
    extras_require={
        "docs": ["sphinx>=3.1", "sphinx_rtd_theme"],
    },
    # Console scripts installed alongside the package.
    entry_points={
        "console_scripts": [
            "rs-crypto = redstone.crypto.__main__:main",
            "rs-keyprotect = redstone.keyprotect.cli:main",
        ]
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: Apache Software License",
    ],
    python_requires=">=3.6",
)
| 33.479167 | 74 | 0.671437 |
acf7f05c80811186a37b6d697e976f1c619bdf1b | 464 | py | Python | prina/utils.py | Ailln/prina | 2f3ba01dd9e5ec8601b2619d8b4ce2d1c7381c96 | [
"MIT"
] | 1 | 2019-12-13T15:06:08.000Z | 2019-12-13T15:06:08.000Z | prina/utils.py | Ailln/prina | 2f3ba01dd9e5ec8601b2619d8b4ce2d1c7381c96 | [
"MIT"
] | null | null | null | prina/utils.py | Ailln/prina | 2f3ba01dd9e5ec8601b2619d8b4ce2d1c7381c96 | [
"MIT"
] | null | null | null | import inspect
def prina(*args):
result_list = []
callers_local_vars = inspect.currentframe().f_back.f_locals.items()
for arg in args:
flag = True
for key, value in callers_local_vars:
if value is arg:
result_list.append(key + ": " + str(value))
flag = False
if flag:
result_list.append(str(arg))
result = " ".join(result_list)
print(result)
return result
| 23.2 | 71 | 0.571121 |
acf7f0bba4aa85def11dc5094e8caf74dd1171ed | 1,014 | py | Python | zolegame/constansts.py | marisabele/Zole | 19ab7b417ad54d1072b010d62a09b7ff9d7c1fd0 | [
"Apache-2.0"
] | 2 | 2017-10-31T21:45:16.000Z | 2018-12-09T15:51:48.000Z | zolegame/constansts.py | marisabele/Zole | 19ab7b417ad54d1072b010d62a09b7ff9d7c1fd0 | [
"Apache-2.0"
] | null | null | null | zolegame/constansts.py | marisabele/Zole | 19ab7b417ad54d1072b010d62a09b7ff9d7c1fd0 | [
"Apache-2.0"
] | null | null | null |
class Cards:
    """Integer card identifiers for a 26-card Zole deck (ids 0-25).

    Ids 0-13 are the trump group, followed by clubs, spades and hearts
    (4 cards each).  ``POINTS[i]`` is the point value of card ``i``; the
    whole deck totals 120 points.  The Python-2-only ``xrange`` calls were
    replaced with ``range`` -- identical values, works on Python 2 and 3.
    """
    # trumps ["cQ","sQ","hQ","dQ","cJ","sJ","hJ","dJ","dA","d10","dK","d9","d8","d7"]
    TRUMPS = tuple(range(14))
    # clubs ["cA","c10","cK","c9"]
    CLUBS = tuple(range(14, 18))
    # spades ["sA","s10","sK","s9"]
    SPADES = tuple(range(18, 22))
    # hearts ["hA","h10","hK","h9"]
    HEARTS = tuple(range(22, 26))
    ALL = list(range(26))
    # Card orderings used when the given suit is led.
    CLUBS_FIRST = TRUMPS + CLUBS + SPADES + HEARTS
    SPADES_FIRST = TRUMPS + SPADES + CLUBS + HEARTS
    HEARTS_FIRST = TRUMPS + HEARTS + CLUBS + SPADES
    # card points
    POINTS = [3, 3, 3, 3, 2, 2, 2, 2, 11, 10, 4, 0, 0, 0,
              11, 10, 4, 0,
              11, 10, 4, 0,
              11, 10, 4, 0]
class Contract:
    """Single-character codes identifying the Zole game contract types."""
    TABLE = 't'  # the player picks up the table cards and plays one against two
    BIG = 'b'  # the player plays alone against two, without the table cards
    SMALL = 's'  # "small": the soloist must not win any trick
    PARTNER = 'p'  # the player plays together with a partner
    DESK = 'd'  # desk game: the winner is the player with fewer tricks or fewer points
| 32.709677 | 84 | 0.542406 |
acf7f1a8e10213fa60edbdea3818706aa0d0543a | 1,023 | py | Python | parser.py | justinhaef/audit_endpoints | 74a77c26c3d7f2a12773961a39220139e3e36537 | [
"MIT"
] | null | null | null | parser.py | justinhaef/audit_endpoints | 74a77c26c3d7f2a12773961a39220139e3e36537 | [
"MIT"
] | null | null | null | parser.py | justinhaef/audit_endpoints | 74a77c26c3d7f2a12773961a39220139e3e36537 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from deepdiff import DeepDiff
import logging
log = logging.getLogger(__name__)
@dataclass
class Parser:
    """Compare endpoint configuration JSON documents against a standard.

    ``standard`` holds the parsed reference configuration that every
    endpoint config is diffed against.
    """
    standard: object

    def _endpoint_compare(self, config):
        """Take endpoint json and compare to standard json.

        Returns the DeepDiff between the two (order-insensitive).
        """
        return DeepDiff(config, self.standard, ignore_order=True)

    def compare(self, config: dict, filename: str) -> dict:
        """Diff one endpoint config against the standard.

        Returns a dict with the source ``filename``, the endpoint ``name``
        (``"Unknown"`` when the name field is empty) and ``difference``,
        the changed values -- empty when the configs match.
        """
        diff = self._endpoint_compare(config)
        if not config['items']['SystemUnit.Name']['value']:
            name = "Unknown"
        else:
            name = config['items']['SystemUnit.Name']['value']
        try:
            difference = diff['values_changed']
        except KeyError as e:
            # No 'values_changed' bucket means nothing differed.  The
            # original fell through to `return data` with `data` unbound,
            # raising UnboundLocalError; report an empty difference instead.
            log.error(f"KeyError of {e} making data object: {name}")
            difference = {}
        return {
            'filename': filename,
            'name': name,
            'difference': difference,
        }
| 30.088235 | 68 | 0.58651 |
acf7f23fac01d0f8a1b7b14a6e4b93c52aaabba0 | 91 | py | Python | venv/backend/lib/python3.7/os.py | bntmai/ip-socket | 6de5f7e75c15c3646bdf698143d5f0baca39b82b | [
"MIT"
] | null | null | null | venv/backend/lib/python3.7/os.py | bntmai/ip-socket | 6de5f7e75c15c3646bdf698143d5f0baca39b82b | [
"MIT"
] | 13 | 2019-07-03T21:28:31.000Z | 2022-02-26T10:42:05.000Z | venv/backend/lib/python3.7/os.py | bntmai/ip-socket | 6de5f7e75c15c3646bdf698143d5f0baca39b82b | [
"MIT"
] | 1 | 2019-12-05T03:07:58.000Z | 2019-12-05T03:07:58.000Z | /usr/local/Cellar/python/3.7.1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py | 91 | 91 | 0.802198 |
acf7f37c312fc52fd27289d7df3d2874ce427201 | 16,904 | py | Python | ResultsUI2/ResultsUI2/application.py | Sharma-Akshaya/COVID19-Risk-Assessment | 55025200c97314291c46238731b8747568edcce6 | [
"Apache-2.0"
] | 1 | 2020-08-10T22:42:28.000Z | 2020-08-10T22:42:28.000Z | ResultsUI2/ResultsUI2/application.py | Sharma-Akshaya/COVID19-Risk-Assessment | 55025200c97314291c46238731b8747568edcce6 | [
"Apache-2.0"
] | null | null | null | ResultsUI2/ResultsUI2/application.py | Sharma-Akshaya/COVID19-Risk-Assessment | 55025200c97314291c46238731b8747568edcce6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#from cloudant import Cloudant
from flask import Flask, render_template, request, jsonify
import atexit
import os
import json
import csv
from werkzeug.utils import secure_filename
import dateutil.parser, time
import math
# from flask_mysqldb import MySQL
import logging
import ibm_db
# Flask application object (module-level so WSGI servers can import it).
application = Flask(__name__)
# Leftover Cloudant-style placeholders; 'client' stays None in this module.
db_name = 'mydb'
client = None
db = None
# SECURITY NOTE(review): Db2 credentials are hard-coded in source control --
# move them to environment variables or a secrets service.
db2conn = ibm_db.connect("DATABASE=BLUDB;HOSTNAME=dashdb-txn-sbox-yp-dal09-11.services.dal.bluemix.net;PORT=50000;PROTOCOL=TCPIP;UID=kpg48221;PWD=s8lg350w5wv+jbdh;","kpg48221","s8lg350w5wv+jbdh")
@application.route('/')
def root():
    """Serve the login page at the site root."""
    return render_template('login.html')
# return application.send_static_file('login.html')
@application.route('/register')
def register():
    """Serve the user registration page."""
    return render_template('Register.html')
# def getRiskPatients():
# stmt="SELECT time,mag,place,locationSource FROM quakeData
# WHERE mag>= %s AND mag<= %s AND date(time)>= %s AND date(time)<= %s"
# cur.execute(stmt,(mag1,mag2,startDate,endDate,))
# @application.route('/dashboard', methods=['GET', 'POST'])
# def dashboard():
# # x = [1,2,3,4,5,6,7,8,9,10]
# x=[1,2,3]
# y=["High","Medium","Low"]
# return render_template('basic1.html',x1=x,y1=y)
def _fetch_patient_names(sql, params):
    """Run a parameterized query on the module Db2 connection.

    ``params`` are bound 1-based in order.  Each fetched row is copied
    because ibm_db reuses its fetch buffer between calls.  Returns a list
    of row dicts.
    """
    stmt = ibm_db.prepare(db2conn, sql)
    for position, value in enumerate(params, start=1):
        ibm_db.bind_param(stmt, position, value)
    ibm_db.execute(stmt)
    rows = []
    row = ibm_db.fetch_assoc(stmt)
    while row != False:
        rows.append(row.copy())
        row = ibm_db.fetch_assoc(stmt)
    return rows


@application.route('/dashboard', methods=['GET', 'POST'])
def dashboard():
    """Render the dashboard with patients bucketed by risk percentage.

    Buckets: high >= 65, 35 < medium < 65, low <= 35.  The three
    copy-pasted query/fetch stanzas of the original are collapsed into
    _fetch_patient_names(); the debug prints were removed.
    """
    rows1 = _fetch_patient_names(
        "SELECT FIRSTNAME,LASTNAME FROM PATIENTS WHERE RISK_PERCENTAGE>=?",
        (65,))
    rows2 = _fetch_patient_names(
        "SELECT FIRSTNAME,LASTNAME FROM PATIENTS"
        " WHERE RISK_PERCENTAGE>? AND RISK_PERCENTAGE<?",
        (35, 65))
    rows3 = _fetch_patient_names(
        "SELECT FIRSTNAME,LASTNAME FROM PATIENTS WHERE RISK_PERCENTAGE<=?",
        (35,))
    # TODO(review): the chart data is still the hard-coded placeholder the
    # original used; the commented-out intent looked like the bucket sizes,
    # i.e. x = [len(rows1), len(rows2), len(rows3)] -- confirm with the
    # dashboard.html template before changing.
    x = [1, 2, 3]
    y = ["High", "Medium", "Low"]
    return render_template('dashboard.html', value3=rows3, value2=rows2,
                           value1=rows1, x1=x, y1=y)
@application.route('/login')
def login():
    """Serve the login page."""
    return render_template('login.html')
@application.route('/patient')
def patient():
    """Serve the patient-details page with placeholder row data."""
    # Placeholder row values the template iterates over.
    x = [[1,2,3,4,5,6,7,8]]
    return render_template('PatientDetails.html',value=x)
# #Database Connection
# def connection():
# # db2conn = ibm_db.connect("DATABASE=BLUDB;HOSTNAME=dashdb-txn-sbox-yp-dal09-11.services.dal.bluemix.net;PORT=50000;PROTOCOL=TCPIP;UID=kpg48221;PWD=s8lg350w5wv+jbdh;","kpg48221","s8lg350w5wv+jbdh")
# try:
# db2conn = ibm_db.connect("DATABASE=BLUDB;HOSTNAME=dashdb-txn-sbox-yp-dal09-11.services.dal.bluemix.net;PORT=50000;PROTOCOL=TCPIP;UID=kpg48221;PWD=s8lg350w5wv+jbdh;","kpg48221","s8lg350w5wv+jbdh")
# except:
# print("no connection:", ibm_db.conn_errormsg())
# else:
# print("The connection was successful")
# # conn_str="DATABASE=BLUDB;HOSTNAME=dashdb-txn-sbox-yp-dal09-11.services.dal.bluemix.net;PORT=50000;PROTOCOL=TCPIP;UID=kpg48221;PWD=s8lg350w5wv+jbdh;"
# # db2conn = ibm_db.connect(conn_str,",")
# sql = "SELECT * FROM PATIENTS"
# stmt = ibm_db.exec_immediate(db2conn, sql)
# #ibm_db.execute(sql)
# rows=[]
# # fetch the result
# result = ibm_db.fetch_assoc(stmt)
# while result != False:
# rows.append(result.copy())
# result = ibm_db.fetch_assoc(stmt)
# print(result)
# # close database connection
# ibm_db.close(db2conn)
# jsonData={"userid":"1ID2012","firstname":"Odele","lastname":"Syrett","dob":"1954-12-13","gender":"female","bloodtype":"B-","lung_issues":False,"hypertension":False,"heartdisease":False,"hiv":False,"diabetes":True,"cancer":False,"age":23,"fever":4,"drycough":6,"fatigue":0,"trouble_breathing":0,"muscle_pain":0,"sore_throat":0,"heart_rate":[{"heartrate": 0, "timestamp": "2019-08-24 22:05:09"},{"heartrate": 0, "timestamp": "2019-08-24 22:05:09"},{"heartrate": 0, "timestamp": "2019-08-24 22:05:09"},{"heartrate": 0, "timestamp": "2019-08-24 22:05:09"}],"headache":0,"runnynose":0,"diarrhea":0,"zipcode":76204,"latitude":-6.7594317,"longitude":111.4484559,"risk_percentage":4.9851563425}
# Sample payload used for manual testing (schema shown in the commented-out
# example document above).
jsonData = 'sample.json'
# NOTE(review): file handle is never closed -- prefer a 'with' block.
json_data=open(jsonData).read()
json_obj = json.loads(json_data)
#connecting to db
# ibmDB= ibm_db.connect("DATABASE=BLUDB;HOSTNAME=dashdb-txn-sbox-yp-dal09-11.services.dal.bluemix.net;PORT=50001;PROTOCOL=TCPIP;UID=kpg48221;PWD=s8lg350w5wv+jbdh;Security=SSL;","ssljdbcurl","")
#curser= ibmDB.cursor()
# NOTE(review): ibm_db_dbi is never imported in this module, so the next
# line raises NameError at import time -- add 'import ibm_db_dbi' or drop it.
conn = ibm_db_dbi.Connection(db2conn)
def risk_calcuation(user_details):
    """Compute a COVID risk score from the app's JSON payload.

    NOTE: ``user_details`` is mutated in place -- boolean/categorical
    fields are replaced by numeric raw scores before the weighted sum is
    taken.  Returns the weighted sum multiplied by 10.

    BUG FIXES vs. the original:
    * The heart-rate band tests used ``or`` between the two bounds of each
      band (e.g. ``avg >= 50 or avg <= 60`` is always true), so every
      non-extreme rate scored 2.  The bands now use proper range checks.
    * An empty heart-rate list no longer raises ZeroDivisionError.
    """
    pre_existing = ["lung_issues", "hypertension", "heartdisease", "hiv",
                    "diabetes", "cancer"]
    weights = {"age": 0.02, "fever": 0.2, "drycough": 0.19, "fatigue": 0.16,
               "trouble_breathing": 0.15, "muscle_pain": 0.0225,
               "sore_throat": 0.015, "heart_rate": 0.0125, "headache": 0.01,
               "runnynose": 0.01, "diarrhea": 0.005, "gender": 0.005}
    symptoms_list = ["gender", "lung_issues", "hypertension", "heartdisease",
                     "hiv", "diabetes", "cancer", "age", "fever", "drycough",
                     "fatigue", "trouble_breathing", "muscle_pain",
                     "sore_throat", "heart_rate", "headache", "runnynose",
                     "diarrhea"]

    # Raw scores for pre-existing conditions (0 when absent).
    condition_scores = {"lung_issues": 5, "hypertension": 5.5,
                        "heartdisease": 5, "hiv": 4.5, "diabetes": 4,
                        "cancer": 3.5}
    for condition in pre_existing:
        user_details[condition] = (condition_scores[condition]
                                   if user_details[condition] else 0)

    # Raw score for gender (0 for any unrecognized value, which previously
    # left a string in place and crashed the weighted sum).
    sex = user_details["gender"].lower()
    if sex == "male":
        user_details["gender"] = 5.5
    elif sex == "female":
        user_details["gender"] = 4.5
    else:
        user_details["gender"] = 0

    # Raw score for age band (ages < 1 keep their original numeric value,
    # as in the original implementation).
    age = user_details["age"]
    if age >= 75:
        user_details["age"] = 6
    elif 65 <= age <= 74:
        user_details["age"] = 5.5
    elif 45 <= age <= 64:
        user_details["age"] = 5
    elif 18 <= age <= 44:
        user_details["age"] = 4
    elif 1 <= age <= 17:
        user_details["age"] = 3

    # Raw score for the average heart rate; extremes score highest.
    readings = user_details["heart_rate"]
    if not readings:
        user_details["heart_rate"] = 0
    else:
        avg_rate = sum(r['heartrate'] for r in readings) / len(readings)
        if avg_rate >= 130 or avg_rate <= 50:
            user_details["heart_rate"] = 3
        elif 50 <= avg_rate <= 60 or 120 <= avg_rate <= 130:
            user_details["heart_rate"] = 2
        elif 60 <= avg_rate <= 65 or 110 <= avg_rate <= 120:
            user_details["heart_rate"] = 1
        else:
            user_details["heart_rate"] = 0

    # Weighted sum: symptoms use their table weight; every pre-existing
    # condition shares a 0.2 weight.
    risk_value = 0
    for symptom in symptoms_list:
        if symptom in weights:
            weight = weights[symptom]
        else:
            weight = 0.2
        risk_value += user_details[symptom] * weight
    return risk_value * 10
def load_data(user_details):
    """Insert one patient record plus heart-rate readings into Db2.

    Fixes over the original: binds parameters to the *prepared statement*
    (not the SQL string), uses the ``user_details`` argument instead of the
    module-level sample, iterates the heart-rate list of dicts correctly,
    uses the open ``db2conn`` connection (``ibmDB``/``sibm_db``/``null``
    were undefined names), and binds all 27 placeholders.

    NOTE(review): risk_calcuation() mutates ``user_details``, so the
    HEART_RATE column receives the numeric raw score, not the raw list --
    confirm against the table schema.
    """
    rate_sql = "INSERT INTO Heartrate VALUES (?,?,?)"
    for reading in user_details["heart_rate"]:
        stmt = ibm_db.prepare(db2conn, rate_sql)
        ibm_db.bind_param(stmt, 1, str(user_details['userid']))
        ibm_db.bind_param(stmt, 2, str(reading['heartrate']))
        ibm_db.bind_param(stmt, 3, str(reading['timestamp']))
        ibm_db.execute(stmt)

    risk_percentage_val = risk_calcuation(user_details)
    sql = ("INSERT INTO PATIENTS (userid,firstname,lastname,dob,gender,"
           "lung_issues,hypertension,heartdisease,hiv,diabetes,cancer,age,"
           "fever,drycough,fatigue,trouble_breathing,muscle_pain,"
           "sore_throat,heart_rate,headache,runnynose,diarrhea,zipcode,"
           "latitude,longitude,risk_percentage,bloodtype) "
           "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
    stmt = ibm_db.prepare(db2conn, sql)
    columns = ['userid', 'firstname', 'lastname', 'dob', 'gender',
               'lung_issues', 'hypertension', 'heartdisease', 'hiv',
               'diabetes', 'cancer', 'age', 'fever', 'drycough', 'fatigue',
               'trouble_breathing', 'muscle_pain', 'sore_throat',
               'heart_rate', 'headache', 'runnynose', 'diarrhea', 'zipcode',
               'latitude', 'longitude']
    for position, column in enumerate(columns, start=1):
        ibm_db.bind_param(stmt, position, str(user_details[column]))
    ibm_db.bind_param(stmt, 26, risk_percentage_val)
    ibm_db.bind_param(stmt, 27, str(user_details['bloodtype']))
    ibm_db.execute(stmt)
@atexit.register
def shutdown():
    """Disconnect the database client at interpreter exit.

    'client' stays None in this module, so this is currently a no-op.
    """
    if client:
        client.disconnect()
if __name__ == '__main__':
    # Local development server only; debug=True must not be used in production.
    application.run(host='127.0.0.1', port=8000, debug=True)
# @application.route('/uploadData')
# def uploadData():
# return render_template("uploadData.html")
# #THIS LOADS FILE (LOCALLY) INTO DATABASE
# def _get_col_datatypes(fin):
# dr = csv.DictReader(fin) # comma is default delimiter
# fieldTypes = {}
# for entry in dr:
# feildslLeft = [f for f in dr.fieldnames if f not in fieldTypes.keys()]
# if not feildslLeft: break # We're done
# for field in feildslLeft:
# data = entry[field]
# # Need data to decide
# if len(data) == 0:
# continue
# if data.isdigit():
# fieldTypes[field] = "INTEGER"
# else:
# fieldTypes[field] = "TEXT"
# # Currently there's no support for DATE in sqllite
# if len(feildslLeft) > 0:
# raise Exception("Failed to find all the columns data types - Maybe some are empty?")
# return fieldTypes
# def escapingGenerator(f):
# for line in f:
# yield line.encode("ascii", "xmlcharrefreplace").decode("ascii")
# def csvToDb(csvFile, outputToFile = False):
# # implement output to file
# with open(csvFile,mode='r', encoding="ISO-8859-1") as fin:
# dt = _get_col_datatypes(fin)
# fin.seek(0)
# reader = csv.DictReader(fin)
# # To keep the order of the columns name just as in the CSV
# fields = reader.fieldnames
# cols = []
# # Setting field and type
# for f in fields:
# cols.append("%s %s" % (f, dt[f]))
# cur = mysql.connection.cursor()
# #create table statement:
# stmt = "CREATE TABLE if not exists quakeData (%s)" % ",".join(cols)
# cur.execute(stmt)
# fin.seek(0)
# reader = csv.DictReader(fin,fieldnames = ("time","latitude","longitude","depth","locationSource","magSource"))
# #cur = con.cursor()
# cur = mysql.connection.cursor()
# next(reader,None)
# for row in reader:
# time = dateutil.parser.parse(row['time'])
# #time= get_timezone_date(row['longitude'],row['latitude'], mytime.strftime("%Y-%m-%d %H:%M:%S.%f"))
# latitude = row['latitude'] if row['latitude'] else float (0)
# longitude = row['longitude'] if row['longitude'] else float (0)
# depth= row['depth'] if row['depth'] else float (0)
# locationSource=row['locationSource'] if row['locationSource'] else ''
# magSource=row['magSource'] if row['magSource'] else ''
# cur = mysql.connection.cursor()
# cur.execute('INSERT INTO quakeData (time,latitude,longitude,depth,locationSource,magSource) VALUES (%s,%s,%s,%s,%s,%s)',(str(time),str(latitude),str(longitude),str(depth),status,locationSource,magSource))
# mysql.connection.commit()
# return "File has been uploaded!"
# # METHOD TO UPLOAD THE FILE
# @application.route("/upload", methods=['POST'])
# def upload():
# files = request.files['data_file']
# files.save(os.path.join('static', files.filename))
# msg = csvToDb(os.path.join('static', files.filename))
# return render_template('display.html', msgRet=msg)
# #METHOD FOR QUERY 2
# @application.route("/display", methods=['POST'])
# def display():
# # #SQLITE Connection
# # con = sqlite3.connect('earth.db')
# # cur = con.cursor()
# cur = mysql.connection.cursor()
# mag1=request.form['mag1']
# mag2=request.form['mag2']
# startDate=request.form['startDate']
# endDate=request.form['endDate']
# stmt="SELECT time,mag,place,locationSource FROM quakeData WHERE mag>= %s AND mag<= %s AND date(time)>= %s AND date(time)<= %s"
# cur.execute(stmt,(mag1,mag2,startDate,endDate,))
# return render_template('displayData.html', result_set = cur.fetchall() )
# @application.route("/calculateRisk", methods=['POST'])
# def calculateRisk():
# # #SQLITE Connection
# # con = sqlite3.connect('earth.db')
# # cur = con.cursor()
# cur = mysql.connection.cursor()
# # mysql.connection.create_function("findacos", 1, findacos)
# # mysql.connection.create_function("findsine", 1, findsine)
# # mysql.connection.create_function("findcosine", 1, findcosine)
# latDeg=float(request.form['lat'])
# lonDeg=float(request.form['lon'])
# dist=float(request.form['dist'])
# R=6370
# r= dist/R
# lat = math.radians(latDeg)
# lon = math.radians(lonDeg)
# #(lat,lon) in radians
# lat_min = math.degrees(lat-r)
# lat_max = math.degrees(lat+r)
# del_lon= math.asin(math.sin(r)/math.cos(lat))
# latT = math.asin(math.sin(lat)/math.cos(r))
# lon_min = math.degrees(lon - del_lon)
# lon_max = math.degrees(lon + del_lon)
# #stmt= "SELECT time,mag,place,locationSource FROM quakeData WHERE (latitude >= %s AND latitude <= %s) AND ((longitude >= %s AND longitude <= %s) OR (longitude*(-1) >= %s AND longitude*(-1) <= %s )) AND findacos(findsine(%s) * findsine(latitude) + findcosine(%s) * findcosine(latitude) * findcosine(longitude - (%s))) <= %s"
# #cur.execute(stmt,(lat_min,lat_max,lon_min,lon_max,lon_min,lon_max,lat,lat,lon,r,))
# stmt= "SELECT time,mag,place,locationSource FROM quakeData WHERE (latitude >= %s AND latitude <= %s) AND ((longitude >= %s AND longitude <= %s) OR (longitude*(-1) >= %s AND longitude*(-1) <= %s )) "
# cur.execute(stmt,(lat_min,lat_max,lon_min,lon_max,lon_min,lon_max,))
# return render_template('displayData.html', result_set2 = cur.fetchall() )
| 38.418182 | 688 | 0.634761 |
acf7f3b6b5a2362a105ac64ee70e633964f1c965 | 10,982 | py | Python | gpssim.py | data-luminosity/gpsd | 780c8a9c6bcdd1f8c904eaf28c11ceb317a0a2f7 | [
"BSD-3-Clause"
] | 1 | 2018-10-14T19:08:07.000Z | 2018-10-14T19:08:07.000Z | gpssim.py | mcb30/gpsd | 82f0fe31322ff55479f0f94bf288891e5f16ee8a | [
"BSD-3-Clause"
] | null | null | null | gpssim.py | mcb30/gpsd | 82f0fe31322ff55479f0f94bf288891e5f16ee8a | [
"BSD-3-Clause"
] | null | null | null | # This file is Copyright (c) 2010 by the GPSD project
# BSD terms apply: see the file COPYING in the distribution root for details.
"""
A GPS simulator.
This is proof-of-concept code, not production ready; some functions are stubs.
"""
import sys, math, random, exceptions, time
import gps, gpslib
# First, the mathematics. We simulate a moving viewpoint on the Earth
# and a satellite with specified orbital elements in the sky.
class ksv:
    "Kinematic state vector."
    def __init__(self, time=0, lat=0, lon=0, alt=0, course=0,
                 speed=0, climb=0, h_acc=0, v_acc=0):
        self.time = time        # Seconds from epoch
        self.lat = lat          # Decimal degrees
        self.lon = lon          # Decimal degrees
        self.alt = alt          # Meters
        self.course = course    # Degrees from true North
        self.speed = speed      # Meters per second
        self.climb = climb      # Meters per second
        self.h_acc = h_acc      # Meters per second per second
        self.v_acc = v_acc      # Meters per second per second
    def next(self, quantum=1):
        "State after quantum (rhumb-line motion on a spherical Earth)."
        self.time += quantum
        # Trapezoidal average of speed/climb over the quantum.
        avspeed = (2*self.speed + self.h_acc*quantum)/2
        avclimb = (2*self.climb + self.v_acc*quantum)/2
        self.alt += avclimb * quantum
        self.speed += self.h_acc * quantum
        self.climb += self.v_acc * quantum
        distance = avspeed * quantum
        # Formula from <http://williams.best.vwh.net/avform.htm#Rhumb>
        # Initial point cannot be a pole, but GPS doesn't work at high
        # latitudes anyway so it would be OK to fail there.
        # NOTE(review): 'distance' is in meters but the formulary expects
        # angular distance in radians; scaling by the Earth radius still
        # looks missing -- confirm against the aviation formulary.
        # BUG FIXES: the original mixed self.lat (degrees) into the radian
        # dphi/q computation, and called the nonexistent math.mod.
        tc = gps.Deg2Rad(self.course)
        lat0 = gps.Deg2Rad(self.lat)
        lon0 = gps.Deg2Rad(self.lon)
        lat1 = lat0 + distance * math.cos(tc)
        dphi = math.log(math.tan(lat1/2 + math.pi/4) /
                        math.tan(lat0/2 + math.pi/4))
        # The if/then avoids 0/0 indeterminacies on E-W courses.
        if abs(lat1 - lat0) < math.sqrt(1e-15):
            q = math.cos(lat0)
        else:
            q = (lat1 - lat0) / dphi
        dlon = -distance * math.sin(tc) / q
        self.lon = gps.Rad2Deg(math.fmod(lon0 + dlon + math.pi,
                                         2 * math.pi) - math.pi)
        self.lat = gps.Rad2Deg(lat1)
# Satellite orbital elements are available at:
# <http://www.ngs.noaa.gov/orbits/>
# Orbital theory at:
# <http://www.wolffdata.se/gps/gpshtml/anomalies.html>
class satellite:
    "Orbital elements of one satellite. PRESENTLY A STUB"
    def __init__(self, prn):
        # PRN (pseudo-random noise) number identifying the satellite.
        self.prn = prn
    def position(self, time):
        "Return (right ascension, declination) at 'time'. Stub: returns None."
        pass
# Next, the command interpreter. This is an object that takes an
# input source in the track description language, interprets it into
# sammples that might be reported by a GPS, and calls a reporting
# class to generate output.
class gpssimException(Exception):
    """Error in the track-description input, tagged with file and line.

    Inherits the builtin Exception (works on Python 2 and 3) instead of
    the Python-2-only ``exceptions`` module.
    """
    def __init__(self, message, filename, lineno):
        Exception.__init__(self)
        self.message = message
        self.filename = filename
        self.lineno = lineno
    def __str__(self):
        # BUG FIX: the original formatted only the location and dropped
        # the message itself.
        return '"%s", %d: %s' % (self.filename, self.lineno, self.message)
class gpssim:
    """Simulate a moving sensor, with skyview.

    Interprets the track description language (TDL) line by line and asks
    the reporting object (e.g. NMEA) to render each simulation step.
    """
    active_PRNs = list(range(1, 24+1)) + [134, ]
    def __init__(self, outfmt):
        self.ksv = ksv()
        self.ephemeris = {}
        # This sets up satellites at random. Not really what we want.
        # BUG FIX: the original nested a loop over the (empty) ephemeris
        # inside the PRN loop and never created self.skyview at all.
        self.skyview = {}
        for prn in gpssim.active_PRNs:
            self.skyview[prn] = (random.randint(-60, +61),
                                 random.randint(0, 359))
        self.have_ephemeris = False
        self.channels = {}
        self.outfmt = outfmt
        self.status = gps.STATUS_NO_FIX
        self.mode = gps.MODE_NO_FIX
        self.validity = "V"
        self.satellites_used = 0
        self.filename = None
        self.lineno = 0
    def parse_tdl(self, line):
        "Interpret one TDL directive."
        line = line.strip()
        if "#" in line:
            line = line[:line.find("#")]
        if line == '':
            return
        fields = line.split()
        command = fields[0]
        if command == "time":
            self.ksv.time = gps.isotime(fields[1])
        elif command == "location":
            # BUG FIX: was assigned to self.lat/lon/alt, which nothing reads.
            (self.ksv.lat, self.ksv.lon, self.ksv.alt) = map(float, fields[1:])
        elif command == "course":
            # BUG FIX: was stored into self.ksv.time.
            self.ksv.course = float(fields[1])
        elif command == "speed":
            self.ksv.speed = float(fields[1])
        elif command == "climb":
            self.ksv.climb = float(fields[1])
        elif command == "acceleration":
            # BUG FIX: the original assigned h_acc twice.
            (self.ksv.h_acc, self.ksv.v_acc) = map(float, fields[1:])
        elif command == "snr":
            self.channels[int(fields[1])] = float(fields[2])
        elif command == "go":
            self.go(int(fields[1]))
        elif command == "status":
            try:
                code = fields[1]
                self.status = {"no_fix":0, "fix":1, "dgps_fix":2}[code.lower()]
            except KeyError:
                raise gpssimException("invalid status code '%s'" % code,
                                      self.filename, self.lineno)
        elif command == "mode":
            try:
                code = fields[1]
                # BUG FIX: the original stored the mode into self.status.
                self.mode = {"no_fix":1, "2d":2, "3d":3}[code.lower()]
            except KeyError:
                raise gpssimException("invalid mode code '%s'" % code,
                                      self.filename, self.lineno)
        elif command == "satellites":
            self.satellites_used = int(fields[1])
        elif command == "validity":
            self.validity = fields[1]
        else:
            # BUG FIX: the unknown token is fields[0], not fields[1].
            raise gpssimException("unknown command '%s'" % fields[0],
                                  self.filename, self.lineno)
        # FIX-ME: add syntax for ephemeris elements
        self.lineno += 1
    def filter(self, inp, outp):
        "Make this a filter for file-like objects."
        self.filename = inp.name      # BUG FIX: was the builtin 'input'
        self.lineno = 1
        self.output = outp
        for line in inp:
            self.parse_tdl(line)      # BUG FIX: self.execute() did not exist
    def go(self, seconds):
        "Run the simulation for a specified number of seconds."
        for i in range(seconds):
            self.ksv.next()
            if self.have_ephemeris:
                self.skyview = {}
                for (prn, sat) in self.ephemeris.items():
                    self.skyview[prn] = sat.position(i)
            # BUG FIX: self.gpstype was never set; use the stored reporter.
            self.output.write(self.outfmt.report(self))
# Reporting classes need to have a report() method returning a string
# that is a sentence (or possibly several sentences) reporting the
# state of the simulation. Presently we have only one, for NMEA
# devices, but the point of the architecture is so that we could simulate
# others - SirF, Evermore, whatever.
MPS_TO_KNOTS = 1.9438445 # Meters per second to knots
class NMEA:
    "NMEA 0183 output generator."
    def __init__(self):
        # Sentences emitted per report; an entry may also be a tuple
        # (interval, name) to emit only every interval-th report.
        self.sentences = ("RMC", "GGA",)
        self.counter = 0
    def add_checksum(self, mstr):
        "Concatenate NMEA checksum and trailer to a string"
        # The checksum is the XOR of every character after the leading '$'.
        csum = 0
        for (i, c) in enumerate(mstr):
            if i == 0 and c == "$":
                continue
            csum ^= ord(c)
        mstr += "*%02X\r\n" % csum
        return mstr
    def degtodm(self, angle):
        "Decimal degrees to GPS-style, degrees first followed by minutes."
        (fraction, _integer) = math.modf(angle)
        return math.floor(angle) * 100 + fraction * 60
    def GGA(self, sim):
        "Emit GGA sentence describing the simulation state."
        tm = time.gmtime(sim.ksv.time)
        gga = \
            "$GPGGA,%02d%02d%02d,%09.4f,%c,%010.4f,%c,%d,%02d," % (
            tm.tm_hour,
            tm.tm_min,
            tm.tm_sec,
            self.degtodm(abs(sim.ksv.lat)), "SN"[sim.ksv.lat > 0],
            self.degtodm(abs(sim.ksv.lon)), "WE"[sim.ksv.lon > 0],
            sim.status,
            sim.satellites_used)
        # HDOP calculation goes here
        gga += ","
        if sim.mode == gps.MODE_3D:
            # BUG FIX: the original formatted self.ksv.lat (an attribute
            # NMEA does not have); the antenna altitude belongs here.
            gga += "%.1f,M" % sim.ksv.alt
        gga += ","
        gga += "%.3f,M," % gpslib.wg84_separation(sim.ksv.lat, sim.ksv.lon)
        # Magnetic variation goes here
        # gga += "%3.2f,M," % mag_var
        gga += ",,"
        # Time in seconds since last DGPS update goes here
        gga += ","
        # DGPS station ID goes here
        return self.add_checksum(gga)
    def GLL(self, sim):
        "Emit GLL sentence describing the simulation state."
        tm = time.gmtime(sim.ksv.time)
        # BUG FIX: the sentence identifier was misspelled "$GPLL".
        gll = \
            "$GPGLL,%09.4f,%c,%010.4f,%c,%02d%02d%02d,%s," % (
            self.degtodm(abs(sim.ksv.lat)), "SN"[sim.ksv.lat > 0],
            self.degtodm(abs(sim.ksv.lon)), "WE"[sim.ksv.lon > 0],
            tm.tm_hour,
            tm.tm_min,
            tm.tm_sec,
            sim.validity,
            )
        # FAA mode indicator could go after these fields.
        return self.add_checksum(gll)
    def RMC(self, sim):
        "Emit RMC sentence describing the simulation state."
        tm = time.gmtime(sim.ksv.time)
        # BUG FIXES: the leading '$' was missing, the speed field was fed
        # sim.course (which does not exist) converted to knots, and the
        # track-made-good field between speed and date was absent.
        rmc = \
            "$GPRMC,%02d%02d%02d,%s,%09.4f,%c,%010.4f,%c,%.1f,%.1f,%02d%02d%02d," % (
            tm.tm_hour,
            tm.tm_min,
            tm.tm_sec,
            sim.validity,
            self.degtodm(abs(sim.ksv.lat)), "SN"[sim.ksv.lat > 0],
            self.degtodm(abs(sim.ksv.lon)), "WE"[sim.ksv.lon > 0],
            sim.ksv.speed * MPS_TO_KNOTS,
            sim.ksv.course,
            tm.tm_mday,
            tm.tm_mon,
            tm.tm_year % 100)
        # Magnetic variation goes here
        # rmc += "%3.2f,M," % mag_var
        rmc += ",,"
        # FAA mode goes here
        return self.add_checksum(rmc)
    def ZDA(self, sim):
        "Emit ZDA sentence describing the simulation state."
        tm = time.gmtime(sim.ksv.time)
        # BUG FIX: the minutes field used "%2d" (space-padded).
        zda = "$GPZDA,%02d%02d%02d,%02d,%02d,%04d" % (
            tm.tm_hour,
            tm.tm_min,
            tm.tm_sec,
            tm.tm_mday,
            tm.tm_mon,
            tm.tm_year,
            )
        # Local zone description, 00 to +- 13 hours, goes here
        zda += ","
        # Local zone minutes description goes here
        zda += ","
        return self.add_checksum(zda)
    def report(self, sim):
        "Report the simulation state."
        out = ""
        for sentence in self.sentences:
            if isinstance(sentence, tuple):
                (interval, sentence) = sentence
                if self.counter % interval:
                    continue
            # getattr replaces the Python-2-only apply() builtin.
            out += getattr(self, sentence)(sim)
        self.counter += 1
        return out
# The very simple main line.
if __name__ == "__main__":
    try:
        # BUG FIX: pass an NMEA *instance*; report() needs bound state.
        gpssim(NMEA()).filter(sys.stdin, sys.stdout)
    except gpssimException as e:
        # Portable error reporting (the original used the Python-2-only
        # 'except X, e' and 'print >>sys.stderr' forms).
        sys.stderr.write(str(e) + "\n")
# gpssim.py ends here.
| 38 | 85 | 0.559552 |
acf7f4ee339f69199351f68eb47d3b589e60d696 | 3,950 | py | Python | d2l-zh/convolutional-neural-network/batch-norm.py | hyschn/practice-code | cfa1eb373f723488a11af1107af16956a5851905 | [
"Apache-2.0"
] | 2 | 2018-11-15T02:49:57.000Z | 2019-02-22T08:56:14.000Z | d2l-zh/convolutional-neural-network/batch-norm.py | hyschn/practice-code | cfa1eb373f723488a11af1107af16956a5851905 | [
"Apache-2.0"
] | 7 | 2020-07-16T09:26:45.000Z | 2022-03-02T02:48:47.000Z | d2l-zh/convolutional-neural-network/batch-norm.py | hyschn/practice-code | cfa1eb373f723488a11af1107af16956a5851905 | [
"Apache-2.0"
] | 1 | 2018-11-15T02:49:58.000Z | 2018-11-15T02:49:58.000Z | # encoding:utf-8
"""
@Time : 2020-05-21 11:07
@Author : yshhuang@foxmail.com
@File : batch-norm.py
@Software: PyCharm
"""
import d2lzh as d2l
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import nn
def batch_norm(X, gamma, beta, moving_mean, moving_var,
               eps, momentum):
    """Functional batch normalization for dense (2-D) or conv (4-D) inputs.

    Returns (Y, moving_mean, moving_var): the normalized output plus the
    updated running statistics (the caller must store them back).
    """
    # Use autograd's mode to decide between training and inference.
    if not autograd.is_training():
        # Inference: normalize with the accumulated moving statistics.
        X_hat = (X - moving_mean) / nd.sqrt(moving_var + eps)
    else:
        assert len(X.shape) in (2, 4)
        if len(X.shape) == 2:
            # Dense layer: statistics over the batch axis, per feature.
            mean = X.mean(axis=0)
            var = ((X - mean) ** 2).mean(axis=0)
        else:
            # Conv layer: statistics per channel, over the batch and both
            # spatial axes; keepdims keeps shape (1, C, 1, 1) so the
            # statistics broadcast against X.
            mean = X.mean(axis=(0, 2, 3), keepdims=True)
            # Bug fix: variance must be reduced over the SAME axes as the
            # mean.  The original used .mean(axis=0), which produced
            # wrongly-shaped per-pixel statistics instead of per-channel.
            var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)
        # Training: normalize with the current mini-batch statistics.
        X_hat = (X - mean) / nd.sqrt(var + eps)
        # Update the moving statistics with an exponential moving average.
        moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
        moving_var = momentum * moving_var + (1.0 - momentum) * var
    Y = gamma * X_hat + beta
    return Y, moving_mean, moving_var
class BatchNorm(nn.Block):
    """Gluon Block wrapping the functional batch_norm above."""

    def __init__(self, num_features, num_dims, **kwargs):
        super(BatchNorm, self).__init__(**kwargs)
        # Scale/shift parameters broadcast over the batch axis (and, for
        # conv layers, the spatial axes).
        param_shape = (1, num_features) if num_dims == 2 else (1, num_features, 1, 1)
        self.gamma = self.params.get('gamma', shape=param_shape, init=init.One())
        self.beta = self.params.get('beta', shape=param_shape, init=init.Zero())
        # Running statistics live outside autograd; batch_norm updates them.
        self.moving_mean = nd.zeros(param_shape)
        self.moving_var = nd.zeros(param_shape)

    def forward(self, X):
        # Keep the running statistics on the same device as the input.
        if self.moving_mean.context != X.context:
            self.moving_mean = self.moving_mean.copyto(X.context)
            self.moving_var = self.moving_var.copyto(X.context)
        out, self.moving_mean, self.moving_var = batch_norm(
            X, self.gamma.data(), self.beta.data(), self.moving_mean,
            self.moving_var, eps=1e-5, momentum=0.9)
        return out
if __name__ == '__main__':
    # --- LeNet-style network using the hand-written BatchNorm layer ------
    net = nn.Sequential()
    net.add(nn.Conv2D(6, kernel_size=5))
    net.add(BatchNorm(6, num_dims=4))
    net.add(nn.Activation('sigmoid'))
    net.add(nn.MaxPool2D(pool_size=2, strides=2))
    net.add(nn.Conv2D(16, kernel_size=5))
    net.add(BatchNorm(16, num_dims=4))
    net.add(nn.Activation('sigmoid'))
    net.add(nn.MaxPool2D(pool_size=2, strides=2))
    net.add(nn.Dense(120))
    net.add(BatchNorm(120, num_dims=2))
    net.add(nn.Activation('sigmoid'))
    net.add(nn.Dense(84))
    net.add(BatchNorm(84, num_dims=2))
    net.add(nn.Activation('sigmoid'))
    net.add(nn.Dense(10))

    # Hyper-parameters, device and data.
    lr = 1.0
    num_epochs = 5
    batch_size = 256
    ctx = d2l.try_gpu()
    net.initialize(ctx=ctx, init=init.Xavier())
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
    d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer,
                  ctx, num_epochs)
    # Inspect the learned scale/shift of the first BatchNorm layer.
    print(net[1].gamma.data().reshape((-1,)), net[1].beta.data().reshape((-1,)))

    # --- Same architecture with Gluon's built-in nn.BatchNorm ------------
    net = nn.Sequential()
    net.add(nn.Conv2D(6, kernel_size=5))
    net.add(nn.BatchNorm())
    net.add(nn.Activation('sigmoid'))
    net.add(nn.MaxPool2D(pool_size=2, strides=2))
    net.add(nn.Conv2D(16, kernel_size=5))
    net.add(nn.BatchNorm())
    net.add(nn.Activation('sigmoid'))
    net.add(nn.MaxPool2D(pool_size=2, strides=2))
    net.add(nn.Dense(120))
    net.add(nn.BatchNorm())
    net.add(nn.Activation('sigmoid'))
    net.add(nn.Dense(84))
    net.add(nn.BatchNorm())
    net.add(nn.Activation('sigmoid'))
    net.add(nn.Dense(10))
    net.initialize(ctx=ctx, init=init.Xavier())
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
    d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx,
                  num_epochs)
    print(net[1].gamma.data().reshape((-1,)), net[1].beta.data().reshape((-1,)))
| 37.264151 | 80 | 0.577722 |
acf7f57a93fda5b470c3c85126fdb24a8d60cc80 | 11,020 | py | Python | tests/wallet/rpc/test_wallet_rpc.py | littlechare/keepool-chia-blockchain | 87542572b232ccada9e5446a764979b9df8e5233 | [
"Apache-2.0"
] | 1 | 2021-06-30T13:09:31.000Z | 2021-06-30T13:09:31.000Z | tests/wallet/rpc/test_wallet_rpc.py | littlechare/keepool-chia-blockchain | 87542572b232ccada9e5446a764979b9df8e5233 | [
"Apache-2.0"
] | 26 | 2021-07-20T12:04:35.000Z | 2022-03-29T12:10:06.000Z | tests/wallet/rpc/test_wallet_rpc.py | littlechare/keepool-chia-blockchain | 87542572b232ccada9e5446a764979b9df8e5233 | [
"Apache-2.0"
] | null | null | null | import asyncio
import logging
from pathlib import Path
import pytest
from chia.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from chia.rpc.full_node_rpc_api import FullNodeRpcApi
from chia.rpc.full_node_rpc_client import FullNodeRpcClient
from chia.rpc.rpc_server import start_rpc_server
from chia.rpc.wallet_rpc_api import WalletRpcApi
from chia.rpc.wallet_rpc_client import WalletRpcClient
from chia.simulator.simulator_protocol import FarmNewBlockProtocol
from chia.types.peer_info import PeerInfo
from chia.util.bech32m import encode_puzzle_hash
from chia.util.ints import uint16, uint32
from chia.wallet.transaction_record import TransactionRecord
from tests.setup_nodes import bt, setup_simulators_and_wallets, self_hostname
from tests.time_out_assert import time_out_assert
log = logging.getLogger(__name__)
class TestWalletRpc:
    """End-to-end tests of the wallet RPC API against a simulated full node."""

    @pytest.fixture(scope="function")
    async def two_wallet_nodes(self):
        # One simulator full node plus two wallet nodes, torn down per test.
        async for _ in setup_simulators_and_wallets(1, 2, {}):
            yield _

    @pytest.mark.asyncio
    async def test_wallet_make_transaction(self, two_wallet_nodes):
        """Exercise the wallet RPC surface end to end: sending, offline
        signing, key management, backups -- through real RPC servers."""
        test_rpc_port = uint16(21529)
        test_rpc_port_node = uint16(21530)
        num_blocks = 5
        full_nodes, wallets = two_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.full_node.server
        wallet_node, server_2 = wallets[0]
        wallet_node_2, server_3 = wallets[1]
        wallet = wallet_node.wallet_state_manager.main_wallet
        wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
        ph = await wallet.get_new_puzzlehash()
        ph_2 = await wallet_2.get_new_puzzlehash()

        # Connect the first wallet to the full node and farm it some funds.
        await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)

        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

        # Rewards for the latest block confirm one block later, hence the
        # two expected totals.
        initial_funds = sum(
            [calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
        )
        initial_funds_eventually = sum(
            [
                calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
                for i in range(1, num_blocks + 1)
            ]
        )

        wallet_rpc_api = WalletRpcApi(wallet_node)

        config = bt.config
        hostname = config["self_hostname"]
        daemon_port = config["daemon_port"]

        def stop_node_cb():
            # start_rpc_server requires a stop callback; nothing to do here.
            pass

        full_node_rpc_api = FullNodeRpcApi(full_node_api.full_node)

        # Start real RPC servers for both the full node and the wallet.
        rpc_cleanup_node = await start_rpc_server(
            full_node_rpc_api,
            hostname,
            daemon_port,
            test_rpc_port_node,
            stop_node_cb,
            bt.root_path,
            config,
            connect_to_daemon=False,
        )
        rpc_cleanup = await start_rpc_server(
            wallet_rpc_api,
            hostname,
            daemon_port,
            test_rpc_port,
            stop_node_cb,
            bt.root_path,
            config,
            connect_to_daemon=False,
        )

        await time_out_assert(5, wallet.get_confirmed_balance, initial_funds)
        await time_out_assert(5, wallet.get_unconfirmed_balance, initial_funds)

        client = await WalletRpcClient.create(self_hostname, test_rpc_port, bt.root_path, config)
        client_node = await FullNodeRpcClient.create(self_hostname, test_rpc_port_node, bt.root_path, config)
        try:
            addr = encode_puzzle_hash(await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash(), "xch")
            tx_amount = 15600000
            # Over-balance sends must be rejected.
            try:
                await client.send_transaction("1", 100000000000000001, addr)
                raise Exception("Should not create high value tx")
            except ValueError:
                pass

            # Tests sending a basic transaction
            tx = await client.send_transaction("1", tx_amount, addr)
            transaction_id = tx.name

            async def tx_in_mempool():
                tx = await client.get_transaction("1", transaction_id)
                return tx.is_in_mempool()

            await time_out_assert(5, tx_in_mempool, True)
            await time_out_assert(5, wallet.get_unconfirmed_balance, initial_funds - tx_amount)
            assert (await client.get_wallet_balance("1"))["unconfirmed_wallet_balance"] == initial_funds - tx_amount
            assert (await client.get_wallet_balance("1"))["confirmed_wallet_balance"] == initial_funds

            # Farm blocks to a foreign puzzle hash so the send confirms.
            for i in range(0, 5):
                await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_2))

            async def eventual_balance():
                return (await client.get_wallet_balance("1"))["confirmed_wallet_balance"]

            await time_out_assert(5, eventual_balance, initial_funds_eventually - tx_amount)

            # Tests offline signing
            ph_3 = await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash()
            ph_4 = await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash()
            ph_5 = await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash()

            # Test basic transaction to one output
            signed_tx_amount = 888000
            tx_res: TransactionRecord = await client.create_signed_transaction(
                [{"amount": signed_tx_amount, "puzzle_hash": ph_3}]
            )

            assert tx_res.fee_amount == 0
            assert tx_res.amount == signed_tx_amount
            assert len(tx_res.additions) == 2  # The output and the change
            assert any([addition.amount == signed_tx_amount for addition in tx_res.additions])

            # Push the signed bundle through the *full node* client directly.
            push_res = await client_node.push_tx(tx_res.spend_bundle)
            assert push_res["success"]
            assert (await client.get_wallet_balance("1"))[
                "confirmed_wallet_balance"
            ] == initial_funds_eventually - tx_amount

            for i in range(0, 5):
                await client.farm_block(encode_puzzle_hash(ph_2, "xch"))
                await asyncio.sleep(0.5)

            await time_out_assert(5, eventual_balance, initial_funds_eventually - tx_amount - signed_tx_amount)

            # Test transaction to two outputs, from a specified coin, with a fee
            coin_to_spend = None
            for addition in tx_res.additions:
                if addition.amount != signed_tx_amount:
                    coin_to_spend = addition
            assert coin_to_spend is not None

            tx_res = await client.create_signed_transaction(
                [{"amount": 444, "puzzle_hash": ph_4}, {"amount": 999, "puzzle_hash": ph_5}],
                coins=[coin_to_spend],
                fee=100,
            )
            assert tx_res.fee_amount == 100
            assert tx_res.amount == 444 + 999
            assert len(tx_res.additions) == 3  # The outputs and the change
            assert any([addition.amount == 444 for addition in tx_res.additions])
            assert any([addition.amount == 999 for addition in tx_res.additions])
            # removals - additions == the fee actually paid.
            assert sum([rem.amount for rem in tx_res.removals]) - sum([ad.amount for ad in tx_res.additions]) == 100

            push_res = await client_node.push_tx(tx_res.spend_bundle)
            assert push_res["success"]
            for i in range(0, 5):
                await client.farm_block(encode_puzzle_hash(ph_2, "xch"))
                await asyncio.sleep(0.5)

            new_balance = initial_funds_eventually - tx_amount - signed_tx_amount - 444 - 999 - 100
            await time_out_assert(5, eventual_balance, new_balance)

            # Multi-output send through the wallet RPC (signs and pushes).
            send_tx_res: TransactionRecord = await client.send_transaction_multi(
                "1", [{"amount": 555, "puzzle_hash": ph_4}, {"amount": 666, "puzzle_hash": ph_5}], fee=200
            )
            assert send_tx_res is not None
            assert send_tx_res.fee_amount == 200
            assert send_tx_res.amount == 555 + 666
            assert len(send_tx_res.additions) == 3  # The outputs and the change
            assert any([addition.amount == 555 for addition in send_tx_res.additions])
            assert any([addition.amount == 666 for addition in send_tx_res.additions])
            assert (
                sum([rem.amount for rem in send_tx_res.removals]) - sum([ad.amount for ad in send_tx_res.additions])
                == 200
            )

            await asyncio.sleep(3)
            for i in range(0, 5):
                await client.farm_block(encode_puzzle_hash(ph_2, "xch"))
                await asyncio.sleep(0.5)

            new_balance = new_balance - 555 - 666 - 200
            await time_out_assert(5, eventual_balance, new_balance)

            address = await client.get_next_address("1", True)
            assert len(address) > 10

            transactions = await client.get_transactions("1")
            assert len(transactions) > 1

            # Key-management RPCs: list, inspect, generate, add, delete.
            pks = await client.get_public_keys()
            assert len(pks) == 1

            assert (await client.get_height_info()) > 0

            sk_dict = await client.get_private_key(pks[0])
            assert sk_dict["fingerprint"] == pks[0]
            assert sk_dict["sk"] is not None
            assert sk_dict["pk"] is not None
            assert sk_dict["seed"] is not None

            mnemonic = await client.generate_mnemonic()
            assert len(mnemonic) == 24

            await client.add_key(mnemonic)

            pks = await client.get_public_keys()
            assert len(pks) == 2

            await client.log_in_and_skip(pks[1])
            sk_dict = await client.get_private_key(pks[1])
            assert sk_dict["fingerprint"] == pks[1]

            await client.delete_key(pks[0])
            await client.log_in_and_skip(pks[1])
            assert len(await client.get_public_keys()) == 1

            assert not (await client.get_sync_status())

            # The fresh key's wallet starts with an empty balance.
            wallets = await client.get_wallets()
            assert len(wallets) == 1
            balance = await client.get_wallet_balance(wallets[0]["id"])
            assert balance["unconfirmed_wallet_balance"] == 0

            test_wallet_backup_path = Path("test_wallet_backup_file")
            await client.create_backup(test_wallet_backup_path)
            assert test_wallet_backup_path.exists()
            test_wallet_backup_path.unlink()

            try:
                await client.send_transaction(wallets[0]["id"], 100, addr)
                raise Exception("Should not create tx if no balance")
            except ValueError:
                pass
        finally:
            # Checks that the RPC manages to stop the node
            client.close()
            client_node.close()
            await client.await_closed()
            await client_node.await_closed()
            await rpc_cleanup()
            await rpc_cleanup_node()
| 41.273408 | 119 | 0.634392 |
acf7f580edc65eaf647da034f06473819f5abcf3 | 16,246 | py | Python | dueling_network.py | musyoku/dueling-network | b41231ce70d156af8cebd7fca89e37f28ac174f4 | [
"MIT"
] | 6 | 2016-03-09T07:02:19.000Z | 2017-02-22T03:14:41.000Z | dueling_network.py | musyoku/dueling-network | b41231ce70d156af8cebd7fca89e37f28ac174f4 | [
"MIT"
] | null | null | null | dueling_network.py | musyoku/dueling-network | b41231ce70d156af8cebd7fca89e37f28ac174f4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import chainer, math, copy, os
from chainer import cuda, Variable, optimizer, optimizers, serializers, function
from chainer import functions as F
from chainer import links as L
from chainer.utils import type_check
from activations import activations
from config import config
class ConvolutionalNetwork(chainer.Chain):
	"""Convolutional trunk of the Q-network.

	Links are passed in as keyword arguments named "layer_<i>" with
	matching "batchnorm_<i>" links; the configuration attributes below are
	overwritten by build_q_network after construction.
	"""
	def __init__(self, **layers):
		super(ConvolutionalNetwork, self).__init__(**layers)
		self.activation_function = "elu"
		self.n_hidden_layers = 0
		self.apply_batchnorm = True
		self.apply_batchnorm_to_input = False

	def forward_one_step(self, x, test):
		activate = activations[self.activation_function]
		h = x
		for layer_index in range(self.n_hidden_layers):
			u = getattr(self, "layer_%i" % layer_index)(h)
			# Batch-normalize every hidden layer; the input layer only when
			# explicitly requested via apply_batchnorm_to_input.
			skip_bn = layer_index == 0 and self.apply_batchnorm_to_input is False
			if self.apply_batchnorm and not skip_bn:
				u = getattr(self, "batchnorm_%i" % layer_index)(u, test=test)
			h = activate(u)
		return h

	def __call__(self, x, test=False):
		return self.forward_one_step(x, test=test)
class FullyConnectedNetwork(chainer.Chain):
	"""Fully-connected head of the Q-network (value or advantage stream).

	Links are passed in as keyword arguments named "layer_<i>" with
	matching "batchnorm_<i>" links; build_q_network overwrites the
	configuration attributes after construction.
	"""
	def __init__(self, **layers):
		super(FullyConnectedNetwork, self).__init__(**layers)
		self.n_hidden_layers = 0
		self.activation_function = "elu"
		# Robustness/consistency fix: forward_one_step reads
		# apply_batchnorm and apply_dropout, but unlike the sibling
		# ConvolutionalNetwork they were never initialized here, so a
		# stand-alone instance crashed with AttributeError.  Defaults match
		# ConvolutionalNetwork; build_q_network still overrides both.
		self.apply_batchnorm = True
		self.apply_dropout = False
		self.apply_batchnorm_to_input = False
		self.projection_type = "fully_connection"
		self.conv_output_filter_size = (1, 1)

	def forward_one_step(self, x, test):
		f = activations[self.activation_function]
		chain = [x]

		# Convert feature maps to a vector.
		if self.projection_type == "fully_connection":
			# L.Linear flattens its input itself; nothing to do here.
			pass
		elif self.projection_type == "global_average_pooling":
			# Collapse each feature map to one scalar, then flatten.
			batch_size = chain[-1].data.shape[0]
			n_maps = chain[-1].data[0].shape[0]
			chain.append(F.average_pooling_2d(chain[-1], self.conv_output_filter_size))
			chain.append(F.reshape(chain[-1], (batch_size, n_maps)))
		else:
			raise NotImplementedError()

		# Hidden layers
		for i in range(self.n_hidden_layers):
			u = getattr(self, "layer_%i" % i)(chain[-1])
			if self.apply_batchnorm:
				if i == 0 and self.apply_batchnorm_to_input is False:
					pass
				else:
					u = getattr(self, "batchnorm_%i" % i)(u, test=test)
			output = f(u)
			if self.apply_dropout:
				output = F.dropout(output, train=not test)
			chain.append(output)

		# Output layer.  Note: batch normalization and the activation are
		# applied to the output as well (kept as in the original design).
		u = getattr(self, "layer_%i" % self.n_hidden_layers)(chain[-1])
		if self.apply_batchnorm:
			u = getattr(self, "batchnorm_%i" % self.n_hidden_layers)(u, test=test)
		chain.append(f(u))

		return chain[-1]

	def __call__(self, x, test=False):
		return self.forward_one_step(x, test=test)
class Aggregator(function.Function):
	"""chainer Function for the dueling aggregation: Q = V + (A - mean(A)).

	Inputs (see check_type_forward): value (2-d), advantage
	(batch, n_actions), mean (batch,) -- the per-sample mean of the
	advantages, precomputed by the caller.  NOTE(review): the broadcast in
	forward presumably relies on value having shape (batch, 1), i.e. the
	single-unit output of fc_value in build_q_network -- confirm.
	"""
	def as_mat(self, x):
		# Promote a 1-d (batch,) array to a (batch, 1) column so it
		# broadcasts across the action axis; 2-d arrays pass through.
		if x.ndim == 2:
			return x
		return x.reshape(len(x), -1)

	def check_type_forward(self, in_types):
		# Enforce float32 dtype and the expected ranks of the three inputs.
		n_in = in_types.size()
		type_check.expect(n_in == 3)
		value_type, advantage_type, mean_type = in_types

		type_check.expect(
			value_type.dtype == np.float32,
			advantage_type.dtype == np.float32,
			mean_type.dtype == np.float32,
			value_type.ndim == 2,
			advantage_type.ndim == 2,
			mean_type.ndim == 1,
		)

	def forward(self, inputs):
		value, advantage, mean = inputs
		mean = self.as_mat(mean)
		# Center the advantages, then add the (broadcast) state value.
		sub = advantage - mean
		output = value + sub
		return output,

	def backward(self, inputs, grad_outputs):
		xp = cuda.get_array_module(inputs[0])
		# value and mean broadcast over the action axis in forward, so
		# their gradients are the incoming gradient summed over that axis
		# (negated for mean, which enters with a minus sign).
		gx1 = xp.sum(grad_outputs[0], axis=1)
		gx2 = grad_outputs[0]
		return self.as_mat(gx1), gx2, -gx1
def aggregate(value, advantage, mean):
	"""Convenience wrapper applying the dueling Aggregator function."""
	aggregator = Aggregator()
	return aggregator(value, advantage, mean)
class DuelingNetwork:
	"""Dueling Double-DQN agent: shared conv trunk plus separate value and
	advantage heads, a target network, and an experience-replay buffer."""

	def __init__(self):
		print "Initializing DQN..."
		self.exploration_rate = config.rl_initial_exploration

		# Q Network
		self.conv, self.fc_value, self.fc_advantage = build_q_network(config)
		self.load()
		self.update_target()

		# Optimizer
		## RMSProp, ADAM, AdaGrad, AdaDelta, ...
		## See http://docs.chainer.org/en/stable/reference/optimizers.html
		self.optimizer_conv = optimizers.Adam(alpha=config.rl_learning_rate, beta1=config.rl_gradient_momentum)
		self.optimizer_fc_value = optimizers.Adam(alpha=config.rl_learning_rate, beta1=config.rl_gradient_momentum)
		self.optimizer_fc_advantage = optimizers.Adam(alpha=config.rl_learning_rate, beta1=config.rl_gradient_momentum)
		self.optimizer_conv.setup(self.conv)
		self.optimizer_fc_value.setup(self.fc_value)
		self.optimizer_fc_advantage.setup(self.fc_advantage)
		self.optimizer_conv.add_hook(optimizer.GradientClipping(10.0))
		self.optimizer_fc_value.add_hook(optimizer.GradientClipping(10.0))
		self.optimizer_fc_advantage.add_hook(optimizer.GradientClipping(10.0))

		# Replay Memory
		## (state, action, reward, next_state, episode_ends)
		shape_state = (config.rl_replay_memory_size, config.rl_agent_history_length * config.ale_screen_channels, config.ale_scaled_screen_size[1], config.ale_scaled_screen_size[0])
		shape_action = (config.rl_replay_memory_size,)
		self.replay_memory = [
			np.zeros(shape_state, dtype=np.float32),
			np.zeros(shape_action, dtype=np.uint8),
			np.zeros(shape_action, dtype=np.int8),
			np.zeros(shape_state, dtype=np.float32),
			np.zeros(shape_action, dtype=np.bool)
		]
		self.total_replay_memory = 0
		self.no_op_count = 0

	def eps_greedy(self, state, exploration_rate):
		# Epsilon-greedy action selection.  Returns (action, q_max, q_min);
		# q_max/q_min stay None on the random branch.
		prop = np.random.uniform()
		q_max = None
		q_min = None
		if prop < exploration_rate:
			# Select a random action
			action_index = np.random.randint(0, len(config.ale_actions))
		else:
			# Select a greedy action
			state = Variable(state)
			if config.use_gpu:
				state.to_gpu()
			q = self.compute_q_variable(state, test=True)
			if config.use_gpu:
				action_index = cuda.to_cpu(cuda.cupy.argmax(q.data))
				q_max = cuda.to_cpu(cuda.cupy.max(q.data))
				q_min = cuda.to_cpu(cuda.cupy.min(q.data))
			else:
				action_index = np.argmax(q.data)
				q_max = np.max(q.data)
				q_min = np.min(q.data)

		action = self.get_action_with_index(action_index)

		# No-op
		# Count consecutive no-ops (action == 0); once the cap is exceeded,
		# force a random non-no-op action instead.
		self.no_op_count = self.no_op_count + 1 if action == 0 else 0
		if self.no_op_count > config.rl_no_op_max:
			no_op_index = np.argmin(np.asarray(config.ale_actions))
			actions_without_no_op = []
			for i in range(len(config.ale_actions)):
				if i == no_op_index:
					continue
				actions_without_no_op.append(config.ale_actions[i])
			action_index = np.random.randint(0, len(actions_without_no_op))
			action = actions_without_no_op[action_index]
			print "Reached no_op_max.", "New action:", action

		return action, q_max, q_min

	def store_transition_in_replay_memory(self, state, action, reward, next_state, episode_ends):
		# Ring buffer: overwrite the oldest transition once the memory is full.
		index = self.total_replay_memory % config.rl_replay_memory_size
		self.replay_memory[0][index] = state[0]
		self.replay_memory[1][index] = action
		self.replay_memory[2][index] = reward
		if episode_ends is False:
			self.replay_memory[3][index] = next_state[0]
		self.replay_memory[4][index] = episode_ends
		self.total_replay_memory += 1

	def forward_one_step(self, state, action, reward, next_state, episode_ends, test=False):
		# One training step on a minibatch: build the Double-DQN target and
		# return (loss, q).  Action selection uses the online network, the
		# evaluation uses the target network.
		xp = cuda.cupy if config.use_gpu else np
		n_batch = state.shape[0]
		state = Variable(state)
		next_state = Variable(next_state)
		if config.use_gpu:
			state.to_gpu()
			next_state.to_gpu()
		q = self.compute_q_variable(state, test=test)
		q_ = self.compute_q_variable(next_state, test=test)
		max_action_indices = xp.argmax(q_.data, axis=1)
		if config.use_gpu:
			max_action_indices = cuda.to_cpu(max_action_indices)

		# Generate target
		target_q = self.compute_target_q_variable(next_state, test=test)

		# Initialize target signal
		# (the teaching signal starts out as a copy of the current Q values)
		target = q.data.copy()

		for i in xrange(n_batch):
			# Clip all positive rewards at 1 and all negative rewards at -1
			if episode_ends[i] is True:
				target_value = np.sign(reward[i])
			else:
				max_action_index = max_action_indices[i]
				target_value = np.sign(reward[i]) + config.rl_discount_factor * target_q.data[i][max_action_indices[i]]

			action_index = self.get_index_with_action(action[i])
			# Propagate the error only through the action actually taken;
			# the squared error for every other action unit is zero
			# (because target == q there).
			old_value = target[i, action_index]
			diff = target_value - old_value
			# target is an one-hot vector in which the non-zero element(= target signal) corresponds to the taken action.
			# Clip the error to be between -1 and 1.
			if diff > 1.0:
				target_value = 1.0 + old_value
			elif diff < -1.0:
				target_value = -1.0 + old_value
			target[i, action_index] = target_value

		target = Variable(target)

		# Compute error
		loss = F.mean_squared_error(target, q)
		return loss, q

	def replay_experience(self):
		# Sample a minibatch from replay memory and take one gradient step.
		if self.total_replay_memory == 0:
			return

		# Sample random minibatch of transitions from replay memory
		if self.total_replay_memory < config.rl_replay_memory_size:
			replay_index = np.random.randint(0, self.total_replay_memory, (config.rl_minibatch_size, 1))
		else:
			replay_index = np.random.randint(0, config.rl_replay_memory_size, (config.rl_minibatch_size, 1))

		shape_state = (config.rl_minibatch_size, config.rl_agent_history_length * config.ale_screen_channels, config.ale_scaled_screen_size[1], config.ale_scaled_screen_size[0])
		shape_action = (config.rl_minibatch_size,)

		state = np.empty(shape_state, dtype=np.float32)
		action = np.empty(shape_action, dtype=np.uint8)
		reward = np.empty(shape_action, dtype=np.int8)
		next_state = np.empty(shape_state, dtype=np.float32)
		episode_ends = np.empty(shape_action, dtype=np.bool)
		for i in xrange(config.rl_minibatch_size):
			state[i] = self.replay_memory[0][replay_index[i]]
			action[i] = self.replay_memory[1][replay_index[i]]
			reward[i] = self.replay_memory[2][replay_index[i]]
			next_state[i] = self.replay_memory[3][replay_index[i]]
			episode_ends[i] = self.replay_memory[4][replay_index[i]]

		self.optimizer_conv.zero_grads()
		self.optimizer_fc_value.zero_grads()
		self.optimizer_fc_advantage.zero_grads()
		loss, _ = self.forward_one_step(state, action, reward, next_state, episode_ends, test=False)
		loss.backward()
		self.optimizer_fc_value.update()
		self.optimizer_fc_advantage.update()
		self.optimizer_conv.update()

	def compute_q_variable(self, state, test=False):
		# Online network: Q = V + (A - mean(A)), the dueling aggregation.
		output = self.conv(state, test=test)
		value = self.fc_value(output, test=test)
		advantage = self.fc_advantage(output, test=test)
		mean = F.sum(advantage, axis=1) / float(len(config.ale_actions))
		return aggregate(value, advantage, mean)

	def compute_target_q_variable(self, state, test=True):
		# Same aggregation, evaluated with the frozen target network.
		output = self.target_conv(state, test=test)
		value = self.target_fc_value(output, test=test)
		advantage = self.target_fc_advantage(output, test=test)
		mean = F.sum(advantage, axis=1) / float(len(config.ale_actions))
		return aggregate(value, advantage, mean)

	def update_target(self):
		# Freeze a copy of the online network as the new target network.
		self.target_conv = copy.deepcopy(self.conv)
		self.target_fc_value = copy.deepcopy(self.fc_value)
		self.target_fc_advantage = copy.deepcopy(self.fc_advantage)

	def get_action_with_index(self, i):
		return config.ale_actions[i]

	def get_index_with_action(self, action):
		return config.ale_actions.index(action)

	def decrease_exploration_rate(self):
		# Exploration rate is linearly annealed to its final value
		self.exploration_rate -= 1.0 / config.rl_final_exploration_frame
		if self.exploration_rate < config.rl_final_exploration:
			self.exploration_rate = config.rl_final_exploration

	def load(self):
		# Restore network weights from HDF5 files in the working directory,
		# if present; each component is optional.
		filename = "conv.model"
		if os.path.isfile(filename):
			serializers.load_hdf5(filename, self.conv)
			print "convolutional network loaded."
		filename = "fc_value.model"
		if os.path.isfile(filename):
			serializers.load_hdf5(filename, self.fc_value)
			print "value network loaded."
		filename = "fc_advantage.model"
		if os.path.isfile(filename):
			serializers.load_hdf5(filename, self.fc_advantage)
			print "action advantage network loaded."

	def save(self):
		# Persist all three network components to HDF5.
		serializers.save_hdf5("conv.model", self.conv)
		serializers.save_hdf5("fc_value.model", self.fc_value)
		serializers.save_hdf5("fc_advantage.model", self.fc_advantage)
def _build_fc_head(config, conv_output_size, n_output, wscale):
	"""Build one fully-connected head of the dueling architecture.

	conv_output_size -- (width, height) of the conv trunk's feature maps
	n_output -- 1 for the state-value head, n_actions for the advantage head
	"""
	output_map_width, output_map_height = conv_output_size
	if config.q_conv_output_projection_type == "fully_connection":
		n_input = output_map_width * output_map_height * config.q_conv_hidden_channels[-1]
	elif config.q_conv_output_projection_type == "global_average_pooling":
		# Global average pooling collapses each feature map to one scalar.
		n_input = config.q_conv_hidden_channels[-1]
	else:
		raise NotImplementedError()
	fc_units = [(n_input, config.q_fc_hidden_units[0])]
	fc_units += [(config.q_fc_hidden_units[0], config.q_fc_hidden_units[1])]
	fc_units += zip(config.q_fc_hidden_units[1:-1], config.q_fc_hidden_units[2:])
	fc_units += [(config.q_fc_hidden_units[-1], n_output)]
	fc_attributes = {}
	for i, (n_in, n_out) in enumerate(fc_units):
		fc_attributes["layer_%i" % i] = L.Linear(n_in, n_out, wscale=wscale)
		fc_attributes["batchnorm_%i" % i] = L.BatchNormalization(n_out)
	fc = FullyConnectedNetwork(**fc_attributes)
	fc.n_hidden_layers = len(fc_units) - 1
	fc.activation_function = config.q_fc_activation_function
	fc.apply_batchnorm = config.apply_batchnorm
	fc.apply_dropout = config.q_fc_apply_dropout
	fc.apply_batchnorm_to_input = config.q_fc_apply_batchnorm_to_input
	fc.conv_output_filter_size = conv_output_size
	fc.projection_type = config.q_conv_output_projection_type
	if config.use_gpu:
		fc.to_gpu()
	return fc

def build_q_network(config):
	"""Construct (conv, fc_value, fc_advantage) for the dueling Q-network.

	The two fully-connected streams were previously built by two verbatim
	copies of the same code differing only in the output width; they now
	share _build_fc_head.
	"""
	config.check()
	wscale = config.q_wscale

	# Convolutional part of Q-Network
	conv_attributes = {}
	conv_channels = [(config.rl_agent_history_length * config.ale_screen_channels, config.q_conv_hidden_channels[0])]
	conv_channels += zip(config.q_conv_hidden_channels[:-1], config.q_conv_hidden_channels[1:])

	# Track the spatial size of the feature maps after each convolution
	# (no padding): out = (in - filter) / stride + 1.
	output_map_width = config.ale_scaled_screen_size[0]
	output_map_height = config.ale_scaled_screen_size[1]
	for n in xrange(len(config.q_conv_hidden_channels)):
		output_map_width = (output_map_width - config.q_conv_filter_sizes[n]) / config.q_conv_strides[n] + 1
		output_map_height = (output_map_height - config.q_conv_filter_sizes[n]) / config.q_conv_strides[n] + 1

	for i, (n_in, n_out) in enumerate(conv_channels):
		conv_attributes["layer_%i" % i] = L.Convolution2D(n_in, n_out, config.q_conv_filter_sizes[i], stride=config.q_conv_strides[i], wscale=wscale)
		conv_attributes["batchnorm_%i" % i] = L.BatchNormalization(n_out)

	conv = ConvolutionalNetwork(**conv_attributes)
	conv.n_hidden_layers = len(config.q_conv_hidden_channels)
	conv.activation_function = config.q_conv_activation_function
	conv.apply_batchnorm = config.apply_batchnorm
	conv.apply_batchnorm_to_input = config.q_conv_apply_batchnorm_to_input
	if config.use_gpu:
		conv.to_gpu()

	# The two dueling streams share the same layout and differ only in
	# their output width (1 vs. number of actions).
	conv_output_size = (output_map_width, output_map_height)
	fc_value = _build_fc_head(config, conv_output_size, 1, wscale)
	fc_advantage = _build_fc_head(config, conv_output_size, len(config.ale_actions), wscale)
	return conv, fc_value, fc_advantage
acf7f62e574d8d0a5dd444a69b457eb6f4dcb1cb | 4,330 | py | Python | objects/CSCG/_3d/forms/edge/_0eg/main.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | 1 | 2020-10-14T12:48:35.000Z | 2020-10-14T12:48:35.000Z | objects/CSCG/_3d/forms/edge/_0eg/main.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | null | null | null | objects/CSCG/_3d/forms/edge/_0eg/main.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: Yi Zhang.
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft, Delft, Netherlands
"""
import sys
if './' not in sys.path: sys.path.append('./')
from abc import ABC
import numpy as np
from objects.CSCG._3d.forms.edge.base.main import _3dCSCG_Edge
from objects.CSCG._3d.forms.edge._0eg.discretize.main import _3dCSCG_Edge0Form_Discretize
class _3dCSCG_0Edge(_3dCSCG_Edge, ABC):
"""
Edge 0-form.
:param mesh:
:param space:
:param orientation:
:param numbering_parameters:
:param name:
"""
def __init__(self, mesh, space, orientation='outer',
numbering_parameters='Naive', name='outer-oriented-0-edge-form'):
super().__init__(mesh, space, orientation, numbering_parameters, name)
self._k_ = 0
self.standard_properties.___PRIVATE_add_tag___('3dCSCG_edge_0form')
self.___PRIVATE_reset_cache___()
self._discretize_ = _3dCSCG_Edge0Form_Discretize(self)
self._freeze_self_()
def ___PRIVATE_reset_cache___(self):
super().___PRIVATE_reset_cache___()
def ___PRIVATE_TW_FUNC_body_checker___(self, func_body):
assert func_body.mesh.domain == self.mesh.domain
assert func_body.ndim == self.ndim == 3
if func_body.__class__.__name__ == '_3dCSCG_ScalarField':
assert func_body.ftype in ('standard',), \
f"3dCSCG 0edge FUNC do not accept func _3dCSCG_ScalarField of ftype {func_body.ftype}."
else:
raise Exception(f"3dCSCG 0form FUNC do not accept func {func_body.__class__}")
def ___PRIVATE_TW_BC_body_checker___(self, func_body):
assert func_body.mesh.domain == self.mesh.domain
assert func_body.ndim == self.ndim == 3
if func_body.__class__.__name__ == '_3dCSCG_ScalarField':
assert func_body.ftype in ('standard','boundary-wise'), \
f"3dCSCG 0edge BC do not accept func _3dCSCG_ScalarField of ftype {func_body.ftype}."
else:
raise Exception(f"3dCSCG 1edge BC do not accept func {func_body.__class__}")
@property
def discretize(self):
return self._discretize_
def reconstruct(self, xi, eta, sigma, i=None):
"""
:param xi: 1d array in [-1, 1]
:param eta: 1d array in [-1, 1]
:param sigma: 1d array in [-1, 1]
:param i: Reconstruct in edge-element #i; when it is None, reconstruct in all edge elements.
:return: two dictionaries.
"""
if i is None:
indices = self.mesh.edge.elements._locations_.keys()
else:
if not isinstance(i, (list, tuple)):
indices = [i, ]
else:
indices = i
basis = self.do.evaluate_basis_at_meshgrid(xi, eta, sigma)
xyz = dict()
v = dict()
for i in indices:
if i in self.mesh.edge.elements:
ee = self.mesh.edge.elements[i]
mesh_element = ee.CHARACTERISTIC_element
corner_edge = ee.CHARACTERISTIC_corner_edge
xyz_i = ee.coordinate_transformation.mapping(xi, eta, sigma,
from_element=mesh_element,
corner_edge=corner_edge)
prime_cochain = self.cochain.local_EEW[i]
vi = np.einsum('i, ij -> j', prime_cochain,
basis[corner_edge][0], optimize='greedy')
xyz[i] = xyz_i
v[i] = [vi, ]
return xyz, v
if __name__ == '__main__':
    # mpiexec -n 6 python _3dCSCG\forms\edge\_0_edge.py
    from objects.CSCG._3d.master import MeshGenerator, SpaceInvoker, FormCaller#, ExactSolutionSelector

    # Smoke test: build a 'crazy'-mapped 5x6x7 mesh and a degree-10 Lobatto
    # polynomial space, then discretize an analytic scalar onto a 0-edge form.
    mesh = MeshGenerator('crazy', c=0.25)([5,6,7])
    space = SpaceInvoker('polynomials')([('Lobatto',10), ('Lobatto',10), ('Lobatto',10)])
    FC = FormCaller(mesh, space)
    e0 = FC('0-e')

    # Time-independent scalar (the "+ 0 * t" keeps the signature (t, x, y, z)).
    def p(t, x, y, z): return - 6 * np.pi * np.sin(2*np.pi*x) * np.sin(2*np.pi*y) * np.sin(2*np.pi*z) + 0 * t
    scalar = FC('scalar', p)

    # Attach the scalar as the form's FUNC at t=0 and discretize it.
    e0.TW.func.do.set_func_body_as(scalar)
    e0.TW.current_time = 0
    e0.TW.do.push_all_to_instant()
    e0.discretize()
    # Report the L2 reconstruction error.
    print(e0.error.L())
acf7f63b38425a94bbcda9791696396086db5b4d | 5,020 | py | Python | assignments/assignment1/knn_solution.py | tbb/dlcourse_ai | d8a14d30f7174b449c9bb79f3b87d4822d4f0f4b | [
"MIT"
] | null | null | null | assignments/assignment1/knn_solution.py | tbb/dlcourse_ai | d8a14d30f7174b449c9bb79f3b87d4822d4f0f4b | [
"MIT"
] | null | null | null | assignments/assignment1/knn_solution.py | tbb/dlcourse_ai | d8a14d30f7174b449c9bb79f3b87d4822d4f0f4b | [
"MIT"
] | null | null | null | import numpy as np
class KNN:
    """
    K-nearest-neighbor classifier using L1 loss.

    The training set is memorized in :meth:`fit`; at prediction time each
    query sample receives the majority label among its ``k`` nearest training
    samples under the L1 (Manhattan) distance.
    """

    def __init__(self, k=1):
        # Number of neighbors that vote on each prediction.
        self.k = k

    def fit(self, X, y):
        """Memorize the training data (KNN performs no actual optimization).

        Arguments:
        X, np array (num_samples, num_features) - training samples
        y, np array (num_samples) - training labels; a bool dtype selects the
           binary prediction path in :meth:`predict`
        """
        self.train_X = X
        self.train_y = y

    def predict(self, X, num_loops=0):
        '''
        Uses the KNN model to predict clases for the data samples provided

        Arguments:
        X, np array (num_samples, num_features) - samples to run
           through the model
        num_loops, int - which implementation to use

        Returns:
        predictions, np array of ints (num_samples) - predicted class
           for each sample
        '''
        if num_loops == 0:
            dists = self.compute_distances_no_loops(X)
        elif num_loops == 1:
            dists = self.compute_distances_one_loop(X)
        else:
            dists = self.compute_distances_two_loops(X)

        # ``np.bool`` was removed in NumPy 1.20; the builtin ``bool`` compares
        # equal to dtype('bool') and works on every NumPy version.
        if self.train_y.dtype == bool:
            return self.predict_labels_binary(dists)
        else:
            return self.predict_labels_multiclass(dists)

    def compute_distances_two_loops(self, X):
        '''
        Computes distance from every sample of X to every training sample
        Uses simplest implementation with 2 Python loops

        Arguments:
        X, np array (num_test_samples, num_features) - samples to run

        Returns:
        dists, np array (num_test_samples, num_train_samples) - array
           with distances between each test and each train sample
        '''
        num_train = self.train_X.shape[0]
        num_test = X.shape[0]
        dists = np.zeros((num_test, num_train), np.float32)
        for i in range(num_test):
            for j in range(num_train):
                dists[i, j] = np.sum(np.abs(self.train_X[j] - X[i]))
        return dists

    def compute_distances_one_loop(self, X):
        '''
        Computes distance from every sample of X to every training sample
        Vectorizes some of the calculations, so only 1 loop is used

        Arguments:
        X, np array (num_test_samples, num_features) - samples to run

        Returns:
        dists, np array (num_test_samples, num_train_samples) - array
           with distances between each test and each train sample
        '''
        num_train = self.train_X.shape[0]
        num_test = X.shape[0]
        dists = np.zeros((num_test, num_train), np.float32)
        for i in range(num_test):
            # Broadcast X[i] against all training rows at once.
            dists[i] = np.sum(np.abs(self.train_X - X[i]), axis=1)
        return dists

    def compute_distances_no_loops(self, X):
        '''
        Computes distance from every sample of X to every training sample
        Fully vectorizes the calculations

        Arguments:
        X, np array (num_test_samples, num_features) - samples to run

        Returns:
        dists, np array (num_test_samples, num_train_samples) - array
           with distances between each test and each train sample
        '''
        # Broadcasting X[:, None] (num_test, 1, num_features) against
        # train_X (num_train, num_features) yields all pairwise differences.
        dists = (np.abs(X[:, None] - self.train_X)).sum(axis=-1)
        return dists

    def predict_labels_binary(self, dists):
        '''
        Returns model predictions for binary classification case

        Arguments:
        dists, np array (num_test_samples, num_train_samples) - array
           with distances between each test and each train sample

        Returns:
        pred, np array of bool (num_test_samples) - binary predictions
           for every test sample
        '''
        num_test = dists.shape[0]
        # Builtin bool replaces the removed np.bool alias.
        pred = np.zeros(num_test, bool)
        for i in range(num_test):
            nn_indices = dists[i].argsort()[:self.k]
            nn_labels = self.train_y[nn_indices]
            # Majority vote: True wins when label 1 (True) is most common.
            pred[i] = np.bincount(nn_labels).argmax() == 1
        return pred

    def predict_labels_multiclass(self, dists):
        '''
        Returns model predictions for multi-class classification case

        Arguments:
        dists, np array (num_test_samples, num_train_samples) - array
           with distances between each test and each train sample

        Returns:
        pred, np array of int (num_test_samples) - predicted class index
           for every test sample
        '''
        num_test = dists.shape[0]
        # Builtin int replaces the removed np.int alias; also dropped a
        # duplicated ``num_test = dists.shape[0]`` assignment.
        pred = np.zeros(num_test, int)
        for i in range(num_test):
            nn_indices = dists[i].argsort()[:self.k]
            nn_labels = self.train_y[nn_indices]
            # Majority vote among the k nearest labels.
            pred[i] = np.bincount(nn_labels).argmax()
        return pred
| 34.861111 | 74 | 0.600797 |
acf7f7d1df319bfa055845091d03057f0cc799e6 | 8,894 | py | Python | source/control_plane/python/lambda/ttl_checker/ttl_checker.py | kirillsc/aws-htc-grid | d1dd8068c3aebc3c04904b3daefc142a4b96872b | [
"Apache-2.0"
] | 24 | 2021-04-14T11:57:42.000Z | 2022-03-23T17:09:12.000Z | source/control_plane/python/lambda/ttl_checker/ttl_checker.py | kirillsc/aws-htc-grid | d1dd8068c3aebc3c04904b3daefc142a4b96872b | [
"Apache-2.0"
] | 9 | 2021-04-23T08:44:13.000Z | 2021-09-15T13:37:42.000Z | source/control_plane/python/lambda/ttl_checker/ttl_checker.py | kirillsc/aws-htc-grid | d1dd8068c3aebc3c04904b3daefc142a4b96872b | [
"Apache-2.0"
] | 15 | 2021-04-14T11:53:58.000Z | 2022-02-28T16:45:47.000Z | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 https://aws.amazon.com/apache-2-0/
import logging
import boto3
import time
import os
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Key, Attr
from utils.performance_tracker import EventsCounter, performance_tracker_initializer
from utils import grid_error_logger as errlog
from utils.state_table_common import TASK_STATE_RETRYING, TASK_STATE_INCONSISTENT, TASK_STATE_FAILED
from api.queue_manager import queue_manager
# AWS region this lambda runs in (injected via the deployment environment).
region = os.environ["REGION"]

# Metrics sink for timing and counter samples (connection string points at the
# metrics backend behind Grafana; disabled entirely via METRICS_ARE_ENABLED).
perf_tracker = performance_tracker_initializer(
    os.environ["METRICS_ARE_ENABLED"],
    os.environ["METRICS_TTL_CHECKER_LAMBDA_CONNECTION_STRING"],
    os.environ["METRICS_GRAFANA_PRIVATE_IP"])

from api.state_table_manager import state_table_manager

# Task state table (service/config/name resolved from the environment).
state_table = state_table_manager(
    os.environ['STATE_TABLE_SERVICE'],
    os.environ['STATE_TABLE_CONFIG'],
    os.environ['STATE_TABLE_NAME'])

# Main task queue: expired tasks are re-released onto it for retry.
queue = queue_manager(
    task_queue_service=os.environ['TASK_QUEUE_SERVICE'],
    task_queue_config=os.environ['TASK_QUEUE_CONFIG'],
    tasks_queue_name=os.environ['TASKS_QUEUE_NAME'],
    region=region)

# Dead-letter queue: tasks that cannot be recovered are parked here.
dlq = queue_manager(
    task_queue_service=os.environ['TASK_QUEUE_SERVICE'],
    task_queue_config=os.environ['TASK_QUEUE_CONFIG'],
    tasks_queue_name=os.environ['TASKS_QUEUE_DLQ_NAME'],
    region=region)

# Owner id recorded in the state table when this lambda acquires a task.
TTL_LAMBDA_ID = 'TTL_LAMBDA'
TTL_LAMBDA_TMP_STATE = TASK_STATE_RETRYING
TTL_LAMBDA_FAILED_STATE = TASK_STATE_FAILED
TTL_LAMBDA_INCONSISTENT_STATE = TASK_STATE_INCONSISTENT
# Maximum re-queue attempts before a task is failed permanently.
MAX_RETRIES = 5
RETRIEVE_EXPIRED_TASKS_LIMIT = 200

# TODO: implement archival after 10 days in S3
def lambda_handler(event, context):
    """Handler called by AWS Lambda runtime.

    Scans the state table for tasks whose heartbeat has expired, re-queues
    recoverable tasks for retry, permanently fails tasks that exhausted
    MAX_RETRIES, and parks unprocessable tasks on the dead-letter queue.

    Args:
        event(dict): a CloudWatch Event generated every minute
        context: Lambda context object (unused)

    Returns:
        None
    """
    stats_obj = {'01_invocation_tstmp': {"label": "None", "tstmp": int(round(time.time() * 1000))}}
    event_counter = EventsCounter(
        ["counter_expired_tasks", "counter_failed_to_acquire",
         "counter_failed_tasks", "counter_released_tasks", "counter_inconsistent_state", "counter_tasks_queue_size"])

    for expired_tasks in state_table.query_expired_tasks():
        event_counter.increment("counter_expired_tasks", len(expired_tasks))
        event_counter.increment("counter_tasks_queue_size", queue.get_queue_length())

        for item in expired_tasks:
            print("Processing expired task: {}".format(item))

            task_id = item.get('task_id')
            owner_id = item.get('task_owner')
            current_heartbeat_timestamp = item.get('heartbeat_expiration_timestamp')

            try:
                is_acquired = state_table.acquire_task_for_ttl_lambda(
                    task_id, owner_id, current_heartbeat_timestamp)

                if not is_acquired:
                    # task has been updated at the very last second...
                    event_counter.increment("counter_failed_to_acquire")
                    continue

                # retreive current number of retries and task message handler
                retries, task_handler_id, task_priority = retreive_retries_and_task_handler_and_priority(task_id)

                print("Number of retires for task[{}]: {} Priority: {}".format(task_id, retries, task_priority))
                print("Last owner for task [{}]: {}".format(task_id, owner_id))

                # TODO: MAX_RETRIES should be extracted from task definition... Store in DDB?
                if retries == MAX_RETRIES:
                    print("Failing task {} after {} retries".format(task_id, retries))
                    event_counter.increment("counter_failed_tasks")
                    fail_task(task_id, task_handler_id, task_priority)
                    continue

                event_counter.increment("counter_released_tasks")
                state_table.retry_task(task_id, retries + 1)

                try:
                    # Task can be acquired by an agent from this point
                    reset_task_msg_vto(task_handler_id, task_priority)
                    print("SUCCESS FIX for {}".format(task_id))
                except ClientError:
                    try:
                        errlog.log('Failed to reset VTO trying to delete: {} '.format(task_id))
                        # Bug fix: delete_message_from_queue() requires the
                        # task priority as a second argument; the previous
                        # single-argument call raised TypeError instead of
                        # deleting the message.
                        delete_message_from_queue(task_handler_id, task_priority)
                    except ClientError:
                        errlog.log('Inconsistent task: {} sending do DLQ'.format(task_id))
                        event_counter.increment("counter_inconsistent_state")
                        set_task_inconsistent(task_id)
                        send_to_dlq(item)

            except ClientError as e:
                errlog.log('Lambda ttl error: {}'.format(e.response['Error']['Message']))
                print("Cannot process task {} : {}".format(task_id, e))
                print("Sending task {} to DLQ...".format(task_id))
                send_to_dlq(item)

            except Exception as e:
                print("Cannot process task {} : {}".format(task_id, e))
                print("Sending task {} to DLQ...".format(task_id))
                errlog.log('Lambda ttl error: {}'.format(e))
                send_to_dlq(item)

    stats_obj['02_completion_tstmp'] = {"label": "ttl_execution_time", "tstmp": int(round(time.time() * 1000))}
    perf_tracker.add_metric_sample(
        stats_obj,
        event_counter=event_counter,
        from_event="01_invocation_tstmp",
        to_event="02_completion_tstmp"
    )
    perf_tracker.submit_measurements()
def fail_task(task_id, task_handler_id, task_priority):
    """Permanently fail a task: remove its queue message, then mark it failed.

    Args:
        task_id(str): the id of the task to update
        task_handler_id(str): the task handler associated to this task
        task_priority(int): the priority of the task.

    Returns:
        Nothing

    Raises:
        ClientError: if DynamoDB table cannot be updated
    """
    try:
        # Drop the message first so no agent can pick the task up again,
        # then record the terminal state in the state table.
        delete_message_from_queue(task_handler_id, task_priority)
        state_table.update_task_status_to_failed(task_id)
    except ClientError as err:
        errlog.log("Cannot fail task {} : {}".format(task_id, err))
        raise err
def set_task_inconsistent(task_id):
    """Mark a task's status as inconsistent in the state table.

    Args:
        task_id(str): the id of the task to update

    Returns:
        Nothing

    Raises:
        ClientError: if DynamoDB table cannot be updated
    """
    try:
        state_table.update_task_status_to_inconsistent(task_id)
    except ClientError as err:
        errlog.log("Cannot set task to inconsistent {} : {}".format(task_id, err))
        raise err
def delete_message_from_queue(task_handler_id, task_priority):
    """Remove a task's message from the task queue.

    Args:
        task_handler_id(str): the task handler associated of the message to be deleted
        task_priority(int): priority of the task (selects the queue when applicable)

    Returns:
        Nothing

    Raises:
        ClientError: if task queue cannot be updated
    """
    try:
        queue.delete_message(task_handler_id, task_priority)
    except ClientError as err:
        errlog.log("Cannot delete message {} : {}".format(task_handler_id, err))
        raise err
def retreive_retries_and_task_handler_and_priority(task_id):
    """Look up an expired task's retry count, queue handler id and priority.

    (The function name keeps its historical 'retreive' spelling because
    callers reference it by name.)

    Args:
        task_id(str): the id of the expired task

    Returns:
        rtype: 3 variables - (retries, task_handler_id, task_priority)

    Raises:
        ClientError: if DynamoDB query failed
    """
    try:
        record = state_table.get_task_by_id(task_id)
        # TODO(review): verify exactly one record is returned for this id.
        fields = ('retries', 'task_handler_id', 'task_priority')
        return tuple(record.get(field) for field in fields)
    except ClientError as err:
        errlog.log("Cannot retreive retries and handler for task {} : {}".format(task_id, err))
        raise err
def reset_task_msg_vto(handler_id, task_priority):
    """Make a message re-appear in the tasks queue by zeroing its visibility timeout.

    Args:
        handler_id: reference of the message/task.
        task_priority: priority of the task. Identifies which queue to use (if applicable)

    Returns: Nothing
    """
    try:
        # A visibility timeout of 0 releases the message immediately.
        queue.change_visibility(handler_id, 0, task_priority)
    except ClientError as err:
        errlog.log("Cannot reset VTO for message {} : {}".format(handler_id, err))
        raise err
def send_to_dlq(task):
    """Forward an unprocessable task to the dead-letter queue.

    Args:
        task: the task record to park on the DLQ.

    Returns: Nothing
    """
    body = str(task)
    logging.warning(f"Sending task [{task}] to DLQ")
    dlq.send_message(message_bodies=[body])
| 32.341818 | 117 | 0.661232 |
acf7f8f7502c925d64e03c00262067a2afc78e87 | 14,630 | py | Python | representation_batch_rl/representation_batch_rl/fisher_brac_pixels.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | representation_batch_rl/representation_batch_rl/fisher_brac_pixels.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | representation_batch_rl/representation_batch_rl/fisher_brac_pixels.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Fisher-BRAC from pixels."""
import typing
from dm_env import specs as dm_env_specs
import numpy as np
import tensorflow as tf
from tf_agents.specs.tensor_spec import TensorSpec
from representation_batch_rl.batch_rl import critic
from representation_batch_rl.batch_rl.encoders import ConvStack
from representation_batch_rl.batch_rl.encoders import ImageEncoder
from representation_batch_rl.batch_rl.encoders import make_impala_cnn_network
from representation_batch_rl.representation_batch_rl import behavioral_cloning_pixels as behavioral_cloning
from representation_batch_rl.representation_batch_rl import policies_pixels as policies
from representation_batch_rl.representation_batch_rl import tf_utils
class FBRAC(object):
  """Class performing BRAC training."""

  def __init__(self,
               observation_spec,
               action_spec,
               actor_lr = 3e-4,
               critic_lr = 3e-4,
               alpha_lr = 3e-4,
               discount = 0.99,
               tau = 0.005,
               target_entropy = 0.0,
               f_reg = 1.0,
               reward_bonus = 5.0,
               num_augmentations = 1,
               env_name = '',
               batch_size = 256):
    """Creates networks.

    Args:
      observation_spec: environment observation spec.
      action_spec: Action spec.
      actor_lr: Actor learning rate.
      critic_lr: Critic learning rate.
      alpha_lr: Temperature learning rate.
      discount: MDP discount.
      tau: Soft target update parameter.
      target_entropy: Target entropy.
      f_reg: Critic regularization weight.
      reward_bonus: Bonus added to the rewards.
      num_augmentations: Number of DrQ augmentations (crops)
      env_name: Env name
      batch_size: Batch size
    """
    self.num_augmentations = num_augmentations
    # A scalar (shapeless) action spec indicates a discrete action space.
    self.discrete_actions = False if len(action_spec.shape) else True
    self.batch_size = batch_size

    actor_kwargs = {'hidden_dims': (1024, 1024)}
    critic_kwargs = {'hidden_dims': (1024, 1024)}

    # DRQ encoder params.
    # https://github.com/denisyarats/drq/blob/master/config.yaml#L73

    # Make 4 sets of weights:
    # - BC
    # - Actor
    # - Critic
    # - Critic (target)

    if observation_spec.shape == (64, 64, 3):
      # IMPALA for Procgen
      def conv_stack():
        return make_impala_cnn_network(
            depths=[16, 32, 32], use_batch_norm=False, dropout_rate=0.)
      state_dim = 256
    else:
      # Reduced architecture for DMC
      def conv_stack():
        return ConvStack(observation_spec.shape)
      state_dim = 50

    conv_stack_bc = conv_stack()
    conv_stack_actor = conv_stack()
    conv_stack_critic = conv_stack()
    conv_target_stack_critic = conv_stack()

    if observation_spec.shape == (64, 64, 3):
      conv_stack_bc.output_size = state_dim
      conv_stack_actor.output_size = state_dim
      conv_stack_critic.output_size = state_dim
      conv_target_stack_critic.output_size = state_dim
    # Combine and stop_grad some of the above conv stacks
    actor_kwargs['encoder_bc'] = ImageEncoder(
        conv_stack_bc, feature_dim=state_dim, bprop_conv_stack=True)
    # The actor reuses the critic's conv stack but never backprops through it.
    actor_kwargs['encoder'] = ImageEncoder(
        conv_stack_critic, feature_dim=state_dim, bprop_conv_stack=False)
    critic_kwargs['encoder'] = ImageEncoder(
        conv_stack_critic, feature_dim=state_dim, bprop_conv_stack=True)
    # Note: the target critic does not share any weights.
    critic_kwargs['encoder_target'] = ImageEncoder(
        conv_target_stack_critic, feature_dim=state_dim, bprop_conv_stack=True)

    if self.num_augmentations == 0:
      dummy_state = tf.constant(
          np.zeros(shape=[1] + list(observation_spec.shape)))
    else:  # account for padding of +4 everywhere and then cropping out 68
      dummy_state = tf.constant(np.zeros(shape=[1, 68, 68, 3]))

    # Run a dummy forward pass so all encoder variables are built eagerly.
    @tf.function
    def init_models():
      actor_kwargs['encoder_bc'](dummy_state)
      actor_kwargs['encoder'](dummy_state)
      critic_kwargs['encoder'](dummy_state)
      critic_kwargs['encoder_target'](dummy_state)

    init_models()

    if self.discrete_actions:
      hidden_dims = ()
      self.actor = policies.CategoricalPolicy(
          state_dim,
          action_spec,
          hidden_dims=hidden_dims,
          encoder=actor_kwargs['encoder'])
      action_dim = action_spec.maximum.item() + 1
    else:
      hidden_dims = (256, 256, 256)
      self.actor = policies.DiagGuassianPolicy(
          state_dim,
          action_spec,
          hidden_dims=hidden_dims,
          encoder=actor_kwargs['encoder'])
      action_dim = action_spec.shape[0]

    self.action_dim = action_dim

    self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=actor_lr)

    # Entropy temperature (kept fixed in practice; see the ``alpha`` property).
    self.log_alpha = tf.Variable(tf.math.log(1.0), trainable=True)
    self.alpha_optimizer = tf.keras.optimizers.Adam(learning_rate=alpha_lr)

    self.target_entropy = target_entropy
    self.discount = discount
    self.tau = tau

    # Behavioral-cloning policy used by dist_critic to shift Q-values by
    # the behavior log-probabilities.
    self.bc = behavioral_cloning.BehavioralCloning(
        observation_spec,
        action_spec,
        mixture=True,
        encoder=actor_kwargs['encoder_bc'],
        num_augmentations=self.num_augmentations,
        env_name=env_name,
        batch_size=batch_size)

    self.critic = critic.Critic(
        state_dim,
        action_dim,
        hidden_dims=hidden_dims,
        encoder=critic_kwargs['encoder'])
    self.critic_target = critic.Critic(
        state_dim,
        action_dim,
        hidden_dims=hidden_dims,
        encoder=critic_kwargs['encoder_target'])

    # Initialize the target critic as an exact copy (tau=1.0).
    critic.soft_update(self.critic, self.critic_target, tau=1.0)
    self.critic_optimizer = tf.keras.optimizers.Adam(learning_rate=critic_lr)

    self.f_reg = f_reg
    self.reward_bonus = reward_bonus

    self.model_dict = {
        'critic': self.critic,
        'critic_target': self.critic_target,
        'actor': self.actor,
        'bc': self.bc,
        'critic_optimizer': self.critic_optimizer,
        'alpha_optimizer': self.alpha_optimizer,
        'actor_optimizer': self.actor_optimizer
    }
def dist_critic(self, states, actions, target=False, stop_gradient=False):
    """Distribution critic (via offset).

    Adds the behavioral-cloning policy's log-probabilities to both Q heads.

    Args:
      states: batch of states
      actions: batch of actions
      target: whether to use target for q1,q2
      stop_gradient: whether to stop_grad log-probs

    Returns:
      dist
    """
    critic_net = self.critic_target if target else self.critic
    q1, q2 = critic_net(states, actions)

    if self.discrete_actions:
        # log_probs expects a (n_batch,) tensor of action indices rather
        # than one-hot rows.
        actions = tf.argmax(actions, 1)
    log_probs = self.bc.policy.log_probs(states, actions)
    if stop_gradient:
        log_probs = tf.stop_gradient(log_probs)

    return (q1 + log_probs, q2 + log_probs)
def fit_critic(self, states, actions,
               next_states, rewards,
               discounts):
    """Updates critic parameters.

    Performs one TD step on both Q heads plus a gradient-penalty
    regularizer (Fisher-BRC) on actions sampled from the current policy.
    When DrQ augmentation is active, targets and losses are averaged over
    the augmented crops.

    Args:
      states: Batch of states (a list of augmented batches when
        num_augmentations > 0).
      actions: Batch of actions.
      next_states: Batch of next states (list when augmented).
      rewards: Batch of rewards.
      discounts: Batch of masks indicating the end of the episodes.

    Returns:
      Dictionary with information to track.
    """
    if self.num_augmentations > 0:
        # Sample actions once from the first crop, then average TD targets
        # over every augmented crop.
        next_actions = self.actor(next_states[0], sample=True)
        policy_actions = self.actor(states[0], sample=True)
        target_q = 0.
        for i in range(self.num_augmentations):
            next_target_q1_i, next_target_q2_i = self.dist_critic(
                next_states[i], next_actions, target=True)
            target_q_i = rewards + self.discount * discounts * tf.minimum(
                next_target_q1_i, next_target_q2_i)
            target_q += target_q_i
        target_q /= self.num_augmentations
    else:
        next_actions = self.actor(next_states, sample=True)
        policy_actions = self.actor(states, sample=True)

        if self.discrete_actions:
            # Critic expects one-hot encoded actions in the discrete case.
            next_actions = tf.cast(
                tf.one_hot(next_actions, depth=self.action_dim), tf.float32)
            policy_actions = tf.cast(
                tf.one_hot(policy_actions, depth=self.action_dim), tf.float32)

        next_target_q1, next_target_q2 = self.dist_critic(
            next_states, next_actions, target=True)
        target_q = rewards + self.discount * discounts * tf.minimum(
            next_target_q1, next_target_q2)

    critic_variables = self.critic.trainable_variables

    with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(critic_variables)
        if self.num_augmentations > 0:
            critic_loss = 0.
            for i in range(self.num_augmentations):
                q1, q2 = self.dist_critic(states[i], actions, stop_gradient=True)
                # Inner (persistent) tape computes dQ/da for the gradient
                # penalty on policy actions.
                with tf.GradientTape(
                    watch_accessed_variables=False, persistent=True) as tape2:
                    tape2.watch([policy_actions])
                    q1_reg, q2_reg = self.critic(states[i], policy_actions)

                q1_grads = tape2.gradient(q1_reg, policy_actions)
                q2_grads = tape2.gradient(q2_reg, policy_actions)

                q1_grad_norm = tf.reduce_sum(tf.square(q1_grads), axis=-1)
                q2_grad_norm = tf.reduce_sum(tf.square(q2_grads), axis=-1)

                del tape2

                q_reg = tf.reduce_mean(q1_grad_norm + q2_grad_norm)

                critic_loss_i = (
                    tf.losses.mean_squared_error(target_q, q1) +
                    tf.losses.mean_squared_error(target_q, q2) + self.f_reg * q_reg)
                critic_loss += critic_loss_i
            critic_loss /= self.num_augmentations
        else:
            q1, q2 = self.dist_critic(states, actions, stop_gradient=True)
            with tf.GradientTape(
                watch_accessed_variables=False, persistent=True) as tape2:
                tape2.watch([policy_actions])
                q1_reg, q2_reg = self.critic(states, policy_actions)

            q1_grads = tape2.gradient(q1_reg, policy_actions)
            q2_grads = tape2.gradient(q2_reg, policy_actions)

            q1_grad_norm = tf.reduce_sum(tf.square(q1_grads), axis=-1)
            q2_grad_norm = tf.reduce_sum(tf.square(q2_grads), axis=-1)

            del tape2

            q_reg = tf.reduce_mean(q1_grad_norm + q2_grad_norm)

            critic_loss = (tf.losses.mean_squared_error(target_q, q1) +
                           tf.losses.mean_squared_error(target_q, q2) +
                           self.f_reg * q_reg)

    critic_grads = tape.gradient(critic_loss, critic_variables)

    self.critic_optimizer.apply_gradients(zip(critic_grads, critic_variables))

    # Polyak-average the online critic into the target critic.
    critic.soft_update(self.critic, self.critic_target, tau=self.tau)

    return {
        'q1': tf.reduce_mean(q1),
        'q2': tf.reduce_mean(q2),
        'critic_loss': critic_loss,
        'q1_grad': tf.reduce_mean(q1_grad_norm),
        'q2_grad': tf.reduce_mean(q2_grad_norm)
    }
@property
def alpha(self):
    """Entropy temperature; fixed at zero (learned alpha is disabled)."""
    # return tf.exp(self.log_alpha)  # enable for a learned temperature
    return tf.constant(0.)
def fit_actor(self, states):
    """Updates actor parameters with one SAC-style policy-gradient step.

    Args:
      states: A batch of states.

    Returns:
      Dictionary with actor loss, the (fixed) temperature and alpha loss.
    """
    with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(self.actor.trainable_variables)
        actions, log_probs = self.actor(states, sample=True, with_log_probs=True)
        if self.discrete_actions:
            # Critic expects one-hot encoded actions in the discrete case.
            actions = tf.cast(
                tf.one_hot(actions, depth=self.action_dim), tf.float32)
        q1, q2 = self.dist_critic(states, actions)
        q = tf.minimum(q1, q2)
        actor_loss = tf.reduce_mean(self.alpha * log_probs - q)

    actor_grads = tape.gradient(actor_loss, self.actor.trainable_variables)
    self.actor_optimizer.apply_gradients(
        zip(actor_grads, self.actor.trainable_variables))

    # Adaptive temperature is disabled (alpha is fixed at 0); keep the
    # bookkeeping entry at zero.
    # Bug fix: this previously called tf.consant(0.) (typo), which raises
    # AttributeError as soon as fit_actor is traced/executed.
    alpha_loss = tf.constant(0.)

    return {
        'actor_loss': actor_loss,
        'alpha': self.alpha,
        'alpha_loss': alpha_loss
    }
@tf.function
def update_step(self, replay_buffer_iter,
                numpy_dataset):
    """Performs a single training step for critic and actor.

    Args:
      replay_buffer_iter: A tensorflow graph iteratable object.
      numpy_dataset: Is the dataset a NumPy array?

    Returns:
      Dictionary with losses to track.
    """
    transition = next(replay_buffer_iter)

    # observation: n_batch x n_timesteps x 1 x H*W*3*n_frames x 1 ->
    # n_batch x H x W x 3*n_frames
    if not numpy_dataset:
        # tf-agents style transition: index 0 is the current step, 1 the next.
        states = transition.observation[:, 0]
        next_states = transition.observation[:, 1]
        actions = transition.action[:, 0]
        rewards = transition.reward[:, 0]
        discounts = transition.discount[:, 0]

        if transition.observation.dtype == tf.uint8:
            # Normalize raw pixel observations to [0, 1].
            states = tf.cast(states, tf.float32) / 255.
            next_states = tf.cast(next_states, tf.float32) / 255.
    else:
        states, actions, rewards, next_states, discounts = transition

    if self.num_augmentations > 0:
        # DrQ-style pad(+4)-and-random-crop; returns lists of augmented crops.
        states, next_states = tf_utils.image_aug(
            states,
            next_states,
            img_pad=4,
            num_augmentations=self.num_augmentations,
            obs_dim=64,
            channels=3,
            cropped_shape=[self.batch_size, 68, 68, 3])

    # states, actions, rewards, discounts, next_states = next(replay_buffer_iter
    rewards = rewards + self.reward_bonus

    if self.discrete_actions:
        # One-hot encode dataset actions for the critic.
        actions = tf.cast(tf.one_hot(actions, depth=self.action_dim), tf.float32)

    critic_dict = self.fit_critic(states, actions, next_states, rewards,
                                  discounts)

    actor_dict = self.fit_actor(
        states[0] if self.num_augmentations > 0 else states)

    return {**actor_dict, **critic_dict}
@tf.function
def act(self, states):
    """Return deterministic (non-sampled) actions for a batch of states."""
    return self.actor(states, sample=False)
| 34.262295 | 107 | 0.668558 |
acf7f9667a6734b621d3738ba8b81f6b538b2550 | 36,199 | py | Python | salt/cloud/clouds/xen.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | null | null | null | salt/cloud/clouds/xen.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 1 | 2017-07-10T21:44:39.000Z | 2017-07-10T21:44:39.000Z | salt/cloud/clouds/xen.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | null | null | null | """
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
"""
import logging
import time
from datetime import datetime
import salt.config as config
import salt.utils.cloud
from salt.exceptions import SaltCloudException, SaltCloudSystemExit
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = "xen"
cache = None
def __virtual__():
    """
    Only load if Xen configuration and XEN SDK is found.
    """
    if get_configured_provider() is False:
        return False

    if _get_dependencies() is False:
        return False

    # ``salt.cache`` is not imported at module level in this file; import it
    # explicitly so the attribute access below cannot fail with
    # AttributeError when no other loaded module happens to have imported
    # the submodule first.
    import salt.cache

    global cache  # pylint: disable=global-statement,invalid-name
    cache = salt.cache.Cache(__opts__)

    return __virtualname__
def _get_active_provider_name():
    """Return the active provider name, tolerating both the callable wrapper
    and the plain-string form of ``__active_provider_name__``."""
    provider = __active_provider_name__
    try:
        return provider.value()
    except AttributeError:
        return provider
def _get_dependencies():
    """
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module
    """
    deps = {"XenAPI": HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
    """
    Return the first configured instance (or False when no Xen provider
    with a ``url`` is configured).
    """
    provider_name = _get_active_provider_name() or __virtualname__
    return config.is_provider_configured(__opts__, provider_name, ("url",))
def _get_session():
    """
    Get a connection to the XenServer host.

    Logs in with the provider-configured credentials. If the configured host
    turns out to be a pool slave, XenAPI raises a failure whose details carry
    the pool master's address; in that case a second session is opened
    against the master instead.

    :return: an authenticated ``XenAPI.Session``
    """
    api_version = "1.0"
    originator = "salt_cloud_{}_driver".format(__virtualname__)
    url = config.get_cloud_config_value(
        "url", get_configured_provider(), __opts__, search_global=False
    )
    user = config.get_cloud_config_value(
        "user", get_configured_provider(), __opts__, search_global=False
    )
    password = config.get_cloud_config_value(
        "password", get_configured_provider(), __opts__, search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        "ignore_ssl",
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False,
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            "url: %s user: %s password: %s, originator: %s",
            url,
            user,
            "XXX-pw-redacted-XXX",
            originator,
        )
        session.xenapi.login_with_password(user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # The failure details of a slave host contain the master's address.
        pool_master_addr = str(ex.__dict__["details"][1])
        slash_parts = url.split("/")
        new_url = "/".join(slash_parts[:2]) + "/" + pool_master_addr
        # Fix: honor the provider's ignore_ssl setting on the fallback
        # connection as well (it was previously dropped here).
        session = XenAPI.Session(new_url, ignore_ssl=ignore_ssl)
        log.debug(
            "session is -> url: %s user: %s password: %s, originator:%s",
            new_url,
            user,
            "XXX-pw-redacted-XXX",
            originator,
        )
        session.xenapi.login_with_password(user, password, api_version, originator)
    return session
def list_nodes():
    """
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q

    """
    session = _get_session()
    ret = {}
    for vm_ref in session.xenapi.VM.get_all_records():
        record = session.xenapi.VM.get_record(vm_ref)
        # Skip templates and the control domain (dom0); only real guests
        # are reported.
        if record["is_a_template"] or record["is_control_domain"]:
            continue
        try:
            base_template_name = record["other_config"]["base_template_name"]
        except Exception:  # pylint: disable=broad-except
            base_template_name = None
            log.debug(
                "VM %s, does not have base_template_name attribute",
                record["name_label"],
            )
        ret[record["name_label"]] = {
            "id": record["uuid"],
            "image": base_template_name,
            "name": record["name_label"],
            "size": record["memory_dynamic_max"],
            "state": record["power_state"],
            "private_ips": get_vm_ip(record["name_label"], session),
            "public_ips": None,
        }
    return ret
def get_vm_ip(name=None, session=None, call=None):
    """
    Get the IP address of the VM

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM

    :param name: name label of the VM
    :param session: optional existing XenAPI session; a new one is created when None
    :param call: salt-cloud invocation type; must be an action ('-a'), not a function
    :return: the first discovered IPv4 address as a string, or None
    """
    if call == "function":
        raise SaltCloudException("This function must be called with -a or --action.")
    if session is None:
        log.debug("New session being created")
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # Addresses are reported in CIDR form; strip the prefix length.
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split("/")
                log.debug("VM vif returned for instance: %s ip: %s", name, ret)
                return ret
    # -- try to get ip from get tools metrics (requires guest tools in the VM)
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                "VM guest metrics returned for instance: %s 0/ip: %s", name, net["0/ip"]
            )
            ret = net["0/ip"]
    # except Exception as ex:  # pylint: disable=broad-except
    except XenAPI.Failure:
        log.info("Could not get vm metrics at this time")
    return ret
def set_vm_ip(name=None, ipv4_cidr=None, ipv4_gw=None, session=None, call=None):
    """
    Set the IP address on a virtual interface (vif)

    :param name: name label of the target VM
    :param ipv4_cidr: address to assign, in CIDR notation
    :param ipv4_gw: gateway address
    :param session: optional existing XenAPI session
    :return: True (assignment failures are logged, not raised)
    """
    mode = "static"
    # TODO: Need to add support for IPv6
    if call == "function":
        raise SaltCloudException("The function must be called with -a or --action.")
    log.debug(
        "Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s",
        name,
        ipv4_cidr,
        ipv4_gw,
        mode,
    )
    if session is None:
        log.debug("New session being created")
        session = _get_session()
    vm_ref = _get_vm(name, session)
    # TODO: for now the address is applied to every interface; additional
    # consideration is needed for multiple interface (vif) VMs
    vif_refs = session.xenapi.VM.get_VIFs(vm_ref)
    if vif_refs is not None:
        log.debug("There are %s vifs.", len(vif_refs))
        for vif_ref in vif_refs:
            log.debug(session.xenapi.VIF.get_record(vif_ref))
            try:
                session.xenapi.VIF.configure_ipv4(vif_ref, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info("Static IP assignment could not be performed.")
    return True
def list_nodes_full(session=None):
    """
    List full virtual machines

    .. code-block:: bash

        salt-cloud -F

    :param session: optional existing XenAPI session
    :return: mapping of VM name label to the full VM record augmented
        with salt-cloud keys (id, name, image, size, state, ips)
    """
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record["is_a_template"] and not record["is_control_domain"]:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record["other_config"]["base_template_name"]
            except Exception:  # pylint: disable=broad-except
                base_template_name = None
                log.debug(
                    "VM %s, does not have base_template_name attribute",
                    record["name_label"],
                )
            # the record was fetched above already; avoid a second identical
            # get_record round-trip and augment a copy of it instead
            vm_cfg = dict(record)
            vm_cfg["id"] = record["uuid"]
            vm_cfg["name"] = record["name_label"]
            vm_cfg["image"] = base_template_name
            vm_cfg["size"] = None
            vm_cfg["state"] = record["power_state"]
            vm_cfg["private_ips"] = get_vm_ip(record["name_label"], session)
            vm_cfg["public_ips"] = None
            # drop snapshot_time if present (single lookup instead of
            # "in keys()" followed by del)
            vm_cfg.pop("snapshot_time", None)
            ret[record["name_label"]] = vm_cfg
    provider = _get_active_provider_name() or "xen"
    if ":" in provider:
        comps = provider.split(":")
        provider = comps[0]
    log.debug("ret: %s", ret)
    log.debug("provider: %s", provider)
    log.debug("__opts__: %s", __opts__)
    __utils__["cloud.cache_node_list"](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    """
    Perform a select query on Xen VM instances

    .. code-block:: bash

        salt-cloud -S

    """
    # gather complete node data first, then let salt filter it down to
    # the fields configured under ``query.selection``
    full_nodes = list_nodes_full()
    return salt.utils.cloud.list_nodes_select(
        full_nodes, __opts__["query.selection"], call,
    )
def vdi_list(call=None, kwargs=None):
    """
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True`` keyword
    argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True

    :param kwargs: optional; ``terse="True"`` returns only uuid/OpaqueRef
    :return: mapping of VDI name label to its record (or terse summary)
    """
    if call == "action":
        raise SaltCloudException("This function must be called with -f or --function.")
    log.debug("kwargs is %s", kwargs)
    if kwargs is None:
        kwargs = {}
    # CLI kwargs arrive as strings, so compare against the literal "True"
    terse = kwargs.get("terse") == "True"
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # key fixed: was misspelled "OpqueRef", inconsistent with the
            # "OpaqueRef" key used in the non-terse branch below
            ret[data.get("name_label")] = {"uuid": data.get("uuid"), "OpaqueRef": vdi}
        else:
            data.update({"OpaqueRef": vdi})
            ret[data.get("name_label")] = data
    return ret
def avail_locations(session=None, call=None):
    """
    Return available Xen locations (not implemented)

    .. code-block:: bash

        salt-cloud --list-locations myxen

    """
    if call == "action":
        raise SaltCloudException(
            "The avail_locations function must be called with -f or --function."
        )
    # TODO: need to figure out a good meaning of locations in Xen;
    # for now resource pools stand in for locations
    return pool_list()
def avail_sizes(session=None, call=None):
    """
    Return a list of Xen template definitions

    .. code-block:: bash

        salt-cloud --list-sizes myxen

    :return: a status dict explaining that sizes come from templates
    """
    if call == "action":
        raise SaltCloudException(
            "The avail_sizes function must be called with -f or --function."
        )
    # sizes (cpu/memory) are defined by templates in Xen, so there is
    # nothing separate to enumerate here; message grammar fixed
    # ("build" -> "built")
    return {
        "STATUS": "Sizes are built into templates. Consider running --list-images to see sizes"
    }
def template_list(call=None):
    """
    Return available Xen template information.

    This returns the details of
    each template to show number cores, memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen

    """
    session = _get_session()
    # fetch every VM record and keep only those flagged as templates
    all_records = (
        session.xenapi.VM.get_record(vm) for vm in session.xenapi.VM.get_all()
    )
    return {
        record["name_label"]: record
        for record in all_records
        if record["is_a_template"]
    }
def show_instance(name, session=None, call=None):
    """
    Show information about a specific VM or template

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max

    :param name: name label of the VM
    :param session: optional existing XenAPI session
    :return: summary dict for a regular VM; None for templates/control domains
    """
    if call == "function":
        # error message fixed: previously referenced "show_instnce"
        raise SaltCloudException(
            "The show_instance function must be called with -a or --action."
        )
    log.debug("show_instance-> name: %s session: %s", name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record["is_a_template"] and not record["is_control_domain"]:
        try:
            base_template_name = record["other_config"]["base_template_name"]
        except Exception:  # pylint: disable=broad-except
            base_template_name = None
            log.debug(
                "VM %s, does not have base_template_name attribute",
                record["name_label"],
            )
        ret = {
            "id": record["uuid"],
            "image": base_template_name,
            "name": record["name_label"],
            "size": record["memory_dynamic_max"],
            "state": record["power_state"],
            "private_ips": get_vm_ip(name, session),
            "public_ips": None,
        }
        __utils__["cloud.cache_node"](ret, _get_active_provider_name(), __opts__)
        return ret
def _determine_resource_pool(session, vm_):
    """
    Called by create() used to determine resource pool

    :param session: XenAPI session
    :param vm_: profile/VM configuration dict
    :return: pool opaque reference, or None when no pool exists
    """
    if "resource_pool" in vm_:
        resource_pool = _get_pool(vm_["resource_pool"], session)
    else:
        pools = session.xenapi.pool.get_all()
        if not pools:
            resource_pool = None
        else:
            # reuse the list fetched above instead of calling
            # pool.get_all() a second time
            resource_pool = pools[0]
            pool_record = session.xenapi.pool.get_record(resource_pool)
            log.debug("resource pool: %s", pool_record["name_label"])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    """
    Called by create(); pick the storage repository for the new VM.

    Uses the profile's ``storage_repo`` when given, otherwise the
    resource pool's default SR (or None when there is no pool).
    """
    if "storage_repo" in vm_:
        return _get_sr(vm_["storage_repo"], session)
    storage_repo = None
    if resource_pool:
        storage_repo = session.xenapi.pool.get_default_SR(resource_pool)
        sr_record = session.xenapi.SR.get_record(storage_repo)
        log.debug("storage repository: %s", sr_record["name_label"])
    log.debug("storage repository: %s", storage_repo)
    return storage_repo
def create(vm_):
    """
    Create a VM in Xen

    The configuration for this function is read from the profile settings.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01

    :param vm_: profile/VM configuration dict
    :return: instance details from show_instance(), with the raw VM
        record attached under "extra"
    """
    name = vm_["name"]
    record = {}
    ret = {}
    # fire creating event
    __utils__["cloud.fire_event"](
        "event",
        "starting create",
        "salt/cloud/{}/creating".format(name),
        args={"name": name, "profile": vm_["profile"], "provider": vm_["driver"]},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    log.debug("Adding %s to cloud cache.", name)
    __utils__["cloud.cachedir_index_add"](
        vm_["name"], vm_["profile"], "xen", vm_["driver"]
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM; clone by default (faster than a full copy)
    image = vm_.get("image")
    clone = vm_.get("clone")
    if clone is None:
        clone = True
    log.debug("Clone: %s ", clone)
    # fire event to read new vm properties (requesting)
    __utils__["cloud.fire_event"](
        "event",
        "requesting instance",
        "salt/cloud/{}/requesting".format(name),
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    # create by cloning template
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get("deploy", True)
    # log message fixed: was misspelled "delopy"
    log.debug("deploy is set to %s", deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug("The Salt minion will not be installed, deploy: %s", vm_["deploy"])
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({"extra": record})
    __utils__["cloud.fire_event"](
        "event",
        "created instance",
        "salt/cloud/{}/created".format(name),
        args={"name": name, "profile": vm_["profile"], "provider": vm_["driver"]},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    """
    Deploy salt minion during create()

    Mutates ``vm_`` in place with the bootstrap connection values, then
    hands it to the generic salt-cloud bootstrap utility. Nothing is
    installed when no IP address could be determined.
    """
    # Get bootstrap values
    vm_["ssh_host"] = get_vm_ip(name, session)
    vm_["user"] = vm_.get("user", "root")
    # NOTE(review): hard-coded fallback credential; profiles should always
    # supply their own password rather than rely on this default
    vm_["password"] = vm_.get("password", "p@ssw0rd!")
    vm_["provider"] = vm_.get("provider", "xen")
    log.debug("%s has IP of %s", name, vm_["ssh_host"])
    # Bootstrap Salt minion!
    if vm_["ssh_host"] is not None:
        log.info("Installing Salt minion on %s", name)
        boot_ret = __utils__["cloud.bootstrap"](vm_, __opts__)
        log.debug("boot return: %s", boot_ret)
def _set_static_ip(name, session, vm_):
    """
    Apply a static IP during create() when the profile defines one.

    Reads ``ipv4_gw`` and ``ipv4_cidr`` from the profile dict (empty
    strings when absent) and delegates to set_vm_ip().
    """
    ipv4_gw = ""
    ipv4_cidr = ""
    if "ipv4_gw" in vm_:
        log.debug("ipv4_gw is found in keys")
        ipv4_gw = vm_["ipv4_gw"]
    if "ipv4_cidr" in vm_:
        log.debug("ipv4_cidr is found in keys")
        ipv4_cidr = vm_["ipv4_cidr"]
    log.debug("attempting to set IP in instance")
    set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    """
    Wait for IP to be available during create()

    Polls get_vm_ip() every 5 seconds until the guest tools report a
    usable address, giving up after 180 seconds.
    """
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore APIPA (link-local) addresses, which live in
            # 169.254.0.0/16; the previous prefix check ("169") also
            # rejected valid 169.x.y.z addresses outside that block
            if status.startswith("169.254."):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            "Waited %s seconds for %s to report ip address...", delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning("Timeout getting IP address")
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    """
    Run XenAPI task in asynchronous mode to prevent timeouts

    Blocks until the task leaves the "pending" state, logging progress
    once per second, then destroys the task object on the server.
    """
    if task is None or session is None:
        return None
    label = session.xenapi.task.get_name_label(task)
    log.debug("Running %s", label)
    while session.xenapi.task.get_status(task) == "pending":
        pct_done = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug("Task progress %.2f%%", pct_done)
        time.sleep(1)
    # finished tasks must be destroyed explicitly
    log.debug("Cleaning up task %s", label)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    """
    Create VM by cloning

    This is faster and should be used if source and target are
    in the same storage repository
    """
    if session is None:
        session = _get_session()
    log.debug("Creating VM %s by cloning %s", name, image)
    source_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(source_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    """
    Create VM by copy

    This is slower and should be used if source and target are
    NOT in the same storage repository

    :param template: name label of the source template/VM
    :param name: string name of new VM
    :param session: XenAPI session (created when None)
    :param sr: storage repository opaque reference for the copy
    """
    if session is None:
        session = _get_session()
    log.debug("Creating VM %s by copying %s", name, template)
    source_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(source_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    """
    Provision vm right after clone/copy
    """
    if session is None:
        session = _get_session()
    log.info("Provisioning VM %s", name)
    vm_ref = _get_vm(name, session)
    provision_task = session.xenapi.Async.VM.provision(vm_ref)
    _run_async_task(provision_task, session)
def start(name, call=None, session=None):
    """
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01

    :return: instance details from show_instance()
    """
    if call == "function":
        # error message fixed: previously referenced "show_instnce"
        raise SaltCloudException(
            "The start function must be called with -a or --action."
        )
    if session is None:
        session = _get_session()
    log.info("Starting VM %s", name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    """
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01

    :return: instance details from show_instance()
    """
    if call == "function":
        # error message fixed: previously referenced "show_instnce"
        raise SaltCloudException(
            "The pause function must be called with -a or --action."
        )
    if session is None:
        session = _get_session()
    log.info("Pausing VM %s", name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    """
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01

    :return: instance details from show_instance()
    """
    if call == "function":
        # error message fixed: previously referenced "show_instnce"
        raise SaltCloudException(
            "The unpause function must be called with -a or --action."
        )
    if session is None:
        session = _get_session()
    log.info("Unpausing VM %s", name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    """
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01

    :return: instance details from show_instance()
    """
    if call == "function":
        # error message fixed: previously referenced "show_instnce"
        raise SaltCloudException(
            "The suspend function must be called with -a or --action."
        )
    if session is None:
        session = _get_session()
    log.info("Suspending VM %s", name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    """
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01

    :return: instance details from show_instance()
    """
    if call == "function":
        # error message fixed: previously referenced "show_instnce"
        raise SaltCloudException(
            "The resume function must be called with -a or --action."
        )
    if session is None:
        session = _get_session()
    log.info("Resuming VM %s", name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    """
    Stop a vm

    .. code-block:: bash

        salt-cloud -a stop xenvm01

    :return: instance details from show_instance() (via shutdown)
    """
    if call == "function":
        # error message fixed: previously referenced "show_instnce"
        raise SaltCloudException(
            "The stop function must be called with -a or --action."
        )
    # stop is implemented as a clean shutdown
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    """
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01

    :return: instance details from show_instance()
    """
    if call == "function":
        # error message fixed: previously referenced "show_instnce"
        raise SaltCloudException(
            "The shutdown function must be called with -a or --action."
        )
    if session is None:
        session = _get_session()
    # log message fixed: previously said "Starting VM"
    log.info("Shutting down VM %s", name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    """
    Reboot a vm

    .. code-block:: bash

        salt-cloud -a reboot xenvm01

    :return: instance details from show_instance(), or a message string
        when the VM is not running
    """
    if call == "function":
        # error message fixed: previously referenced "show_instnce"
        raise SaltCloudException(
            "The reboot function must be called with -a or --action."
        )
    if session is None:
        session = _get_session()
    # log message fixed: previously said "Starting VM"
    log.info("Rebooting VM %s", name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == "Running":
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return "{} is not running to be rebooted".format(name)
def _get_vm(name=None, session=None):
    """
    Get XEN vm instance object reference

    Template VMs are excluded; returns None unless exactly one
    non-template VM matches the name label.
    """
    if session is None:
        session = _get_session()
    matches = [
        ref
        for ref in session.xenapi.VM.get_by_name_label(name)
        if not session.xenapi.VM.get_is_a_template(ref)
    ]
    if len(matches) == 1:
        return matches[0]
    log.error("VM %s returned %s matches. 1 match expected.", name, len(matches))
    return None
def _get_sr(name=None, session=None):
    """
    Get XEN sr (storage repo) object reference

    Returns None unless exactly one SR matches the name label.
    """
    if session is None:
        session = _get_session()
    candidates = session.xenapi.SR.get_by_name_label(name)
    return candidates[0] if len(candidates) == 1 else None
def _get_pool(name=None, session=None):
    """
    Get XEN resource pool object reference

    Returns the first pool whose name label contains ``name``
    (substring match), or None when nothing matches.
    """
    if session is None:
        session = _get_session()
    for pool_ref in session.xenapi.pool.get_all():
        record = session.xenapi.pool.get_record(pool_ref)
        if name in record.get("name_label"):
            return pool_ref
    return None
def destroy(name=None, call=None):
    """
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -d xenvm01

    Hard-stops the VM if needed, removes its non-ISO VDIs, destroys the
    VM itself, and clears the salt-cloud cache entries for it.
    """
    if call == "function":
        raise SaltCloudSystemExit(
            "The destroy action must be called with -d, --destroy, " "-a or --action."
        )
    ret = {}
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug("power_state: %s", record["power_state"])
        # shut down
        if record["power_state"] != "Halted":
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret["vbd"] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret["destroyed"] = True
        __utils__["cloud.fire_event"](
            "event",
            "destroyed instance",
            "salt/cloud/{}/destroyed".format(name),
            args={"name": name},
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
        )
        # drop the minion from the salt-cloud cache, if caching is enabled
        if __opts__.get("update_cachedir", False) is True:
            __utils__["cloud.delete_minion_cachedir"](
                name, _get_active_provider_name().split(":")[0], __opts__
            )
        __utils__["cloud.cachedir_index_del"](name)
        return ret
def sr_list(call=None):
    """
    Get a list of storage repositories

    .. code-block:: bash

        salt-cloud -f sr_list myxen

    :return: mapping of SR name label to its full record
    """
    if call != "function":
        raise SaltCloudSystemExit(
            "This function must be called with -f, --function argument."
        )
    ret = {}
    session = _get_session()
    srs = session.xenapi.SR.get_all()
    for sr in srs:
        sr_record = session.xenapi.SR.get_record(sr)
        ret[sr_record["name_label"]] = sr_record
    return ret
def host_list(call=None):
    """
    Get a list of Xen Servers

    .. code-block:: bash

        salt-cloud -f host_list myxen

    """
    if call == "action":
        raise SaltCloudSystemExit(
            "This function must be called with -f, --function argument."
        )
    session = _get_session()
    # one record per host, keyed by its name label
    return {
        record["name_label"]: record
        for record in (
            session.xenapi.host.get_record(h) for h in session.xenapi.host.get_all()
        )
    }
def pool_list(call=None):
    """
    Get a list of Resource Pools

    .. code-block:: bash

        salt-cloud -f pool_list myxen

    """
    if call == "action":
        raise SaltCloudSystemExit(
            "This function must be called with -f, --function argument."
        )
    session = _get_session()
    # one record per pool, keyed by its name label
    return {
        record["name_label"]: record
        for record in (
            session.xenapi.pool.get_record(p) for p in session.xenapi.pool.get_all()
        )
    }
def pif_list(call=None):
    """
    Get a list of physical network interfaces (PIFs)

    .. code-block:: bash

        salt-cloud -f pif_list myxen

    :return: mapping of PIF uuid to its full record
    """
    if call != "function":
        raise SaltCloudSystemExit(
            "This function must be called with -f, --function argument."
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        # records are keyed by uuid
        record = session.xenapi.PIF.get_record(pif)
        ret[record["uuid"]] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    """
    Get a list of virtual network interfaces  on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01

    :return: ``{name: {"vif-0": record, ...}}``
    """
    if call == "function":
        raise SaltCloudSystemExit(
            "This function must be called with -a, --action argument."
        )
    if name is None:
        # message typo fixed: was "rquired"
        return "A name kwarg is required"
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # enumerate replaces the manual counter variable
        for x, vif in enumerate(vifs):
            data["vif-{}".format(x)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    """
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01

    :return: ``{"vbd-0": record, ...}`` or an empty dict when the name
        does not match exactly one VM
    """
    if call == "function":
        raise SaltCloudSystemExit(
            "This function must be called with -a, --action argument."
        )
    if name is None:
        # message typo fixed: was "rquired"
        return "A name kwarg is required"
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vm = vms[0]
        vbds = session.xenapi.VM.get_VBDs(vm)
        if vbds is not None:
            # enumerate replaces the manual counter variable
            for x, vbd in enumerate(vbds):
                data["vbd-{}".format(x)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
    """
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen

    """
    if call == "action":
        raise SaltCloudSystemExit(
            "This function must be called with -f, --function argument."
        )
    # images in Xen are templates, so delegate to template_list()
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    """
    Get virtual block devices on VM

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01

    Destroys each VDI attached to the VM, except VDIs whose name label
    contains "iso" (those are left intact).

    :return: mapping of "vdi-<n>" to the destroyed VDI's name label
    """
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # empty VBDs point at the NULL reference
                if vbd_record["VDI"] != "OpaqueRef:NULL":
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(vbd_record["VDI"])
                    if "iso" not in vdi_record["name_label"]:
                        session.xenapi.VDI.destroy(vbd_record["VDI"])
                        ret["vdi-{}".format(x)] = vdi_record["name_label"]
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    """
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2

    :param kwargs: must contain ``name`` of the template to destroy
    :return: mapping of the template name to its destroy status
    """
    if call == "action":
        raise SaltCloudSystemExit(
            "The destroy_template function must be called with -f."
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get("name", None)
    session = _get_session()
    # get_all_records already returns {opaque_ref: record}; use those
    # records directly instead of re-fetching each one with get_record
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm, record in vms.items():
        if record["is_a_template"] and record["name_label"] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {"status": "destroyed"}
    if not found:
        ret[name] = {"status": "not found"}
    return ret
def get_pv_args(name, session=None, call=None):
    """
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01

    """
    if call == "function":
        raise SaltCloudException("This function must be called with -a or --action.")
    if session is None:
        log.debug("New session being created")
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    # normalize an empty PV args string to None
    return pv_args or None
def set_pv_args(name, kwargs=None, session=None, call=None):
    """
    Set PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"

    :return: True on success, False when pv_args is missing or the
        XenAPI call fails
    """
    if call == "function":
        raise SaltCloudException("This function must be called with -a or --action.")
    if session is None:
        log.debug("New session being created")
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    try:
        new_args = str(kwargs["pv_args"])
    except KeyError:
        log.error("No pv_args parameter found.")
        return False
    log.debug("Setting PV Args: %s", kwargs["pv_args"])
    try:
        session.xenapi.VM.set_PV_args(vm_ref, new_args)
    except XenAPI.Failure:
        log.info("Setting PV Args failed.")
        return False
    return True
| 27.823982 | 95 | 0.597199 |
acf7fa4903de671df51b2d01de11e35b3bb24184 | 255 | py | Python | HJ 58/code.py | swy20190/NiuKe | d9dbbbbac403f5b4fe37efe82f9708aff614f018 | [
"MIT"
] | null | null | null | HJ 58/code.py | swy20190/NiuKe | d9dbbbbac403f5b4fe37efe82f9708aff614f018 | [
"MIT"
] | null | null | null | HJ 58/code.py | swy20190/NiuKe | d9dbbbbac403f5b4fe37efe82f9708aff614f018 | [
"MIT"
] | null | null | null | while True:
try:
n, k = list(map(int, input().strip().split()))
arr = list(map(int, input().strip().split()))
arr.sort()
for i in range(k):
print(arr[i], end=' ')
print('')
except:
break
| 23.181818 | 54 | 0.447059 |
acf7fb42a118020bf958c6eceb36cade7c1512dc | 8,326 | py | Python | flumine/backtest/backtest.py | vincentmele/flumine | e4e3932532aceacf152c872f51991cc1945f8ae5 | [
"MIT"
] | null | null | null | flumine/backtest/backtest.py | vincentmele/flumine | e4e3932532aceacf152c872f51991cc1945f8ae5 | [
"MIT"
] | 15 | 2021-07-09T04:21:44.000Z | 2021-11-13T23:49:47.000Z | flumine/backtest/backtest.py | vincentmele/flumine | e4e3932532aceacf152c872f51991cc1945f8ae5 | [
"MIT"
] | null | null | null | import logging
import datetime
from collections import defaultdict
from ..baseflumine import BaseFlumine
from ..events import events
from .. import config, utils
from ..clients import ExchangeType
from ..exceptions import RunError
from ..order.order import OrderTypes
logger = logging.getLogger(__name__)
class FlumineBacktest(BaseFlumine):
"""
Single threaded implementation of flumine
for backtesting strategies with betfair
historic (or self recorded) streaming data.
"""
BACKTEST = True
def __init__(self, client):
super(FlumineBacktest, self).__init__(client)
self.handler_queue = []
def run(self) -> None:
if self.client.EXCHANGE != ExchangeType.SIMULATED:
raise RunError(
"Incorrect client provided, only a Simulated client can be used when backtesting"
)
with self:
self._monkey_patch_datetime()
"""
list of either single stream or complete events depending
on event_processing flag:
single: {None: [<Stream 1>], [<Stream 2>, ..]}
event: {123: [<Stream 1>, <Stream 2>, ..], 456: [..]}
Event data to be muxed/processed chronologically as per
live rather than single which is per market in isolation.
"""
event_streams = defaultdict(list) # eventId: [<Stream>, ..]
for stream in self.streams:
event_id = stream.event_id if stream.event_processing else None
event_streams[event_id].append(stream)
for event_id, streams in event_streams.items():
if event_id and len(streams) > 1:
logger.info(
"Starting historical event '{0}'".format(event_id),
extra={
"event_id": event_id,
"markets": [s.market_filter for s in streams],
},
)
# create cycles
cycles = [] # [[epoch, [MarketBook], gen], ..]
for stream in streams:
stream_gen = stream.create_generator()()
market_book = next(stream_gen)
publish_time_epoch = market_book[0].publish_time_epoch
cycles.append([publish_time_epoch, market_book, stream_gen])
# process cycles
while cycles:
# order by epoch
cycles.sort(key=lambda x: x[0])
# get current
_, market_book, stream_gen = cycles.pop(0)
# process current
self._process_market_books(events.MarketBookEvent(market_book))
# gen next
try:
market_book = next(stream_gen)
except StopIteration:
continue
publish_time_epoch = market_book[0].publish_time_epoch
# add back
cycles.append([publish_time_epoch, market_book, stream_gen])
self.handler_queue.clear()
logger.info("Completed historical event '{0}'".format(event_id))
else:
for stream in streams:
logger.info(
"Starting historical market '{0}'".format(
stream.market_filter
),
extra={
"market": stream.market_filter,
},
)
stream_gen = stream.create_generator()
for event in stream_gen():
self._process_market_books(events.MarketBookEvent(event))
self.handler_queue.clear()
logger.info(
"Completed historical market '{0}'".format(
stream.market_filter
)
)
self._process_end_flumine()
logger.info("Backtesting complete")
self._unpatch_datetime()
def _process_market_books(self, event: events.MarketBookEvent) -> None:
# todo DRY!
for market_book in event.event:
market_id = market_book.market_id
config.current_time = market_book.publish_time
# check if there are orders to process (limited to current market only)
if self.handler_queue:
self._check_pending_packages(market_id)
if market_book.status == "CLOSED":
self._process_close_market(event=events.CloseMarketEvent(market_book))
continue
# get market
market = self.markets.markets.get(market_id)
if market is None:
market = self._add_market(market_id, market_book)
self.log_control(events.MarketEvent(market))
elif market.closed:
self.markets.add_market(market_id, market)
# process market
market(market_book)
# process middleware
for middleware in self._market_middleware:
utils.call_middleware_error_handling(middleware, market)
# process current orders
if market.blotter.active:
self._process_backtest_orders(market)
for strategy in self.strategies:
if utils.call_strategy_error_handling(
strategy.check_market, market, market_book
):
utils.call_strategy_error_handling(
strategy.process_market_book, market, market_book
)
def process_order_package(self, order_package) -> None:
# place in pending list (wait for latency+delay)
self.handler_queue.append(order_package)
def _process_backtest_orders(self, market) -> None:
"""Remove order from blotter live
orders if complete and process
orders through strategies
"""
blotter = market.blotter
for order in blotter.live_orders:
if order.complete:
blotter.complete_order(order)
else:
if order.order_type.ORDER_TYPE == OrderTypes.LIMIT:
if order.size_remaining == 0:
order.execution_complete()
blotter.complete_order(order)
elif order.order_type.ORDER_TYPE in [
OrderTypes.LIMIT_ON_CLOSE,
OrderTypes.MARKET_ON_CLOSE,
]:
if order.current_order.status == "EXECUTION_COMPLETE":
order.execution_complete()
blotter.complete_order(order)
for strategy in self.strategies:
strategy_orders = blotter.strategy_orders(strategy)
utils.call_process_orders_error_handling(strategy, market, strategy_orders)
def _check_pending_packages(self, market_id: str) -> None:
processed = []
for order_package in self.handler_queue:
if (
order_package.market_id == market_id
and order_package.elapsed_seconds > order_package.simulated_delay
):
order_package.client.execution.handler(order_package)
processed.append(order_package)
for p in processed:
self.handler_queue.remove(p)
def _monkey_patch_datetime(self) -> None:
config.current_time = datetime.datetime.utcnow()
class NewDateTime(datetime.datetime):
@classmethod
def utcnow(cls):
return config.current_time
self._old_datetime = datetime.datetime
datetime.datetime = NewDateTime
def _unpatch_datetime(self) -> None:
datetime.datetime = self._old_datetime
def __repr__(self) -> str:
return "<FlumineBacktest>"
def __str__(self) -> str:
return "<FlumineBacktest>"
| 39.837321 | 97 | 0.541797 |
acf7fbbba9d25234439a8f9b03a93a9df21ea878 | 9,004 | py | Python | luna/pytorch.py | dugu9sword/siamese-triplet | bee6dfeb4b7cdff337c12c6f9e827eb05b54b3a9 | [
"BSD-3-Clause"
] | null | null | null | luna/pytorch.py | dugu9sword/siamese-triplet | bee6dfeb4b7cdff337c12c6f9e827eb05b54b3a9 | [
"BSD-3-Clause"
] | null | null | null | luna/pytorch.py | dugu9sword/siamese-triplet | bee6dfeb4b7cdff337c12c6f9e827eb05b54b3a9 | [
"BSD-3-Clause"
] | null | null | null | from .public import *
import torch
import random
import numpy as np
from torch.nn.utils.rnn import PackedSequence
import torch.nn.functional as F
from .logging import log
__model_path__ = "saved/models"
def get_device(model):
return next(model.parameters()).device
def cast_list(array, squeeze=True):
if isinstance(array, torch.Tensor):
return cast_list(array.detach().cpu().numpy(),squeeze)
if isinstance(array, list):
return cast_list(np.array(array),squeeze)
if isinstance(array, np.ndarray):
if squeeze:
array = array.squeeze().tolist()
return array if isinstance(array, list) else [array]
else:
return array.tolist()
def flt2str(flt, fmt=":8.4f", cat=None):
fmter = "{{{}}}".format(fmt)
if isinstance(flt, (float, int)):
return fmter.format(flt)
elif isinstance(flt, list):
str_lst = [fmter.format(ele) for ele in flt]
if cat is None:
return str_lst
else:
return cat.join(str_lst)
elif isinstance(flt, (torch.Tensor, np.ndarray)):
return flt2str(cast_list(flt), fmt, cat)
else:
raise Exception('WTF objects are you passing?')
def allocate_cuda_device(cuda_idx) -> torch.device:
    """Return ``cuda:<idx>`` when CUDA is available and idx >= 0, else CPU."""
    use_gpu = torch.cuda.is_available() and cuda_idx >= 0
    if use_gpu:
        return torch.device("cuda:{}".format(cuda_idx))
    return torch.device("cpu")
def set_gpu_device(device_id):
    """Select the active CUDA device (thin wrapper over torch.cuda)."""
    torch.cuda.set_device(device_id)
def gpu(*x):
    """Move the given tensors/modules to the GPU when one is available.

    A single argument comes back unwrapped; multiple arguments come back
    as a tuple (CPU path) or a lazy map (CUDA path), as before.
    """
    if not torch.cuda.is_available():
        return x[0] if len(x) == 1 else x
    if len(x) == 1:
        return x[0].cuda()
    return map(lambda m: m.cuda(), x)
def exist_model(saved_model_name):
    """Return True if a checkpoint for *saved_model_name* exists on disk.

    Checkpoint files are named ``<name>@<checkpoint>.ckpt`` inside
    ``__model_path__`` (the directory is created if missing).
    """
    if not os.path.exists(__model_path__):
        os.makedirs(__model_path__, exist_ok=True)
    for file in os.listdir(__model_path__):
        # Skip files that do not follow the "<name>@<ckpt>.ckpt" pattern.
        # The previous version called int(file.split('@')[1]) unconditionally
        # (and never used the result), crashing on any stray file here.
        if not file.endswith('.ckpt') or '@' not in file:
            continue
        name = file[:-5].split('@')[0]
        if name == saved_model_name:
            return True
    return False
def load_model(model, saved_model_name, checkpoint=-1):
    """Restore *model* from a checkpoint and return the checkpoint index.

    With ``checkpoint=-1`` the highest index found on disk is used; when no
    file exists a message is logged and the model is left untouched.
    """
    if not os.path.exists(__model_path__):
        os.makedirs(__model_path__, exist_ok=True)
    if checkpoint == -1:
        # Scan the model directory for the newest checkpoint of this model.
        for fname in os.listdir(__model_path__):
            stem = fname[:-5]
            name = stem.split('@')[0]
            ckpt = int(stem.split('@')[1])
            if name == saved_model_name and ckpt > checkpoint:
                checkpoint = ckpt
    path = "{}/{}@{}.ckpt".format(__model_path__, saved_model_name, checkpoint)
    if not os.path.exists(path):
        log("Checkpoint {} not found.".format(path))
    else:
        log("Restore model from checkpoint {}.".format(path))
        if torch.cuda.is_available():
            model.load_state_dict(torch.load(path))
        else:
            # Remap CUDA tensors onto the CPU on GPU-less machines.
            model.load_state_dict(
                torch.load(path, map_location=lambda storage, loc: storage))
    return checkpoint
def save_model(model, saved_model_name, checkpoint=-1):
    """Persist *model*'s state dict and return the next checkpoint index."""
    if not os.path.exists(__model_path__):
        os.makedirs(__model_path__, exist_ok=True)
    if checkpoint == -1:
        checkpoint = 0
    path = "{}/{}@{}.ckpt".format(__model_path__, saved_model_name, checkpoint)
    torch.save(model.state_dict(), path)
    log("Model saved to {}.".format(path))
    return checkpoint + 1
class ModelManager:
    """Couples a model with rate-limited checkpoint loading/saving."""

    def __init__(self, model, model_name, seconds=-1, init_ckpt=-1):
        self.model = model
        self.model_name = model_name
        self.seconds = seconds          # minimum interval between saves
        self.last_time = time.time()
        # Restore the latest (or requested) checkpoint immediately.
        self.ckpt = load_model(model=self.model,
                               saved_model_name=self.model_name,
                               checkpoint=init_ckpt)

    def save(self):
        """Save the model if more than ``seconds`` elapsed since last save."""
        now = time.time()
        if now - self.last_time > self.seconds:
            save_model(model=self.model,
                       saved_model_name=self.model_name,
                       checkpoint=self.ckpt)
            self.last_time = now

    def next_ckpt(self):
        """Advance the checkpoint counter."""
        self.ckpt += 1
@deprecated
def ten2var(x):
    """Tensor -> (GPU) Variable.  Deprecated: Variables are obsolete."""
    return gpu(torch.autograd.Variable(x))


@deprecated
def long2var(x):
    """Ints -> (GPU) LongTensor Variable.  Deprecated."""
    return gpu(torch.autograd.Variable(torch.LongTensor(x)))


@deprecated
def float2var(x):
    """Floats -> (GPU) FloatTensor Variable.  Deprecated."""
    return gpu(torch.autograd.Variable(torch.FloatTensor(x)))
def var2list(x):
    """Tensor/Variable -> (nested) plain Python list."""
    host_data = x.cpu().data
    return host_data.numpy().tolist()
def var2num(x):
    """First element of a Tensor/Variable, moved to the CPU."""
    host_data = x.cpu().data
    return host_data[0]
def load_word2vec(embedding: torch.nn.Embedding,
                  word_dict: Dict[str, int],
                  word2vec_path,
                  norm=True,
                  cached_name=None):
    """Fill *embedding* with pretrained vectors for the words in *word_dict*.

    Lines of the word2vec text file look like ``word v1 v2 ... vn``; words
    absent from the file keep a random N(0, 1) vector.  When *cached_name*
    is set, the finished matrix is cached via save_var / load_var.
    """
    cache_key = "{}{}".format(cached_name, ".norm" if norm else "")
    if cached_name and exist_var(cache_key):
        log("Load word2vec from cache {}".format(cache_key))
        pre_embedding = load_var(cache_key)
    else:
        log("Load word2vec from {}".format(word2vec_path))
        pre_embedding = np.random.normal(0, 1, embedding.weight.size())
        found, emb_size, error_num = 0, -1, 0
        for line in open(word2vec_path, errors='ignore').readlines():
            tokens = re.split(r"\s+", line.strip())
            if emb_size == -1:
                # The first line fixes the expected vector length.
                emb_size = len(tokens) - 1
            if len(tokens) != emb_size + 1 or len(tokens) < 10:
                # Malformed or header-style line; count and skip it.
                error_num += 1
                continue
            word = tokens[0]
            if word in word_dict:
                found += 1
                pre_embedding[word_dict[word]] = np.array(
                    list(map(float, tokens[1:])))
        log("Error line: ", error_num)
        log("Pre_train match case: {:.4f}".format(found / len(word_dict)))
        if norm:
            pre_embedding = pre_embedding / np.std(pre_embedding)
        if cached_name:
            save_var(pre_embedding, cache_key)
    embedding.weight.data.copy_(torch.from_numpy(pre_embedding))
def show_mean_std(tensor, name=""):
print("[INFO] {} Mean {:.4f} Std {:.4f} Max {:.4f}".format(
name,
tensor.mean().item(),
tensor.std().item(),
tensor.max().item()
))
def idx_to_msk(idx, num_classes):
    """One-hot encode a 1-D index tensor.

    idx [1, 2, 3] with num_classes=4 becomes::

        [[0, 1, 0, 0],
         [0, 0, 1, 0],
         [0, 0, 0, 1]]
    """
    assert idx.dim() == 1, "the dimension of idx must be 1, e.g. [1, 2, 3]"
    one_hot = torch.zeros((idx.size(0), num_classes), device=idx.device)
    one_hot.scatter_(1, idx.view(-1, 1), 1)
    return one_hot.byte()
def msk_to_idx(msk):
    """Inverse of idx_to_msk: one-hot rows back to a 1-D index tensor."""
    assert msk.sum() == msk.size(0), \
        "only one element is allowed to be 1 in each row"
    hot_positions = msk.nonzero()
    return hot_positions[:, 1].flatten()
def set_seed(seed):
    """Seed python, numpy and torch (CPU + all GPUs) for reproducibility."""
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
# def flip_by_length(inputs, lengths):
# rev_inputs = []
# for it_id, it_input in enumerate(inputs):
# it_len = lengths[it_id]
# rev_input = torch.cat([
# it_input.index_select(0, torch.tensor(list(reversed(range(it_len)))).to(inputs.device)),
# torch.zeros_like(it_input[it_len:]).to(inputs.device)
# ])
# rev_inputs.append(rev_input)
# rev_inputs = torch.stack(rev_inputs)
# return rev_inputs
# def focal_loss(inputs,
# targets,
# gamma=2, alpha=None, reduction="mean"):
# batch_size = inputs.size(0)
# num_classes = inputs.size(1)
# prob = F.softmax(inputs, dim=1).clamp(1e-10, 1.)
# # prob = inputs.exp()
#
# class_mask = inputs.data.new(batch_size, num_classes).fill_(0)
# ids = targets.view(-1, 1)
# class_mask.scatter_(1, ids.data, 1.)
# if alpha is None:
# alpha = torch.ones(num_classes, 1).to(inputs.device)
# alpha = alpha[ids.data.view(-1)]
#
# probs = (prob * class_mask).sum(1).view(-1, 1)
#
# log_p = probs.log()
#
# batch_loss = -alpha * (torch.pow((1 - probs), gamma)) * log_p
#
# if reduction == "mean":
# loss = batch_loss.mean()
# elif reduction == "sum":
# loss = batch_loss.sum()
# elif reduction == "zheng":
# pred = torch.argmax(inputs, dim=1)
# ce_mask = pred != targets
# loss = torch.mean(batch_loss * ce_mask)
# elif reduction == "none":
# loss = batch_loss
# else:
# raise Exception()
# return loss
# class NonLinearLayerWithRes(torch.nn.Module):
# def __init__(self, d_in, d_hidden, dropout):
# super(NonLinearLayerWithRes, self).__init__()
# self.fc1 = torch.nn.Linear(d_in, d_hidden)
# self.fc2 = torch.nn.Linear(d_hidden, d_in)
# self.drop = torch.nn.Dropout(dropout)
#
# def forward(self, x):
# out = self.fc2(F.relu(self.fc1(x)))
# out += x
# out = self.drop(out)
# # out = torch.nn.LayerNorm(out)
# return out
| 30.522034 | 102 | 0.584851 |
acf7fd300a63376e52ab4df49ada66ec4fe5884c | 6,359 | py | Python | LibSource/mpir/msvc/_msvc_solution.py | ekzyis/CrypTool-2 | 1af234b4f74486fbfeb3b3c49228cc36533a8c89 | [
"Apache-2.0"
] | 12 | 2021-09-29T14:50:06.000Z | 2022-03-31T15:01:21.000Z | LibSource/mpir/msvc/_msvc_solution.py | ekzyis/CrypTool-2 | 1af234b4f74486fbfeb3b3c49228cc36533a8c89 | [
"Apache-2.0"
] | 15 | 2021-12-24T22:53:49.000Z | 2021-12-25T10:03:13.000Z | LibSource/mpir/msvc/_msvc_solution.py | ekzyis/CrypTool-2 | 1af234b4f74486fbfeb3b3c49228cc36533a8c89 | [
"Apache-2.0"
] | 10 | 2021-10-17T19:46:51.000Z | 2022-03-18T02:57:57.000Z | # add a project file to the solution
from collections import defaultdict, OrderedDict
from os.path import join, exists, relpath, split
from uuid import uuid4
from re import compile
folder_guid = "{2150E333-8FDC-42A3-9474-1A3956D46DE8}"
vcxproj_guid = "{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}"
pyproj_guid = "{888888A0-9F3D-457C-B088-3A5042F75D52}"
sol_11 = r'''
Microsoft Visual Studio Solution File, Format Version {0}.00
# Visual Studio {1}
'''
sol_12 = r'''VisualStudioVersion = {0}
MinimumVisualStudioVersion = 10.0.40219.1
'''
sol_2 = '''Project("{}") = "{}", "{}", "{}"
EndProject
'''
sol_3 = r'''Global
'''
sol_4 = r''' GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Win32 = Debug|Win32
Debug|x64 = Debug|x64
Release|Win32 = Release|Win32
Release|x64 = Release|x64
EndGlobalSection
'''
sol_5 = r''' GlobalSection(ProjectConfigurationPlatforms) = postSolution
'''
sol_6 = r''' GlobalSection(NestedProjects) = preSolution
'''
sol_7 = ''' {} = {}
'''
sol_8 = r''' EndGlobalSection
'''
sol_9 = r''' GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
'''
sol_10 = r'''EndGlobal
'''
s_guid = r'\s*(\{\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\})\s*'
s_name = r'\s*\"([a-zA-Z][-.\\_a-zA-Z0-9]*\s*)\"\s*'
re_guid = compile(r'\s*\"\s*' + s_guid + r'\"\s*')
re_proj = compile(r'Project\s*\(\s*\"' + s_guid + r'\"\)\s*=\s*'
+ s_name + r'\s*,\s*' + s_name + r'\s*,\s*\"' + s_guid + r'\"')
re_fmap = compile(r'\s*' + s_guid + r'\s*=\s*' + s_guid)
acfg = r'(Debug|Release)\|(WIn32|x64)'
re_acfg = compile(s_guid + r'\.' + acfg + r'\.ActiveCfg\s*=\s*' + acfg)
class msvc_solution(object):
    """Parses, edits and regenerates a Visual Studio solution (.sln) file.

    Internal maps:
      g2fldr  folder guid  -> folder name
      g2proj  project guid -> (project type guid, project name, project path)
      gf2gpl  folder guid  -> [project guids]  ('' = projects outside folders)
      g2acfg  project guid -> 4-entry ActiveCfg table indexed by
              2*(Release) + (x64), values encoded the same way.
    """

    def __init__(self, path):
        self.soln_path, self.soln_file = split(path)
        self.g2fldr = {}
        self.g2proj = OrderedDict()
        self.gf2gpl = defaultdict(list)
        self.g2acfg = OrderedDict()
        if exists(path):
            lines = open(path).readlines()
            # Pass 1: collect folder and project records.
            for ln in lines:
                m = re_proj.search(ln)
                if m:
                    if m.group(1) == folder_guid and m.group(2) == m.group(3):
                        self.g2fldr[m.group(4)] = m.group(2)
                    elif (m.group(1) == vcxproj_guid and m.group(3).endswith('.vcxproj') or
                          m.group(1) == pyproj_guid and m.group(3).endswith('.pyproj')):
                        self.g2proj[m.group(4)] = (m.group(1), m.group(2), m.group(3))
            # Pass 2: folder nesting and ActiveCfg entries.
            for ln in lines:
                m = re_fmap.search(ln)
                if m:
                    if m.group(1) in self.g2proj and m.group(2) in self.g2fldr:
                        self.gf2gpl[m.group(2)].append(m.group(1))
                m = re_acfg.search(ln)
                if m:
                    g = m.group(1)
                    if g not in self.g2acfg:
                        self.g2acfg[g] = [0, 1, 2, 3]
                    ix = 2 * (m.group(2) == 'Release') + (m.group(3) == 'x64')
                    ac = 2 * (m.group(4) == 'Release') + (m.group(5) == 'x64')
                    self.g2acfg[g][ix] = ac
            # Projects not in any folder go under the '' pseudo-folder.
            for g in self.g2proj:
                for _, gpl in self.gf2gpl.items():
                    if g in gpl:
                        break
                else:
                    self.gf2gpl[''].append(g)
        assert len(self.g2fldr) == len(self.gf2gpl) - (1 if '' in self.gf2gpl else 0)
        assert sum(len(gpl) for _, gpl in self.gf2gpl.items()) == len(self.g2proj)

    def write_solution(self, vs_info):
        """Write the .sln file described by the internal maps."""
        # Drop folders that no longer contain any project.
        if len(self.g2fldr) > len(self.gf2gpl):
            for g in list(self.g2fldr):
                if g not in self.gf2gpl:
                    del self.g2fldr[g]
        assert len(self.g2fldr) == len(self.gf2gpl) - (1 if '' in self.gf2gpl else 0)
        assert sum(len(gpl) for _, gpl in self.gf2gpl.items()) == len(self.g2proj)
        vs_str = sol_11.format(vs_info['solution'], vs_info['visual studio'])
        if int(vs_info['msvc']) > 12:
            vs_str += sol_12.format(vs_info['msvc_long'])
        with open(join(self.soln_path, self.soln_file), 'w') as outf:
            outf.write(vs_str)
            for g, f in self.g2fldr.items():
                outf.write(sol_2.format(folder_guid, f, f, g))
            for g, (type_guid, proj_name, proj_path) in self.g2proj.items():
                outf.write(sol_2.format(type_guid, proj_name, proj_path, g))
            outf.write(sol_3)
            outf.write(sol_4)
            outf.write(sol_5)
            wx, dr = ['Win32', 'x64'], ['Debug', 'Release']
            for g in self.g2proj:
                if g in self.g2acfg:
                    ac4 = self.g2acfg.get(g, [0, 1, 2, 3])
                    # BUGFIX: use this project's type guid; the old code read
                    # a stale loop variable from the Project-record loop above,
                    # mislabelling platforms in mixed vcxproj/pyproj solutions.
                    proj_type = self.g2proj[g][0]
                    for cf in (0, 1):
                        for pl in (0, 1):
                            ac = ac4[2 * cf + pl]
                            awx = 'AnyCPU' if proj_type == pyproj_guid else wx[ac & 1]
                            adr = dr[(ac > 1) & 1]
                            outf.write(' {0:s}.{1:s}|{2:s}.ActiveCfg = {3:s}|{4:s}\n'.format(g, dr[cf], wx[pl], adr, awx))
            outf.write(sol_8)
            # BUGFIX: '' may be absent when every project sits in a folder;
            # the old unconditional `del` raised KeyError in that case.
            self.gf2gpl.pop('', None)
            if len(self.gf2gpl):
                outf.write(sol_6)
                for gf, gpl in self.gf2gpl.items():
                    for gp in gpl:
                        outf.write(sol_7.format(gp, gf))
                outf.write(sol_8)
            outf.write(sol_9)
            outf.write(sol_10)

    def get_project_guid(self, proj_name, file_path):
        """Return the existing guid for (name, path), or a fresh one."""
        relp = relpath(file_path, self.soln_path)
        for g in list(self.g2proj):
            if self.g2proj[g] == (vcxproj_guid, proj_name, relp):
                return g
        return '{' + str(uuid4()).upper() + '}'

    def set_acfg(self, g, mode):
        """Record the default ActiveCfg table for a new project."""
        if g not in self.g2acfg:
            proj_name = self.g2proj[g][1]
            if len(mode) == 2 or proj_name.endswith('gc') or proj_name.endswith('cxx'):
                cfg = [0, 1, 2, 3]
            else:
                # Single-platform project: pin both columns to that platform.
                cfg = [0, 0, 2, 2] if mode[0] == 'Win32' else [1, 1, 3, 3]
            self.g2acfg[g] = cfg

    def add_project(self, soln_folder, proj_name, file_path, p_guid, mode):
        """Register a project (creating its solution folder if needed)."""
        relp = relpath(file_path, self.soln_path)
        if soln_folder:
            for g, f in self.g2fldr.items():
                if f == soln_folder:
                    f_guid = g
                    break
            else:
                f_guid = '{' + str(uuid4()).upper() + '}'
                self.g2fldr[f_guid] = soln_folder
        for g in list(self.g2proj):
            if self.g2proj[g] == (vcxproj_guid, proj_name, relp):
                break
        else:
            self.g2proj[p_guid.upper()] = (vcxproj_guid, proj_name, relp)
            self.gf2gpl[f_guid if soln_folder else ''].append(p_guid.upper())
            self.set_acfg(p_guid.upper(), mode)
| 31.954774 | 109 | 0.575877 |
acf7fe0d313aced5cf03d484caa6d714ecf1fea7 | 6,202 | py | Python | electrum/invoices.py | p3ngu19z/electrum | 427b396c24ec1a3cfdca8e1a70c94537b35ad882 | [
"MIT"
] | 79 | 2017-11-10T03:00:57.000Z | 2022-02-27T16:35:04.000Z | electrum/invoices.py | p3ngu19z/electrum | 427b396c24ec1a3cfdca8e1a70c94537b35ad882 | [
"MIT"
] | 70 | 2017-12-25T05:28:26.000Z | 2022-03-26T22:31:47.000Z | electrum/invoices.py | p3ngu19z/electrum | 427b396c24ec1a3cfdca8e1a70c94537b35ad882 | [
"MIT"
] | 64 | 2017-12-19T09:09:23.000Z | 2022-02-08T10:26:13.000Z | import time
from typing import TYPE_CHECKING, List, Optional, Union, Dict, Any
from decimal import Decimal
import attr
from .json_db import StoredObject
from .i18n import _
from .util import age
from .lnaddr import lndecode, LnAddr
from . import constants
from .bitcoin import COIN
from .transaction import PartialTxOutput
if TYPE_CHECKING:
from .paymentrequest import PaymentRequest
# convention: 'invoices' = outgoing , 'request' = incoming
# types of payment requests
PR_TYPE_ONCHAIN = 0
PR_TYPE_LN = 2
# status of payment requests
PR_UNPAID = 0
PR_EXPIRED = 1
PR_UNKNOWN = 2 # sent but not propagated
PR_PAID = 3 # send and propagated
PR_INFLIGHT = 4 # unconfirmed
PR_FAILED = 5
PR_ROUTING = 6
pr_color = {
PR_UNPAID: (.7, .7, .7, 1),
PR_PAID: (.2, .9, .2, 1),
PR_UNKNOWN: (.7, .7, .7, 1),
PR_EXPIRED: (.9, .2, .2, 1),
PR_INFLIGHT: (.9, .6, .3, 1),
PR_FAILED: (.9, .2, .2, 1),
PR_ROUTING: (.9, .6, .3, 1),
}
pr_tooltips = {
PR_UNPAID:_('Unpaid'),
PR_PAID:_('Paid'),
PR_UNKNOWN:_('Unknown'),
PR_EXPIRED:_('Expired'),
PR_INFLIGHT:_('In progress'),
PR_FAILED:_('Failed'),
PR_ROUTING: _('Computing route...'),
}
PR_DEFAULT_EXPIRATION_WHEN_CREATING = 24*60*60 # 1 day
pr_expiration_values = {
0: _('Never'),
10*60: _('10 minutes'),
60*60: _('1 hour'),
24*60*60: _('1 day'),
7*24*60*60: _('1 week'),
}
assert PR_DEFAULT_EXPIRATION_WHEN_CREATING in pr_expiration_values
def _decode_outputs(outputs) -> List[PartialTxOutput]:
    """Coerce legacy output tuples into PartialTxOutput instances."""
    return [
        output if isinstance(output, PartialTxOutput)
        else PartialTxOutput.from_legacy_tuple(*output)
        for output in outputs
    ]
# hack: BOLT-11 is not really clear on what an expiry of 0 means.
# It probably interprets it as 0 seconds, so already expired...
# Our higher level invoices code however uses 0 for "never".
# Hence set some high expiration here
LN_EXPIRY_NEVER = 100 * 365 * 24 * 60 * 60 # 100 years
@attr.s
class Invoice(StoredObject):
    # Discriminator: PR_TYPE_ONCHAIN or PR_TYPE_LN.
    type = attr.ib(type=int, kw_only=True)

    # Provided by subclasses (as attrs fields or properties).
    message: str
    exp: int
    time: int

    def is_lightning(self):
        """True for Lightning invoices, False for on-chain ones."""
        return self.type == PR_TYPE_LN

    def get_status_str(self, status):
        """Human readable status; unpaid invoices with a finite expiry
        show a countdown instead of the plain 'Unpaid' tooltip."""
        if (status == PR_UNPAID
                and self.exp > 0
                and self.exp != LN_EXPIRY_NEVER):
            expiration = self.exp + self.time
            return _('Expires') + ' ' + age(expiration, include_seconds=True)
        return pr_tooltips[status]

    def get_amount_sat(self) -> Union[int, Decimal, str, None]:
        """Returns a decimal satoshi amount, or '!' or None."""
        raise NotImplementedError()

    @classmethod
    def from_json(cls, x: dict) -> 'Invoice':
        # note: these raise if x has extra fields
        subclass = LNInvoice if x.get('type') == PR_TYPE_LN else OnchainInvoice
        return subclass(**x)
@attr.s
class OnchainInvoice(Invoice):
    """An outgoing on-chain payment request."""
    message = attr.ib(type=str, kw_only=True)
    amount_sat = attr.ib(kw_only=True)  # type: Union[int, str]  # in satoshis. can be '!'
    exp = attr.ib(type=int, kw_only=True, validator=attr.validators.instance_of(int))
    time = attr.ib(type=int, kw_only=True, validator=attr.validators.instance_of(int))
    id = attr.ib(type=str, kw_only=True)
    outputs = attr.ib(kw_only=True, converter=_decode_outputs)  # type: List[PartialTxOutput]
    bip70 = attr.ib(type=str, kw_only=True)  # type: Optional[str]
    requestor = attr.ib(type=str, kw_only=True)  # type: Optional[str]

    def get_address(self) -> str:
        """Destination address — only meaningful for single-output invoices."""
        assert len(self.outputs) == 1
        return self.outputs[0].address

    def get_amount_sat(self) -> Union[int, str]:
        """Amount in satoshis ('!' means max); a missing amount maps to 0."""
        return self.amount_sat or 0

    @classmethod
    def from_bip70_payreq(cls, pr: 'PaymentRequest') -> 'OnchainInvoice':
        """Build an invoice from a parsed BIP70 payment request."""
        return OnchainInvoice(
            type=PR_TYPE_ONCHAIN,
            amount_sat=pr.get_amount(),
            outputs=pr.get_outputs(),
            message=pr.get_memo(),
            id=pr.get_id(),
            time=pr.get_time(),
            exp=pr.get_expiration_date() - pr.get_time(),
            bip70=pr.raw.hex(),
            requestor=pr.get_requestor(),
        )
@attr.s
class LNInvoice(Invoice):
    """An outgoing Lightning payment request (wraps a BOLT-11 string)."""
    invoice = attr.ib(type=str)
    amount_msat = attr.ib(kw_only=True)  # type: Optional[int]  # needed for zero amt invoices

    __lnaddr = None  # lazily decoded LnAddr

    @invoice.validator
    def check(self, attribute, value):
        lndecode(value)  # this checks the str can be decoded

    @property
    def _lnaddr(self) -> LnAddr:
        # Decode once and memoize.
        if self.__lnaddr is None:
            self.__lnaddr = lndecode(self.invoice)
        return self.__lnaddr

    @property
    def rhash(self) -> str:
        """Payment hash, hex encoded."""
        return self._lnaddr.paymenthash.hex()

    def get_amount_msat(self) -> Optional[int]:
        """Amount in msat; falls back to amount_msat for zero-amount invoices."""
        encoded = self._lnaddr.amount  # in BTC, or None
        amount = int(encoded * COIN * 1000) if encoded else None
        return amount or self.amount_msat

    def get_amount_sat(self) -> Union[Decimal, None]:
        amount_msat = self.get_amount_msat()
        return None if amount_msat is None else Decimal(amount_msat) / 1000

    @property
    def exp(self) -> int:
        return self._lnaddr.get_expiry()

    @property
    def time(self) -> int:
        return self._lnaddr.date

    @property
    def message(self) -> str:
        return self._lnaddr.get_description()

    @classmethod
    def from_bech32(cls, invoice: str) -> 'LNInvoice':
        """Construct from a bech32-encoded BOLT-11 string."""
        return LNInvoice(
            type=PR_TYPE_LN,
            invoice=invoice,
            amount_msat=lndecode(invoice).get_amount_msat(),
        )

    def to_debug_json(self) -> Dict[str, Any]:
        d = self.to_json()
        d.update({
            'pubkey': self._lnaddr.pubkey.serialize().hex(),
            'amount_BTC': self._lnaddr.amount,
            'rhash': self._lnaddr.paymenthash.hex(),
            'description': self._lnaddr.get_description(),
            'exp': self._lnaddr.get_expiry(),
            'time': self._lnaddr.date,
            # 'tags': str(lnaddr.tags),
        })
        return d
| 29.674641 | 94 | 0.62625 |
acf7ff29ecd2935f4269ff5b423e3717fed70bf7 | 2,881 | py | Python | perfkitbenchmarker/scripts/spark_sql_runner.py | pdeyhim/PerfKitBenchmarker | 1f32d1c4b3b9f388f9c35bf2128f5552d92ded4b | [
"Apache-2.0"
] | 2 | 2021-01-15T09:40:28.000Z | 2021-01-15T09:40:36.000Z | perfkitbenchmarker/scripts/spark_sql_runner.py | pdeyhim/PerfKitBenchmarker | 1f32d1c4b3b9f388f9c35bf2128f5552d92ded4b | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/scripts/spark_sql_runner.py | pdeyhim/PerfKitBenchmarker | 1f32d1c4b3b9f388f9c35bf2128f5552d92ded4b | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
"""Runs a Spark SQL query with preloaded temp views.
Views can be BigQuery tables or HCFS directories containing Parquet.
This is useful for Storage formats not expressible as External Hive Tables.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import time
import py4j
from pyspark import sql
def parse_args():
    """Parse argv into the runner's three flags."""
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument(
        '--sql-scripts',
        # Comma-separated list -> Python list.
        type=lambda csv: csv.split(','),
        required=True,
        help='List of SQL scripts staged in object storage to run')
    arg_parser.add_argument(
        '--table-metadata',
        metavar='METADATA',
        # JSON object -> iterable of (table, [format, options]) pairs.
        type=lambda s: json.loads(s).items(),
        default={},
        help="""\
JSON Object mappiing table names to arrays of length 2. The arrays contain the
format of the data and the options to pass to the dataframe reader. e.g.:
{
"my_bq_table": ["bigquery", {"table": "bigquery_public_data:dataset.table"}],
"my_parquet_table": ["parquet", {"path": "gs://some/directory"}]
}""")
    arg_parser.add_argument(
        '--report-dir',
        required=True,
        help='Directory to write out query timings to.')
    return arg_parser.parse_args()
def main(args):
    """Register the temp views, run each SQL script, write timings as JSON."""
    spark = (sql.SparkSession.builder
             .appName('Spark SQL Query')
             .enableHiveSupport()
             .getOrCreate())

    # Expose each configured table as a temp view before running queries.
    for name, (fmt, options) in args.table_metadata:
        logging.info('Loading %s', name)
        spark.read.format(fmt).options(**options).load().createTempView(name)

    timings = []
    for script in args.sql_scripts:
        # Read script from object storage using rdd API.
        query_text = '\n'.join(spark.sparkContext.textFile(script).collect())
        try:
            logging.info('Running %s', script)
            start_time = time.time()
            # spark-sql does not limit its output. Replicate that here by setting
            # limit to max Java Integer. Hopefully you limited the output in SQL or
            # you are going to have a bad time. Note this is not true of all TPC-DS or
            # TPC-H queries and they may crash with small JVMs.
            # pylint: disable=protected-access
            spark.sql(query_text).show(spark._jvm.java.lang.Integer.MAX_VALUE)
            # pylint: enable=protected-access
            timings.append(sql.Row(script=script,
                                   duration=time.time() - start_time))
        # These correspond to errors in low level Spark Excecution.
        # Let ParseException and AnalysisException fail the job.
        except (sql.utils.QueryExecutionException,
                py4j.protocol.Py4JJavaError) as e:
            logging.error('Script %s failed', script, exc_info=e)

    logging.info('Writing results to %s', args.report_dir)
    spark.createDataFrame(timings).coalesce(1).write.json(args.report_dir)
if __name__ == '__main__':
main(parse_args())
| 32.738636 | 80 | 0.693856 |
acf7ff42b98413adaab8a106e8be4a2a08cd26d8 | 260 | py | Python | python/lambda-cloudwatch-dashboard/app.py | marclyo/aws-cdk-examples | f041f07ebd4c94897e16d37ff813a38eb32645a1 | [
"Apache-2.0"
] | 2,941 | 2019-02-08T15:29:36.000Z | 2022-03-31T23:57:42.000Z | python/lambda-cloudwatch-dashboard/app.py | marclyo/aws-cdk-examples | f041f07ebd4c94897e16d37ff813a38eb32645a1 | [
"Apache-2.0"
] | 558 | 2019-02-14T23:32:02.000Z | 2022-03-30T00:35:11.000Z | python/lambda-cloudwatch-dashboard/app.py | marclyo/aws-cdk-examples | f041f07ebd4c94897e16d37ff813a38eb32645a1 | [
"Apache-2.0"
] | 1,409 | 2019-02-12T19:13:04.000Z | 2022-03-31T18:46:21.000Z | #!/usr/bin/env python3
from aws_cdk import core as cdk
from lambda_cloudwatch_dashboard.lambda_cloudwatch_dashboard_stack import LambdaCloudwatchDashboardStack
app = cdk.App()
LambdaCloudwatchDashboardStack(app, "LambdaCloudwatchDashboardStack")
app.synth()
| 28.888889 | 104 | 0.853846 |
acf7ff46e2ba8dd2cc8fe357758710ba89318e0f | 1,627 | py | Python | glucosetracker/subscribers/migrations/0001_initial.py | arhanair/glucose-tracker-monitor | 1ca05e8b2ce9106c0018f26a15e07b056dc3c5e4 | [
"MIT"
] | 134 | 2015-01-06T21:00:42.000Z | 2021-02-01T12:31:41.000Z | glucosetracker/subscribers/migrations/0001_initial.py | arhanair/glucose-tracker-monitor | 1ca05e8b2ce9106c0018f26a15e07b056dc3c5e4 | [
"MIT"
] | 17 | 2015-01-10T17:24:31.000Z | 2021-03-09T18:52:53.000Z | glucosetracker/subscribers/migrations/0001_initial.py | sebastian-code/glucose-tracker | 7b80883d715d49b03f6d4d315d02d8a3b3e67d61 | [
"MIT"
] | 88 | 2015-01-02T04:35:38.000Z | 2022-01-14T16:56:03.000Z | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: create/drop the subscribers_subscriber table."""

    def forwards(self, orm):
        """Apply: create the 'Subscriber' table and emit the create signal."""
        db.create_table(u'subscribers_subscriber', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
            ('source_ip', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
        ))
        db.send_create_signal(u'subscribers', ['Subscriber'])

    def backwards(self, orm):
        """Revert: drop the 'Subscriber' table."""
        db.delete_table(u'subscribers_subscriber')

    # Frozen ORM description used by South for this migration step.
    models = {
        u'subscribers.subscriber': {
            'Meta': {'ordering': "['-created']", 'object_name': 'Subscriber'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'source_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'})
        }
    }

    complete_apps = ['subscribers']
acf7fffd8d86d20a4ea2966e270834a69993d11b | 436 | py | Python | Python/zmq-zeromq/ps_inv_binding_sub.py | egustafson/sandbox | 9804e966347b33558b0497a04edb1a591d2d7773 | [
"Apache-2.0"
] | 2 | 2019-09-27T21:25:26.000Z | 2019-12-29T11:26:54.000Z | Python/zmq-zeromq/ps_inv_binding_sub.py | egustafson/sandbox | 9804e966347b33558b0497a04edb1a591d2d7773 | [
"Apache-2.0"
] | 7 | 2020-08-11T17:32:14.000Z | 2020-08-11T17:32:39.000Z | Python/zmq-zeromq/ps_inv_binding_sub.py | egustafson/sandbox | 9804e966347b33558b0497a04edb1a591d2d7773 | [
"Apache-2.0"
] | 2 | 2016-07-18T10:55:50.000Z | 2020-08-19T01:46:08.000Z | #!/usr/bin/env python
# ########################################
#
# Example: zmq Pub/Sub
#
import zmq
print("libzmq version: {}".format(zmq.zmq_version()))
print(" pyzmq version: {}".format(zmq.__version__))
context = zmq.Context()
print("Creating subscriber...")
socket = context.socket(zmq.SUB)
socket.bind("tcp://*:5557")
socket.setsockopt_string(zmq.SUBSCRIBE, u"")
while True:
msg = socket.recv_string()
print(msg)
| 18.956522 | 53 | 0.619266 |
acf800c26aa2c0c9092a674d5280487db06e6c0c | 1,030 | py | Python | venv/share/pyshared/scapy/layers/hsrp.py | pengwu/scapy_env | 3db9c5dea2e219048a2387649d6d89be342903d9 | [
"MIT"
] | 60 | 2017-12-01T14:46:01.000Z | 2022-02-22T19:17:42.000Z | scapy-2.2.0/scapy/layers/hsrp.py | Aliced3645/DataCenterMarketing | 67bc485e73cf538498a89b28465afb822717affb | [
"Apache-2.0"
] | null | null | null | scapy-2.2.0/scapy/layers/hsrp.py | Aliced3645/DataCenterMarketing | 67bc485e73cf538498a89b28465afb822717affb | [
"Apache-2.0"
] | 16 | 2018-11-05T13:21:30.000Z | 2021-04-26T17:36:30.000Z | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
HSRP (Hot Standby Router Protocol): proprietary redundancy protocol for Cisco routers.
"""
from scapy.fields import *
from scapy.packet import *
from scapy.layers.inet import UDP
class HSRP(Packet):
    """Hot Standby Router Protocol packet (Cisco first-hop redundancy)."""
    name = "HSRP"
    fields_desc = [
        ByteField("version", 0),
        # Message type.
        ByteEnumField("opcode", 0, {0: "Hello", 1: "Coup", 2: "Resign", 3: "Advertise"}),
        # Router state machine position; 16 = Active (forwarding).
        ByteEnumField("state", 16, {0: "Initial", 1: "Learn", 2: "Listen", 4: "Speak", 8: "Standby", 16: "Active"}),
        ByteField("hellotime", 3),    # seconds between Hellos
        ByteField("holdtime", 10),    # seconds before a peer is declared down
        ByteField("priority", 120),
        ByteField("group", 1),
        ByteField("reserved", 0),
        # Plain-text authentication, default "cisco".
        StrFixedLenField("auth", "cisco", 8),
        IPField("virtualIP", "192.168.1.1"),
    ]
| 31.212121 | 111 | 0.61165 |
acf801c12299926ec3816ae18ae083f70e6a031b | 1,687 | py | Python | app.py | ashleyadrias/netflix-recommender | 35fc5f0de10ae7d2994e30fa3249cb5e4ae04050 | [
"MIT"
] | null | null | null | app.py | ashleyadrias/netflix-recommender | 35fc5f0de10ae7d2994e30fa3249cb5e4ae04050 | [
"MIT"
] | null | null | null | app.py | ashleyadrias/netflix-recommender | 35fc5f0de10ae7d2994e30fa3249cb5e4ae04050 | [
"MIT"
] | null | null | null | import dash
import dash_bootstrap_components as dbc
"""
https://github.com/facultyai/dash-bootstrap-components
dash-bootstrap-components provides Bootstrap components.
Plotly Dash is great! However, creating the initial layout can require a lot
of boilerplate. dash-bootstrap-components reduces this boilerplate by providing
standard layouts and high-level components.
A good way to start customising the stylesheet is to use an alternative
pre-compiled theme. Bootswatch is a great place to find new themes. Links to
CDNs for each of the Bootswatch styles are also included , and can be used
with the external_stylesheets argument of the Dash constructor:
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.CERULEAN])
Go to https://bootswatch.com/ to preview these Bootswatch themes:
dbc.themes.BOOTSTRAP
dbc.themes.CERULEAN
dbc.themes.COSMO
dbc.themes.CYBORG
dbc.themes.DARKLY
dbc.themes.FLATLY
dbc.themes.JOURNAL
dbc.themes.LITERA
dbc.themes.LUMEN
dbc.themes.LUX
dbc.themes.MATERIA
dbc.themes.MINTY
dbc.themes.PULSE
dbc.themes.SANDSTONE
dbc.themes.SIMPLEX
dbc.themes.SKETCHY
dbc.themes.SLATE
dbc.themes.SOLAR
dbc.themes.SPACELAB
dbc.themes.SUPERHERO
dbc.themes.UNITED
dbc.themes.YETI
"""
# Stylesheets: Bootswatch CYBORG theme plus Font Awesome for icons.
external_stylesheets = [
    dbc.themes.CYBORG,  # Bootswatch theme
    'https://use.fontawesome.com/releases/v5.9.0/css/all.css',  # for social media icons
]

# Responsive viewport so the layout scales on mobile devices.
meta_tags = [
    {'name': 'viewport', 'content': 'width=device-width, initial-scale=1'},
]

app = dash.Dash(__name__, external_stylesheets=external_stylesheets, meta_tags=meta_tags)
app.config.suppress_callback_exceptions = True
app.title = 'Netflix Movie Recommender'  # appears in browser title bar

# Underlying Flask server, exposed for WSGI deployments.
server = app.server
acf8028e31f08dcdbdef46783f39b31ac058847d | 7,868 | py | Python | homeassistant/components/nextbus/sensor.py | zalke/home-assistant | a31e49c857722c0723dc5297cd83cbce0f8716f6 | [
"Apache-2.0"
] | 4 | 2019-07-03T22:36:57.000Z | 2019-08-10T15:33:25.000Z | homeassistant/components/nextbus/sensor.py | zalke/home-assistant | a31e49c857722c0723dc5297cd83cbce0f8716f6 | [
"Apache-2.0"
] | 7 | 2019-08-23T05:26:02.000Z | 2022-03-11T23:57:18.000Z | homeassistant/components/nextbus/sensor.py | zalke/home-assistant | a31e49c857722c0723dc5297cd83cbce0f8716f6 | [
"Apache-2.0"
] | 3 | 2019-04-28T16:35:45.000Z | 2020-05-28T15:21:59.000Z | """NextBus sensor."""
import logging
from itertools import chain
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
from homeassistant.const import DEVICE_CLASS_TIMESTAMP
from homeassistant.helpers.entity import Entity
from homeassistant.util.dt import utc_from_timestamp
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'nextbus'
CONF_AGENCY = 'agency'
CONF_ROUTE = 'route'
CONF_STOP = 'stop'
ICON = 'mdi:bus'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_AGENCY): cv.string,
vol.Required(CONF_ROUTE): cv.string,
vol.Required(CONF_STOP): cv.string,
vol.Optional(CONF_NAME): cv.string,
})
def listify(maybe_list):
    """Return list version of whatever value is passed in.

    The NextBus JSON API encodes "no values" as a missing attribute, one
    value as a bare dict and several values as a list; this normalizes
    all three shapes to a list.
    """
    if maybe_list is None:
        return []
    return maybe_list if isinstance(maybe_list, list) else [maybe_list]
def maybe_first(maybe_list):
    """First element of a non-empty list; anything else passes through."""
    is_nonempty_list = isinstance(maybe_list, list) and bool(maybe_list)
    return maybe_list[0] if is_nonempty_list else maybe_list
def validate_value(value_name, value, value_list):
    """Check that *value* is a known tag; log the valid options otherwise."""
    valid_values = {item['tag']: item['title'] for item in value_list}
    if value in valid_values:
        return True
    _LOGGER.error(
        'Invalid %s tag `%s`. Please use one of the following: %s',
        value_name,
        value,
        ', '.join(
            '{}: {}'.format(title, tag)
            for tag, title in valid_values.items()
        )
    )
    return False
def validate_tags(client, agency, route, stop):
    """Validate agency, route and stop tags against the NextBus API.

    Checked in dependency order (route list needs the agency, stop list
    needs the route), so later lookups are skipped on earlier failures.
    """
    agencies = client.get_agency_list()['agency']
    if not validate_value('agency', agency, agencies):
        return False

    routes = client.get_route_list(agency)['route']
    if not validate_value('route', route, routes):
        return False

    stops = client.get_route_config(route, agency)['route']['stop']
    if not validate_value('stop', stop, stops):
        return False

    return True
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Load values from configuration and initialize the platform."""
agency = config[CONF_AGENCY]
route = config[CONF_ROUTE]
stop = config[CONF_STOP]
name = config.get(CONF_NAME)
from py_nextbus import NextBusClient
client = NextBusClient(output_format='json')
# Ensures that the tags provided are valid, also logs out valid values
if not validate_tags(client, agency, route, stop):
_LOGGER.error('Invalid config value(s)')
return
add_entities([
NextBusDepartureSensor(
client,
agency,
route,
stop,
name,
),
], True)
class NextBusDepartureSensor(Entity):
"""Sensor class that displays upcoming NextBus times.
To function, this requires knowing the agency tag as well as the tags for
both the route and the stop.
This is possibly a little convoluted to provide as it requires making a
request to the service to get these values. Perhaps it can be simplifed in
the future using fuzzy logic and matching.
"""
def __init__(self, client, agency, route, stop, name=None):
"""Initialize sensor with all required config."""
self.agency = agency
self.route = route
self.stop = stop
self._custom_name = name
# Maybe pull a more user friendly name from the API here
self._name = '{} {}'.format(agency, route)
self._client = client
# set up default state attributes
self._state = None
self._attributes = {}
def _log_debug(self, message, *args):
"""Log debug message with prefix."""
_LOGGER.debug(':'.join((
self.agency,
self.route,
self.stop,
message,
)), *args)
@property
def name(self):
"""Return sensor name.
Uses an auto generated name based on the data from the API unless a
custom name is provided in the configuration.
"""
if self._custom_name:
return self._custom_name
return self._name
@property
def device_class(self):
"""Return the device class."""
return DEVICE_CLASS_TIMESTAMP
@property
def state(self):
"""Return current state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return additional state attributes."""
return self._attributes
@property
def icon(self):
"""Return icon to be used for this sensor."""
# Would be nice if we could determine if the line is a train or bus
# however that doesn't seem to be available to us. Using bus for now.
return ICON
def update(self):
"""Update sensor with new departures times."""
# Note: using Multi because there is a bug with the single stop impl
results = self._client.get_predictions_for_multi_stops(
[{
'stop_tag': int(self.stop),
'route_tag': self.route,
}],
self.agency,
)
self._log_debug('Predictions results: %s', results)
if 'Error' in results:
self._log_debug('Could not get predictions: %s', results)
if not results.get('predictions'):
self._log_debug('No predictions available')
self._state = None
# Remove attributes that may now be outdated
self._attributes.pop('upcoming', None)
return
results = results['predictions']
# Set detailed attributes
self._attributes.update({
'agency': results.get('agencyTitle'),
'route': results.get('routeTitle'),
'stop': results.get('stopTitle'),
})
# List all messages in the attributes
messages = listify(results.get('message', []))
self._log_debug('Messages: %s', messages)
self._attributes['message'] = ' -- '.join((
message.get('text', '')
for message in messages
))
# List out all directions in the attributes
directions = listify(results.get('direction', []))
self._attributes['direction'] = ', '.join((
direction.get('title', '')
for direction in directions
))
# Chain all predictions together
predictions = list(chain(*[
listify(direction.get('prediction', []))
for direction in directions
]))
# Short circuit if we don't have any actual bus predictions
if not predictions:
self._log_debug('No upcoming predictions available')
self._state = None
self._attributes['upcoming'] = 'No upcoming predictions'
return
# Generate list of upcoming times
self._attributes['upcoming'] = ', '.join(
p['minutes'] for p in predictions
)
latest_prediction = maybe_first(predictions)
self._state = utc_from_timestamp(
int(latest_prediction['epochTime']) / 1000
).isoformat()
| 29.249071 | 79 | 0.611464 |
acf802adfce00c1f052b4f24e7528a6146b3ff7d | 1,255 | py | Python | src/main/resources/assets/openpython/opos/v1.0/lib/ocpath.py | fossabot/OpenPython | 8fe3f794f2a6c543d96c1ef5c097ffa18f90b680 | [
"PSF-2.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 41 | 2018-10-25T06:15:31.000Z | 2022-02-20T11:20:43.000Z | src/main/resources/assets/openpython/opos/v1.0/lib/ocpath.py | fossabot/OpenPython | 8fe3f794f2a6c543d96c1ef5c097ffa18f90b680 | [
"PSF-2.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 16 | 2018-03-20T12:25:27.000Z | 2018-03-25T13:34:44.000Z | src/main/resources/assets/openpython/opos/v1.0/lib/ocpath.py | fossabot/OpenPython | 8fe3f794f2a6c543d96c1ef5c097ffa18f90b680 | [
"PSF-2.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 8 | 2018-11-04T02:03:15.000Z | 2022-01-13T11:46:28.000Z | import os
curdir = "."
pardir = ".."
sep = "/"
extsep = "."
altsep = "/"
pathsep = ":"
linesep = "\n"
defpath = pathsep.join(("/bin", "/usr/bin"))
devnull = "/dev/null"
def normcase(s):
return s
def normpath(s):
return s
def abspath(s):
if s[0] != "/":
return os.getcwd() + "/" + s
return s
def join(*args):
# TODO: this is non-compliant
if type(args[0]) is bytes:
return b"/".join(args)
else:
return "/".join(args)
def split(path):
if path == "":
return ("", "")
r = path.rsplit("/", 1)
if len(r) == 1:
return ("", path)
head = r[0] # .rstrip("/")
if not head:
head = "/"
return (head, r[1])
def dirname(path):
return split(path)[0]
def basename(path):
return split(path)[1]
def exists(path):
return os.access(path, os.F_OK)
# TODO
lexists = exists
def isdir(path):
import stat
try:
mode = os.stat(path)[0]
return stat.S_ISDIR(mode)
except OSError:
return False
def expanduser(s):
if s == "~" or s.startswith("~/"):
h = os.getenv("HOME")
return h + s[1:]
if s[0] == "~":
# Sorry folks, follow conventions
return "/home/" + s[1:]
return s
| 15.493827 | 44 | 0.511554 |
acf802f49caba20138f8186062938e1ded0950d2 | 2,597 | py | Python | tools/train.py | LDOUBLEV/DBNet.pytorch | 206f4a1e5cc3686284476f029a26fc69f610e898 | [
"Apache-2.0"
] | null | null | null | tools/train.py | LDOUBLEV/DBNet.pytorch | 206f4a1e5cc3686284476f029a26fc69f610e898 | [
"Apache-2.0"
] | null | null | null | tools/train.py | LDOUBLEV/DBNet.pytorch | 206f4a1e5cc3686284476f029a26fc69f610e898 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2019/8/23 22:00
# @Author : zhoujun
from __future__ import print_function
import argparse
import os
import anyconfig
def init_args():
parser = argparse.ArgumentParser(description='DBNet.pytorch')
parser.add_argument('--config_file', default='config/open_dataset_resnet18_FPN_DBhead_polyLR.yaml', type=str)
parser.add_argument('--local_rank', dest='local_rank', default=0, type=int, help='Use distributed training')
args = parser.parse_args()
return args
def main(config):
import torch
from models import build_model, build_loss
from data_loader import get_dataloader
from trainer import Trainer
from post_processing import get_post_processing
from utils import get_metric
if torch.cuda.device_count() > 1:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://", world_size=torch.cuda.device_count(), rank=args.local_rank)
config['distributed'] = True
else:
config['distributed'] = False
config['local_rank'] = args.local_rank
train_loader = get_dataloader(config['dataset']['train'], config['distributed'])
assert train_loader is not None
if 'validate' in config['dataset']:
validate_loader = get_dataloader(config['dataset']['validate'], False)
else:
validate_loader = None
criterion = build_loss(config['loss']).cuda()
config['arch']['backbone']['in_channels'] = 3 if config['dataset']['train']['dataset']['args']['img_mode'] != 'GRAY' else 1
model = build_model(config['arch'])
post_p = get_post_processing(config['post_processing'])
metric = get_metric(config['metric'])
trainer = Trainer(config=config,
model=model,
criterion=criterion,
train_loader=train_loader,
post_process=post_p,
metric_cls=metric,
validate_loader=validate_loader)
trainer.train()
if __name__ == '__main__':
import sys
import pathlib
__dir__ = pathlib.Path(os.path.abspath(__file__))
sys.path.append(str(__dir__))
sys.path.append(str(__dir__.parent.parent))
# project = 'DBNet.pytorch' # 工作项目根目录
# sys.path.append(os.getcwd().split(project)[0] + project)
from utils import parse_config
args = init_args()
assert os.path.exists(args.config_file)
config = anyconfig.load(open(args.config_file, 'rb'))
if 'base' in config:
config = parse_config(config)
main(config)
| 33.294872 | 142 | 0.665768 |
acf80398e8cb040bd8f8d0db8da9a52bf0c4625d | 11,678 | py | Python | tests/roc_yearly.py | jdtogni/trader | 0c54bd002e01554491eeeff56a17448ecb288900 | [
"Apache-2.0"
] | null | null | null | tests/roc_yearly.py | jdtogni/trader | 0c54bd002e01554491eeeff56a17448ecb288900 | [
"Apache-2.0"
] | null | null | null | tests/roc_yearly.py | jdtogni/trader | 0c54bd002e01554491eeeff56a17448ecb288900 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zipline.api import order, order_target_percent, record, symbol, symbols, set_symbol_lookup_date, history, \
get_datetime, schedule_function, date_rules, time_rules, get_open_orders
from numpy import diff, isnan, arange, insert, sort, array
from pandas import rolling_mean
import collections
from datetime import timedelta
import pprint
from dttrader import DTPortfolio, DTEODChangeTrader
class DTPortfolio:
def __init__(self, cash):
self.start_cash = cash
self.port = {'cash': cash}
self.buy_dates = []
self.sell_dates = []
self.pct = 0
def pre_cache(self, context, data):
for stock in context.stocks:
if stock.symbol not in self.port:
self.port[stock.symbol] = {'pos': 0, 'trades': [], 'first_price': data[stock].price}
self.port[stock.symbol]['last_price'] = data[stock].price
def order_add_percent(self, context, data, stock, pct, quiet=True):
now = get_datetime().date()
if not quiet:
print("buy", now, stock.symbol)
new_pct = min(self.pct + pct, 1)
self.buy_dates.append(now)
self.order_target_percent(context, data, stock, new_pct)
def order_sub_percent(self, context, data, stock, pct, quiet=True):
now = get_datetime().date()
if not quiet:
print("sell", now, stock.symbol)
new_pct = max(self.pct - pct, 0)
self.sell_dates.append(now)
self.order_target_percent(context, data, stock, new_pct)
def order_target_percent(self, context, data, stock, pct):
# quantopian...
# order_target_percent(stock, pct)
# our naive simulation...
now = get_datetime().date()
# pct = min(max(pct, 0), 1)
self.pct = pct
if stock.symbol not in self.port:
self.port[stock.symbol] = {'pos': 0, 'trades': []}
dict = self.port[stock.symbol]
price = int(data[stock].price * 100) / 100.0
value = self.port['cash'] + price * dict['pos']
to_invest = value * pct
new_pos = int(to_invest / price)
prev_pos = dict['pos']
diff_pos = new_pos - prev_pos
to_invest = int(price * diff_pos * 100) / 100.0
dict['pos'] = new_pos
self.port['cash'] -= to_invest
dict['trades'].append({'date': now, 'cost': to_invest, 'price': price, 'pos': diff_pos, 'value': value})
self.port[stock.symbol] = dict
def dump(self):
pprint.pprint(self.port, width=200)
def performance_csv(self, prefix=""):
sp = self.port[my_stock]
st = int((self.port['cash'] + sp['pos'] * sp['last_price']) * 100.0) / 100.0
print(prefix+",%.2f" % (st))
def csv(self):
#print("--- portfolio csv ---")
print("cash,10000")
sp = self.port[my_stock]
bhpos = 5000 / sp['first_price']
bh = int((bhpos * sp['last_price'] + 5000) * 100.0) / 100.0
st = int((self.port['cash'] + sp['pos'] * sp['last_price']) * 100.0) / 100.0
print("first price,%0.2f" % (sp['first_price']))
print("last price,%0.2f" % (sp['last_price']))
print("Buy&Hold,%0.2f" % (bh))
print("Strategy,%0.2f\n" % (st))
print("cost,date,position,price,st value,bh value")
for trade in sp['trades']:
d = trade['date']
bh = int((bhpos * trade['price'] + 5000) * 100.0) / 100.0
print("%0.2f,%d-%d-%d,%d,%0.2f,%0.2f,%0.2f" %
(trade['cost'], d.year, d.month, d.day, trade['pos'], trade['price'], trade['value'], bh))
print("Buy&Hold,%0.2f" % (bh))
print("Strategy,%0.2f" % (st))
#print("\n--- portfolio csv ---")
def plot_signals(self, ax1):
ymin, ymax = ax1.get_ylim()
ax1.vlines(x=self.sell_dates, ymin=ymin, ymax=ymax, color='r')
ax1.vlines(x=self.buy_dates, ymin=ymin, ymax=ymax, color='b')
# all_dates = pv.axes[0].date
# yx = (ymax - ymin) / 3
# ax1.vlines(x=all_dates, ymin=ymin+yx, ymax=ymax-yx, color='g')
class DTEODChangeTrader:
def __init__(self, buy_roc, sell_roc, buy_target, sell_target, roc_window=180):
self.buy_roc = buy_roc
self.sell_roc = sell_roc
self.buy_target = buy_target
self.sell_target = sell_target
self.roc_window = roc_window
self.name = "EODCT"
self.prices = 0
self.portfolio = DTPortfolio(10000)
@property
def portfolio(self):
return self.__portfolio
@portfolio.setter
def portfolio(self, portfolio):
self.__portfolio = portfolio
def pre_cache(self):
# closing prices for all stocks
self.prices = history(self.roc_window, '1d', 'price')
def handle(self, context, data, stock, quiet=True):
# find the historical daily % changes
# choose the top x% and bellow y% value
# use them as thresholds for sell/buy signals
velocity = self.prices.diff()
rate_of_change = velocity / self.prices
roc_sorted = rate_of_change.sort(stock)
roc_size = len(roc_sorted)
# index of nth element (top/bottom n% roc)
buy_index = roc_size * self.buy_roc
sell_index = -roc_size * self.sell_roc
buy_threashold = roc_sorted.values[buy_index][0]
sell_threashold = roc_sorted.values[sell_index][0]
record(self.name + '_buy', buy_threashold)
record(self.name + '_sell', sell_threashold)
# calculate today's (now's) % change (roc)
p_yesterday = self.prices[stock][-2]
p_today = data[stock].price
p_change = 1 - p_yesterday / p_today
if p_change > sell_threashold:
self.portfolio.order_sub_percent(context, data, stock, self.sell_target, quiet=quiet)
elif p_change < buy_threashold:
self.portfolio.order_add_percent(context, data, stock, self.buy_target, quiet=quiet)
year = 2015
my_stock = 'RUSL' # CHAU
trade_start = 0
def initialize(context):
set_symbol_lookup_date('2015-02-08')
context.stocks = symbols(my_stock)
context.prev_cash = 0
schedule_function(handle_end_of_day,
date_rules.every_day(),
time_rules.market_close(minutes=30))
def handle_data(context, data):
today = get_datetime().date()
return
def handle_end_of_day(context, data):
# yesterday + today close price
now = get_datetime()
# price_history = history(2, '1d', 'price')
global trade_start, port, trader
trader.pre_cache()
port.pre_cache(context, data)
for stock in context.stocks:
record(stock.symbol, data[stock].price)
if now < trade_start:
return
for stock in context.stocks:
# to build stats later
trader.handle(context, data, stock)
# print(context.portfolio.positions)
# print(context.portfolio.cash, context.portfolio.portfolio_value)
def plot_histogram(roc, ax3):
h2 = {}
# h3 = {}
# hns = {}
# hps = {}
# ps = 0
# ns = 0
# for h in roc:
# if h >= 0:
# ps += 1
# if ns > 0:
# hns[ns] = hns.get(ns, 0)+1
# ns = 0
# elif h < 0:
# ns += 1
# if ps > 0:
# hps[ps] = hps.get(ps, 0)+1
# ps = 0
#
# h3[h > 0] = h3.get(h > 0, 0)+1
# h = int(h*100)
# h2[h] = h2.get(h, 0)+1
#
# if h >= 0:
# hps[ps] = hps.get(ps, 0)+1
# else:
# hns[ns] = hns.get(ns, 0)+1
#
# # h2 = hns
# print("hps", hps)
# print("hns", hns)
# oh2 = collections.OrderedDict(sorted(h2.items()))
# X2 = arange(len(h2))
# ax3.bar(X2, oh2.values(), align='center', width=0.5)
# plt.xticks(X2, oh2.keys())
# print("h2", h2)
# oh2 = collections.OrderedDict(sorted(h2.items()))
#
# X2 = arange(len(h2))
# ax3.bar(X2, oh2.values(), align='center', width=0.5)
# plt.xticks(X2, oh2.keys())
# tmp = {}
# tmp[-1] = h3[False]
# tmp[1] = h3[True]
# h3 = tmp
# print("h3", h3)
#
# oh3 = collections.OrderedDict(sorted(h3.items()))
# X3 = arange(len(h3))
# ax3.bar(X3+0.5, oh3.values(), align='center', color='r', width=0.5)
def plot_roc(r, ax2):
v = diff(r)
v = insert(v, 0, v[0])
roc = v / r
obj = r.axes[0].date
ax2.plot(obj, roc, 'x-', label='v')
return roc
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
f, (ax1, ax2) = plt.subplots(nrows=2)
# ax1.set_ylabel('Portfolio value (USD)')
pv = results.portfolio_value
pv = (pv / pv[0])
# pv.plot(ax=ax1)
global year
ds = datetime(year, 1, 1)
de = datetime(year, 3, 1)
for symbol in [my_stock]:
r = results.get(symbol)
roc = plot_roc(r, ax2)
r = (r / r[0])
r.plot(ax=ax1)
results.get('EODCT_buy').plot(ax=ax2, color='g')
results.get('EODCT_sell').plot(ax=ax2, color='r')
# plot_histogram(roc, ax3)
ax2.set_xlim(ds, de)
ax1.set_xlim(ds, de)
# ax2.set_ylabel('price (USD)')
plt.gcf().set_size_inches(18, 8)
port.plot_signals(ax2)
port.csv()
print("show")
plt.show()
print("after show")
# print(results)
# Note: this if-block should be removed if running
# this algorithm on quantopian.com
if __name__ == '__main__':
from datetime import datetime
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
import sys
# RUSL 2014 10000 0.15 0.3 0.4 0.2
my_stock = sys.argv[1]
year = int(sys.argv[2])
port = DTPortfolio(int(sys.argv[3]))
trader = DTEODChangeTrader(buy_threshold=float(sys.argv[4]),
sell_threshold=float(sys.argv[5]),
buy_pct=float(sys.argv[6]),
sell_pct=float(sys.argv[7]))
trader.portfolio = port
# Set the simulation start and end dates
# create more data to prime metrics
start = datetime(year - 1, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(year + 1, 1, 1, 0, 0, 0, 0, pytz.utc)
trade_start = datetime(year, 1, 1, 0, 0, 0, 0, pytz.utc)
# Load price data from yahoo.
# print("load data")
data = load_from_yahoo(stocks=[my_stock], indexes={}, start=start,
end=end)
# Create and run the algorithm.
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data,
identifiers=[my_stock], capital_base=10000)
# print("run")
results = algo.run(data)
# print("analyze")
# analyze(results=results)
port.performance_csv(prefix="%s,%s,%s,%s,%s,%s,%s" % (sys.argv[1],sys.argv[2],sys.argv[3],
sys.argv[4],sys.argv[5],sys.argv[6],
sys.argv[7]))
| 32.529248 | 112 | 0.583405 |
acf8043b3f0db5a456a0a9121acb369bdce9f2d1 | 3,633 | py | Python | pinax/notifications_backends/backends/push_notifications.py | Boondockers-Welcome/pinax-notifications-backends | 1d83dfc7d745d7f9059b9b6d2a6d18a80d52c2b9 | [
"MIT"
] | null | null | null | pinax/notifications_backends/backends/push_notifications.py | Boondockers-Welcome/pinax-notifications-backends | 1d83dfc7d745d7f9059b9b6d2a6d18a80d52c2b9 | [
"MIT"
] | null | null | null | pinax/notifications_backends/backends/push_notifications.py | Boondockers-Welcome/pinax-notifications-backends | 1d83dfc7d745d7f9059b9b6d2a6d18a80d52c2b9 | [
"MIT"
] | null | null | null | from django.conf import settings
from _ssl import SSLError
from django.template import TemplateDoesNotExist
from django.utils.translation import ugettext
from .base import BaseBackend
from core.utils import get_class_from_path
GCMDevice = get_class_from_path(
path='push_notifications.models.GCMDevice')
APNSDevice = get_class_from_path(
path='push_notifications.models.APNSDevice')
GCMError = get_class_from_path(
path='push_notifications.gcm.GCMError')
APNSError = get_class_from_path(
path='push_notifications.apns.APNSError')
try:
use_notice_model = getattr(settings, 'PINAX_USE_NOTICE_MODEL')
except AttributeError:
use_notice_model = None
class PushNotificationBackend(BaseBackend):
spam_sensitivity = 2
def can_send(self, user, notice_type, scoping):
can_send = super(
PushNotificationBackend, self).can_send(user, notice_type, scoping)
gcm_device_exists = GCMDevice.objects.filter(user=user).exists()
apns_device_exists = APNSDevice.objects.filter(user=user).exists()
if can_send and (gcm_device_exists or apns_device_exists):
return True
return False
def deliver(self, recipient, sender, notice_type, extra_context):
raise NotImplementedError()
def deliver_bulk(self, recipients, sender, notice_type, extra_context):
"""
Sending GCM and APNs Push notifications using:
https://github.com/jleclanche/django-push-notifications
"""
context = self.default_context()
context.update({
"notice": ugettext(notice_type.display),
"current_site": context['current_site'].domain
})
if extra_context.get('aps'):
extra_context['aps'] = str(extra_context['aps']).encode("utf-8")
context.update(extra_context)
try:
messages = self.get_formatted_messages(
("push_notifications.txt",), notice_type.label, context)
except TemplateDoesNotExist:
# We just ignore the backend if the template does not exist.
pass
else:
context['subject'] = context['notice'][:70].strip()
context['body'] = messages["push_notifications.txt"][:70].strip()
gcm_devices = GCMDevice.objects.filter(user__in=recipients)
apns_devices = APNSDevice.objects.filter(user__in=recipients)
if gcm_devices:
try:
gcm_devices.send_message(None, extra=context)
except GCMError as e:
print('GCMError "%s"', str(e))
if apns_devices:
try:
apns_devices.send_message(None, extra=context)
except (APNSError, SSLError) as e:
print('APNSError "%s"', str(e))
if use_notice_model:
Notice = get_class_from_path(
path='pinax.notifications_backends.models.Notice')
# Based on http://stackoverflow.com/a/7390947
# This is mostly a log for sent notifications.
if context.get('subject') and context.get('body'):
context['message'] = (
context['subject'] + '\n\n' + context['body']
)
for recipient in recipients:
Notice.objects.create(
recipient=recipient, message=context['message'],
notice_type=notice_type, sender=sender,
medium='push_notifications'
)
| 36.33 | 79 | 0.612442 |
acf8049a4a5d2104917df16a4c5e23c01556d21c | 629 | py | Python | sample.py | vatsarahul999/Python_sample | aff1c9aae2d1e6cdea2d89ab36a5030a5a2180c0 | [
"MIT"
] | null | null | null | sample.py | vatsarahul999/Python_sample | aff1c9aae2d1e6cdea2d89ab36a5030a5a2180c0 | [
"MIT"
] | null | null | null | sample.py | vatsarahul999/Python_sample | aff1c9aae2d1e6cdea2d89ab36a5030a5a2180c0 | [
"MIT"
] | null | null | null | import pandas as pd
import joblib
from sklearn.model_selection import train_test_split
from sklearn import metrics
joblib_file = "joblib_RL_Model.pkl"
joblib_LR_model = joblib.load(joblib_file)
df = pd.read_csv('USA_Housing.csv')
X = df[['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms','Avg. Area Number of Bedrooms', 'Area Population']]
y = df['Price']
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=.4,random_state=101)
predict = joblib_LR_model.predict(X_test)
print(predict)
mean_avg_error = metrics.mean_absolute_error(y_test,predict)
print('mean_avg_error : '+ str(mean_avg_error)) | 37 | 130 | 0.782194 |
acf804ce4600ea0d7f3c1a7ad43dd821a9bb1ea4 | 19,437 | py | Python | benchmarks/benchmarks/stats.py | khavernathy/scipy | f09a01721a3859240a8b69f42df8a45508da86d7 | [
"BSD-3-Clause"
] | null | null | null | benchmarks/benchmarks/stats.py | khavernathy/scipy | f09a01721a3859240a8b69f42df8a45508da86d7 | [
"BSD-3-Clause"
] | 2 | 2015-01-06T19:51:42.000Z | 2015-12-04T21:54:44.000Z | benchmarks/benchmarks/stats.py | khavernathy/scipy | f09a01721a3859240a8b69f42df8a45508da86d7 | [
"BSD-3-Clause"
] | 1 | 2021-12-12T12:01:36.000Z | 2021-12-12T12:01:36.000Z | import warnings
import numpy as np
from .common import Benchmark, safe_import, is_xslow
with safe_import():
import scipy.stats as stats
with safe_import():
from scipy.stats._distr_params import distcont, distdiscrete
try: # builtin lib
from itertools import compress
except ImportError:
pass
class Anderson_KSamp(Benchmark):
    """Benchmark the k-sample Anderson-Darling test."""

    def setup(self, *args):
        # Three normal samples with shifted means (loc = 0, 1, 2); the
        # test then compares them for a common underlying distribution.
        samples = []
        for offset in range(3):
            samples.append(np.random.normal(loc=offset, size=1000))
        self.rand = samples

    def time_anderson_ksamp(self):
        # anderson_ksamp emits a UserWarning when the p-value is clipped
        # to its tabulated range; silence it to keep benchmark logs clean.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)
            stats.anderson_ksamp(self.rand)
class CorrelationFunctions(Benchmark):
    """Benchmark exact tests for 2x2 contingency tables.

    Each test is parameterized over the supported ``alternative``
    hypotheses ('two-sided', 'less', 'greater').
    """

    param_names = ['alternative']
    params = [
        ['two-sided', 'less', 'greater']
    ]

    def setup(self, alternative):
        # Parameter renamed from the misleading ``mode`` to match
        # ``param_names`` (asv passes the parameter positionally).
        # Small random 2x2 table of non-negative floats.
        # NOTE(review): entries are floats; the exact tests handle the
        # conversion internally -- confirm integer counts are not required.
        self.a = np.random.rand(2, 2) * 10

    def time_fisher_exact(self, alternative):
        # Results are intentionally discarded; only the timing matters,
        # so the previously unused result bindings were removed.
        stats.fisher_exact(self.a, alternative=alternative)

    def time_barnard_exact(self, alternative):
        stats.barnard_exact(self.a, alternative=alternative)

    def time_boschloo_exact(self, alternative):
        stats.boschloo_exact(self.a, alternative=alternative)
class ANOVAFunction(Benchmark):
    """Benchmark one-way ANOVA on three small 2-D samples."""

    def setup(self):
        # Three deterministic 6x3 samples drawn sequentially from the
        # same seeded generator, scaled to [0, 10).
        rng = np.random.default_rng(12345678)
        self.a, self.b, self.c = (rng.random((6, 3)) * 10 for _ in range(3))

    def time_f_oneway(self):
        # Exercise both the default (axis=0) and the axis=1 code paths.
        stats.f_oneway(self.a, self.b, self.c)
        stats.f_oneway(self.a, self.b, self.c, axis=1)
class Kendalltau(Benchmark):
    """Benchmark Kendall's tau over nan policies, methods and variants."""

    param_names = ['nan_policy', 'method', 'variant']
    params = [
        ['propagate', 'raise', 'omit'],
        ['auto', 'asymptotic', 'exact'],
        ['b', 'c']
    ]

    def setup(self, nan_policy, method, variant):
        # Two independent seeded permutations of 0..199; the data are the
        # same for every parameter combination.
        rng = np.random.default_rng(12345678)
        first = np.arange(200)
        rng.shuffle(first)
        second = np.arange(200)
        rng.shuffle(second)
        self.a = first
        self.b = second

    def time_kendalltau(self, nan_policy, method, variant):
        stats.kendalltau(self.a, self.b, nan_policy=nan_policy,
                         method=method, variant=variant)
class KS(Benchmark):
    """Benchmark one- and two-sample Kolmogorov-Smirnov tests."""

    param_names = ['alternative', 'mode']
    params = [
        ['two-sided', 'less', 'greater'],
        ['auto', 'exact', 'asymp'],
    ]

    def setup(self, alternative, mode):
        # Two seeded normal samples that differ in mean only.
        rng = np.random.default_rng(0x2e7c964ff9a5cd6be22014c09f1dbba9)
        draw = stats.norm.rvs
        self.a = draw(loc=5, scale=10, size=500, random_state=rng)
        self.b = draw(loc=8, scale=10, size=500, random_state=rng)

    def time_ks_1samp(self, alternative, mode):
        # One sample against the standard normal CDF.
        stats.ks_1samp(self.a, stats.norm.cdf,
                       alternative=alternative, mode=mode)

    def time_ks_2samp(self, alternative, mode):
        stats.ks_2samp(self.a, self.b, alternative=alternative, mode=mode)
class RankSums(Benchmark):
    """Benchmark the Wilcoxon rank-sum test for each alternative."""

    param_names = ['alternative']
    params = [
        ['two-sided', 'less', 'greater']
    ]

    def setup(self, alternative):
        # Two overlapping uniform samples of unequal size.
        rng = np.random.default_rng(0xb6acd7192d6e5da0f68b5d8ab8ce7af2)
        self.sample_x = rng.uniform(-1, 1, 200)
        self.sample_y = rng.uniform(-0.5, 1.5, 300)

    def time_ranksums(self, alternative):
        stats.ranksums(self.sample_x, self.sample_y,
                       alternative=alternative)
class BrunnerMunzel(Benchmark):
    """Benchmark the Brunner-Munzel test over its full option grid."""

    param_names = ['alternative', 'nan_policy', 'distribution']
    params = [
        ['two-sided', 'less', 'greater'],
        ['propagate', 'raise', 'omit'],
        ['t', 'normal']
    ]

    def setup(self, alternative, nan_policy, distribution):
        # The data are identical for every parameter combination; only
        # the keyword arguments to brunnermunzel vary between runs.
        rng = np.random.default_rng(0xb82c4db22b2818bdbc5dbe15ad7528fe)
        self.u1 = rng.uniform(-1, 1, 200)
        self.u2 = rng.uniform(-0.5, 1.5, 300)

    def time_brunnermunzel(self, alternative, nan_policy, distribution):
        stats.brunnermunzel(self.u1, self.u2,
                            alternative=alternative,
                            distribution=distribution,
                            nan_policy=nan_policy)
class InferentialStats(Benchmark):
    """Benchmark an assortment of hypothesis tests on fixed random data."""

    def setup(self):
        rng = np.random.default_rng(0x13d756fadb635ae7f5a8d39bbfb0c931)
        draw = stats.norm.rvs
        # a and b share a scale; c has twice the scale of the others.
        self.a = draw(loc=5, scale=10, size=500, random_state=rng)
        self.b = draw(loc=8, scale=10, size=500, random_state=rng)
        self.c = draw(loc=8, scale=20, size=500, random_state=rng)
        # Positive integer frequencies for the chi-square test.
        self.chisq = rng.integers(1, 20, 500)

    def time_ttest_ind_same_var(self):
        # Samples with equal variance: pooled and Welch variants.
        stats.ttest_ind(self.a, self.b)
        stats.ttest_ind(self.a, self.b, equal_var=False)

    def time_ttest_ind_diff_var(self):
        # Samples with different variances: pooled and Welch variants.
        stats.ttest_ind(self.a, self.c)
        stats.ttest_ind(self.a, self.c, equal_var=False)

    def time_chisqure(self):
        # NOTE: the name misspells "chisquare", but it is the recorded
        # asv benchmark id, so renaming would orphan historical results.
        stats.chisquare(self.chisq)

    def time_friedmanchisquare(self):
        stats.friedmanchisquare(self.a, self.b, self.c)

    def time_epps_singleton_2samp(self):
        stats.epps_singleton_2samp(self.a, self.b)

    def time_kruskal(self):
        # Uses the masked-array (mstats) Kruskal-Wallis implementation.
        stats.mstats.kruskal(self.a, self.b)
class DistributionsAll(Benchmark):
    """Benchmark every public distribution over a grid of methods.

    ``setup`` translates each (distribution, method) pair into a bound
    method plus positional/keyword arguments; ``time_distribution`` then
    just invokes that prepared call.
    """
    # all distributions are in this list. A conversion to a set is used to
    # remove duplicates that appear more than once in either `distcont` or
    # `distdiscrete`.
    dists = sorted(list(set([d[0] for d in distcont + distdiscrete])))

    param_names = ['dist_name', 'method']
    params = [
        dists, ['pdf/pmf', 'logpdf/logpmf', 'cdf', 'logcdf', 'rvs', 'fit',
                'sf', 'logsf', 'ppf', 'isf', 'moment', 'stats_s', 'stats_v',
                'stats_m', 'stats_k', 'stats_mvsk', 'entropy']
    ]
    # stats_mvsk is tested separately because of gh-11742
    # `moment` tests a higher moment (order 5)
    # mapping of distribution name -> default shape parameters
    dist_data = dict(distcont + distdiscrete)
    # custom shape values can be provided for any distribution in the format
    # `dist_name`: [shape1, shape2, ...]
    custom_input = {}
    # these are the distributions that are the slowest
    slow_dists = ['nct', 'ncx2', 'argus', 'cosine', 'foldnorm', 'gausshyper',
                  'kappa4', 'invgauss', 'wald', 'vonmises_line', 'ksone',
                  'genexpon', 'exponnorm', 'recipinvgauss', 'vonmises',
                  'foldcauchy', 'kstwo', 'levy_stable', 'skewnorm']
    slow_methods = ['moment']

    def setup(self, dist_name, method):
        """Prepare ``self.method``, ``self.args`` and ``self.kwds``.

        Raises NotImplementedError to make asv skip combinations that are
        too slow (unless SCIPY_XSLOW is enabled) or unsupported.
        """
        if not is_xslow() and (dist_name in self.slow_dists
                               or method in self.slow_methods):
            raise NotImplementedError("Skipped")
        self.dist = getattr(stats, dist_name)
        dist_shapes = self.dist_data[dist_name]
        if isinstance(self.dist, stats.rv_discrete):
            # discrete distributions only use location
            self.isCont = False
            kwds = {'loc': 4}
        else:
            # continuous distributions use location and scale
            self.isCont = True
            kwds = {'loc': 4, 'scale': 10}
        # Evaluate the method over 100 points covering the central 99% of
        # the distribution's support (for the x-based methods).
        bounds = self.dist.interval(.99, *dist_shapes, **kwds)
        x = np.linspace(*bounds, 100)
        args = [x, *self.custom_input.get(dist_name, dist_shapes)]
        self.args = args
        self.kwds = kwds
        # The remaining branches replace args/kwds for methods whose
        # signature differs from the plain ``method(x, *shapes)`` form.
        if method == 'fit':
            # there are no fit methods for discrete distributions
            if isinstance(self.dist, stats.rv_discrete):
                raise NotImplementedError("This attribute is not a member "
                                          "of the distribution")
            # the only positional argument is the data to be fitted
            self.args = [self.dist.rvs(*dist_shapes, size=100, random_state=0, **kwds)]
        elif method == 'rvs':
            # add size keyword argument for data creation
            kwds['size'] = 1000
            kwds['random_state'] = 0
            # keep shapes as positional arguments, omit linearly spaced data
            self.args = args[1:]
        elif method == 'pdf/pmf':
            method = ('pmf' if isinstance(self.dist, stats.rv_discrete)
                      else 'pdf')
        elif method == 'logpdf/logpmf':
            method = ('logpmf' if isinstance(self.dist, stats.rv_discrete)
                      else 'logpdf')
        elif method in ['ppf', 'isf']:
            # Inverse methods take probabilities in [0, 1], not x values.
            self.args = [np.linspace((0, 1), 100), *args[1:]]
        elif method == 'moment':
            # the first four moments may be optimized, so compute the fifth
            self.args = [5, *args[1:]]
        elif method.startswith('stats_'):
            # 'stats_s'/'stats_v'/... map to stats(moments=<suffix>).
            kwds['moments'] = method[6:]
            method = 'stats'
            self.args = args[1:]
        elif method == 'entropy':
            self.args = args[1:]
        self.method = getattr(self.dist, method)

    def time_distribution(self, dist_name, method):
        """Invoke the call prepared by ``setup``."""
        self.method(*self.args, **self.kwds)
class Distribution(Benchmark):
    """Legacy benchmark of pdf/cdf/rvs/fit for three distributions.

    Kept alongside ``DistributionsAll``; the explicit ``version`` hash at
    the bottom pins the benchmark identity so historical asv results are
    not invalidated.
    """
    # though there is a new version of this benchmark that runs all the
    # distributions, at the time of writing there was odd behavior on
    # the asv for this benchmark, so it is retained.
    # https://pv.github.io/scipy-bench/#stats.Distribution.time_distribution
    param_names = ['distribution', 'properties']
    params = [
        ['cauchy', 'gamma', 'beta'],
        ['pdf', 'cdf', 'rvs', 'fit']
    ]

    def setup(self, distribution, properties):
        # Fixed uniform sample in [0, 1); used as evaluation points for
        # pdf/cdf and as data for fit (rvs ignores it).
        rng = np.random.default_rng(12345678)
        self.x = rng.random(100)

    def time_distribution(self, distribution, properties):
        # Dispatch on (distribution, property); shape/loc/scale values are
        # arbitrary but fixed so timings stay comparable across runs.
        if distribution == 'gamma':
            if properties == 'pdf':
                stats.gamma.pdf(self.x, a=5, loc=4, scale=10)
            elif properties == 'cdf':
                stats.gamma.cdf(self.x, a=5, loc=4, scale=10)
            elif properties == 'rvs':
                stats.gamma.rvs(size=1000, a=5, loc=4, scale=10)
            elif properties == 'fit':
                stats.gamma.fit(self.x, loc=4, scale=10)
        elif distribution == 'cauchy':
            if properties == 'pdf':
                stats.cauchy.pdf(self.x, loc=4, scale=10)
            elif properties == 'cdf':
                stats.cauchy.cdf(self.x, loc=4, scale=10)
            elif properties == 'rvs':
                stats.cauchy.rvs(size=1000, loc=4, scale=10)
            elif properties == 'fit':
                stats.cauchy.fit(self.x, loc=4, scale=10)
        elif distribution == 'beta':
            if properties == 'pdf':
                stats.beta.pdf(self.x, a=5, b=3, loc=4, scale=10)
            elif properties == 'cdf':
                stats.beta.cdf(self.x, a=5, b=3, loc=4, scale=10)
            elif properties == 'rvs':
                stats.beta.rvs(size=1000, a=5, b=3, loc=4, scale=10)
            elif properties == 'fit':
                stats.beta.fit(self.x, loc=4, scale=10)

    # Retain old benchmark results (remove this if changing the benchmark)
    time_distribution.version = "fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0"
class DescriptiveStats(Benchmark):
    """Time ``stats.mode`` on integer data with few vs. many distinct values."""
    param_names = ['n_levels']
    params = [
        [10, 1000]
    ]

    def setup(self, n_levels):
        rng = np.random.default_rng(12345678)
        # 1000 x 10 integers drawn uniformly from [0, n_levels).
        self.levels = rng.integers(n_levels, size=(1000, 10))

    def time_mode(self, n_levels):
        stats.mode(self.levels, axis=0)
class GaussianKDE(Benchmark):
    """Benchmarks for evaluating a 2-D ``stats.gaussian_kde``."""

    def setup(self):
        rng = np.random.default_rng(12345678)
        n_samples = 2000
        # Two correlated-free 1-D normal samples forming a 2-D data set.
        sample_x = rng.normal(size=n_samples)
        sample_y = rng.normal(scale=0.5, size=n_samples)

        # 200x200 evaluation grid spanning the sample's bounding box.
        grid_x, grid_y = np.mgrid[sample_x.min():sample_x.max():200j,
                                  sample_y.min():sample_y.max():200j]
        self.positions = np.vstack([grid_x.ravel(), grid_y.ravel()])

        self.kernel = stats.gaussian_kde(np.vstack([sample_x, sample_y]))

    def time_gaussian_kde_evaluate_few_points(self):
        # test gaussian_kde evaluate on a small number of points
        self.kernel(self.positions[:, :10])

    def time_gaussian_kde_evaluate_many_points(self):
        # test gaussian_kde evaluate on many points
        self.kernel(self.positions)
class GroupSampling(Benchmark):
    """Time sampling random matrices from the classical matrix groups."""
    param_names = ['dim']
    params = [[3, 10, 50, 200]]

    def setup(self, dim):
        # One generator shared by all timed methods of a given run.
        self.rng = np.random.default_rng(12345678)

    def time_unitary_group(self, dim):
        stats.unitary_group.rvs(dim, random_state=self.rng)

    def time_ortho_group(self, dim):
        stats.ortho_group.rvs(dim, random_state=self.rng)

    def time_special_ortho_group(self, dim):
        stats.special_ortho_group.rvs(dim, random_state=self.rng)
class BinnedStatisticDD(Benchmark):
params = ["count", "sum", "mean", "min", "max", "median", "std", np.std]
def setup(self, statistic):
rng = np.random.default_rng(12345678)
self.inp = rng.random(9999).reshape(3, 3333) * 200
self.subbin_x_edges = np.arange(0, 200, dtype=np.float32)
self.subbin_y_edges = np.arange(0, 200, dtype=np.float64)
self.ret = stats.binned_statistic_dd(
[self.inp[0], self.inp[1]], self.inp[2], statistic=statistic,
bins=[self.subbin_x_edges, self.subbin_y_edges])
def time_binned_statistic_dd(self, statistic):
stats.binned_statistic_dd(
[self.inp[0], self.inp[1]], self.inp[2], statistic=statistic,
bins=[self.subbin_x_edges, self.subbin_y_edges])
def time_binned_statistic_dd_reuse_bin(self, statistic):
stats.binned_statistic_dd(
[self.inp[0], self.inp[1]], self.inp[2], statistic=statistic,
binned_statistic_result=self.ret)
class ContinuousFitAnalyticalMLEOverride(Benchmark):
    """Benchmark ``fit`` for distributions that override the generic MLE
    with analytical formulas, across all combinations of fixed parameters."""
    # list of distributions to time
    dists = ["pareto", "laplace", "rayleigh",
             "invgauss", "gumbel_r", "gumbel_l"]
    # add custom values for rvs and fit, if desired, for any distribution:
    # key should match name in dists and value should be list of loc, scale,
    # and shapes
    custom_input = {}
    fnames = ['floc', 'fscale', 'f0', 'f1', 'f2']
    fixed = {}
    distcont = dict(distcont)

    param_names = ["distribution", "loc_fixed", "scale_fixed",
                   "shape1_fixed", "shape2_fixed", "shape3_fixed"]
    params = [dists, * [[True, False]] * 5]

    def setup(self, dist_name, loc_fixed, scale_fixed, shape1_fixed,
              shape2_fixed, shape3_fixed):
        # Look the distribution up by name.  ``getattr`` replaces the
        # original ``eval("stats." + dist_name)``, which was both slower
        # and an eval-on-string anti-pattern.
        self.distn = getattr(stats, dist_name)

        # default `loc` and `scale` are .834 and 4.342, and shapes are from
        # `_distr_params.py`
        default_shapes = self.distcont[dist_name]
        param_values = self.custom_input.get(dist_name, [.834, 4.342,
                                                         *default_shapes])

        # separate relevant and non-relevant parameters for this distribution
        # based on the number of shapes
        nparam = len(param_values)
        all_parameters = [loc_fixed, scale_fixed, shape1_fixed, shape2_fixed,
                          shape3_fixed]
        relevant_parameters = all_parameters[:nparam]
        nonrelevant_parameters = all_parameters[nparam:]

        # skip if all parameters are fixed or if non relevant parameters are
        # not all false
        if True in nonrelevant_parameters or False not in relevant_parameters:
            raise NotImplementedError("skip non-relevant case")

        # add fixed values if fixed in relevant_parameters to self.fixed
        # with keys from self.fnames and values from parameter_values
        self.fixed = dict(zip(compress(self.fnames, relevant_parameters),
                              compress(param_values, relevant_parameters)))
        self.data = self.distn.rvs(*param_values, size=1000)

    def time_fit(self, dist_name, loc_fixed, scale_fixed, shape1_fixed,
                 shape2_fixed, shape3_fixed):
        self.distn.fit(self.data, **self.fixed)
class BenchMoment(Benchmark):
    """Benchmark ``stats.moment`` for several orders and sample sizes."""
    params = [
        [1, 2, 3, 8],
        [100, 1000, 10000],
    ]
    param_names = ["order", "size"]

    def setup(self, order, size):
        # Seed the global RNG so every run times the same sample.  The
        # original called ``np.random.random(1234)``, which draws and
        # discards 1234 values instead of seeding.
        np.random.seed(1234)
        self.x = np.random.random(size)

    def time_moment(self, order, size):
        stats.moment(self.x, order)
class BenchSkewKurtosis(Benchmark):
    """Benchmark ``stats.skew`` and ``stats.kurtosis`` with bias on/off."""
    params = [
        [1, 2, 3, 8],
        [100, 1000, 10000],
        [False, True]
    ]
    param_names = ["order", "size", "bias"]

    def setup(self, order, size, bias):
        # Seed for reproducibility; the original ``np.random.random(1234)``
        # generated and discarded 1234 values rather than seeding.
        np.random.seed(1234)
        self.x = np.random.random(size)

    def time_skew(self, order, size, bias):
        stats.skew(self.x, bias=bias)

    def time_kurtosis(self, order, size, bias):
        stats.kurtosis(self.x, bias=bias)
class BenchQMCDiscrepancy(Benchmark):
    """Benchmark ``stats.qmc.discrepancy`` for each supported method."""
    param_names = ['method']
    params = [
        ["CD", "WD", "MD", "L2-star",]
    ]

    def setup(self, method):
        rng = np.random.default_rng(1234)
        # 1000 points in 10 dimensions, uniform in the unit hypercube.
        self.sample = rng.random((1000, 10))

    def time_discrepancy(self, method):
        # Dead store removed: the result was previously bound to an unused
        # local (``disc``), which only added noise to the timing.
        stats.qmc.discrepancy(self.sample, method=method)
class BenchQMCHalton(Benchmark):
    """Time Halton sequence generation for several dimensions/sizes."""
    param_names = ['d', 'scramble', 'n']
    params = [
        [1, 10],
        [True, False],
        [10, 1_000, 100_000]
    ]

    def setup(self, d, scramble, n):
        self.rng = np.random.default_rng(1234)

    def time_halton(self, d, scramble, n):
        # Engine construction is timed together with drawing ``n`` points.
        seq = stats.qmc.Halton(d, scramble=scramble, seed=self.rng)
        seq.random(n)
class NumericalInverseHermite(Benchmark):
    """Time construction of ``stats.NumericalInverseHermite`` for every
    continuous distribution listed in ``distcont``."""
    param_names = ['distribution']
    params = [distcont]

    def setup(self, *args):
        # NOTE(review): ``self.rand`` is never read by ``time_fni`` --
        # possibly leftover from an earlier version; confirm before removing.
        self.rand = [np.random.normal(loc=i, size=1000) for i in range(3)]

    def time_fni(self, distcase):
        distname, shapes = distcase

        # Distributions that are too slow to set up, or currently fail.
        slow_dists = {'ksone', 'kstwo', 'levy_stable', 'skewnorm'}
        fail_dists = {'beta', 'gausshyper', 'geninvgauss', 'ncf', 'nct',
                      'norminvgauss', 'genhyperbolic', 'studentized_range'}

        if distname in slow_dists or distname in fail_dists:
            # asv treats NotImplementedError as a skipped benchmark.
            raise NotImplementedError("skipped")

        dist = getattr(stats, distname)(*shapes)

        with np.testing.suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "overflow encountered")
            sup.filter(RuntimeWarning, "divide by zero")
            sup.filter(RuntimeWarning, "invalid value encountered")
            stats.NumericalInverseHermite(dist)
class DistanceFunctions(Benchmark):
    """Time the statistical distance functions on weighted random samples."""
    param_names = ['n_size']
    params = [
        [10, 4000]
    ]

    def setup(self, n_size):
        rng = np.random.default_rng(12345678)
        # ``v`` deliberately has half the length of ``u``.
        self.u_values = rng.random(n_size) * 10
        self.u_weights = rng.random(n_size) * 10
        self.v_values = rng.random(n_size // 2) * 10
        self.v_weights = rng.random(n_size // 2) * 10

    def time_energy_distance(self, n_size):
        # Dead store removed: the result was previously bound to an unused
        # local (``distance``).
        stats.energy_distance(
            self.u_values, self.v_values,
            self.u_weights, self.v_weights)

    def time_wasserstein_distance(self, n_size):
        # Dead store removed (see above).
        stats.wasserstein_distance(
            self.u_values, self.v_values,
            self.u_weights, self.v_weights)
class Somersd(Benchmark):
    """Time ``stats.somersd`` on random ordinal data."""
    param_names = ['n_size']
    params = [
        [10, 100]
    ]

    def setup(self, n_size):
        rng = np.random.default_rng(12345678)
        self.x = rng.choice(n_size, size=n_size)
        self.y = rng.choice(n_size, size=n_size)

    def time_somersd(self, n_size):
        # Dead store removed: the result was previously bound to an unused
        # local (``res``).
        stats.somersd(self.x, self.y)
| 34.77102 | 98 | 0.609405 |
acf8059f4c43978b532556e084917d77f6241fc7 | 6,187 | py | Python | docarray/array/mixins/traverse.py | sugatoray/docarray | e62c1ad045ea7236912c702aebe87c3a25db110d | [
"Apache-2.0"
] | null | null | null | docarray/array/mixins/traverse.py | sugatoray/docarray | e62c1ad045ea7236912c702aebe87c3a25db110d | [
"Apache-2.0"
] | 1 | 2022-01-11T00:59:52.000Z | 2022-01-11T00:59:52.000Z | docarray/array/mixins/traverse.py | sugatoray/docarray | e62c1ad045ea7236912c702aebe87c3a25db110d | [
"Apache-2.0"
] | null | null | null | import itertools
import re
from typing import (
Iterable,
TYPE_CHECKING,
Optional,
Callable,
Tuple,
)
if TYPE_CHECKING:
from ... import DocumentArray, Document
from ...types import T
class TraverseMixin:
    """
    A mixin used for traversing :class:`DocumentArray`.
    """

    def traverse(
        self: 'T',
        traversal_paths: str,
        filter_fn: Optional[Callable[['Document'], bool]] = None,
    ) -> Iterable['T']:
        """
        Return an Iterator of :class:``TraversableSequence`` of the leaves when applying the traversal_paths.
        Each :class:``TraversableSequence`` is either the root Documents, a ChunkArray or a MatchArray.

        :param traversal_paths: a comma-separated string that represents the traversal path
        :param filter_fn: function to filter docs during traversal
        :yield: :class:``TraversableSequence`` of the leaves when applying the traversal_paths.

        Example on ``traversal_paths``:

            - `r`: docs in this TraversableSequence
            - `m`: all match-documents at adjacency 1
            - `c`: all child-documents at granularity 1
            - `cc`: all child-documents at granularity 2
            - `mm`: all match-documents at adjacency 2
            - `cm`: all match-document at adjacency 1 and granularity 1
            - `r,c`: docs in this TraversableSequence and all child-documents at granularity 1
        """
        # Each comma-separated segment is traversed independently from the root.
        for p in traversal_paths.split(','):
            yield from self._traverse(self, p, filter_fn=filter_fn)

    @staticmethod
    def _traverse(
        docs: 'T',
        path: str,
        filter_fn: Optional[Callable[['Document'], bool]] = None,
    ):
        # Recursive worker: consumes one ``r``/``c``/``m`` selector (plus an
        # optional slice) per call and recurses with the remaining path.
        path = re.sub(r'\s+', '', path)  # whitespace in paths is ignored
        if path:
            cur_loc, cur_slice, _left = _parse_path_string(path)
            if cur_loc == 'r':
                # 'r' selects (a slice of) the current docs themselves.
                yield from TraverseMixin._traverse(
                    docs[cur_slice], _left, filter_fn=filter_fn
                )
            elif cur_loc == 'm':
                # 'm' descends into each doc's matches.
                for d in docs:
                    yield from TraverseMixin._traverse(
                        d.matches[cur_slice], _left, filter_fn=filter_fn
                    )
            elif cur_loc == 'c':
                # 'c' descends into each doc's chunks.
                for d in docs:
                    yield from TraverseMixin._traverse(
                        d.chunks[cur_slice], _left, filter_fn=filter_fn
                    )
            else:
                raise ValueError(
                    f'`path`:{path} is invalid, please refer to https://docarray.jina.ai/fundamentals/documentarray/access-elements/#index-by-nested-structure'
                )
        elif filter_fn is None:
            # Path exhausted and no filter: yield the sequence as-is.
            yield docs
        else:
            # Path exhausted with a filter: materialize the filtered docs.
            from .. import DocumentArray

            yield DocumentArray(list(filter(filter_fn, docs)))

    def traverse_flat_per_path(
        self,
        traversal_paths: str,
        filter_fn: Optional[Callable[['Document'], bool]] = None,
    ):
        """
        Returns a flattened :class:``TraversableSequence`` per path in ``traversal_paths``
        with all Documents, that are reached by the path.

        :param traversal_paths: a comma-separated string that represents the traversal path
        :param filter_fn: function to filter docs during traversal
        :yield: :class:``TraversableSequence`` containing the document of all leaves per path.
        """
        for p in traversal_paths.split(','):
            yield self._flatten(self._traverse(self, p, filter_fn=filter_fn))

    def traverse_flat(
        self,
        traversal_paths: str,
        filter_fn: Optional[Callable[['Document'], bool]] = None,
    ) -> 'DocumentArray':
        """
        Returns a single flattened :class:``TraversableSequence`` with all Documents, that are reached
        via the ``traversal_paths``.

        .. warning::
            When defining the ``traversal_paths`` with multiple paths, the returned
            :class:``Documents`` are determined at once and not on the fly. This is a different
            behavior then in :method:``traverse`` and :method:``traverse_flattened_per_path``!

        :param traversal_paths: a list of string that represents the traversal path
        :param filter_fn: function to filter docs during traversal
        :return: a single :class:``TraversableSequence`` containing the document of all leaves when applying the traversal_paths.
        """
        # Fast path: the identity traversal with no filter needs no copy.
        if traversal_paths == 'r' and filter_fn is None:
            return self

        leaves = self.traverse(traversal_paths, filter_fn=filter_fn)
        return self._flatten(leaves)

    def flatten(self) -> 'DocumentArray':
        """Flatten all nested chunks and matches into one :class:`DocumentArray`.

        .. note::
            Flatten an already flattened DocumentArray will have no effect.

        :return: a flattened :class:`DocumentArray` object.
        """
        from .. import DocumentArray

        def _yield_all():
            for d in self:
                yield from _yield_nest(d)

        def _yield_nest(doc: 'Document'):
            # Depth-first emission; children/matches are yielded before the
            # parent, whose nested containers are then cleared in place.
            for d in doc.chunks:
                yield from _yield_nest(d)
            for m in doc.matches:
                yield from _yield_nest(m)
            doc.matches.clear()
            doc.chunks.clear()
            yield doc

        return DocumentArray(_yield_all())

    @staticmethod
    def _flatten(sequence) -> 'DocumentArray':
        # Concatenate an iterable of sequences into a single DocumentArray.
        from ... import DocumentArray

        return DocumentArray(list(itertools.chain.from_iterable(sequence)))
def _parse_path_string(p: str) -> Tuple[str, slice, str]:
g = re.match(r'^([rcm])([-\d:]+)?([rcm].*)?$', p)
_this = g.group(1)
slice_str = g.group(2)
_next = g.group(3)
return _this, _parse_slice(slice_str or ':'), _next or ''
def _parse_slice(value):
"""
Parses a `slice()` from string, like `start:stop:step`.
"""
if value:
parts = value.split(':')
if len(parts) == 1:
# slice(stop)
parts = [None, parts[0]]
# else: slice(start, stop[, step])
else:
# slice()
parts = []
return slice(*[int(p) if p else None for p in parts])
| 34.758427 | 159 | 0.592209 |
acf805e88fa491bc1c06076bee9d204bb7a8f930 | 397 | py | Python | book_store/book_store/asgi.py | goulartdev/udemy-django-course | f39ab36aacdf47d19f8c286c317cc54d8358dadc | [
"MIT"
] | null | null | null | book_store/book_store/asgi.py | goulartdev/udemy-django-course | f39ab36aacdf47d19f8c286c317cc54d8358dadc | [
"MIT"
] | null | null | null | book_store/book_store/asgi.py | goulartdev/udemy-django-course | f39ab36aacdf47d19f8c286c317cc54d8358dadc | [
"MIT"
] | null | null | null | """
ASGI config for book_store project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Select the settings module before building the application; ``setdefault``
# keeps any value already provided by the environment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'book_store.settings')

# Module-level ASGI callable that ASGI servers import and serve.
application = get_asgi_application()
| 23.352941 | 78 | 0.788413 |
acf8063cedbed0089972130387d3ebb868f04623 | 7,061 | py | Python | tests/func/test_api.py | codingsett/dvc | b8f2894bc083234532d4b0b7b2791c5eed667553 | [
"Apache-2.0"
] | null | null | null | tests/func/test_api.py | codingsett/dvc | b8f2894bc083234532d4b0b7b2791c5eed667553 | [
"Apache-2.0"
] | null | null | null | tests/func/test_api.py | codingsett/dvc | b8f2894bc083234532d4b0b7b2791c5eed667553 | [
"Apache-2.0"
] | null | null | null | import os
import pytest
from dvc import api
from dvc.exceptions import FileMissingError, OutputNotFoundError
from dvc.path_info import URLInfo
from dvc.utils.fs import remove
from tests.unit.tree.test_repo import make_subrepo
cloud_names = [
"s3",
"gs",
"azure",
"gdrive",
"oss",
"ssh",
"hdfs",
"http",
]
clouds = [pytest.lazy_fixture(cloud) for cloud in cloud_names]
all_clouds = [pytest.lazy_fixture("local_cloud")] + clouds
# `lazy_fixture` is confusing pylint, pylint: disable=unused-argument
@pytest.mark.parametrize("remote", clouds, indirect=True)
def test_get_url(tmp_dir, dvc, remote):
    # The URL of a tracked file is the remote prefix plus the content MD5
    # split as <2 chars>/<30 chars> (standard DVC cache layout);
    # md5("foo") == acbd18db4cc2f85cedef654fccc4a4d8.
    tmp_dir.dvc_gen("foo", "foo")

    expected_url = URLInfo(remote.url) / "ac/bd18db4cc2f85cedef654fccc4a4d8"
    assert api.get_url("foo") == expected_url
@pytest.mark.parametrize("cloud", clouds)
def test_get_url_external(erepo_dir, cloud):
    # Resolve the remote URL of a file tracked in an *external* repository.
    erepo_dir.add_remote(config=cloud.config)
    with erepo_dir.chdir():
        erepo_dir.dvc_gen("foo", "foo", commit="add foo")

    # Using file url to force clone to tmp repo
    repo_url = f"file://{erepo_dir}"
    expected_url = URLInfo(cloud.url) / "ac/bd18db4cc2f85cedef654fccc4a4d8"
    assert api.get_url("foo", repo=repo_url) == expected_url
def test_get_url_requires_dvc(tmp_dir, scm):
    """``get_url`` must raise OutputNotFoundError for a file that is only
    git-tracked, however the repo location is spelled."""
    tmp_dir.scm_gen({"foo": "foo"}, commit="initial")

    for repo in (os.fspath(tmp_dir), f"file://{tmp_dir}"):
        with pytest.raises(OutputNotFoundError, match="output 'foo'"):
            api.get_url("foo", repo=repo)
@pytest.mark.parametrize("remote", all_clouds, indirect=True)
def test_open(tmp_dir, dvc, remote):
    # ``api.open`` should stream the file from the remote once the local
    # cache is gone.
    tmp_dir.dvc_gen("foo", "foo-text")
    dvc.push()

    # Remove cache to force download
    remove(dvc.cache.local.cache_dir)

    with api.open("foo") as fd:
        assert fd.read() == "foo-text"
@pytest.mark.parametrize(
    "cloud",
    [
        pytest.lazy_fixture(cloud)
        for cloud in [
            "real_s3",  # NOTE: moto's s3 fails in some tests
            "gs",
            "azure",
            "gdrive",
            "oss",
            "ssh",
            "hdfs",
            "http",
        ]
    ],
)
def test_open_external(erepo_dir, cloud):
    # Open a file from an external repo at two different revisions; the
    # content must be fetched from the remote (cache is wiped first).
    erepo_dir.add_remote(config=cloud.config)

    with erepo_dir.chdir():
        erepo_dir.dvc_gen("version", "master", commit="add version")

        with erepo_dir.branch("branch", new="True"):
            # NOTE: need file to be other size for Mac
            erepo_dir.dvc_gen("version", "branchver", commit="add version")

    erepo_dir.dvc.push(all_branches=True)

    # Remove cache to force download
    remove(erepo_dir.dvc.cache.local.cache_dir)

    # Using file url to force clone to tmp repo
    repo_url = f"file://{erepo_dir}"
    with api.open("version", repo=repo_url) as fd:
        assert fd.read() == "master"
    assert api.read("version", repo=repo_url, rev="branch") == "branchver"
@pytest.mark.parametrize("remote", all_clouds, indirect=True)
def test_open_granular(tmp_dir, dvc, remote):
    # A single file inside a dvc-tracked *directory* can be opened directly.
    tmp_dir.dvc_gen({"dir": {"foo": "foo-text"}})
    dvc.push()

    # Remove cache to force download
    remove(dvc.cache.local.cache_dir)

    with api.open("dir/foo") as fd:
        assert fd.read() == "foo-text"
@pytest.mark.parametrize(
    "remote",
    [
        pytest.lazy_fixture(cloud)
        for cloud in [
            "real_s3",  # NOTE: moto's s3 fails in some tests
            "gs",
            "azure",
            "gdrive",
            "oss",
            "ssh",
            "hdfs",
            "http",
        ]
    ],
    indirect=True,
)
def test_missing(tmp_dir, dvc, remote):
    # Reading still works from the workspace copy when the cache is gone,
    # but once the workspace file is removed too the API must raise.
    tmp_dir.dvc_gen("foo", "foo")

    # Remove cache to make foo missing
    remove(dvc.cache.local.cache_dir)

    api.read("foo")

    remove("foo")

    with pytest.raises(FileMissingError):
        api.read("foo")
def test_open_scm_controlled(tmp_dir, erepo_dir):
    # ``api.open`` also serves plain git-tracked (non-dvc) files.
    erepo_dir.scm_gen({"scm_controlled": "file content"}, commit="create file")

    with api.open("scm_controlled", repo=os.fspath(erepo_dir)) as fd:
        assert fd.read() == "file content"
def test_open_not_cached(dvc):
    """A metric produced with ``metrics_no_cache`` can be read from the
    workspace; once the workspace file is deleted, reading it raises
    ``FileMissingError`` (there is no cached copy to fall back to)."""
    fname = "metric.txt"
    content = "0.6"
    script = f"open('{fname}', 'w').write('{content}')"

    dvc.run(
        single_stage=True,
        metrics_no_cache=[fname],
        cmd=f'python -c "{script}"',
    )

    with api.open(fname) as fobj:
        assert fobj.read() == content

    os.remove(fname)
    with pytest.raises(FileMissingError):
        api.read(fname)
@pytest.mark.parametrize("local_repo", [False, True])
def test_read_with_subrepos(tmp_dir, scm, local_cloud, local_repo):
    # Files inside a nested DVC subrepo are readable through the parent
    # repo, addressed either as the local cwd or a file:// URL.
    tmp_dir.scm_gen("foo.txt", "foo.txt", commit="add foo.txt")
    subrepo = tmp_dir / "dir" / "subrepo"
    make_subrepo(subrepo, scm, config=local_cloud.config)
    with subrepo.chdir():
        subrepo.scm_gen({"lorem": "lorem"}, commit="add lorem")
        subrepo.dvc_gen({"dir": {"file.txt": "file.txt"}}, commit="add dir")
        subrepo.dvc_gen("dvc-file", "dvc-file", commit="add dir")
        subrepo.dvc.push()

    repo_path = None if local_repo else f"file:///{tmp_dir}"
    subrepo_path = os.path.join("dir", "subrepo")

    assert api.read("foo.txt", repo=repo_path) == "foo.txt"
    assert (
        api.read(os.path.join(subrepo_path, "lorem"), repo=repo_path)
        == "lorem"
    )
    assert (
        api.read(os.path.join(subrepo_path, "dvc-file"), repo=repo_path)
        == "dvc-file"
    )
    assert (
        api.read(os.path.join(subrepo_path, "dir", "file.txt"), repo=repo_path)
        == "file.txt"
    )
def test_get_url_granular(tmp_dir, dvc, s3):
    """URLs resolve for a tracked directory as a whole and for individual
    (including nested) files inside it."""
    tmp_dir.add_remote(config=s3.config)
    tmp_dir.dvc_gen(
        {"dir": {"foo": "foo", "bar": "bar", "nested": {"file": "file"}}}
    )

    expected = {
        "dir": "5f/c28ea78987408341668eba6525ebd1.dir",
        "dir/foo": "ac/bd18db4cc2f85cedef654fccc4a4d8",
        "dir/bar": "37/b51d194a7513e45b56f6524f2d51f2",
        os.path.join("dir", "nested", "file"):
            "8c/7dd922ad47494fc02c388e12c00eac",
    }
    for target, cache_entry in expected.items():
        assert api.get_url(target) == URLInfo(s3.url) / cache_entry
def test_get_url_subrepos(tmp_dir, scm, local_cloud):
    # URLs for outputs inside a subrepo resolve against the *subrepo's*
    # configured remote.
    subrepo = tmp_dir / "subrepo"
    make_subrepo(subrepo, scm, config=local_cloud.config)
    with subrepo.chdir():
        subrepo.dvc_gen(
            {"dir": {"foo": "foo"}, "bar": "bar"}, commit="add files"
        )
        subrepo.dvc.push()

    path = os.path.relpath(local_cloud.config["url"])

    expected_url = os.path.join(path, "ac", "bd18db4cc2f85cedef654fccc4a4d8")
    assert api.get_url(os.path.join("subrepo", "dir", "foo")) == expected_url

    expected_url = os.path.join(path, "37", "b51d194a7513e45b56f6524f2d51f2")
    assert api.get_url("subrepo/bar") == expected_url
| 29.177686 | 79 | 0.636171 |
acf80798b3f54038f46aceba7440525ab1041856 | 524 | py | Python | cloud.py | bosstb/YGY60W | 7d8f1848c4c43ffad546ab2bec55084ba81e24fd | [
"MIT"
] | null | null | null | cloud.py | bosstb/YGY60W | 7d8f1848c4c43ffad546ab2bec55084ba81e24fd | [
"MIT"
] | null | null | null | cloud.py | bosstb/YGY60W | 7d8f1848c4c43ffad546ab2bec55084ba81e24fd | [
"MIT"
] | null | null | null | # coding: utf-8
from leancloud import Engine
from leancloud import LeanEngineError
import leancloud
from app import app
import requests
# LeanEngine adapter wrapping the Flask app imported from ``app``.
engine = Engine(app)


class AndroidId(leancloud.Object):
    # Mirrors the ``AndroidId`` class in LeanCloud object storage;
    # fields are defined dynamically by the backend.
    pass
@engine.define
def Hello(**params):
    # Cloud-function entry point (Python 2: ``print`` statement syntax).
    # Logs the incoming parameters for inspection.
    print params
    # NOTE(review): the missing-id message is returned unconditionally --
    # this looks like a stub; confirm the intended behavior.
    return 'androidId missing'
@engine.before_save('Todo')
def before_todo_save(todo):
    # LeanCloud hook: validate/normalize a Todo right before it is saved.
    content = todo.get('content')
    if not content:
        # Reject empty content (message text: "content must not be empty").
        raise LeanEngineError('内容不能为空')
    if len(content) >= 240:
        # Truncate overlong content and append an ellipsis.
        # NOTE(review): the suffix pushes the stored text past 240 chars;
        # confirm the backend limit allows this.
        todo.set('content', content[:240] + ' ...')
| 17.466667 | 51 | 0.692748 |
acf807b28d375121d16ddd3961d5b8ca87dcf6fa | 2,846 | py | Python | Algo_Ds_Notes-master/Algo_Ds_Notes-master/Expression_Tree/Expression_Tree.py | rajatenzyme/Coding-Journey- | 65a0570153b7e3393d78352e78fb2111223049f3 | [
"MIT"
] | null | null | null | Algo_Ds_Notes-master/Algo_Ds_Notes-master/Expression_Tree/Expression_Tree.py | rajatenzyme/Coding-Journey- | 65a0570153b7e3393d78352e78fb2111223049f3 | [
"MIT"
] | null | null | null | Algo_Ds_Notes-master/Algo_Ds_Notes-master/Expression_Tree/Expression_Tree.py | rajatenzyme/Coding-Journey- | 65a0570153b7e3393d78352e78fb2111223049f3 | [
"MIT"
] | null | null | null | '''
One other way to represent a mathematical equation is Expression Tree. It is a binary
tree in which every parent node corresponds to the operator and the leaf nodes correspond
to operands.
Expression Tree of a + b is :
+
/ \
a b
Preorder traversal of the expression tree gives the prefix form of the expression, and inorder
traversal yields the infix form.
'''
# stack class
class stack:
    """Minimal LIFO stack backed by a Python list.

    ``pop`` and ``top`` return ``None`` instead of raising when the stack
    is empty.
    """
    def __init__(self):
        self.arr = []

    def push(self, data):
        """Push ``data`` onto the top of the stack."""
        self.arr.append(data)

    def pop(self):
        """Remove and return the top item, or ``None`` if empty."""
        try:
            return self.arr.pop(-1)
        except IndexError:
            # Narrowed from a bare ``except``: only the empty-stack case
            # should be silenced, not arbitrary unrelated errors.
            return None

    def top(self):
        """Return the top item without removing it, or ``None`` if empty."""
        try:
            return self.arr[-1]
        except IndexError:
            return None

    def size(self):
        """Number of items currently stored."""
        return len(self.arr)
# node class for expression tree
class node:
    """Binary-tree node: ``data`` holds an operator or operand character."""
    def __init__(self, data):
        self.data = data
        self.left = None   # left child (node or None)
        self.right = None  # right child (node or None)
# expression tree class
class exp_tree:
    """Expression tree built from a postfix expression string.

    Internal nodes are operators; leaves are operands.  An inorder
    traversal prints the (parenthesis-free) infix form.
    """
    def __init__(self, postfix_exp):
        self.exp = postfix_exp
        self.root = None
        self.createTree(self.exp)

    def isOperator(self, char):
        optr = ['+', '-', '*', '/', '^']
        if char in optr:  # if given char is operator
            return True  # then return true
        return False  # else return false

    def createTree(self, exp):
        # Build the tree scanning the postfix string right-to-left: the
        # rightmost symbol is the root operator; each subsequent symbol
        # fills the first empty child slot (right before left) of the
        # topmost operator node on the stack.
        s = stack()  # store those operator node whose any child node is NULL
        self.root = node(exp[-1])
        # last character of postfix expression is always an operator
        s.push(self.root)
        # travel on rest of the postfix expression
        for i in "".join(reversed(exp[:-1])):
            curr_node = s.top()
            if not curr_node.right:  # if right node of current node is NULL
                temp = node(i)
                curr_node.right = temp
                if self.isOperator(i):
                    s.push(temp)
            else:  # if left node of current node is NULL
                temp = node(i)
                curr_node.left = temp
                # if no child node of current node is NULL
                s.pop()  # pop current from stack
                if self.isOperator(i):
                    s.push(temp)

    def inorder(self, head):  # inorder traversal of expression tree
        # inorder traversal => left,root,right
        if head.left:
            self.inorder(head.left)
        print(head.data, end=" ")
        if head.right:
            self.inorder(head.right)

    def infixExp(self):  # inorder traversal of expression tree give infix expression
        self.inorder(self.root)
        print()
if __name__ == "__main__":
    # Read a postfix expression from stdin, build the tree, print infix.
    postfixExp = input()
    et = exp_tree(postfixExp)
    et.infixExp()
'''
sample input : 395+2*+
sample output : 3 + 9 + 5 * 2
'''
'''
This code is contributed by raghav
https://github.com/raghav-dalmia
'''
| 27.104762 | 90 | 0.573085 |
acf8080aaca5fd36e7b4496bc89e8420b11e4e76 | 180 | py | Python | Section10_Facade/Practice/Verifier.py | enriqueescobar-askida/Kinito.Python | e4c5521e771c4de0ceaf81776a4a61f7de01edb4 | [
"MIT"
] | 1 | 2020-10-20T07:41:51.000Z | 2020-10-20T07:41:51.000Z | Section10_Facade/Practice/Verifier.py | enriqueescobar-askida/Kinito.Python | e4c5521e771c4de0ceaf81776a4a61f7de01edb4 | [
"MIT"
] | null | null | null | Section10_Facade/Practice/Verifier.py | enriqueescobar-askida/Kinito.Python | e4c5521e771c4de0ceaf81776a4a61f7de01edb4 | [
"MIT"
] | null | null | null | class Verifier:
def verify(self, arrays):
first = sum(arrays[0])
for i in range(1, len(arrays)):
if sum(arrays[i]) != first:
return False
return True
| 18 | 35 | 0.588889 |
acf8095ba388f3c035a73c513f873c5cc5e69819 | 1,853 | py | Python | examples/upload_battery_data_csv/main.py | ErikBjare/pyzenobase | eb0572c7441a350bf5578bc5287f3be53d32ea19 | [
"MIT"
] | 3 | 2016-03-28T21:05:05.000Z | 2017-09-13T17:33:08.000Z | examples/upload_battery_data_csv/main.py | ErikBjare/pyzenobase | eb0572c7441a350bf5578bc5287f3be53d32ea19 | [
"MIT"
] | null | null | null | examples/upload_battery_data_csv/main.py | ErikBjare/pyzenobase | eb0572c7441a350bf5578bc5287f3be53d32ea19 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import csv
import argparse
from datetime import datetime
import pyzenobase
# Defaults used when the caller supplies no bucket name/description.
BUCKET_NAME = "Battery - PyZenobase"
BUCKET_DESC = "Uploaded using PyZenobase (http://github.com/ErikBjare/PyZenobase)"
def upload_batterystats(filename, username, password, bucket_name=BUCKET_NAME, bucket_desc=BUCKET_DESC):
    """
    Works with CSVs generated by the Android app 'Battery Log' (https://play.google.com/store/apps/details?id=kr.hwangti.batterylog)
    """
    api = pyzenobase.ZenobaseAPI(username, password)
    bucket_id = api.create_or_get_bucket(bucket_name, description=bucket_desc)["@id"]

    # Read every CSV row into a dict keyed by the header columns.
    with open(filename, newline="") as csvfile:
        rows = csv.reader(csvfile)
        columns = next(rows)
        events = [
            {columns[i]: row[i] for i in range(len(columns))} for row in rows
        ]
    print("Read {} events".format(len(events)))

    # Reshape each raw CSV record into the Zenobase event schema.
    for event in events:
        parsed = datetime.strptime(event.pop("datetime"), "%Y-%m-%d %H:%M:%S")
        event["timestamp"] = pyzenobase.fmt_datetime(parsed, timezone="Europe/Stockholm")
        event["tag"] = event.pop("status")
        event["percentage"] = float(event.pop("level"))
        event["temperature"] = {"@value": float(event.pop("temperature")), "unit": "C"}
        event.pop("voltage")

    print("Checking that events are valid...")
    events = [pyzenobase.ZenobaseEvent(event) for event in events]

    print("Uploading...")
    api.create_events(bucket_id, events)
    api.close()
    print("Done!")
if __name__ == "__main__":
    # CLI: upload the given Battery Log CSV with the supplied credentials.
    parser = argparse.ArgumentParser(description='Upload batterystats')
    parser.add_argument("filename")
    parser.add_argument("username")
    parser.add_argument("password")
    args = parser.parse_args()
    upload_batterystats(args.filename, args.username, args.password)
| 34.962264 | 144 | 0.67728 |
acf809e70f4af7ba2b26aae0719b344051371445 | 3,512 | py | Python | tests/spark/functions/test_clean_names_spark.py | Clayton-Springer/pyjanitor | 602a7e9bf573b46e4b43b66d9855042585c93ec9 | [
"MIT"
] | null | null | null | tests/spark/functions/test_clean_names_spark.py | Clayton-Springer/pyjanitor | 602a7e9bf573b46e4b43b66d9855042585c93ec9 | [
"MIT"
] | null | null | null | tests/spark/functions/test_clean_names_spark.py | Clayton-Springer/pyjanitor | 602a7e9bf573b46e4b43b66d9855042585c93ec9 | [
"MIT"
] | null | null | null | import pytest
from janitor.errors import JanitorError
try:
    # Spark and the janitor Spark bindings are optional test dependencies;
    # ``pyspark`` stays ``None`` when absent so the tests can be skipped.
    import pyspark
    from pyspark.sql import SparkSession
    import janitor.spark
except ImportError:
    pyspark = None
@pytest.mark.spark_functions
@pytest.mark.skipif(
    pyspark is None, reason="pyspark tests only required for CI"
)
def test_clean_names_method_chain(spark_df):
    # Default clean_names: lower-cased, underscored; special chars kept.
    spark_df = spark_df.clean_names()
    expected_columns = [
        "a",
        "bell_chart",
        "decorated_elephant",
        "animals@#$%^",
        "cities",
    ]
    assert set(spark_df.columns) == set(expected_columns)
@pytest.mark.spark_functions
@pytest.mark.skipif(
    pyspark is None, reason="pyspark tests only required for CI"
)
def test_clean_names_special_characters(spark_df):
    # ``remove_special`` strips the non-alphanumeric characters as well.
    spark_df = spark_df.clean_names(remove_special=True)
    expected_columns = [
        "a",
        "bell_chart",
        "decorated_elephant",
        "animals",
        "cities",
    ]
    assert set(spark_df.columns) == set(expected_columns)
@pytest.mark.spark_functions
@pytest.mark.skipif(
    pyspark is None, reason="pyspark tests only required for CI"
)
def test_clean_names_case_type_uppercase(spark_df):
    # ``case_type="upper"`` upper-cases every column name.
    spark_df = spark_df.clean_names(case_type="upper")
    expected_columns = [
        "A",
        "BELL_CHART",
        "DECORATED_ELEPHANT",
        "ANIMALS@#$%^",
        "CITIES",
    ]
    assert set(spark_df.columns) == set(expected_columns)
@pytest.mark.spark_functions
@pytest.mark.skipif(
    pyspark is None, reason="pyspark tests only required for CI"
)
def test_clean_names_case_type_preserve(spark_df):
    # ``case_type="preserve"`` keeps the original casing intact.
    spark_df = spark_df.clean_names(case_type="preserve")
    expected_columns = [
        "a",
        "Bell_Chart",
        "decorated_elephant",
        "animals@#$%^",
        "cities",
    ]
    assert set(spark_df.columns) == set(expected_columns)
@pytest.mark.spark_functions
@pytest.mark.skipif(
    pyspark is None, reason="pyspark tests only required for CI"
)
def test_clean_names_case_type_invalid(spark_df):
    # An unsupported ``case_type`` must be rejected with a JanitorError.
    with pytest.raises(JanitorError, match=r"case_type must be one of:"):
        spark_df = spark_df.clean_names(case_type="foo")
@pytest.mark.spark_functions
@pytest.mark.skipif(
    pyspark is None, reason="pyspark tests only required for CI"
)
def test_clean_names_camelcase_to_snake(spark_df):
    # CamelCase headers are converted to snake_case.
    spark_df = spark_df.selectExpr("a AS AColumnName").clean_names(
        case_type="snake"
    )
    assert list(spark_df.columns) == ["a_column_name"]
@pytest.mark.spark_functions
@pytest.mark.skipif(
    pyspark is None, reason="pyspark tests only required for CI"
)
@pytest.mark.parametrize(
    "strip_underscores", ["both", True, "right", "r", "left", "l"]
)
def test_clean_names_strip_underscores(spark_df, strip_underscores):
    """Pad column names with underscores on the requested side(s), then
    verify ``clean_names(strip_underscores=...)`` removes them again."""
    if strip_underscores in ["right", "r"]:
        rename = "`{0}` AS `{0}_`"
    elif strip_underscores in ["left", "l"]:
        rename = "`{0}` AS `_{0}`"
    elif strip_underscores in ["both", True]:
        rename = "`{0}` AS `_{0}_`"
    else:
        rename = None
    if rename is not None:
        spark_df = spark_df.selectExpr(
            *[rename.format(col) for col in spark_df.columns]
        )

    spark_df = spark_df.clean_names(strip_underscores=strip_underscores)

    expected_columns = [
        "a",
        "bell_chart",
        "decorated_elephant",
        "animals@#$%^",
        "cities",
    ]
    assert set(spark_df.columns) == set(expected_columns)
| 27.015385 | 73 | 0.665148 |
acf80a3b0687bf917eb81f6b77f129433231e448 | 10,489 | py | Python | algorithms/Probablistics Planners/rrt_family_algorithms.py | karanchawla/motion-planning-playground | a14d397ff88120bd8534a25376cfa07032bb03fc | [
"MIT"
] | 10 | 2018-06-30T20:00:10.000Z | 2021-12-21T18:53:34.000Z | algorithms/Probablistics Planners/rrt_family_algorithms.py | karanchawla/motion-planning-playground | a14d397ff88120bd8534a25376cfa07032bb03fc | [
"MIT"
] | null | null | null | algorithms/Probablistics Planners/rrt_family_algorithms.py | karanchawla/motion-planning-playground | a14d397ff88120bd8534a25376cfa07032bb03fc | [
"MIT"
] | 7 | 2018-07-11T03:08:44.000Z | 2021-03-14T22:03:41.000Z | import random
import numpy as np
import math
import copy
import matplotlib.pyplot as plt
show_animation = True
class RRTFamilyPlanners():
    """Sampling-based planners over a 2D plane: RRT, RRT* and Informed RRT*.

    Obstacles are circles given as (x, y, radius) tuples; tree vertices are
    instances of the module-level ``Node`` class.  Every planner returns the
    path as a list of [x, y] points ordered from goal to start.
    """

    def __init__(self, start, goal, obstacleList, randArea, expandDis=0.5, goalSampleRate=10, maxIter=200):
        """Store the planning problem.

        Args:
            start: [x, y] start position.
            goal: [x, y] goal position.
            obstacleList: circular obstacles as (x, y, radius) tuples.
            randArea: [min, max] sampling interval used for both axes.
            expandDis: step length used when extending the tree.
            goalSampleRate: percent chance of sampling the goal directly.
            maxIter: iteration budget for RRT* / Informed RRT*.
        """
        self.start = Node(start[0], start[1])
        self.goal = Node(goal[0], goal[1])
        self.minrand = randArea[0]
        self.maxrand = randArea[1]
        self.expandDis = expandDis
        self.goalSampleRate = goalSampleRate
        self.maxIter = maxIter
        self.obstacleList = obstacleList

    ##################################################################################

    def RRTSearch(self, animation=True):
        """Run vanilla RRT until a node lands within one step of the goal."""
        self.nodeList = [self.start]
        while True:
            # Get a random point in free space (goal-biased).
            rnd = self.sampleFreeSpace()
            # Find the closest node in the tree and steer toward the sample.
            nind = self.getNearestListIndex(self.nodeList, rnd)
            nearestNode = self.nodeList[nind]
            theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
            newNode = self.getNewNode(theta, nind, nearestNode)
            # Discard extensions that land inside an obstacle.
            if not self.__CollisionCheck(newNode, self.obstacleList):
                continue
            self.nodeList.append(newNode)
            # Stop once the freshly added node is within reach of the goal.
            if self.isNearGoal(newNode):
                break
            if animation:
                self.drawGraph(rnd)
        lastIndex = len(self.nodeList) - 1
        return self.getFinalCourse(lastIndex)

    def sampleFreeSpace(self):
        """Random [x, y] sample; returns the goal goalSampleRate% of the time."""
        if random.randint(0, 100) > self.goalSampleRate:
            return [random.uniform(self.minrand, self.maxrand),
                    random.uniform(self.minrand, self.maxrand)]
        return [self.goal.x, self.goal.y]

    def getNearestListIndex(self, nodes, rnd):
        """Index of the node nearest to rnd (squared distances suffice)."""
        dList = [(node.x - rnd[0]) ** 2 +
                 (node.y - rnd[1]) ** 2 for node in nodes]
        return dList.index(min(dList))

    def getNewNode(self, theta, nind, nearestNode):
        """New node one expandDis step from nearestNode along heading theta."""
        newNode = copy.deepcopy(nearestNode)
        newNode.x += self.expandDis * math.cos(theta)
        newNode.y += self.expandDis * math.sin(theta)
        newNode.cost += self.expandDis
        newNode.parent = nind
        return newNode

    def __CollisionCheck(self, newNode, obstacleList):
        """True when newNode lies outside every (slightly inflated) obstacle."""
        for (ox, oy, size) in obstacleList:
            dx = ox - newNode.x
            dy = oy - newNode.y
            # Compare squared distances; the 1.1 factor inflates obstacles
            # for a small safety margin.
            if dx * dx + dy * dy <= 1.1 * size ** 2:
                return False  # collision
        return True  # safe

    def isNearGoal(self, node):
        """True when node is within one extension step of the goal."""
        return self.lineCost(node, self.goal) < self.expandDis

    ##################################################################################

    def RRTStarSearch(self, animation=True):
        """Run RRT*: RRT plus choose-parent and rewiring for lower-cost paths."""
        self.nodeList = [self.start]
        iteration = 1  # renamed from `iter` to avoid shadowing the builtin
        while True:
            rnd = self.sampleFreeSpace()
            nind = self.getNearestListIndex(self.nodeList, rnd)
            nearestNode = self.nodeList[nind]
            # steer
            theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
            newNode = self.getNewNode(theta, nind, nearestNode)
            if self.__CollisionCheck(newNode, self.obstacleList):
                nearinds = self.findNearNodes(newNode)
                newNode = self.chooseParent(newNode, nearinds)
                self.nodeList.append(newNode)
                self.rewire(newNode, nearinds)
                # Fix: only a node that was actually added to the tree may
                # terminate the search (the original also broke on colliding
                # nodes that were never appended).
                if self.isNearGoal(newNode):
                    break
            iteration += 1
            if iteration == self.maxIter:
                break
            if animation:
                self.drawGraph(rnd)
        lastIndex = len(self.nodeList) - 1
        return self.getFinalCourse(lastIndex)

    def rewire(self, newNode, nearInds):
        """Re-parent nearby nodes through newNode when that lowers their cost."""
        nnode = len(self.nodeList)
        for i in nearInds:
            nearNode = self.nodeList[i]
            d = math.sqrt((nearNode.x - newNode.x) ** 2 +
                          (nearNode.y - newNode.y) ** 2)
            scost = newNode.cost + d
            if nearNode.cost > scost:
                theta = math.atan2(newNode.y - nearNode.y,
                                   newNode.x - nearNode.x)
                if self.check_collision_extend(nearNode, theta, d):
                    nearNode.parent = nnode - 1  # newNode was appended last
                    nearNode.cost = scost

    def check_collision_extend(self, nearNode, theta, d):
        """Walk distance d from nearNode in expandDis steps; True if clear."""
        tmpNode = copy.deepcopy(nearNode)
        for _ in range(int(d / self.expandDis)):
            tmpNode.x += self.expandDis * math.cos(theta)
            tmpNode.y += self.expandDis * math.sin(theta)
            if not self.__CollisionCheck(tmpNode, self.obstacleList):
                return False
        return True

    def findNearNodes(self, newNode):
        """Indices of nodes inside the shrinking RRT* neighbourhood radius."""
        nnode = len(self.nodeList)
        r = 50.0 * math.sqrt(math.log(nnode) / nnode)
        dlist = [(node.x - newNode.x) ** 2 +
                 (node.y - newNode.y) ** 2 for node in self.nodeList]
        return [dlist.index(i) for i in dlist if i <= r ** 2]

    def chooseParent(self, newNode, nearInds):
        """Attach newNode to the neighbour that minimises its cost-to-come."""
        if len(nearInds) == 0:
            return newNode
        dList = []
        for i in nearInds:
            dx = newNode.x - self.nodeList[i].x
            dy = newNode.y - self.nodeList[i].y
            d = math.sqrt(dx ** 2 + dy ** 2)
            theta = math.atan2(dy, dx)
            if self.check_collision_extend(self.nodeList[i], theta, d):
                dList.append(self.nodeList[i].cost + d)
            else:
                dList.append(float('inf'))
        minCost = min(dList)
        minInd = nearInds[dList.index(minCost)]
        if minCost == float('inf'):
            # Every candidate connection collides; keep the current parent.
            print("mincost is inf")
            return newNode
        newNode.cost = minCost
        newNode.parent = minInd
        return newNode

    def getFinalCourse(self, lastIndex):
        """Walk parent pointers back to the root; path runs goal -> start."""
        path = [[self.goal.x, self.goal.y]]
        while self.nodeList[lastIndex].parent is not None:
            node = self.nodeList[lastIndex]
            path.append([node.x, node.y])
            lastIndex = node.parent
        path.append([self.start.x, self.start.y])
        return path

    def getBestLastIndex(self):
        """Index of the cheapest node within one step of the goal, or None."""
        disgList = [self.calcDistToGoal(node.x, node.y)
                    for node in self.nodeList]
        goalInds = [disgList.index(i) for i in disgList if i <= self.expandDis]
        if len(goalInds) == 0:
            return None
        minCost = min([self.nodeList[i].cost for i in goalInds])
        for i in goalInds:
            if self.nodeList[i].cost == minCost:
                return i
        return None

    def calcDistToGoal(self, x, y):
        """Euclidean distance from (x, y) to the goal."""
        return np.linalg.norm([x - self.goal.x, y - self.goal.y])

    ##################################################################################

    def InformedRRTStarSearch(self, animation=True):
        """Run Informed RRT*: once a solution exists, restrict sampling to the
        ellipse that could contain a shorter path."""
        self.nodeList = [self.start]
        # Best path length found so far; bounds the informed sampling ellipse.
        cBest = float('inf')
        pathLen = float('inf')
        solutionSet = set()
        path = None
        # Ellipse frame: cMin is the start-goal distance (focal distance),
        # xCenter the midpoint, and C the rotation to the world frame.
        cMin = math.sqrt(pow(self.start.x - self.goal.x, 2) + pow(self.start.y - self.goal.y, 2))
        xCenter = np.matrix([[(self.start.x + self.goal.x) / 2.0], [(self.start.y + self.goal.y) / 2.0], [0]])
        a1 = np.matrix([[(self.goal.x - self.start.x) / cMin], [(self.goal.y - self.start.y) / cMin], [0]])
        id1_t = np.matrix([1.0, 0.0, 0.0])  # first column of identity matrix transposed
        M = np.dot(a1, id1_t)
        U, S, Vh = np.linalg.svd(M, 1, 1)
        C = np.dot(np.dot(U, np.diag([1.0, 1.0, np.linalg.det(U) * np.linalg.det(np.transpose(Vh))])), Vh)

        for i in range(self.maxIter):
            # Sample space is defined by cBest: while no path exists we
            # sample the whole free space, afterwards only the ellipse.
            rnd = self.sample(cBest, cMin, xCenter, C)
            nind = self.getNearestListIndex(self.nodeList, rnd)
            nearestNode = self.nodeList[nind]
            # steer
            theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
            newNode = self.getNewNode(theta, nind, nearestNode)
            d = self.lineCost(nearestNode, newNode)
            if self.__CollisionCheck(newNode, self.obstacleList) and self.check_collision_extend(nearestNode, theta, d):
                nearInds = self.findNearNodes(newNode)
                newNode = self.chooseParent(newNode, nearInds)
                self.nodeList.append(newNode)
                self.rewire(newNode, nearInds)
                if self.isNearGoal(newNode):
                    solutionSet.add(newNode)
                    lastIndex = len(self.nodeList) - 1
                    tempPath = self.getFinalCourse(lastIndex)
                    tempPathLen = self.getPathLen(tempPath)
                    if tempPathLen < pathLen:
                        path = tempPath
                        # Fix: keep pathLen in sync (the original never
                        # updated it, so a later *longer* path could replace
                        # the best one and even grow cBest).
                        pathLen = tempPathLen
                        cBest = tempPathLen
            if animation:
                self.drawGraph(rnd)
        return path

    def sample(self, cMax, cMin, xCenter, C):
        """Sample the informed ellipse once cMax bounds the solution cost."""
        if cMax < float('inf'):
            # Semi-major radius cMax/2 along the start-goal axis; conjugate
            # radii sqrt(cMax^2 - cMin^2)/2 on the remaining axes.
            r = [cMax / 2.0, math.sqrt(cMax ** 2 - cMin ** 2) / 2.0,
                 math.sqrt(cMax ** 2 - cMin ** 2) / 2.0]
            L = np.diag(r)
            xBall = self.sampleUnitBall()
            rnd = np.dot(np.dot(C, L), xBall) + xCenter
            rnd = [rnd[(0, 0)], rnd[(1, 0)]]
        else:
            rnd = self.sampleFreeSpace()
        return rnd

    def sampleUnitBall(self):
        """Uniform sample from the unit disc, returned as a 3x1 array."""
        a = random.random()
        b = random.random()
        if b < a:
            a, b = b, a
        sample = (b * math.cos(2 * math.pi * a / b),
                  b * math.sin(2 * math.pi * a / b))
        return np.array([[sample[0]], [sample[1]], [0]])

    def getPathLen(self, path):
        """Total Euclidean length of a piecewise-linear path."""
        pathLen = 0
        for i in range(1, len(path)):
            node1_x = path[i][0]
            node1_y = path[i][1]
            node2_x = path[i - 1][0]
            node2_y = path[i - 1][1]
            pathLen += math.sqrt((node1_x - node2_x) ** 2 + (node1_y - node2_y) ** 2)
        return pathLen

    def lineCost(self, node1, node2):
        """Euclidean distance between two nodes."""
        return math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2.y) ** 2)

    ##################################################################################

    def drawGraph(self, rnd=None):
        """Redraw the tree, obstacles, endpoints and the latest sample."""
        plt.clf()
        if rnd is not None:
            plt.plot(rnd[0], rnd[1], "^k")
        for node in self.nodeList:
            # Fix: the original `node.x or node.y is not None` grouped as
            # `(node.x) or (node.y is not None)`, which is not the intended
            # "both coordinates present" check.
            if node.parent is not None and node.x is not None and node.y is not None:
                plt.plot([node.x, self.nodeList[node.parent].x],
                         [node.y, self.nodeList[node.parent].y], "-g")
        for (ox, oy, size) in self.obstacleList:
            plt.plot(ox, oy, "ok", ms=30 * size)
        plt.plot(self.start.x, self.start.y, "xr")
        plt.plot(self.goal.x, self.goal.y, "xr")
        plt.axis([-2, 15, -2, 15])
        plt.grid(True)
        plt.pause(0.01)
class Node():
    """One tree vertex: planar position, cost-to-come, and parent index."""

    def __init__(self, x, y):
        # Position in the plane.
        self.x = x
        self.y = y
        # Accumulated path cost from the start node; planners update it.
        self.cost = 0.0
        # Index of the parent in the planner's node list (None for the root).
        self.parent = None
def main():
    """Demo entry point: plan through a fixed obstacle course and plot it."""
    print("Start rrt planning")
    # ====Search Path with RRT====
    obstacleList = [
        (5, 5, 0.5),
        (9, 6, 1),
        (7, 5, 3),
        (1, 5, 1),
        (2, 2, 1),
        (7, 9, 1)
    ] # [x,y,size(radius)]
    # Set Initial parameters
    rrt = RRTFamilyPlanners(start = [0, 0], goal = [5, 10],
                            randArea = [-2, 15], obstacleList = obstacleList)
    # NOTE(review): despite the banner above, this demo runs RRT*, not RRT.
    path = rrt.RRTStarSearch(animation = show_animation)
    # Draw final path
    if show_animation:
        rrt.drawGraph()
        plt.plot([x for (x, y) in path], [y for (x, y) in path], '-r')
        plt.grid(True)
        plt.pause(0.01) # Need for Mac
        plt.show()
if __name__ == '__main__':
main() | 28.425474 | 111 | 0.62637 |
acf80a5155d54dc1c334022001dd158a5d4cb766 | 1,713 | py | Python | stock_spider/tools/file_download.py | gegaofeng/stock | 33307ed4f2eec3f836baaee66e59e3636d22f2c4 | [
"MIT"
] | null | null | null | stock_spider/tools/file_download.py | gegaofeng/stock | 33307ed4f2eec3f836baaee66e59e3636d22f2c4 | [
"MIT"
] | 3 | 2021-03-10T21:40:54.000Z | 2022-02-27T06:19:34.000Z | stock_spider/tools/file_download.py | gegaofeng/stock | 33307ed4f2eec3f836baaee66e59e3636d22f2c4 | [
"MIT"
] | null | null | null | import requests
import os
import time
from urllib.parse import unquote
# Browser-like request headers for downloads.  NOTE(review): Host is pinned
# to the Office CDN this script was originally written against; verify it
# matches the URLs actually requested.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'officecdn-microsoft-com.akamaized.net',
    'Pragma': 'no-cache',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'none',
    'Sec-Fetch-User': '?1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
}
# 获取文件名
def get_file_name(url, headers):
    """Derive a download file name.

    Preference order: the Content-Disposition response header, then the last
    path segment of the URL (query string stripped), then a timestamp.

    Args:
        url: the request URL.
        headers: response header mapping.

    Returns:
        str: the resolved file name.
    """
    filename = ''
    if 'Content-Disposition' in headers and headers['Content-Disposition']:
        disposition_split = headers['Content-Disposition'].split(';')
        if len(disposition_split) > 1:
            if disposition_split[1].strip().lower().startswith('filename='):
                file_name = disposition_split[1].split('=')
                if len(file_name) > 1:
                    # Header values are percent-encoded; decode them.
                    filename = unquote(file_name[1])
    if not filename and os.path.basename(url):
        # basename keeps any query string ("a.txt?x=1"); drop it.
        filename = os.path.basename(url).split("?")[0]
    if not filename:
        # Fix: return a string.  The original returned the raw float from
        # time.time(), which broke path concatenation in file_download.
        return str(time.time())
    return filename
# 下载文件
def file_download(url, filename, savepath):
    """Download `url` and write the whole response body to savepath + filename.

    NOTE(review): savepath is concatenated directly, so it must already end
    with a path separator; verify against callers.
    """
    print("downloading....")
    # url = 'https://money.finance.sina.com.cn/corp/go.php/vDOWN_ProfitStatement/displaytype/4/stockid/' + number + '/ctrl/all.phtml'
    f = requests.get(url)
    with open(savepath + filename, "wb") as code:
        code.write(f.content)
    print('下载完成')
| 34.959184 | 135 | 0.637478 |
acf80a928cfc244c2268ce8f6f6e924bc30d44f1 | 378 | py | Python | e_learning/permissions.py | Mohamed-Kaizen/django_playground | 0ca623041857f15c06bd2f9c8edcd2b54b4c4897 | [
"MIT"
] | null | null | null | e_learning/permissions.py | Mohamed-Kaizen/django_playground | 0ca623041857f15c06bd2f9c8edcd2b54b4c4897 | [
"MIT"
] | 3 | 2021-06-08T21:15:15.000Z | 2022-03-12T00:35:49.000Z | e_learning/permissions.py | Mohamed-Kaizen/django_playground | 0ca623041857f15c06bd2f9c8edcd2b54b4c4897 | [
"MIT"
] | null | null | null | from typing import Any
from rest_framework import permissions
from rest_framework.request import Request
class IsOwnerOrReadOnly(permissions.BasePermission):
    """Allow reads to anyone; allow writes only to the object's author."""

    def has_object_permission(self, request: Request, view: Any, obj: Any) -> bool:
        # Safe (read-only) HTTP methods are always permitted.
        is_read_only = request.method in permissions.SAFE_METHODS
        # Otherwise the requester must be the author (matched by user_uuid).
        return True if is_read_only else obj.author == request.user.user_uuid
| 27 | 83 | 0.748677 |
acf80aba50eddf5862ca1b01067f48a043a10eb9 | 1,873 | py | Python | opacus/validators/multihead_attention.py | iamgroot42/opacus | 51708309e71c030aa2bf15d6dccc7bcbbe9ed570 | [
"Apache-2.0"
] | 195 | 2019-12-11T23:55:47.000Z | 2020-08-27T04:17:29.000Z | opacus/validators/multihead_attention.py | iamgroot42/opacus | 51708309e71c030aa2bf15d6dccc7bcbbe9ed570 | [
"Apache-2.0"
] | 35 | 2020-01-21T11:04:29.000Z | 2020-08-27T05:30:57.000Z | opacus/validators/multihead_attention.py | iamgroot42/opacus | 51708309e71c030aa2bf15d6dccc7bcbbe9ed570 | [
"Apache-2.0"
] | 39 | 2020-01-04T20:05:20.000Z | 2020-08-25T23:09:38.000Z | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import torch.nn as nn
from opacus.layers import DPMultiheadAttention
from .errors import ShouldReplaceModuleError, UnsupportedModuleError
from .utils import register_module_fixer, register_module_validator
@register_module_validator(nn.MultiheadAttention)
def validate(module: nn.MultiheadAttention) -> List[UnsupportedModuleError]:
    """Always reject nn.MultiheadAttention, pointing at the DP replacement."""
    return [
        ShouldReplaceModuleError(
            "We do not support nn.MultiheadAttention because its implementation uses special "
            "modules. We have written a DPMultiheadAttention class that is a drop-in replacement "
            "which is compatible with our Grad Sample hooks. Please run the recommended "
            "replacement!"
        )
    ]
@register_module_fixer(nn.MultiheadAttention)
def fix(module: nn.MultiheadAttention) -> DPMultiheadAttention:
    """Build a DPMultiheadAttention mirroring `module`'s configuration."""
    dp_attn = DPMultiheadAttention(
        embed_dim=module.embed_dim,
        num_heads=module.num_heads,
        dropout=module.dropout,
        # bias/add_bias_kv are not stored as flags on the source module;
        # infer them from the presence of the corresponding parameters.
        bias=module.in_proj_bias is not None,
        add_bias_kv=module.bias_k is not None,
        add_zero_attn=module.add_zero_attn,
        kdim=module.kdim,
        vdim=module.vdim,
    )
    # Carry over the trained parameters to the replacement module.
    dp_attn.load_state_dict(module.state_dict())
    return dp_attn
| 36.72549 | 98 | 0.739455 |
acf80ac7ac7a7c442d6bf80c6d6d5ce09deb661e | 2,175 | py | Python | newsletter/models.py | rokj/django_newsletter | 67e94e54512d3bbc24d56c9f3e1168c989b70de2 | [
"MIT"
] | 2 | 2016-11-06T23:56:41.000Z | 2019-04-15T10:42:59.000Z | newsletter/models.py | rokj/django_newsletter | 67e94e54512d3bbc24d56c9f3e1168c989b70de2 | [
"MIT"
] | null | null | null | newsletter/models.py | rokj/django_newsletter | 67e94e54512d3bbc24d56c9f3e1168c989b70de2 | [
"MIT"
] | null | null | null | import string
from random import choice
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.db import transaction
def get_random_string(length=8, chars=string.ascii_letters + string.digits):
    """Return a random string of `length` characters drawn from `chars`.

    Fix: use string.ascii_letters and range() instead of the Python-2-only
    string.letters and xrange; both replacements also work on Python 2,
    keeping the rest of this legacy module unaffected.
    """
    return ''.join([choice(chars) for _ in range(length)])
class NewsletterSubscription(models.Model):
    """A newsletter recipient: e-mail, opt-in flag and an unsubscribe token."""

    email = models.EmailField(_('Email'), blank=False, null=False, unique=True, db_index=True)
    # Random 15-character token embedded in unsubscribe links.
    token = models.CharField(_('Token for unsubscription'), max_length=15, blank=True, null=True, unique=True, db_index=True)
    subscribed = models.BooleanField(_('Subscribed or not'), blank=False, null=False)

    def __unicode__(self):
        return u'%s %s' % (self.email, self.subscribed)

    @transaction.autocommit
    def set_token(self):
        """Assign a unique random token if this row does not have one yet.

        Tries up to 100 random candidates; a candidate is accepted (and the
        row re-saved) only when no other subscription already uses it.
        """
        if self.token is None or self.token == '':
            token = None
            i = 0
            while token is None:
                if i == 100:
                    raise Exception("Too many times (100) generated random strings. WTF?")
                token = get_random_string(length=15)
                try:
                    # Collision with an existing token: discard and retry.
                    newsletter_subscriptions = NewsletterSubscription.objects.get(token=token)
                    token = None
                except NewsletterSubscription.DoesNotExist:
                    self.token = token
                    self.save()
                i += 1

    def save(self, *args, **kwargs):
        # Persist first, then make sure a token exists for this row.
        super(NewsletterSubscription, self).save(*args, **kwargs)
        self.set_token()
class Newsletter(models.Model):
    """One outgoing newsletter issue in plain-text and HTML form."""

    title = models.CharField(_('Title of the newsletter'), max_length=255, blank=False, null=False)
    txt = models.TextField(_('Text version of email'), blank=True, null=True)
    html = models.TextField(_('HTML version of email'), blank=True, null=True)
    # Filled in by the sending machinery; hidden from model forms/admin.
    sent_to = models.TextField(_('Emails sent to'), help_text=_('Emails that have received newsletter'), blank=True, null=True, editable=False)
    datetime_sent = models.DateTimeField(_('Datetime mail sent'), blank=True, null=True, editable=False)

    __unicode__ = lambda self: u'%s %s' % (self.title, self.datetime_sent)
| 42.647059 | 143 | 0.656552 |
acf80f1fb25644f88ec626aba4649014ffc1feb3 | 2,114 | py | Python | lzc/wavelets.py | joker-xii/plant-potential | 4a3e5f2b4755456f058dfc4c235231a14ffbc169 | [
"MIT"
] | null | null | null | lzc/wavelets.py | joker-xii/plant-potential | 4a3e5f2b4755456f058dfc4c235231a14ffbc169 | [
"MIT"
] | null | null | null | lzc/wavelets.py | joker-xii/plant-potential | 4a3e5f2b4755456f058dfc4c235231a14ffbc169 | [
"MIT"
] | null | null | null | import pywt
import pandas as pd
import math
import numpy as np
import matplotlib.pyplot as plt
from lzc.config import *
def read_data(raw, length=None, max_len=None):
    """Read the first column of a CSV, crop it, and split it into chunks.

    Args:
        raw: path to a CSV file (with header row); only column 0 is used.
        length: chunk size; defaults to lzc.config.SPLIT_SIZE.  Resolved
            lazily so the config is read at call time, not at import time.
        max_len: max number of samples kept; defaults to lzc.config.MAX_LENGTH.

    Returns:
        (chunks, data): a list of equal-length numpy arrays and the cropped
        1-D array they were split from.
    """
    if length is None:
        length = SPLIT_SIZE
    if max_len is None:
        max_len = MAX_LENGTH
    raw_data = pd.read_csv(raw).iloc[:, 0].values[:max_len]
    # Crop to the largest multiple of `length` so every chunk is full.
    sure_value = (len(raw_data) // length) * length
    raw_data = raw_data[:sure_value]
    # Fix: pass an integer section count.  The original used true division,
    # handing a float to np.array_split, which newer numpy versions reject.
    dds = np.array_split(raw_data, len(raw_data) // length)
    return dds, raw_data
def plot(y,title =""):
plt.title(title)
x = np.linspace(0, len(y) - 1, len(y))
plt.plot(x, y)
plt.show()
def get_transformed(data, func):
    """Single-level DWT of each chunk, zero-padded and concatenated.

    Args:
        data: iterable of equal-length 1-D signals (chunks from read_data).
        func: wavelet name accepted by pywt.dwt (e.g. 'db1').

    Returns:
        (retCA, retCD): flat lists of approximation / detail coefficients,
        each padded with zeros to the original chunk length so every chunk
        contributes the same number of samples.
    """
    retCA = []
    retCD = []
    for chunk in data:
        # Fix: the transform itself was missing, so cA/cD were referenced
        # before assignment (NameError); restored to match plot_each.
        cA, cD = pywt.dwt(chunk, func)
        cA = np.pad(cA, (0, len(chunk) - len(cA)), mode='constant')
        cD = np.pad(cD, (0, len(chunk) - len(cD)), mode='constant')
        retCA = retCA + cA.tolist()
        retCD = retCD + cD.tolist()
    return retCA, retCD
def plot_each(data, func):
    """Plot approximation (cA) and detail (cD) DWT coefficients of the first chunk."""
    (cA, cD) = pywt.dwt(data[0], func)
    plot(cA,'cA of DWTUnit('+func+")")
    plot(cD,'cD of DWTUnit('+func+")")
def to_wavs(fname, max_len=MAX_LENGTH, attr='csv'):
    """Compute cA/cD DWT columns for every wavelet in WAVELETS and dump to CSV.

    Reads `fname.attr`, writes `fname_dwt300.csv` with the raw signal in
    column "basic" plus one "<wavelet>_cA" and "<wavelet>_cD" column each.
    """
    datas, rd = read_data(fname + "." + attr, max_len=max_len)
    df = pd.DataFrame()
    df["basic"] = rd
    for i in WAVELETS:
        print(i)
        ca, cd = get_transformed(datas, i)
        df[i + "_cA"] = ca
        df[i + "_cD"] = cd
    df.to_csv(fname + "_dwt300.csv", float_format='%.3f')
def show_wav(fname, max_len = MAX_LENGTH, attr='csv'):
    """Plot the raw first chunk, then the DWT coefficients per wavelet."""
    datas, rd = read_data(fname + "." + attr, max_len=max_len)
    plot(datas[0],'input')
    for i in WAVELETS:
        plot_each(datas,i)
if __name__ == '__main__':
    # Previous batch-conversion runs, kept for reference:
    # to_wavs("olddata/m0", max_len=OLD_DATA_LEN, attr='txt')
    # to_wavs("olddata/m1", max_len=OLD_DATA_LEN, attr='txt')
    # to_wavs("olddata/m2", max_len=OLD_DATA_LEN, attr='txt')
    # to_wavs('0m')
    # to_wavs('1m')
    # to_wavs('2m')
    # print(len(pywt.wavelist(kind='discrete')))
    # for i in pywt.wavelist(kind='discrete'):
    #     print(i)
    show_wav('1m')
| 28.186667 | 63 | 0.60123 |
acf80f5e31835c58cf8bcce84fac2817607d0cf4 | 370 | py | Python | Hackerrank-Solutions/Hackerrank-Python-Solutions/Strings/Text Wrap.py | HetDaftary/Competitive-Coding-Solutions | a683fa11895410c6eef07b1a68054f3e90aa596b | [
"MIT"
] | null | null | null | Hackerrank-Solutions/Hackerrank-Python-Solutions/Strings/Text Wrap.py | HetDaftary/Competitive-Coding-Solutions | a683fa11895410c6eef07b1a68054f3e90aa596b | [
"MIT"
] | null | null | null | Hackerrank-Solutions/Hackerrank-Python-Solutions/Strings/Text Wrap.py | HetDaftary/Competitive-Coding-Solutions | a683fa11895410c6eef07b1a68054f3e90aa596b | [
"MIT"
] | null | null | null | import textwrap
def wrap(string, max_width):
    """Split `string` into max_width-sized chunks joined by newlines.

    Deliberately mirrors the original behaviour exactly, including the
    trailing empty piece (and thus trailing newline) when len(string) is an
    exact multiple of max_width.
    """
    full_chunks = len(string) // max_width
    pieces = [string[k * max_width:(k + 1) * max_width]
              for k in range(full_chunks)]
    # The (possibly empty) remainder is always appended.
    pieces.append(string[full_chunks * max_width:])
    return "\n".join(pieces)
if __name__ == '__main__':
    # Read the text and the wrap width from stdin, then print the result.
    string, max_width = input(), int(input())
    result = wrap(string, max_width)
    print(result)
| 23.125 | 63 | 0.621622 |
acf80f99dc238a49f0d5beee8c705682264b2e90 | 8,068 | py | Python | sandbox/rocky/tf/policies/gaussian_mlp_policy.py | RussellM2020/RoboticTasks | c7157c986cdbbf08cc0ea296205ef2dbcf6fc487 | [
"MIT"
] | null | null | null | sandbox/rocky/tf/policies/gaussian_mlp_policy.py | RussellM2020/RoboticTasks | c7157c986cdbbf08cc0ea296205ef2dbcf6fc487 | [
"MIT"
] | null | null | null | sandbox/rocky/tf/policies/gaussian_mlp_policy.py | RussellM2020/RoboticTasks | c7157c986cdbbf08cc0ea296205ef2dbcf6fc487 | [
"MIT"
] | null | null | null | import numpy as np
from sandbox.rocky.tf.core.layers_powered import LayersPowered
import sandbox.rocky.tf.core.layers as L
from sandbox.rocky.tf.core.network import MLP
from sandbox.rocky.tf.spaces.box import Box
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.policies.base import StochasticPolicy
from sandbox.rocky.tf.distributions.diagonal_gaussian import DiagonalGaussian
from rllab.misc.overrides import overrides
from rllab.misc import logger
from sandbox.rocky.tf.misc import tensor_utils
import tensorflow as tf
class GaussianMLPPolicy(StochasticPolicy, LayersPowered, Serializable):
    """Stochastic policy with a diagonal-Gaussian action distribution whose
    mean is an MLP of the observation and whose std is either a learned
    parameter vector or a second (adaptive) MLP.
    """

    def __init__(
            self,
            name,
            env_spec,
            hidden_sizes=(32, 32),
            learn_std=True,
            init_std=1.0,
            adaptive_std=False,
            std_share_network=False,
            std_hidden_sizes=(32, 32),
            min_std=1e-6,
            std_hidden_nonlinearity=tf.nn.tanh,
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
            mean_network=None,
            std_network=None,
            std_parametrization='exp'
    ):
        """
        :param env_spec:
        :param hidden_sizes: list of sizes for the fully-connected hidden layers
        :param learn_std: Is std trainable
        :param init_std: Initial std
        :param adaptive_std:
        :param std_share_network:
        :param std_hidden_sizes: list of sizes for the fully-connected layers for std
        :param min_std: whether to make sure that the std is at least some threshold value, to avoid numerical issues
        :param std_hidden_nonlinearity:
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity for the output layer
        :param mean_network: custom network for the output mean
        :param std_network: custom network for the output log std
        :param std_parametrization: how the std should be parametrized. There are a few options:
            - exp: the logarithm of the std will be stored, and applied a exponential transformation
            - softplus: the std will be computed as log(1+exp(x))
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Box)

        with tf.variable_scope(name):
            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim

            # create network
            if mean_network is None:
                mean_network = MLP(
                    name="mean_network",
                    input_shape=(obs_dim,),
                    output_dim=action_dim,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=output_nonlinearity,
                )
            self._mean_network = mean_network

            l_mean = mean_network.output_layer
            obs_var = mean_network.input_layer.input_var

            if std_network is not None:
                l_std_param = std_network.output_layer
            else:
                if adaptive_std:
                    # The std is itself an MLP of the observation, sharing
                    # the mean network's input layer.
                    std_network = MLP(
                        name="std_network",
                        input_shape=(obs_dim,),
                        input_layer=mean_network.input_layer,
                        output_dim=action_dim,
                        hidden_sizes=std_hidden_sizes,
                        hidden_nonlinearity=std_hidden_nonlinearity,
                        output_nonlinearity=None,
                    )
                    l_std_param = std_network.output_layer
                else:
                    # A single (optionally trainable) parameter vector,
                    # stored in the chosen std parametrization.
                    if std_parametrization == 'exp':
                        init_std_param = np.log(init_std)
                    elif std_parametrization == 'softplus':
                        init_std_param = np.log(np.exp(init_std) - 1)
                    else:
                        raise NotImplementedError
                    l_std_param = L.ParamLayer(
                        mean_network.input_layer,
                        num_units=action_dim,
                        param=tf.constant_initializer(init_std_param),
                        name="output_std_param",
                        trainable=learn_std,
                    )

            self.std_parametrization = std_parametrization

            # Convert min_std into the same parametrization so it can be
            # used as a clip threshold in dist_info_sym.
            if std_parametrization == 'exp':
                min_std_param = np.log(min_std)
            elif std_parametrization == 'softplus':
                min_std_param = np.log(np.exp(min_std) - 1)
            else:
                raise NotImplementedError

            self.min_std_param = min_std_param

            # mean_var, log_std_var = L.get_output([l_mean, l_std_param])
            #
            # if self.min_std_param is not None:
            #     log_std_var = tf.maximum(log_std_var, np.log(min_std))
            #
            # self._mean_var, self._log_std_var = mean_var, log_std_var

            self._l_mean = l_mean
            self._l_std_param = l_std_param

            self._dist = DiagonalGaussian(action_dim)

            LayersPowered.__init__(self, [l_mean, l_std_param])
            super(GaussianMLPPolicy, self).__init__(env_spec)

            dist_info_sym = self.dist_info_sym(mean_network.input_layer.input_var, dict())
            mean_var = dist_info_sym["mean"]
            log_std_var = dist_info_sym["log_std"]

            # Compiled feed-forward pass: observation -> (mean, log_std).
            self._f_dist = tensor_utils.compile_function(
                inputs=[obs_var],
                outputs=[mean_var, log_std_var],
            )

    @property
    def vectorized(self):
        # Supports batched get_actions over multiple environments.
        return True

    def dist_info_sym(self, obs_var, state_info_vars=None):
        """Symbolic (mean, log_std) of the action distribution for obs_var."""
        mean_var, std_param_var = L.get_output([self._l_mean, self._l_std_param], obs_var)
        if self.min_std_param is not None:
            std_param_var = tf.maximum(std_param_var, self.min_std_param)
        if self.std_parametrization == 'exp':
            log_std_var = std_param_var
        elif self.std_parametrization == 'softplus':
            # log_std = log(std) = log(log(1 + exp(param))).
            log_std_var = tf.log(tf.log(1. + tf.exp(std_param_var)))
        else:
            raise NotImplementedError
        return dict(mean=mean_var, log_std=log_std_var)

    @overrides
    def get_action(self, observation):
        """Sample one action as mean + eps * std for a single observation."""
        flat_obs = self.observation_space.flatten(observation)
        mean, log_std = [x[0] for x in self._f_dist([flat_obs])]
        rnd = np.random.normal(size=mean.shape)
        action = rnd * np.exp(log_std) + mean
        return action, dict(mean=mean, log_std=log_std)

    def get_actions(self, observations):
        """Vectorized variant of get_action for a batch of observations."""
        flat_obs = self.observation_space.flatten_n(observations)
        means, log_stds = self._f_dist(flat_obs)
        rnd = np.random.normal(size=means.shape)
        actions = rnd * np.exp(log_stds) + means
        return actions, dict(mean=means, log_std=log_stds)

    def get_reparam_action_sym(self, obs_var, action_var, old_dist_info_vars):
        """
        Given observations, old actions, and distribution of old actions, return a symbolically reparameterized
        representation of the actions in terms of the policy parameters
        :param obs_var:
        :param action_var:
        :param old_dist_info_vars:
        :return:
        """
        new_dist_info_vars = self.dist_info_sym(obs_var, action_var)
        new_mean_var, new_log_std_var = new_dist_info_vars["mean"], new_dist_info_vars["log_std"]
        old_mean_var, old_log_std_var = old_dist_info_vars["mean"], old_dist_info_vars["log_std"]
        # Recover the noise that produced the old action, then re-apply it
        # through the current distribution parameters.
        epsilon_var = (action_var - old_mean_var) / (tf.exp(old_log_std_var) + 1e-8)
        new_action_var = new_mean_var + epsilon_var * tf.exp(new_log_std_var)
        return new_action_var

    def log_diagnostics(self, paths):
        """Record the average policy std across the sampled paths."""
        log_stds = np.vstack([path["agent_infos"]["log_std"] for path in paths])
        logger.record_tabular('AveragePolicyStd', np.mean(np.exp(log_stds)))

    @property
    def distribution(self):
        return self._dist
| 40.542714 | 117 | 0.614155 |
acf810445bf6fcc5bf5f5833a7e0fcf67a1dbe9c | 116 | py | Python | userauth/views.py | davidgranados/wagtail-python-eats-tail | a8b51d1195dfbe50d6988e8596d1904a20bccd37 | [
"MIT"
] | null | null | null | userauth/views.py | davidgranados/wagtail-python-eats-tail | a8b51d1195dfbe50d6988e8596d1904a20bccd37 | [
"MIT"
] | null | null | null | userauth/views.py | davidgranados/wagtail-python-eats-tail | a8b51d1195dfbe50d6988e8596d1904a20bccd37 | [
"MIT"
] | null | null | null | from django.shortcuts import render
def profile_view(request):
    """Render the account profile template for the current request."""
    return render(request, 'account/profile.html')
| 19.333333 | 50 | 0.775862 |
acf8107b1db32b2f003e76486a2db10a6cf661c1 | 293 | py | Python | src/main/resources/Generator.py | Vikramadtya/Algorithms | 01c64c719b4d2604f1a4e936b08965a4be35bb6d | [
"MIT"
] | null | null | null | src/main/resources/Generator.py | Vikramadtya/Algorithms | 01c64c719b4d2604f1a4e936b08965a4be35bb6d | [
"MIT"
] | null | null | null | src/main/resources/Generator.py | Vikramadtya/Algorithms | 01c64c719b4d2604f1a4e936b08965a4be35bb6d | [
"MIT"
] | null | null | null | import random
# Emit `repeat` sequences of random integers for every sequence length from
# 10 up to `count` (exclusive), in steps of 10; each sequence is printed
# space-separated on its own line.
count= 10000
repeat = 100
# Fixed seed so the generated test data is reproducible.
random.seed(13)
for i in range(10,count,10):
    for j in range(repeat):
        sequence = []
        for k in range(i):
            sequence.append(random.randint(1,2147483646))
        for x in sequence:
            print(x,end=" ")
print() | 19.533333 | 56 | 0.566553 |
acf810b64bb88f95001bffd37551dbceb29b42ef | 47,698 | py | Python | src/atom/__init__.py | lqc/google-data-api | b720582a472d627a0853d02e51e13dbce4cfe6ae | [
"Apache-2.0"
] | null | null | null | src/atom/__init__.py | lqc/google-data-api | b720582a472d627a0853d02e51e13dbce4cfe6ae | [
"Apache-2.0"
] | null | null | null | src/atom/__init__.py | lqc/google-data-api | b720582a472d627a0853d02e51e13dbce4cfe6ae | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes representing Atom elements.
Module objective: provide data classes for Atom constructs. These classes hide
the XML-ness of Atom and provide a set of native Python classes to interact
with.
Conversions to and from XML should only be necessary when the Atom classes
"touch the wire" and are sent over HTTP. For this reason this module
provides methods and functions to convert Atom classes to and from strings.
For more information on the Atom data model, see RFC 4287
(http://www.ietf.org/rfc/rfc4287.txt)
AtomBase: A foundation class on which Atom classes are built. It
handles the parsing of attributes and children which are common to all
Atom classes. By default, the AtomBase class translates all XML child
nodes into ExtensionElements.
ExtensionElement: Atom allows Atom objects to contain XML which is not part
of the Atom specification, these are called extension elements. If a
classes parser encounters an unexpected XML construct, it is translated
into an ExtensionElement instance. ExtensionElement is designed to fully
capture the information in the XML. Child nodes in an XML extension are
turned into ExtensionElements as well.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import lxml.etree as ElementTree
import warnings
# XML namespaces which are often used in Atom entities.
ATOM_NAMESPACE = 'http://www.w3.org/2005/Atom'
ELEMENT_TEMPLATE = '{http://www.w3.org/2005/Atom}%s'
APP_NAMESPACE = 'http://purl.org/atom/app#'
APP_TEMPLATE = '{http://purl.org/atom/app#}%s'
## This encoding is used for converting strings before translating the XML
## into an object.
#XML_STRING_ENCODING = 'utf-8'
## The desired string encoding for object members. set or monkey-patch to
## unicode if you want object members to be Python unicode strings, instead of
## encoded strings
#MEMBER_STRING_ENCODING = 'utf-8'
##MEMBER_STRING_ENCODING = unicode
#
# Keeping data as encoded byte arrays is NOT what unicode is about.
# If True, all methods which are exclusive to v1 will raise a
# DeprecationWarning
ENABLE_V1_WARNINGS = False
def v1_deprecated(warning=None):
  """Shows a warning if ENABLE_V1_WARNINGS is True.

  Decorator factory for tagging v1-only methods: the wrapped function is
  unchanged except that it may emit a DeprecationWarning when called.
  """
  message = warning if warning else ''

  def mark_deprecated(f):
    def optional_warn_function(*args, **kwargs):
      # Warn only when the module-level switch is on; stacklevel=2 points
      # the warning at the caller rather than this wrapper.
      if ENABLE_V1_WARNINGS:
        warnings.warn(message, DeprecationWarning, stacklevel=2)
      return f(*args, **kwargs)

    # Keep the wrapped function's name for introspection.
    optional_warn_function.__name__ = f.__name__
    return optional_warn_function

  return mark_deprecated
def CreateClassFromXMLString(target_class, xml_string, string_encoding=None):
  """Creates an instance of the target class from the string contents.

  Args:
    target_class: class The class which will be instantiated and populated
        with the contents of the XML. This class must have a _tag and a
        _namespace class variable.
    xml_string: str or bytes A string which contains valid XML. The root
        element of the XML string should match the tag and namespace of the
        desired class.
    string_encoding: str (optional) Unused; retained only for backward
        compatibility with older callers.

  Returns:
    An instance of the target class with members assigned according to the
    contents of the XML - or None if the root XML tag and namespace did not
    match those of the target class.

  Raises:
    TypeError: if xml_string is neither str nor bytes.
  """
  # The parser expects bytes; encode text input up front.
  if isinstance(xml_string, str):
    xml_string = xml_string.encode('utf-8')
  if not isinstance(xml_string, bytes):
    # TypeError is a subclass of Exception, so callers catching the old
    # generic Exception continue to work.
    raise TypeError("This function accepts bytes or strings")
  tree = ElementTree.fromstring(xml_string)
  return _CreateClassFromElementTree(target_class, tree)
# Wrap the v1 parser entry point so that calling it can emit a
# DeprecationWarning when ENABLE_V1_WARNINGS is turned on.
CreateClassFromXMLString = v1_deprecated(
    'Please use atom.core.parse with atom.data classes instead.')(
        CreateClassFromXMLString)
def _CreateClassFromElementTree(target_class, tree, namespace=None, tag=None):
  """Builds an instance of target_class populated from an element tree.

  Note: Only use this function with classes that have _namespace and _tag
  class members.

  Args:
    target_class: class The class which will be instantiated and populated
        with the contents of the XML.
    tree: ElementTree An element tree whose contents will be converted into
        members of the new target_class instance.
    namespace: str (optional) The namespace which the XML tree's root node
        must match. Defaults to the _namespace of the target class.
    tag: str (optional) The tag which the XML tree's root node must match.
        Defaults to the _tag class member of the target class.

  Returns:
    An instance of the target class - or None if the tag and namespace of
    the XML tree's root node did not match the desired namespace and tag.
  """
  expected_namespace = target_class._namespace if namespace is None else namespace
  expected_tag = target_class._tag if tag is None else tag
  # Reject trees whose root does not carry the expected qualified name.
  if tree.tag != '{%s}%s' % (expected_namespace, expected_tag):
    return None
  instance = target_class()
  instance._HarvestElementTree(tree)
  return instance
class ExtensionContainer(object):
  """Stores XML children and attributes which are not part of a schema.

  Base class providing storage for arbitrary extension elements, extension
  attributes, and text content, plus the hooks used when converting between
  objects and ElementTree nodes.
  """

  def __init__(self, extension_elements=None, extension_attributes=None,
      text=None):
    # extension_elements: list of ExtensionElement children.
    # extension_attributes: dict mapping attribute name -> string value.
    # text: the element's text node content, if any.
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text

  # Applied at class-definition time: the wrapped __init__ may emit a
  # DeprecationWarning when ENABLE_V1_WARNINGS is True.
  __init__ = v1_deprecated(
      'Please use data model classes in atom.data instead.')(
          __init__)

  # Three methods to create an object from an ElementTree
  def _HarvestElementTree(self, tree):
    """Populates this instance from an ElementTree node's contents."""
    # Fill in the instance members from the contents of the XML tree.
    for child in tree:
      self._ConvertElementTreeToMember(child)
    for attribute, value in tree.attrib.items():
      self._ConvertElementAttributeToMember(attribute, value)
    # Encode the text string according to the desired encoding type. (UTF-8)
    if tree.text:
      self.text = tree.text

  def _ConvertElementTreeToMember(self, child_tree, current_class=None):
    # Unrecognized children are preserved verbatim as extension elements.
    self.extension_elements.append(_ExtensionElementFromElementTree(
        child_tree))

  def _ConvertElementAttributeToMember(self, attribute, value):
    # Encode the attribute value's string with the desired type Default UTF-8
    if value:
      self.extension_attributes[attribute] = value

  # One method to create an ElementTree from an object
  def _AddMembersToElementTree(self, tree):
    """Writes stored extensions, attributes and text onto an XML node."""
    for child in self.extension_elements:
      child._BecomeChildElement(tree)
    for attribute, value in self.extension_attributes.items():
      if value:
        tree.attrib[attribute] = value
    if self.text:
      tree.text = self.text

  def FindExtensions(self, tag=None, namespace=None):
    """Searches extension elements for child nodes with the desired name.

    Returns a list of extension elements within this object whose tag
    and/or namespace match those passed in. To find all extensions in
    a particular namespace, specify the namespace but not the tag name.
    If you specify only the tag, the result list may contain extension
    elements in multiple namespaces.

    Args:
      tag: str (optional) The desired tag
      namespace: str (optional) The desired namespace

    Returns:
      A list of elements whose tag and/or namespace match the parameters
      values
    """
    results = []
    # Four cases: filter on both, tag only, namespace only, or neither.
    if tag and namespace:
      for element in self.extension_elements:
        if element.tag == tag and element.namespace == namespace:
          results.append(element)
    elif tag and not namespace:
      for element in self.extension_elements:
        if element.tag == tag:
          results.append(element)
    elif namespace and not tag:
      for element in self.extension_elements:
        if element.namespace == namespace:
          results.append(element)
    else:
      for element in self.extension_elements:
        results.append(element)
    return results
class AtomBase(ExtensionContainer):
  """Foundation class for Atom elements.

  Subclasses describe their schema through the _children and _attributes
  class dictionaries; XML nodes not found there fall back to the
  ExtensionContainer machinery.
  """

  # Maps '{namespace}tag' -> (member_name, member_class). A member_class
  # wrapped in a list means the member holds repeated children.
  _children = {}
  # Maps XML attribute name -> instance member name.
  _attributes = {}

  def __init__(self, extension_elements=None, extension_attributes=None,
      text=None):
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text

  # Applied at class-definition time: may emit a DeprecationWarning when
  # ENABLE_V1_WARNINGS is True.
  __init__ = v1_deprecated(
      'Please use data model classes in atom.data instead.')(
          __init__)

  def _ConvertElementTreeToMember(self, child_tree):
    """Converts a child XML node into a typed member or an extension."""
    # Find the element's tag in this class's list of child members
    if child_tree.tag in self.__class__._children:
      member_name = self.__class__._children[child_tree.tag][0]
      member_class = self.__class__._children[child_tree.tag][1]
      # If the class member is supposed to contain a list, make sure the
      # matching member is set to a list, then append the new member
      # instance to the list.
      if isinstance(member_class, list):
        if getattr(self, member_name) is None:
          setattr(self, member_name, [])
        getattr(self, member_name).append(_CreateClassFromElementTree(
            member_class[0], child_tree))
      else:
        setattr(self, member_name,
                _CreateClassFromElementTree(member_class, child_tree))
    else:
      # Not part of the schema: preserve it as an extension element.
      ExtensionContainer._ConvertElementTreeToMember(self, child_tree)

  def _ConvertElementAttributeToMember(self, attribute, value):
    """Converts an XML attribute into a typed member or an extension."""
    # Find the attribute in this class's list of attributes.
    if attribute in self.__class__._attributes:
      # Find the member of this class which corresponds to the XML attribute
      # (lookup in current_class._attributes) and set this member to the
      # desired value (using self.__dict__).
      if value:
        # Encode the string to capture non-ascii characters (default UTF-8)
        setattr(self, self.__class__._attributes[attribute], value)
    else:
      ExtensionContainer._ConvertElementAttributeToMember(
          self, attribute, value)

  # Three methods to create an ElementTree from an object
  def _AddMembersToElementTree(self, tree):
    """Serializes typed members, attributes, and extensions onto a node."""
    # Convert the members of this class which are XML child nodes.
    # This uses the class's _children dictionary to find the members which
    # should become XML child nodes.
    member_node_names = [values[0] for tag, values in
                         self.__class__._children.items()]
    for member_name in member_node_names:
      member = getattr(self, member_name)
      if member is None:
        pass
      elif isinstance(member, list):
        for instance in member:
          instance._BecomeChildElement(tree)
      else:
        member._BecomeChildElement(tree)
    # Convert the members of this class which are XML attributes.
    for xml_attribute, member_name in self.__class__._attributes.items():
      member = getattr(self, member_name)
      if member is not None:
        tree.attrib[xml_attribute] = member
    # Lastly, call the ExtensionContainers's _AddMembersToElementTree to
    # convert any extension attributes.
    ExtensionContainer._AddMembersToElementTree(self, tree)

  def _BecomeChildElement(self, tree):
    """Appends this object to `tree` as a new child element.

    Note: Only for use with classes that have a _tag and _namespace class
    member. It is in AtomBase so that it can be inherited but it should
    not be called on instances of AtomBase.
    """
    new_child = ElementTree.Element( '{%s}%s' % (self.__class__._namespace,
                                                 self.__class__._tag) )
    tree.append(new_child)
    self._AddMembersToElementTree(new_child)

  def _ToElementTree(self):
    """Builds a standalone ElementTree element from this object.

    Note, this method is designed to be used only with classes that have a
    _tag and _namespace. It is placed in AtomBase for inheritance but should
    not be called on this class.
    """
    new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace,
                                               self.__class__._tag))
    self._AddMembersToElementTree(new_tree)
    return new_tree

  def ToString(self):
    """Converts the Atom object to a string containing XML."""
    # encoding=str requests a text (not bytes) result from tostring.
    return ElementTree.tostring(self._ToElementTree(), encoding=str)

  def __str__(self):
    return self.ToString()
class Name(AtomBase):
  """Represents the atom:name element."""

  _tag = 'name'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:name.

    Args:
      text: str (optional) The human-readable name held by this element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def NameFromString(xml_string):
  """Parses an atom:name element out of a string of XML."""
  parsed = CreateClassFromXMLString(Name, xml_string)
  return parsed
class Email(AtomBase):
  """Represents the atom:email element."""

  _tag = 'email'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:email.

    Args:
      text: str (optional) The email address held by this element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def EmailFromString(xml_string):
  """Parses an atom:email element out of a string of XML."""
  parsed = CreateClassFromXMLString(Email, xml_string)
  return parsed
class Uri(AtomBase):
  """Represents the atom:uri element."""

  _tag = 'uri'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:uri.

    Args:
      text: str (optional) The URI held by this element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def UriFromString(xml_string):
  """Parses an atom:uri element out of a string of XML."""
  parsed = CreateClassFromXMLString(Uri, xml_string)
  return parsed
class Person(AtomBase):
  """Shared base for atom:author and atom:contributor.

  Bundles the name, email address, and web page URI which describe an
  author of, or contributor to, an Atom feed.
  """

  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _children['{%s}name' % (ATOM_NAMESPACE)] = ('name', Name)
  _children['{%s}email' % (ATOM_NAMESPACE)] = ('email', Email)
  _children['{%s}uri' % (ATOM_NAMESPACE)] = ('uri', Uri)

  def __init__(self, name=None, email=None, uri=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Foundation from which author and contributor are derived.

    Provided for illustration; callers normally instantiate Author or
    Contributor rather than Person itself.

    Args:
      name: Name (optional) The person's name element.
      email: Email (optional) The person's email address element.
      uri: Uri (optional) The URI of the person's web page.
      extension_elements: list (optional) ExtensionElement children of
          this element.
      extension_attributes: dict (optional) Additional attribute values.
      text: str (optional) Text content of the element.
    """
    self.text = text
    self.uri = uri
    self.email = email
    self.name = name
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
class Author(Person):
  """Represents the atom:author element.

  An author is a required element in Feed.
  """

  _tag = 'author'
  _namespace = ATOM_NAMESPACE
  _children = Person._children.copy()
  _attributes = Person._attributes.copy()

  def __init__(self, name=None, email=None, uri=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Builds an atom:author.

    Args:
      name: Name (optional) The author's name element.
      email: Email (optional) The author's email address element.
      uri: Uri (optional) The URI of the author's web page.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
      text: str (optional) Text content of the element.
    """
    self.text = text
    self.uri = uri
    self.email = email
    self.name = name
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def AuthorFromString(xml_string):
  """Parses an atom:author element out of a string of XML."""
  parsed = CreateClassFromXMLString(Author, xml_string)
  return parsed
class Contributor(Person):
  """Represents the atom:contributor element."""

  _tag = 'contributor'
  _namespace = ATOM_NAMESPACE
  _children = Person._children.copy()
  _attributes = Person._attributes.copy()

  def __init__(self, name=None, email=None, uri=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Builds an atom:contributor.

    Args:
      name: Name (optional) The contributor's name element.
      email: Email (optional) The contributor's email address element.
      uri: Uri (optional) The URI of the contributor's web page.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
      text: str (optional) Text content of the element.
    """
    self.text = text
    self.uri = uri
    self.email = email
    self.name = name
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def ContributorFromString(xml_string):
  """Parses an atom:contributor element out of a string of XML."""
  parsed = CreateClassFromXMLString(Contributor, xml_string)
  return parsed
class Link(AtomBase):
  """Represents the atom:link element."""

  _tag = 'link'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _attributes['rel'] = 'rel'
  _attributes['href'] = 'href'
  _attributes['type'] = 'type'
  _attributes['title'] = 'title'
  _attributes['length'] = 'length'
  _attributes['hreflang'] = 'hreflang'

  def __init__(self, href=None, rel=None, link_type=None, hreflang=None,
      title=None, length=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:link.

    Args:
      href: str (optional) The target URI of the link.
      rel: str (optional) The relationship of the target to this document.
      link_type: str (optional) Media type hint for the target; stored on
          the instance as ``type``.
      hreflang: str (optional) The language of the linked resource.
      title: str (optional) Human-readable label for the link.
      length: str (optional) Advisory size of the href's destination.
      text: str (optional) Text content of the element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.length = length
    self.title = title
    self.hreflang = hreflang
    self.type = link_type
    self.rel = rel
    self.href = href
def LinkFromString(xml_string):
  """Parses an atom:link element out of a string of XML."""
  parsed = CreateClassFromXMLString(Link, xml_string)
  return parsed
class Generator(AtomBase):
  """Represents the atom:generator element."""

  _tag = 'generator'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _attributes['uri'] = 'uri'
  _attributes['version'] = 'version'

  def __init__(self, uri=None, version=None, text=None,
      extension_elements=None, extension_attributes=None):
    """Builds an atom:generator.

    Args:
      uri: str (optional) URI identifying the generating agent.
      version: str (optional) Version of the generating agent.
      text: str (optional) Text content of the element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.version = version
    self.uri = uri
def GeneratorFromString(xml_string):
  """Parses an atom:generator element out of a string of XML."""
  parsed = CreateClassFromXMLString(Generator, xml_string)
  return parsed
class Text(AtomBase):
  """Shared base for atom:title, atom:summary, and other text constructs.

  This class should never be instantiated directly.
  """

  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _attributes['type'] = 'type'

  def __init__(self, text_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an Atom text construct.

    Args:
      text_type: str (optional) Content type of the text (e.g. 'text',
          'html', 'xhtml'); stored on the instance as ``type``.
      text: str (optional) Text content of the element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = text_type
class Title(Text):
  """Represents the atom:title element."""

  _tag = 'title'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()

  def __init__(self, title_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:title.

    Args:
      title_type: str (optional) Content type of the title; stored on the
          instance as ``type``.
      text: str (optional) Text content of the element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = title_type
def TitleFromString(xml_string):
  """Parses an atom:title element out of a string of XML."""
  parsed = CreateClassFromXMLString(Title, xml_string)
  return parsed
class Subtitle(Text):
  """Represents the atom:subtitle element."""

  _tag = 'subtitle'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()

  def __init__(self, subtitle_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:subtitle.

    Args:
      subtitle_type: str (optional) Content type of the subtitle; stored on
          the instance as ``type``.
      text: str (optional) Text content of the element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = subtitle_type
def SubtitleFromString(xml_string):
  """Parses an atom:subtitle element out of a string of XML."""
  parsed = CreateClassFromXMLString(Subtitle, xml_string)
  return parsed
class Rights(Text):
  """Represents the atom:rights element."""

  _tag = 'rights'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()

  def __init__(self, rights_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:rights.

    Args:
      rights_type: str (optional) Content type of the rights text; stored
          on the instance as ``type``.
      text: str (optional) Text content of the element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = rights_type
def RightsFromString(xml_string):
  """Parses an atom:rights element out of a string of XML."""
  parsed = CreateClassFromXMLString(Rights, xml_string)
  return parsed
class Summary(Text):
  """Represents the atom:summary element."""

  _tag = 'summary'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()

  def __init__(self, summary_type=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:summary.

    Args:
      summary_type: str (optional) Content type of the summary; stored on
          the instance as ``type``.
      text: str (optional) Text content of the element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.type = summary_type
def SummaryFromString(xml_string):
  """Parses an atom:summary element out of a string of XML."""
  parsed = CreateClassFromXMLString(Summary, xml_string)
  return parsed
class Content(Text):
  """Represents the atom:content element."""

  _tag = 'content'
  _namespace = ATOM_NAMESPACE
  _children = Text._children.copy()
  _attributes = Text._attributes.copy()
  _attributes['src'] = 'src'

  def __init__(self, content_type=None, src=None, text=None,
      extension_elements=None, extension_attributes=None):
    """Builds an atom:content.

    Args:
      content_type: str (optional) Content type of the payload; stored on
          the instance as ``type``.
      src: str (optional) URI of out-of-line content.
      text: str (optional) Inline text content of the element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.src = src
    self.type = content_type
def ContentFromString(xml_string):
  """Parses an atom:content element out of a string of XML."""
  parsed = CreateClassFromXMLString(Content, xml_string)
  return parsed
class Category(AtomBase):
  """Represents the atom:category element."""

  _tag = 'category'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _attributes['term'] = 'term'
  _attributes['scheme'] = 'scheme'
  _attributes['label'] = 'label'

  def __init__(self, term=None, scheme=None, label=None, text=None,
      extension_elements=None, extension_attributes=None):
    """Builds an atom:category.

    Args:
      term: str (optional) The category term.
      scheme: str (optional) IRI identifying the categorization scheme.
      label: str (optional) Human-readable label for the category.
      text: str (optional) Text content of the element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.label = label
    self.scheme = scheme
    self.term = term
def CategoryFromString(xml_string):
  """Parses an atom:category element out of a string of XML."""
  parsed = CreateClassFromXMLString(Category, xml_string)
  return parsed
class Id(AtomBase):
  """Represents the atom:id element."""

  _tag = 'id'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:id.

    Args:
      text: str (optional) The permanent, universally unique identifier.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def IdFromString(xml_string):
  """Parses an atom:id element out of a string of XML."""
  parsed = CreateClassFromXMLString(Id, xml_string)
  return parsed
class Icon(AtomBase):
  """Represents the atom:icon element."""

  _tag = 'icon'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:icon.

    Args:
      text: str (optional) The icon's IRI.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def IconFromString(xml_string):
  """Parses an atom:icon element out of a string of XML."""
  parsed = CreateClassFromXMLString(Icon, xml_string)
  return parsed
class Logo(AtomBase):
  """Represents the atom:logo element."""

  _tag = 'logo'
  _namespace = ATOM_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:logo.

    Args:
      text: str (optional) The logo's IRI.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def LogoFromString(xml_string):
  """Parses an atom:logo element out of a string of XML."""
  parsed = CreateClassFromXMLString(Logo, xml_string)
  return parsed
class Draft(AtomBase):
  """Represents the app:draft element, which marks an entry non-public."""

  _tag = 'draft'
  _namespace = APP_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an app:draft.

    Args:
      text: str (optional) Text content of the element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def DraftFromString(xml_string):
  """Parses an app:draft element out of a string of XML."""
  parsed = CreateClassFromXMLString(Draft, xml_string)
  return parsed
class Control(AtomBase):
  """Represents the app:control element, restricting publication.

  The APP control element may contain a draft element indicating whether
  or not this entry should be publicly available.
  """

  _tag = 'control'
  _namespace = APP_NAMESPACE
  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _children['{%s}draft' % APP_NAMESPACE] = ('draft', Draft)

  def __init__(self, draft=None, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an app:control.

    Args:
      draft: Draft (optional) The contained app:draft element.
      text: str (optional) Text content of the element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
    self.draft = draft
def ControlFromString(xml_string):
  """Parses an app:control element out of a string of XML."""
  parsed = CreateClassFromXMLString(Control, xml_string)
  return parsed
class Date(AtomBase):
  """Shared base for timestamp elements such as atom:updated/published."""

  # TODO: add conversion helpers so the text payload can be set from and
  # read back as a Python datetime object.

  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
class Updated(Date):
  """Represents the atom:updated element."""

  _tag = 'updated'
  _namespace = ATOM_NAMESPACE
  _children = Date._children.copy()
  _attributes = Date._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:updated.

    Args:
      text: str (optional) The timestamp text for this element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def UpdatedFromString(xml_string):
  """Parses an atom:updated element out of a string of XML."""
  parsed = CreateClassFromXMLString(Updated, xml_string)
  return parsed
class Published(Date):
  """Represents the atom:published element."""

  _tag = 'published'
  _namespace = ATOM_NAMESPACE
  _children = Date._children.copy()
  _attributes = Date._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
      extension_attributes=None):
    """Builds an atom:published.

    Args:
      text: str (optional) The timestamp text for this element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
    self.text = text
def PublishedFromString(xml_string):
  """Parses an atom:published element out of a string of XML."""
  parsed = CreateClassFromXMLString(Published, xml_string)
  return parsed
class LinkFinder(object):
  """An "interface" providing methods to find link elements.

  Entry elements often contain multiple links which differ in the rel
  attribute or content type. Often, developers are interested in a specific
  type of link so this class provides methods to find specific classes of
  links.

  This class is used as a mixin in Atom entries and feeds; it assumes the
  host class provides a ``link`` list of atom.Link instances.
  """

  def _FindFirstLinkWithRel(self, rel):
    """Returns the first link whose rel equals `rel`, or None."""
    for a_link in self.link:
      if a_link.rel == rel:
        return a_link
    return None

  def GetSelfLink(self):
    """Find the first link with rel set to 'self'.

    Returns:
      An atom.Link or None if none of the links had rel equal to 'self'.
    """
    return self._FindFirstLinkWithRel('self')

  def GetEditLink(self):
    """Returns the first link with rel 'edit', or None."""
    return self._FindFirstLinkWithRel('edit')

  def GetEditMediaLink(self):
    """Returns the first link with rel 'edit-media', or None."""
    return self._FindFirstLinkWithRel('edit-media')

  def GetNextLink(self):
    """Returns the first link with rel 'next', or None."""
    return self._FindFirstLinkWithRel('next')

  def GetLicenseLink(self):
    """Returns the first link with rel 'license', or None."""
    return self._FindFirstLinkWithRel('license')

  def GetAlternateLink(self):
    """Returns the first link with rel 'alternate', or None."""
    return self._FindFirstLinkWithRel('alternate')
class FeedEntryParent(AtomBase, LinkFinder):
  """Shared attributes of atom:feed and atom:entry."""

  _children = AtomBase._children.copy()
  _attributes = AtomBase._attributes.copy()
  _children['{%s}author' % ATOM_NAMESPACE] = ('author', [Author])
  _children['{%s}category' % ATOM_NAMESPACE] = ('category', [Category])
  _children['{%s}contributor' % ATOM_NAMESPACE] = ('contributor', [Contributor])
  _children['{%s}id' % ATOM_NAMESPACE] = ('id', Id)
  _children['{%s}link' % ATOM_NAMESPACE] = ('link', [Link])
  _children['{%s}rights' % ATOM_NAMESPACE] = ('rights', Rights)
  _children['{%s}title' % ATOM_NAMESPACE] = ('title', Title)
  _children['{%s}updated' % ATOM_NAMESPACE] = ('updated', Updated)

  def __init__(self, author=None, category=None, contributor=None,
      atom_id=None, link=None, rights=None, title=None, updated=None,
      text=None, extension_elements=None, extension_attributes=None):
    """Initializes the members common to feeds and entries.

    Args:
      author: list (optional) Author instances.
      category: list (optional) Category instances.
      contributor: list (optional) Contributor instances.
      atom_id: Id (optional) The element's id child; stored as ``id``.
      link: list (optional) Link instances.
      rights: Rights (optional) The rights element.
      title: Title (optional) The title element.
      updated: Updated (optional) The updated element.
      text: str (optional) Text content of the element.
      extension_elements: list (optional) ExtensionElement children.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.text = text
    self.updated = updated
    self.title = title
    self.rights = rights
    self.id = atom_id
    self.author = author or []
    self.category = category or []
    self.contributor = contributor or []
    self.link = link or []
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
class Source(FeedEntryParent):
  """Represents the atom:source element."""

  _tag = 'source'
  _namespace = ATOM_NAMESPACE
  _children = FeedEntryParent._children.copy()
  _attributes = FeedEntryParent._attributes.copy()
  _children['{%s}generator' % ATOM_NAMESPACE] = ('generator', Generator)
  _children['{%s}icon' % ATOM_NAMESPACE] = ('icon', Icon)
  _children['{%s}logo' % ATOM_NAMESPACE] = ('logo', Logo)
  _children['{%s}subtitle' % ATOM_NAMESPACE] = ('subtitle', Subtitle)

  def __init__(self, author=None, category=None, contributor=None,
      generator=None, icon=None, atom_id=None, link=None, logo=None,
      rights=None, subtitle=None, title=None, updated=None, text=None,
      extension_elements=None, extension_attributes=None):
    """Builds an atom:source.

    Args:
      author: list (optional) Author instances which belong to this class.
      category: list (optional) Category instances.
      contributor: list (optional) Contributor instances.
      generator: Generator (optional) The generator element.
      icon: Icon (optional) The icon element.
      atom_id: Id (optional) The element's id child; stored as ``id``.
      link: list (optional) Link instances.
      logo: Logo (optional) The logo element.
      rights: Rights (optional) The rights element.
      subtitle: Subtitle (optional) The subtitle element.
      title: Title (optional) The title element.
      updated: Updated (optional) The updated element.
      text: str (optional) Text content of the element.
      extension_elements: list (optional) ExtensionElement children of this
          element.
      extension_attributes: dict (optional) Additional attribute values.
    """
    self.text = text
    self.updated = updated
    self.title = title
    self.subtitle = subtitle
    self.rights = rights
    self.logo = logo
    self.id = atom_id
    self.icon = icon
    self.generator = generator
    self.author = author or []
    self.category = category or []
    self.contributor = contributor or []
    self.link = link or []
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def SourceFromString(xml_string):
  """Parses an atom:source element out of a string of XML."""
  parsed = CreateClassFromXMLString(Source, xml_string)
  return parsed
class Entry(FeedEntryParent):
  """Represents a single atom:entry."""

  _tag = 'entry'
  _namespace = ATOM_NAMESPACE
  _children = FeedEntryParent._children.copy()
  _attributes = FeedEntryParent._attributes.copy()
  _children['{%s}content' % ATOM_NAMESPACE] = ('content', Content)
  _children['{%s}published' % ATOM_NAMESPACE] = ('published', Published)
  _children['{%s}source' % ATOM_NAMESPACE] = ('source', Source)
  _children['{%s}summary' % ATOM_NAMESPACE] = ('summary', Summary)
  _children['{%s}control' % APP_NAMESPACE] = ('control', Control)

  def __init__(self, author=None, category=None, content=None,
      contributor=None, atom_id=None, link=None, published=None, rights=None,
      source=None, summary=None, control=None, title=None, updated=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Builds an atom:entry.

    Args:
      author: list (optional) Author instances which belong to this class.
      category: list (optional) Category instances.
      content: Content (optional) The entry's content element.
      contributor: list (optional) Contributor instances.
      atom_id: Id (optional) The entry's id child; stored as ``id``.
      link: list (optional) Link instances.
      published: Published (optional) The entry's published element.
      rights: Rights (optional) The entry's rights element.
      source: Source (optional) The entry's source element.
      summary: Summary (optional) The entry's summary element.
      control: Control (optional) The entry's app:control element which can
          be used to mark an entry as a draft which should not be publicly
          viewable.
      title: Title (optional) The entry's title element.
      updated: Updated (optional) The entry's updated element.
      extension_elements: list (optional) ExtensionElement children of this
          element.
      extension_attributes: dict (optional) Additional attribute values.
      text: str (optional) Text content of the element.
    """
    self.text = text
    self.control = control
    self.updated = updated
    self.title = title
    self.summary = summary
    self.source = source
    self.rights = rights
    self.published = published
    self.content = content
    self.id = atom_id
    self.author = author or []
    self.category = category or []
    self.contributor = contributor or []
    self.link = link or []
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}

  __init__ = v1_deprecated('Please use atom.data.Entry instead.')(__init__)
def EntryFromString(xml_string):
  """Parse an atom Entry object from an XML string.

  Args:
    xml_string: str The XML serialization of an atom:entry element.

  Returns:
    An Entry instance populated from the XML string.
  """
  return CreateClassFromXMLString(Entry, xml_string)
class Feed(Source):
  """Represents the atom:feed element."""

  _tag = 'feed'
  _namespace = ATOM_NAMESPACE
  _children = Source._children.copy()
  _attributes = Source._attributes.copy()
  _children['{%s}entry' % ATOM_NAMESPACE] = ('entry', [Entry])

  @v1_deprecated('Please use atom.data.Feed instead.')
  def __init__(self, author=None, category=None, contributor=None,
      generator=None, icon=None, atom_id=None, link=None, logo=None,
      rights=None, subtitle=None, title=None, updated=None, entry=None,
      text=None, extension_elements=None, extension_attributes=None):
    """Constructor for atom:feed.

    All arguments are optional. List-valued members default to fresh empty
    lists and extension_attributes defaults to a fresh empty dict.

    Args:
      author: list of Author instances which belong to this feed.
      category: list of Category instances.
      contributor: list of Contributor instances.
      generator: Generator element of the feed.
      icon: Icon element of the feed.
      atom_id: Id element of the feed.
      link: list of Link instances.
      logo: Logo element of the feed.
      rights: Rights element of the feed.
      subtitle: Subtitle element of the feed.
      title: Title element of the feed.
      updated: Updated element of the feed.
      entry: list of the Entry instances contained in the feed.
      text: str contents of the feed's XML text node.
      extension_elements: list of ExtensionElement instances which are
          children of this element.
      extension_attributes: dict of strings which are the values for
          additional XML attributes of this element.
    """
    self.author = author or []
    self.category = category or []
    self.contributor = contributor or []
    self.generator = generator
    self.icon = icon
    self.id = atom_id
    self.link = link or []
    self.logo = logo
    self.rights = rights
    self.subtitle = subtitle
    self.title = title
    self.updated = updated
    self.entry = entry or []
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def FeedFromString(xml_string):
  """Parse an atom Feed object from an XML string.

  Args:
    xml_string: str The XML serialization of an atom:feed element.

  Returns:
    A Feed instance populated from the XML string.
  """
  return CreateClassFromXMLString(Feed, xml_string)
class ExtensionElement(object):
"""Represents extra XML elements contained in Atom classes."""
def __init__(self, tag, namespace=None, attributes=None,
children=None, text=None):
"""Constructor for EtensionElement
Args:
namespace: string (optional) The XML namespace for this element.
tag: string (optional) The tag (without the namespace qualifier) for
this element. To reconstruct the full qualified name of the element,
combine this tag with the namespace.
attributes: dict (optinal) The attribute value string pairs for the XML
attributes of this element.
children: list (optional) A list of ExtensionElements which represent
the XML child nodes of this element.
"""
self.namespace = namespace
self.tag = tag
self.attributes = attributes or {}
self.children = children or []
self.text = text
def ToString(self):
element_tree = self._TransferToElementTree(ElementTree.Element('tag__'))
return ElementTree.tostring(element_tree, encoding=str)
def _TransferToElementTree(self, element_tree):
if self.tag is None:
return None
if self.namespace is not None:
element_tree.tag = '{%s}%s' % (self.namespace, self.tag)
else:
element_tree.tag = self.tag
for key, value in self.attributes.items():
element_tree.attrib[key] = value
for child in self.children:
child._BecomeChildElement(element_tree)
element_tree.text = self.text
return element_tree
def _BecomeChildElement(self, element_tree):
"""Converts this object into an etree element and adds it as a child node.
Adds self to the ElementTree. This method is required to avoid verbose XML
which constantly redefines the namespace.
Args:
element_tree: ElementTree._Element The element to which this object's XML
will be added.
"""
new_element = ElementTree.Element('tag__') # uh, uhm... empty tag name - sorry google, this is bogus?
element_tree.append(new_element)
self._TransferToElementTree(new_element)
def FindChildren(self, tag=None, namespace=None):
"""Searches child nodes for objects with the desired tag/namespace.
Returns a list of extension elements within this object whose tag
and/or namespace match those passed in. To find all children in
a particular namespace, specify the namespace but not the tag name.
If you specify only the tag, the result list may contain extension
elements in multiple namespaces.
Args:
tag: str (optional) The desired tag
namespace: str (optional) The desired namespace
Returns:
A list of elements whose tag and/or namespace match the parameters
values
"""
results = []
if tag and namespace:
for element in self.children:
if element.tag == tag and element.namespace == namespace:
results.append(element)
elif tag and not namespace:
for element in self.children:
if element.tag == tag:
results.append(element)
elif namespace and not tag:
for element in self.children:
if element.namespace == namespace:
results.append(element)
else:
for element in self.children:
results.append(element)
return results
def ExtensionElementFromString(xml_string):
  """Deserialize an ExtensionElement from its XML string representation."""
  return _ExtensionElementFromElementTree(ElementTree.fromstring(xml_string))
def _ExtensionElementFromElementTree(element_tree):
  """Recursively convert an etree element into an ExtensionElement."""
  # etree qualifies namespaced tags as '{namespace}tag'; split the parts.
  qualified_tag = element_tree.tag
  if '}' in qualified_tag:
    namespace_part, _, tag = qualified_tag.partition('}')
    namespace = namespace_part[1:]
  else:
    namespace = None
    tag = qualified_tag
  extension = ExtensionElement(namespace=namespace, tag=tag)
  extension.attributes.update(element_tree.attrib)
  extension.children = [_ExtensionElementFromElementTree(child)
                        for child in element_tree]
  extension.text = element_tree.text
  return extension
def deprecated(warning=None):
  """Decorator to raise warning each time the function is called.

  Args:
    warning: The warning message to be displayed as a string (optional).

  Returns:
    A decorator which wraps functions so that calling them emits a
    DeprecationWarning before delegating to the original function.
  """
  import functools
  warning = warning or ''
  # This closure is what is returned from the deprecated function.
  def mark_deprecated(f):
    # functools.wraps preserves __name__, __doc__, __module__ etc. of the
    # wrapped function, so decorated functions are not all masked as
    # 'deprecated_function'.
    @functools.wraps(f)
    def deprecated_function(*args, **kwargs):
      warnings.warn(warning, DeprecationWarning, stacklevel=2)
      return f(*args, **kwargs)
    return deprecated_function
  return mark_deprecated
| 32.986169 | 105 | 0.711204 |
acf810dda9617e848003e1af532268730d8a6d3f | 511 | py | Python | solutions/020_solution_01.py | UFResearchComputing/py4ai | db7f80614f26274ec18556d56ea9f549c463165a | [
"CC-BY-4.0"
] | null | null | null | solutions/020_solution_01.py | UFResearchComputing/py4ai | db7f80614f26274ec18556d56ea9f549c463165a | [
"CC-BY-4.0"
] | null | null | null | solutions/020_solution_01.py | UFResearchComputing/py4ai | db7f80614f26274ec18556d56ea9f549c463165a | [
"CC-BY-4.0"
] | 1 | 2021-04-27T09:50:54.000Z | 2021-04-27T09:50:54.000Z | # Command # Value of x # Value of y # Value of swap #
x = 1.0 # 1.0 # not defined # not defined #
y = 3.0 # 1.0 # 3.0 # not defined #
swap = x # 1.0 # 3.0 # 1.0 #
x = y # 3.0 # 3.0 # 1.0 #
y = swap # 3.0 # 1.0 # 1.0 #
# These three lines exchange the values in `x` and `y` using the `swap`
# Variable for temporary storage. This is a fairly common programming idiom. | 56.777778 | 76 | 0.428571 |
acf8120d666c6b03b9486a91d44e051d277b56c6 | 2,312 | py | Python | SmartAnno/test/TestRBDocumentClassifier.py | jianlins/SmartAnno | e0925c45853967683aa2955588437bd120fe39c3 | [
"Apache-2.0"
] | null | null | null | SmartAnno/test/TestRBDocumentClassifier.py | jianlins/SmartAnno | e0925c45853967683aa2955588437bd120fe39c3 | [
"Apache-2.0"
] | null | null | null | SmartAnno/test/TestRBDocumentClassifier.py | jianlins/SmartAnno | e0925c45853967683aa2955588437bd120fe39c3 | [
"Apache-2.0"
] | null | null | null | from SmartAnno.utils.ConfigReader import ConfigReader
from SmartAnno.models.rulebased.RBDocumentClassifier import RBDocumentClassifierFactory
ConfigReader()
filters={'TypeA': ['patient'], 'TypeB': ['family'], 'Irrelevant': []}
rb_classifier = RBDocumentClassifierFactory.genDocumentClassifier(filters)
print(rb_classifier.classify('The Patient is ok'))
txt='''Record date: 2126-11-07\n\n\n\n\tCARDIOLOGY\n\n\t\n\n\tFAMILY HEALTH CLINIC\n\n\t\n\n\n\n\tInterval History:\n\n Dr. Devan Chandler\n\n100 CRP\n\n\n\nRE: Bruce Corona\n\nFHC Unit #: 795-76-17\n\n\n\nDear Dunham:\n\n\n\nI had the pleasure of seeing Bruce Corona in the Cardiology Department office for a f/u visit. Since I last saw him, he continues to complain of dyspnea. An ETT was negative for ischemia. PFTs were not really useful. CT of the chest showed scarring/fibrosis. His NT-proBNP has been on the marginal side, though he is without evidence for overt CHF.\n\n\n\nMedications: Aspirin 325 mg qd, Flomax 0.4 mg qd, Lopressor 25 mg bid, Lipitor 10 mg qd, Lisinopril 20 mg qd, Colace 100 mg tid.\n\n\n\nPhysical examination: Reveals him to be well appearing. His BP is 120/70 and his heart rate is 60 and regular. He is 170 pounds. There is no jugular venous distention and carotid pulses are 2+ bilaterally without bruits. His lungs are clear throughout, and notably demonstrate only very slight dullness at the left base. His sternotomy is healing well and is stable. His apical impulse is non-displaced with a slightly irregular rate and rhythm, a normal S1 and S2. He has an S3 gallop. No murmur. His abdomen is benign without hepatosplenomegaly, bruits, or a dilated aorta. There is no pedal edema and posterior tibial pulses are 2+ bilaterally.\n\n\n\nEKG: NSR with a 1st degree AV block. He has a LBBB, which is chronic.\n\n\n\nImpression:\n\n\n\n1.CAD, s/p MI: currently stable.\n\n\n\n2. Hypertension: under good control.\n\n\n\n3.Hypercholesterolemia: controlled\n\n\n\n4. Dyspnea: I suspect he has an element of diastolic dysfunction. I will restart low-dose lasix.\n\n\n\nThank you very much for the opportunity to participate in his care.\n\n\n\nWith best regards,\n\n\n\nBruce D. Brian, Jr., M.D.\n\n\n\n\tSigned electronically by Bruce D Brian MD on Nov 7, 2126'''
print(rb_classifier.classify(txt)) | 231.2 | 1,921 | 0.764706 |
acf814ddcb60ab327de1130d7da10aa577e9fc44 | 1,511 | py | Python | src/sqlfluff/rules/L014.py | jaypark72/sqlfluff | 636bf5e09d9b42638a1f44119a02010e78ea21a3 | [
"MIT"
] | 2 | 2021-08-04T08:58:33.000Z | 2021-08-04T18:54:06.000Z | src/sqlfluff/rules/L014.py | jaypark72/sqlfluff | 636bf5e09d9b42638a1f44119a02010e78ea21a3 | [
"MIT"
] | null | null | null | src/sqlfluff/rules/L014.py | jaypark72/sqlfluff | 636bf5e09d9b42638a1f44119a02010e78ea21a3 | [
"MIT"
] | 1 | 2021-07-03T12:56:56.000Z | 2021-07-03T12:56:56.000Z | """Implementation of Rule L014."""
from typing import Tuple, List
from sqlfluff.core.rules.base import LintResult
from sqlfluff.core.rules.doc_decorators import (
document_configuration,
document_fix_compatible,
)
from sqlfluff.rules.L010 import Rule_L010
def unquoted_ids_policy_applicable(policy, parent_stack):
    """Does `unquoted_identifiers_policy` apply to this segment?"""
    # "all" applies unconditionally.
    if policy == "all":
        return True
    # An alias context is identified by the segment's immediate parent.
    in_alias_context = bool(parent_stack) and parent_stack[-1].is_type(
        "alias_expression", "column_definition", "with_compound_statement"
    )
    if policy == "aliases":
        return in_alias_context
    if policy == "column_aliases":
        # Column aliases are aliases that do not occur inside a FROM clause.
        in_from_clause = any(p.is_type("from_clause") for p in parent_stack)
        return in_alias_context and not in_from_clause
    # Any other policy value does not apply.
    return False
@document_configuration
@document_fix_compatible
class Rule_L014(Rule_L010):
    """Inconsistent capitalisation of unquoted identifiers.

    The functionality for this rule is inherited from :obj:`Rule_L010`.
    """

    _target_elems: List[Tuple[str, str]] = [("name", "naked_identifier")]
    config_keywords = ["extended_capitalisation_policy", "unquoted_identifiers_policy"]

    def _eval(self, segment, memory, parent_stack, **kwargs):
        # Only defer to L010's capitalisation logic when the configured
        # policy covers this segment; otherwise report nothing.
        if not unquoted_ids_policy_applicable(
            self.unquoted_identifiers_policy, parent_stack
        ):
            return LintResult(memory=memory)
        return super()._eval(segment, memory, parent_stack, **kwargs)
acf815cea8f3f6843f4140cabaa5863552e28c6f | 13,873 | py | Python | scripts/word_embedding_evaluation/word_embedding_evaluation.py | zhiheng-huang/gluon-nlp | 302b0bb64301a2a94af07fb83dbf2e600a421c17 | [
"Apache-2.0"
] | null | null | null | scripts/word_embedding_evaluation/word_embedding_evaluation.py | zhiheng-huang/gluon-nlp | 302b0bb64301a2a94af07fb83dbf2e600a421c17 | [
"Apache-2.0"
] | null | null | null | scripts/word_embedding_evaluation/word_embedding_evaluation.py | zhiheng-huang/gluon-nlp | 302b0bb64301a2a94af07fb83dbf2e600a421c17 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=eval-used, logging-too-many-args
"""Word Embeddings
===============
This example shows how to load and perform intrinsic evaluation of word
embeddings using a variety of datasets all part of the Gluon NLP Toolkit.
"""
import argparse
import itertools
import logging
import sys
import json
import mxnet as mx
import numpy as np
import gluonnlp as nlp
try:
import progressbar
except ImportError:
logging.warning(
'progressbar not installed. '
' Install via `pip install progressbar2` for better usability.')
progressbar = None
try:
from scipy import stats
except ImportError:
stats = None
def get_args():
    """Construct the argument parser.

    Returns:
        argparse.Namespace with the parsed command line options.
    """
    parser = argparse.ArgumentParser(
        description='Word embedding training with Gluon.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Embeddings arguments
    group = parser.add_argument_group('Embedding arguments')
    group.add_argument('--embedding-name', type=str, default='fasttext',
                       help=('Name of embedding type to load. '
                             'Valid entries: {}'.format(
                                 ', '.join(
                                     nlp.embedding.list_sources().keys()))))
    group.add_argument('--embedding-source', type=str, default='wiki.simple',
                       help=('Source from which to initialize the embedding.'
                             'Pass --list-embedding-sources to get a list of '
                             'valid sources for a given --embedding-name.'))
    group.add_argument('--list-embedding-sources', action='store_true')

    # Evaluation arguments
    group = parser.add_argument_group('Evaluation arguments')
    group.add_argument('--ignore-oov', action='store_true',
                       help='Drop OOV words from evaluation datasets.')
    ## Datasets
    group.add_argument(
        '--similarity-datasets', type=str,
        default=nlp.data.word_embedding_evaluation.word_similarity_datasets,
        nargs='*',
        help='Word similarity datasets to use for intrinsic evaluation.')
    group.add_argument(
        '--similarity-functions', type=str,
        default=nlp.embedding.evaluation.list_evaluation_functions(
            'similarity'), nargs='+',
        help='Word similarity functions to use for intrinsic evaluation.')
    group.add_argument(
        '--analogy-datasets', type=str,
        default=nlp.data.word_embedding_evaluation.word_analogy_datasets,
        nargs='*',
        # Fixed copy-paste: this previously claimed "similarity" datasets.
        help='Word analogy datasets to use for intrinsic evaluation.')
    group.add_argument(
        '--analogy-functions', type=str,
        default=nlp.embedding.evaluation.list_evaluation_functions('analogy'),
        nargs='+',
        help='Word analogy functions to use for intrinsic evaluation. ')
    ## Analogy evaluation specific arguments
    group.add_argument(
        '--analogy-dont-exclude-question-words', action='store_true',
        # argparse %-formats help strings, so a literal percent sign must be
        # escaped as '%%' to avoid garbled --help output.
        help=('Do not exclude input words from valid output analogies. '
              'By default input words are excluded; the performance of word '
              'embeddings on the analogy task '
              'is around 0%% accuracy if input words are not excluded.'))
    group.add_argument(
        '--analogy-max-vocab', type=int, default=None,
        help=('Only retain the X first tokens from the pretrained embedding. '
              'The tokens are ordered by decreasing frequency.'
              'As the analogy task takes the whole vocabulary into account, '
              'removing very infrequent words improves performance.'))

    # Computation options
    group = parser.add_argument_group('Computation arguments')
    group.add_argument('--batch-size', type=int, default=32,
                       help='Batch size to use on analogy task.'
                       'Decrease batch size if evaluation crashes.')
    group.add_argument('--gpu', type=int,
                       help=('Number (index) of GPU to run on, e.g. 0. '
                             'If not specified, uses CPU.'))
    group.add_argument('--dont-hybridize', action='store_true',
                       help='Disable hybridization of gluon HybridBlocks.')

    # Logging options
    group = parser.add_argument_group('Logging arguments')
    group.add_argument(
        '--log', type=str, default='results.csv', help='Path to logfile.'
        'Results of evaluation runs are written to there in a CSV format.')

    args = parser.parse_args()
    return args
###############################################################################
# Parse arguments
###############################################################################
def validate_args(args):
    """Validate provided arguments and act on --help."""
    if args.list_embedding_sources:
        print('Listing all sources for {} embeddings.'.format(
            args.embedding_name))
        print('Specify --embedding-name if you wish to '
              'list sources of other embeddings')
        print('')
        sources = nlp.embedding.list_sources()
        if args.embedding_name not in sources.keys():
            print('Invalid embedding name.')
            print('Only {} are supported.'.format(', '.join(
                sources.keys())))
            sys.exit(1)
        print(' '.join(sources[args.embedding_name]))
        sys.exit(0)

    print(args)

    # Check correctness of the requested dataset names for both tasks
    # (case-insensitive comparison against the supported sets).
    checks = (
        (args.similarity_datasets,
         nlp.data.word_embedding_evaluation.word_similarity_datasets),
        (args.analogy_datasets,
         nlp.data.word_embedding_evaluation.word_analogy_datasets),
    )
    for requested, supported in checks:
        supported_lower = set(map(str.lower, supported))
        for dataset_name in requested:
            if dataset_name.lower() not in supported_lower:
                print('{} is not a supported dataset.'.format(dataset_name))
                sys.exit(1)
def get_context(args):
    """Return the mxnet context selected via --gpu (CPU when unset)."""
    no_gpu_requested = args.gpu is None or args.gpu == ''
    return mx.cpu() if no_gpu_requested else mx.gpu(int(args.gpu))
###############################################################################
# Result logging
###############################################################################
def log_result(args, evaluation_type, dataset, kwargs, evaluation, value,
               num_samples):
    """Append one evaluation result as a tab-separated row to args.log.

    Does nothing when args.log is empty/falsy. All fields are written as-is,
    so callers are expected to pass strings.
    """
    if not args.log:
        return
    row = (evaluation_type, dataset, kwargs, args.embedding_name,
           args.embedding_source, evaluation, value, num_samples)
    with open(args.log, 'a') as logfile:
        logfile.write('\t'.join(row) + '\n')
###############################################################################
# Evaluation
###############################################################################
def evaluate_similarity(args, token_embedding, dataset,
                        similarity_function='CosineSimilarity'):
    """Evaluation on similarity task.

    Computes the Spearman rank correlation between human similarity scores
    and embedding-based similarity scores over the given dataset.

    Args:
        args: Parsed command line options (uses ignore_oov, dont_hybridize
            and the GPU selection consumed by get_context).
        token_embedding: gluonnlp TokenEmbedding to evaluate.
        dataset: Sequence of (word1, word2, human_score) triples.
        similarity_function: Name of the similarity function to use.

    Returns:
        Tuple of (spearman_correlation, number_of_evaluated_pairs).
    """
    # Closed vocabulary: Only need the words occuring in the dataset
    counter = nlp.data.utils.Counter(w for wpair in dataset for w in wpair[:2])
    vocab = nlp.vocab.Vocab(counter)
    vocab.set_embedding(token_embedding)
    # Optionally drop pairs containing out-of-vocabulary words.
    if args.ignore_oov:
        initial_length = len(dataset)
        dataset = [d for d in dataset if d[0] in vocab and d[1] in vocab]
        num_dropped = initial_length - len(dataset)
        if num_dropped:
            logging.warning('Dropped %s pairs from %s as the were OOV.',
                            num_dropped, dataset.__class__.__name__)
    # Map words to vocabulary indices; keep the human score alongside.
    dataset_coded = [[vocab[d[0]], vocab[d[1]], d[2]] for d in dataset]
    words1, words2, scores = zip(*dataset_coded)
    evaluator = nlp.embedding.evaluation.WordEmbeddingSimilarity(
        idx_to_vec=vocab.embedding.idx_to_vec,
        similarity_function=similarity_function)
    context = get_context(args)
    evaluator.initialize(ctx=context)
    if not args.dont_hybridize:
        evaluator.hybridize()
    pred_similarity = evaluator(
        mx.nd.array(words1, ctx=context), mx.nd.array(words2, ctx=context))
    # Rank correlation between predicted similarities and human judgements.
    sr = stats.spearmanr(pred_similarity.asnumpy(), np.array(scores))
    logging.info('Spearman rank correlation on %s: %s',
                 dataset.__class__.__name__, sr.correlation)
    return sr.correlation, len(dataset)
def evaluate_analogy(args, token_embedding, dataset,
                     analogy_function='ThreeCosMul'):
    """Evaluation on analogy task.

    Measures accuracy of predicting word4 from (word1, word2, word3)
    analogies over the whole embedding vocabulary.

    Args:
        args: Parsed command line options (uses analogy_max_vocab,
            ignore_oov, batch_size, analogy_dont_exclude_question_words,
            dont_hybridize and the GPU selection consumed by get_context).
        token_embedding: gluonnlp TokenEmbedding to evaluate.
        dataset: Sequence of (word1, word2, word3, word4) analogy tuples.
        analogy_function: Name of the analogy function to use.

    Returns:
        Tuple of (accuracy, number_of_evaluated_analogies).
    """
    # Open vocabulary: Use all known words
    if args.analogy_max_vocab:
        counter = nlp.data.Counter(token_embedding.idx_to_token[:args.analogy_max_vocab])
    else:
        counter = nlp.data.Counter(token_embedding.idx_to_token)
    vocab = nlp.vocab.Vocab(counter)
    vocab.set_embedding(token_embedding)
    # Optionally drop analogies with any out-of-vocabulary word.
    if args.ignore_oov:
        initial_length = len(dataset)
        dataset = [
            d for d in dataset if d[0] in vocab and d[1] in vocab
            and d[2] in vocab and d[3] in vocab
        ]
        num_dropped = initial_length - len(dataset)
        if num_dropped:
            logging.warning('Dropped %s pairs from %s as the were OOV.',
                            num_dropped, dataset.__class__.__name__)
    # Map all four words of every analogy to vocabulary indices.
    dataset_coded = [[vocab[d[0]], vocab[d[1]], vocab[d[2]], vocab[d[3]]]
                     for d in dataset]
    dataset_coded_batched = mx.gluon.data.DataLoader(
        dataset_coded, batch_size=args.batch_size)
    exclude_question_words = not args.analogy_dont_exclude_question_words
    evaluator = nlp.embedding.evaluation.WordEmbeddingAnalogy(
        idx_to_vec=vocab.embedding.idx_to_vec,
        exclude_question_words=exclude_question_words,
        analogy_function=analogy_function)
    context = get_context(args)
    evaluator.initialize(ctx=context)
    if not args.dont_hybridize:
        evaluator.hybridize()
    acc = mx.metric.Accuracy()
    # Show a progress bar when the optional dependency is available.
    if progressbar is not None:
        dataset_coded_batched = progressbar.progressbar(dataset_coded_batched)
    for batch in dataset_coded_batched:
        batch = batch.as_in_context(context)
        words1, words2, words3, words4 = (batch[:, 0], batch[:, 1],
                                          batch[:, 2], batch[:, 3])
        pred_idxs = evaluator(words1, words2, words3)
        # Only the top-1 prediction counts towards accuracy.
        acc.update(pred_idxs[:, 0], words4.astype(np.float32))
    logging.info('Accuracy on %s: %s', dataset.__class__.__name__,
                 acc.get()[1])
    return acc.get()[1], len(dataset)
def evaluate(args):
    """Main evaluation function.

    Loads the requested pretrained embedding and runs every configured
    similarity and analogy evaluation, logging each result via log_result.
    """
    # Load pretrained embeddings
    print('Loading embedding ', args.embedding_name, ' from ',
          args.embedding_source)
    token_embedding = nlp.embedding.create(args.embedding_name,
                                           source=args.embedding_source)

    # Similarity based evaluation
    for dataset_name in args.similarity_datasets:
        # scipy is an optional dependency, required only for spearmanr.
        if stats is None:
            raise RuntimeError(
                'Similarity evaluation requires scipy.'
                'You may install scipy via `pip install scipy`.')

        logging.info('Starting evaluation of %s', dataset_name)
        parameters = nlp.data.list_datasets(dataset_name)
        # Evaluate every combination of the dataset's parameter values.
        for key_values in itertools.product(*parameters.values()):
            kwargs = dict(zip(parameters.keys(), key_values))
            logging.info('Evaluating with %s', kwargs)
            dataset = nlp.data.create(dataset_name, **kwargs)
            for similarity_function in args.similarity_functions:
                logging.info('Evaluating with %s', similarity_function)
                result, num_samples = evaluate_similarity(
                    args, token_embedding, dataset, similarity_function)
                log_result(args, 'similarity', dataset.__class__.__name__,
                           json.dumps(kwargs), similarity_function,
                           str(result), str(num_samples))

    # Analogy based evaluation
    for dataset_name in args.analogy_datasets:
        logging.info('Starting evaluation of %s', dataset_name)
        parameters = nlp.data.list_datasets(dataset_name)
        # Evaluate every combination of the dataset's parameter values.
        for key_values in itertools.product(*parameters.values()):
            kwargs = dict(zip(parameters.keys(), key_values))
            logging.info('Evaluating with %s', kwargs)
            dataset = nlp.data.create(dataset_name, **kwargs)
            for analogy_function in args.analogy_functions:
                logging.info('Evaluating with %s', analogy_function)
                result, num_samples = evaluate_analogy(
                    args, token_embedding, dataset, analogy_function)
                log_result(args, 'analogy', dataset.__class__.__name__,
                           json.dumps(kwargs), analogy_function, str(result),
                           str(num_samples))
# Script entry point: enable INFO-level logging, then parse, validate and
# run the requested evaluations.
if __name__ == '__main__':
    logging.basicConfig()
    logging.getLogger().setLevel(logging.INFO)
    args_ = get_args()
    validate_args(args_)
    evaluate(args_)
| 41.044379 | 89 | 0.622504 |
acf816aa61a06f02894f3b140b475906482e90f8 | 866 | py | Python | optking/tests/test_linesearch.py | psi-rking/optking | 6f113db58e733b6a56929a2b890f9dae0092995c | [
"BSD-3-Clause"
] | 12 | 2018-02-06T22:02:12.000Z | 2022-01-06T09:26:44.000Z | optking/tests/test_linesearch.py | psi-rking/optking | 6f113db58e733b6a56929a2b890f9dae0092995c | [
"BSD-3-Clause"
] | 51 | 2017-11-22T16:00:02.000Z | 2021-12-23T20:49:56.000Z | optking/tests/test_linesearch.py | psi-rking/optking | 6f113db58e733b6a56929a2b890f9dae0092995c | [
"BSD-3-Clause"
] | 9 | 2017-11-21T19:55:46.000Z | 2022-02-28T06:09:11.000Z | #! Linesearch tests
# memory 8gb
refnucenergy = 41.670589 # Eh
refenergy = -1053.880393 # Eh
import optking
import psi4
def test_linesearch():
Ar2 = psi4.geometry(
"""
Ar
Ar 1 5.0
"""
)
psi4.core.clean_options()
psi4_options = {"basis": "cc-pvdz", "d_convergence": 10, "geom_maxiter": 20, "g_convergence": "gau_tight"}
psi4.set_options(psi4_options)
# "linesearch" is not currrently recognized by psi4 read_options.
json_output = optking.optimize_psi4("mp2", **{"step_type": "linesearch"})
print(json_output)
E = json_output["energies"][-1]
nucenergy = json_output["trajectory"][-1]["properties"]["nuclear_repulsion_energy"]
assert psi4.compare_values(nucenergy, nucenergy, 3, "Nuclear repulsion energy") # TEST
assert psi4.compare_values(refenergy, E, 1, "Reference energy") # TEST
| 27.935484 | 110 | 0.67321 |
acf816fc775690439c7da0f819c2b0820945ae23 | 5,315 | py | Python | ms1searchpy/ms1todiffacto.py | markmipt/ms1searchpy | 1fae3ba9ca25ac151b34110d333820f0a063ee11 | [
"Apache-2.0"
] | 6 | 2020-01-28T12:29:02.000Z | 2022-02-01T14:43:44.000Z | ms1searchpy/ms1todiffacto.py | markmipt/ms1searchpy | 1fae3ba9ca25ac151b34110d333820f0a063ee11 | [
"Apache-2.0"
] | 3 | 2021-07-30T01:28:05.000Z | 2021-11-25T09:14:31.000Z | ms1searchpy/ms1todiffacto.py | markmipt/ms1searchpy | 1fae3ba9ca25ac151b34110d333820f0a063ee11 | [
"Apache-2.0"
] | 2 | 2020-07-23T10:01:10.000Z | 2021-05-04T12:46:04.000Z | from __future__ import division
import argparse
import pkg_resources
import pandas as pd
import ast
import subprocess
import numpy as np
def run():
    """Collect ms1searchpy results, build Diffacto inputs and run Diffacto.

    Reads up to 12 sample groups (-S1..-S12) of *_proteins.tsv files, gathers
    the peptide intensities from the matching *_PFMs.tsv files, writes the
    peptide matrix (args['peptides']) and the sample map (args['samples']),
    then invokes the external Diffacto executable via subprocess.
    """
    parser = argparse.ArgumentParser(
        description='run diffacto for ms1searchpy results',
        epilog='''

Example usage
-------------
$ ms1todiffacto -S1 sample1_1_proteins.tsv sample1_n_proteins.tsv -S2 sample2_1_proteins.tsv sample2_n_proteins.tsv
-------------
''',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('-dif', help='path to Diffacto', required=True)
    parser.add_argument('-S1', nargs='+', help='input files for S1 sample', required=True)
    parser.add_argument('-S2', nargs='+', help='input files for S2 sample', required=True)
    parser.add_argument('-S3', nargs='+', help='input files for S3 sample')
    parser.add_argument('-S4', nargs='+', help='input files for S4 sample')
    parser.add_argument('-S5', nargs='+', help='input files for S5 sample')
    parser.add_argument('-S6', nargs='+', help='input files for S6 sample')
    parser.add_argument('-S7', nargs='+', help='input files for S7 sample')
    parser.add_argument('-S8', nargs='+', help='input files for S8 sample')
    parser.add_argument('-S9', nargs='+', help='input files for S9 sample')
    parser.add_argument('-S10', nargs='+', help='input files for S10 sample')
    parser.add_argument('-S11', nargs='+', help='input files for S11 sample')
    parser.add_argument('-S12', nargs='+', help='input files for S12 sample')
    parser.add_argument('-peptides', help='name of output peptides file', default='peptides.txt')
    parser.add_argument('-samples', help='name of output samples file', default='sample.txt')
    parser.add_argument('-allowed_prots', help='path to allowed prots', default='')
    parser.add_argument('-out', help='name of diffacto output file', default='diffacto_out.txt')
    parser.add_argument('-norm', help='normalization method. Can be average, median, GMM or None', default='None')
    parser.add_argument('-impute_threshold', help='impute_threshold for missing values fraction', default='0.75')
    parser.add_argument('-min_samples', help='minimum number of samples for peptide usage', default='3')
    args = vars(parser.parse_args())

    replace_label = '_proteins.tsv'
    df_final = False
    allowed_prots = set()
    # NOTE(review): allowed_peptides is never used below -- dead variable.
    allowed_peptides = set()

    # Build the allowed protein set, either from every *_proteins.tsv input
    # or from an explicit list file given via -allowed_prots.
    if not args['allowed_prots']:
        for i in range(1, 13, 1):
            sample_num = 'S%d' % (i, )
            if args[sample_num]:
                for z in args[sample_num]:
                    df0 = pd.read_table(z)
                    allowed_prots.update(df0['dbname'])
    else:
        for prot in open(args['allowed_prots'], 'r'):
            allowed_prots.add(prot.strip())

    # Collect per-file peptide intensities and outer-merge them on peptide
    # sequence into one wide table (one intensity column per input file).
    for i in range(1, 13, 1):
        sample_num = 'S%d' % (i, )
        if args[sample_num]:
            for z in args[sample_num]:
                label = z.replace(replace_label, '')
                # NOTE(review): df1 is read but never used -- wasted I/O.
                df1 = pd.read_table(z)
                df3 = pd.read_table(z.replace(replace_label, '_PFMs.tsv'))
                # Debug output of the PFMs table being processed.
                print(z)
                print(z.replace(replace_label, '_PFMs.tsv'))
                print(df3.shape)
                print(df3.columns)
                # Keep only PFMs mapping to allowed proteins, and restrict
                # each row's protein list to the allowed ones.
                df3 = df3[df3['proteins'].apply(lambda x: any(z in allowed_prots for z in x.split(';')))]
                df3['proteins'] = df3['proteins'].apply(lambda x: ';'.join([z for z in x.split(';') if z in allowed_prots]))
                # Keep the highest-intensity row per peptide sequence.
                df3 = df3.sort_values(by='Intensity', ascending=False)
                df3 = df3.drop_duplicates(subset='sequence')
                df3 = df3.explode('proteins')
                df3[label] = df3['Intensity']
                df3['protein'] = df3['proteins']
                df3['peptide'] = df3['sequence']
                df3 = df3[['peptide', 'protein', label]]
                if df_final is False:
                    label_basic = label
                    df_final = df3.reset_index(drop=True)
                else:
                    df_final = df_final.reset_index(drop=True).merge(df3.reset_index(drop=True), on='peptide', how='outer')
                    # After the merge, reconcile the duplicated protein
                    # columns (protein_x/protein_y) into a single column.
                    df_final.protein_x.fillna(value=df_final.protein_y, inplace=True)
                    df_final['protein'] = df_final['protein_x']
                    df_final = df_final.drop(columns=['protein_x', 'protein_y'])
                    print(df_final.columns)

    # Reorder columns so 'proteins' comes first, as Diffacto expects.
    df_final = df_final.set_index('peptide')
    df_final['proteins'] = df_final['protein']
    df_final = df_final.drop(columns=['protein'])
    cols = df_final.columns.tolist()
    cols.remove('proteins')
    cols.insert(0, 'proteins')
    df_final = df_final[cols]
    # NOTE(review): fillna without inplace/assignment is a no-op here --
    # the NaNs are written out unchanged. Probably intended
    # df_final = df_final.fillna(value='') -- confirm before changing.
    df_final.fillna(value='')
    df_final.to_csv(args['peptides'], sep=',')

    # Write the sample map: one "<file label>\t<group>" row per input file.
    out = open(args['samples'], 'w')
    for i in range(1, 13, 1):
        sample_num = 'S%d' % (i, )
        if args[sample_num]:
            for z in args[sample_num]:
                label = z.replace(replace_label, '')
                out.write(label + '\t' + sample_num + '\n')
    out.close()

    # Hand off to the external Diffacto executable.
    subprocess.call([args['dif'], '-i', args['peptides'], '-samples', args['samples'], '-out',\
                     args['out'], '-normalize', args['norm'], '-impute_threshold', args['impute_threshold'], '-min_samples', args['min_samples']])
# Script entry point.
if __name__ == '__main__':
    run()
| 41.850394 | 130 | 0.604892 |
acf817785fff8ff04a27d96f10428e979b73d446 | 25,897 | py | Python | python/ccxt/async_support/bigone.py | eerenyuan/ccxt | 9b9131f6f131a880c2c8e7bfc8eb2b76be8d8f96 | [
"MIT"
] | null | null | null | python/ccxt/async_support/bigone.py | eerenyuan/ccxt | 9b9131f6f131a880c2c8e7bfc8eb2b76be8d8f96 | [
"MIT"
] | 8 | 2018-09-04T05:28:15.000Z | 2018-12-21T08:10:35.000Z | python/ccxt/async_support/bigone.py | eerenyuan/ccxt | 9b9131f6f131a880c2c8e7bfc8eb2b76be8d8f96 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
    basestring  # Python 2: the builtin exists, so this lookup succeeds
except NameError:
    basestring = str  # Python 3: no basestring builtin -- alias it to str
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ExchangeNotAvailable
class bigone (Exchange):
    def describe(self):
        """Return the static description of the BigONE exchange.

        Deep-merges the exchange-specific metadata (capability flags, REST
        endpoints, fee schedule and error mappings) over the generic
        defaults supplied by the ``Exchange`` base class.
        """
        return self.deep_extend(super(bigone, self).describe(), {
            'id': 'bigone',
            'name': 'BigONE',
            'countries': 'GB',
            'version': 'v2',
            # unified-API capability flags advertised to ccxt users
            'has': {
                'fetchTickers': True,
                'fetchOpenOrders': True,
                'fetchMyTrades': True,
                'fetchDepositAddress': True,
                'withdraw': True,
                'fetchOHLCV': False,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/42803606-27c2b5ec-89af-11e8-8d15-9c8c245e8b2c.jpg',
                'api': {
                    'public': 'https://big.one/api/v2',
                    'private': 'https://big.one/api/v2/viewer',
                },
                'www': 'https://big.one',
                'doc': 'https://open.big.one/docs/api.html',
                'fees': 'https://help.big.one/hc/en-us/articles/115001933374-BigONE-Fee-Policy',
                'referral': 'https://b1.run/users/new?code=D3LLBVFT',
            },
            # endpoint templates; {placeholders} are filled by implode_params in sign()
            'api': {
                'public': {
                    'get': [
                        'ping',  # timestamp in nanoseconds
                        'markets',
                        'markets/{symbol}/depth',
                        'markets/{symbol}/trades',
                        'markets/{symbol}/ticker',
                        'orders',
                        'orders/{id}',
                        'tickers',
                        'trades',
                    ],
                },
                'private': {
                    'get': [
                        'accounts',
                        'orders',
                        'orders/{order_id}',
                    ],
                    'post': [
                        'orders',
                        'orders/{order_id}/cancel',
                        'orders/cancel_all',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': 0.1 / 100,
                    'taker': 0.1 / 100,
                },
                'funding': {
                    # HARDCODING IS DEPRECATED THE FEES BELOW ARE TO BE REMOVED SOON
                    'withdraw': {
                        'BTC': 0.002,
                        'ETH': 0.01,
                        'EOS': 0.01,
                        'ZEC': 0.002,
                        'LTC': 0.01,
                        'QTUM': 0.01,
                        # 'INK': 0.01 QTUM,
                        # 'BOT': 0.01 QTUM,
                        'ETC': 0.01,
                        'GAS': 0.0,
                        'BTS': 1.0,
                        'GXS': 0.1,
                        'BITCNY': 1.0,
                    },
                },
            },
            # maps raw API error codes / detail strings to ccxt exception classes
            # (consumed by handle_errors below)
            'exceptions': {
                'codes': {
                    '401': AuthenticationError,
                },
                'detail': {
                    'Internal server error': ExchangeNotAvailable,
                },
            },
        })
async def fetch_markets(self):
response = await self.publicGetMarkets()
markets = response['data']
result = []
self.options['marketsByUuid'] = {}
for i in range(0, len(markets)):
#
# { uuid: "550b34db-696e-4434-a126-196f827d9172",
# quoteScale: 3,
# quoteAsset: { uuid: "17082d1c-0195-4fb6-8779-2cdbcb9eeb3c",
# symbol: "USDT",
# name: "TetherUS" },
# name: "BTC-USDT",
# baseScale: 5,
# baseAsset: { uuid: "0df9c3c3-255a-46d7-ab82-dedae169fba9",
# symbol: "BTC",
# name: "Bitcoin" } }}
#
market = markets[i]
id = market['name']
uuid = market['uuid']
baseId = market['baseAsset']['symbol']
quoteId = market['quoteAsset']['symbol']
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': market['baseScale'],
'price': market['quoteScale'],
}
entry = {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
}
self.options['marketsByUuid'][uuid] = entry
result.append(entry)
return result
def parse_ticker(self, ticker, market=None):
#
# [
# {
# "volume": "190.4925000000000000",
# "open": "0.0777371200000000",
# "market_uuid": "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# "market_id": "ETH-BTC",
# "low": "0.0742925600000000",
# "high": "0.0789150000000000",
# "daily_change_perc": "-0.3789180767180466680525339760",
# "daily_change": "-0.0002945600000000",
# "close": "0.0774425600000000", # last price
# "bid": {
# "price": "0.0764777900000000",
# "amount": "6.4248000000000000"
# },
# "ask": {
# "price": "0.0774425600000000",
# "amount": "1.1741000000000000"
# }
# }
# ]
#
if market is None:
marketId = self.safe_string(ticker, 'market_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.milliseconds()
close = self.safe_float(ticker, 'close')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker['bid'], 'price'),
'bidVolume': self.safe_float(ticker['bid'], 'amount'),
'ask': self.safe_float(ticker['ask'], 'price'),
'askVolume': self.safe_float(ticker['ask'], 'amount'),
'vwap': None,
'open': self.safe_float(ticker, 'open'),
'close': close,
'last': close,
'previousClose': None,
'change': self.safe_float(ticker, 'daily_change'),
'percentage': self.safe_float(ticker, 'daily_change_perc'),
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': None,
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetMarketsSymbolTicker(self.extend({
'symbol': market['id'],
}, params))
return self.parse_ticker(response['data'], market)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetTickers(params)
tickers = response['data']
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return result
async def fetch_order_book(self, symbol, params={}):
await self.load_markets()
response = await self.publicGetMarketsSymbolDepth(self.extend({
'symbol': self.market_id(symbol),
}, params))
return self.parse_order_book(response['data'], None, 'bids', 'asks', 'price', 'amount')
def parse_trade(self, trade, market=None):
#
# { node: { taker_side: "ASK",
# price: "0.0694071600000000",
# market_uuid: "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# market_id: "ETH-BTC",
# inserted_at: "2018-07-14T09:22:06Z",
# id: "19913306",
# amount: "0.8800000000000000" },
# cursor: "Y3Vyc29yOnYxOjE5OTEzMzA2" }
#
node = trade['node']
timestamp = self.parse8601(node['inserted_at'])
price = self.safe_float(node, 'price')
amount = self.safe_float(node, 'amount')
if market is None:
marketId = self.safe_string(node, 'market_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = None
if market is not None:
symbol = market['symbol']
cost = self.cost_to_precision(symbol, price * amount)
side = node['taker_side'] == 'sell' if 'ASK' else 'buy'
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string(node, 'id'),
'order': None,
'type': 'limit',
'side': side,
'price': price,
'amount': amount,
'cost': float(cost),
'fee': None,
'info': trade,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['first'] = limit
response = await self.publicGetMarketsSymbolTrades(self.extend(request, params))
#
# {data: {page_info: { start_cursor: "Y3Vyc29yOnYxOjE5OTEzMzA2",
# has_previous_page: True,
# has_next_page: False,
# end_cursor: "Y3Vyc29yOnYxOjIwMDU0NzIw" },
# edges: [{ node: { taker_side: "ASK",
# price: "0.0694071600000000",
# market_uuid: "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# market_id: "ETH-BTC",
# inserted_at: "2018-07-14T09:22:06Z",
# id: "19913306",
# amount: "0.8800000000000000" },
# cursor: "Y3Vyc29yOnYxOjE5OTEzMzA2" },
# { node: { taker_side: "ASK",
# price: "0.0694071600000000",
# market_uuid: "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# market_id: "ETH-BTC",
# inserted_at: "2018-07-14T09:22:07Z",
# id: "19913307",
# amount: "0.3759000000000000" },
# cursor: "Y3Vyc29yOnYxOjE5OTEzMzA3" },
# { node: { taker_side: "ASK",
# price: "0.0694071600000000",
# market_uuid: "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# market_id: "ETH-BTC",
# inserted_at: "2018-07-14T09:22:08Z",
# id: "19913321",
# amount: "0.2197000000000000" },
# cursor: "Y3Vyc29yOnYxOjE5OTEzMzIx" },
#
return self.parse_trades(response['data']['edges'], market, since, limit)
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetAccounts(params)
#
# {data: [{locked_balance: "0",
# balance: "0",
# asset_uuid: "04479958-d7bb-40e4-b153-48bd63f2f77f",
# asset_id: "NKC" },
# {locked_balance: "0",
# balance: "0",
# asset_uuid: "04c8da0e-44fd-4d71-aeb0-8f4d54a4a907",
# asset_id: "UBTC" },
# {locked_balance: "0",
# balance: "0",
# asset_uuid: "05bc0d34-4809-4a39-a3c8-3a1851c8d224",
# asset_id: "READ" },
#
result = {'info': response}
balances = response['data']
for i in range(0, len(balances)):
balance = balances[i]
currencyId = balance['asset_id']
code = self.common_currency_code(currencyId)
if currencyId in self.currencies_by_id:
code = self.currencies_by_id[currencyId]['code']
total = self.safe_float(balance, 'balance')
used = self.safe_float(balance, 'locked_balance')
free = None
if total is not None and used is not None:
free = total - used
account = {
'free': free,
'used': used,
'total': total,
}
result[code] = account
return self.parse_balance(result)
def parse_order(self, order, market=None):
#
# {
# "id": 10,
# "market_uuid": "d2185614-50c3-4588-b146-b8afe7534da6",
# "market_uuid": "BTC-EOS", # not sure which one is correct
# "market_id": "BTC-EOS", # not sure which one is correct
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# }
#
id = self.safe_string(order, 'order_id')
if market is None:
marketId = self.safe_string(order, 'market_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
marketUuid = self.safe_string(order, 'market_uuid')
if marketUuid in self.options['marketsByUuid']:
market = self.options['marketsByUuid'][marketUuid]
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.parse8601(order['created_at'])
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'filled_amount')
remaining = max(0, amount - filled)
status = self.parse_order_status(self.safe_string(order, 'state'))
side = self.safe_string(order, 'side')
if side == 'BID':
side = 'buy'
else:
side = 'sell'
return {
'id': id,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'status': status,
'symbol': symbol,
'type': None,
'side': side,
'price': price,
'cost': None,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': None,
'info': order,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
# NAME DESCRIPTION EXAMPLE REQUIRE
# market_id market uuid d2185614-50c3-4588-b146-b8afe7534da6 True
# side order side one of "ASK"/"BID" True
# price order price string True
# amount order amount string, must larger than 0 True
#
# {
# "id": 10,
# "market_uuid": "BTC-EOS",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# }
#
await self.load_markets()
market = self.market(symbol)
response = await self.privatePostOrders(self.extend({
'order_market': market['id'],
'order_side': (side == 'BID' if 'buy' else 'ASK'),
'amount': self.amount_to_precision(symbol, amount),
'price': self.price_to_precision(symbol, price),
}, params))
return self.parse_order(response, market)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {'order_id': id}
response = await self.privatePostOrdersOrderIdCancel(self.extend(request, params))
#
# {
# "id": 10,
# "market_uuid": "BTC-EOS",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# }
#
return self.parse_order(response)
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
response = await self.privatePostOrdersOrderIdCancel(params)
#
# [
# {
# "id": 10,
# "market_uuid": "d2185614-50c3-4588-b146-b8afe7534da6",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# },
# {
# ...
# },
# ]
#
return self.parse_orders(response)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {'order_id': id}
response = await self.privateGetOrdersOrderId(self.extend(request, params))
#
# {
# "id": 10,
# "market_uuid": "d2185614-50c3-4588-b146-b8afe7534da6",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# }
#
return self.parse_order(response)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
# NAME DESCRIPTION EXAMPLE REQUIRED
# market_id market id ETH-BTC True
# after ask for the server to return orders after the cursor dGVzdGN1cmVzZQo False
# before ask for the server to return orders before the cursor dGVzdGN1cmVzZQo False
# first slicing count 20 False
# last slicing count 20 False
# side order side one of "ASK"/"BID" False
# state order state one of "CANCELED"/"FILLED"/"PENDING" False
if symbol is None:
raise ExchangeError(self.id + ' fetchOrders requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
}
if limit is not None:
request['first'] = limit
response = await self.privateGetOrders(self.extend(request, params))
#
# {
# "edges": [
# {
# "node": {
# "id": 10,
# "market_id": "ETH-BTC",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# },
# "cursor": "dGVzdGN1cmVzZQo="
# }
# ],
# "page_info": {
# "end_cursor": "dGVzdGN1cmVzZQo=",
# "start_cursor": "dGVzdGN1cmVzZQo=",
# "has_next_page": True,
# "has_previous_page": False
# }
# }
#
orders = self.safe_value(response, 'edges', [])
result = []
for i in range(0, len(orders)):
result.append(self.parse_order(orders[i]['node'], market))
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders(symbol, since, limit, self.extend({
'state': 'PENDING',
}, params))
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders(symbol, since, limit, self.extend({
'state': 'FILLED',
}, params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = self.urls['api'][api] + '/' + self.implode_params(path, params)
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce() * 1000000000
request = {
'type': 'OpenAPI',
'sub': self.apiKey,
'nonce': nonce,
}
jwt = self.jwt(request, self.secret)
headers = {
'Authorization': 'Bearer ' + jwt,
}
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
elif method == 'POST':
headers['Content-Type'] = 'application/json'
body = self.json(query)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body):
if not isinstance(body, basestring):
return # fallback to default error handler
if len(body) < 2:
return # fallback to default error handler
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
#
# {"errors":{"detail":"Internal server error"}}
#
error = self.safe_value(response, 'error')
errors = self.safe_value(response, 'errors')
data = self.safe_value(response, 'data')
if error is not None or errors is not None or data is None:
feedback = self.id + ' ' + self.json(response)
code = self.safe_integer(error, 'code')
exceptions = self.exceptions['codes']
if errors is not None:
code = self.safe_string(errors, 'detail')
exceptions = self.exceptions['detail']
if code in exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
| 41.769355 | 126 | 0.4292 |
acf819077252a3467ac91c7a2358c8fe2c5cf639 | 7,213 | py | Python | tests/test_satellite/test_astronomical_quantities.py | JaiWillems/celest | 5074b94feecc39127e5b0b0b1a683f636b725ca4 | [
"BSD-3-Clause"
] | 3 | 2021-12-31T08:17:19.000Z | 2022-03-27T00:36:31.000Z | tests/test_satellite/test_astronomical_quantities.py | JaiWillems/celest | 5074b94feecc39127e5b0b0b1a683f636b725ca4 | [
"BSD-3-Clause"
] | 1 | 2022-02-05T04:05:41.000Z | 2022-02-05T04:05:41.000Z | tests/test_satellite/test_astronomical_quantities.py | JaiWillems/celest | 5074b94feecc39127e5b0b0b1a683f636b725ca4 | [
"BSD-3-Clause"
] | null | null | null |
from celest.satellite._astronomical_quantities import *
from unittest import TestCase
import numpy as np
import unittest
class TestAstronomicalQuantities(TestCase):
def test_nutation_angles(self):
"""Test `AstronomicalQuantities.nutation_angles`.
Notes
-----
Test case is taken from Astronomical Algorithms.[1]_
References
----------
.. [1] Jean Meeus. Astronomical algorithms. 2nd ed. Willmann-Bell,
1998, pp. 148. isbn: 9780943396613.
"""
julian = np.array([2446895.5])
D, M, N, F, O = nutation_angles(julian)
self.assertAlmostEqual(D[0], 136.9623, places=4)
self.assertAlmostEqual(M[0], 94.9792, places=4)
self.assertAlmostEqual(N[0], 229.2784, places=4)
self.assertAlmostEqual(F[0], 143.4079, places=4)
self.assertAlmostEqual(O[0], 11.2531, places=4)
def test_nutation_components(self):
"""Test `AstronomicalQuantities.nutation_components`.
Notes
-----
Test cases are taken from the PHP Science Labs.[1]_
References
----------
.. [1] Jay Tanner. NeoProgrammics - Science Computations. 2021.
url: http://www.neoprogrammics.com/nutations/.
"""
julian = np.array([2449634.5, 2453420.5625, 2477418.211805555555])
true_d_psi = np.array([11.694, -5.993, 2.937])
true_d_epsilon = np.array([-5.946, 8.431, -8.871])
d_psi, d_epsilon = nutation_components(julian)
for i in range(julian.size):
with self.subTest(i=i):
self.assertAlmostEqual(d_psi[i], true_d_psi[i], delta=0.5)
self.assertAlmostEqual(d_epsilon[i], true_d_epsilon[i], delta=0.1)
def test_mean_obliquity(self):
"""Test `AstronomicalQuantities.mean_obliquity`.
Notes
-----
Test cases are taken from PHP Science Labs.[1]_
References
----------
.. [1] Jay Tanner. Obliquity of the Ecliptic - PHP Science Labs. 2021.
url: https://www.neoprogrammics.com/obliquity_of_the_ecliptic/Obliquity_Of_The_Ecliptic_Calculator.php
"""
julian = np.array([2459437.815972222, 2477404.5729166665, 2422327.21875])
true_mean_obliquity = np.array([23.4364767133, 23.4300808752, 23.4496874486])
calc_mean_obliquity = mean_obliquity(julian)
for i in range(julian.size):
with self.subTest(i=i):
self.assertAlmostEqual(calc_mean_obliquity[i], true_mean_obliquity[i], delta=0.0001)
def test_apparent_obliquity(self):
"""Test `AstronomicalQuantities.apparent_obliquity`.
Notes
-----
Test cases are taken from PHP Science Labs.[1]_
References
----------
.. [1] Jay Tanner. Obliquity of the Ecliptic - PHP Science Labs. 2021.
url: https://www.neoprogrammics.com/obliquity_of_the_ecliptic/Obliquity_Of_The_Ecliptic_Calculator.php
"""
julian = np.array([2459437.815972222, 2477404.5729166665, 2422327.21875])
true_apparent_obliquity = np.array([23.4376318857, 23.4276258425, 23.4479812709])
calc_apparent_obliquity = apparent_obliquity(julian)
for i in range(julian.size):
with self.subTest(i=i):
self.assertAlmostEqual(calc_apparent_obliquity[i], true_apparent_obliquity[i], delta=0.0001)
def test_from_julian(self):
"""Test `AstronomicalQuantities.from_julian`.
Notes
-----
Test cases are generated from the `Julian` Python library.
"""
import julian as jd
julian = np.array([2436116.31, 2445246.65, 2456124.09])
year, month, day = from_julian(julian)
for i in range(julian.size):
with self.subTest(i=i):
dt = jd.from_jd(julian[i])
true_day = dt.day + (dt.hour + (dt.minute + (dt.second + dt.microsecond / 100000) / 60) / 60) / 24
true_month = dt.month
true_year = dt.year
self.assertAlmostEqual(day[i], true_day, places=3)
self.assertEqual(month[i], true_month)
self.assertEqual(year[i], true_year)
def test_day_of_year(self):
"""Test `AstronomicalQuantities.day_of_year`.
Notes
-----
Test cases are taken from "Astronomical Algorithms" by Jean Meeus.[1]_
References
----------
.. [1] Jean Meeus. Astronomical algorithms. 2nd ed. Willmann-Bell,
1998, pp. 65. isbn: 9780943396613.
"""
julian = np.array([2443826.5, 2447273.5, 2447273.8, 2447274.4])
day = day_of_year(julian)
true_day = np.array([318, 113, 113, 113])
for i in range(julian.size):
with self.subTest(i=i):
self.assertEqual(day[i], true_day[i])
def test_equation_of_time(self):
"""Test `AstronomicalQuantities.equation_of_time`.
Notes
-----
Test cases are taken from "Astronomical Algorithms" by Jean Meeus,
PLANETCALC, and the Global Monitoring Laboratory.[1]_[2]_[3]_
References
----------
.. [1] Jean Meeus. Astronomical algorithms. 2nd ed. Willmann-Bell,
1998, pp. 185. isbn: 9780943396613.
.. [2] Anton. Online calculator: Equation of time. url:
https://planetcalc.com/9235/.
.. [3] NOAA US Department of Commerce. ESRL Global Monitoring
Laboratory -Global Radiation and Aerosols. url:
https://gml.noaa.gov/grad/solcalc/.
"""
julian = np.array([2455368.75, 2448908.5, 2459448.5])
true_EOT = np.array([-0.42657696, 3.427351, -0.710537])
EOT = equation_of_time(julian)
for i in range(julian.size):
with self.subTest(i=i):
self.assertAlmostEqual(EOT[i], true_EOT[i], delta=0.04)
def test_equation_of_equinoxes(self):
"""Test `AstronomicalQuantities.equation_of_equinoxes`.
Notes
-----
Test cases taken from "Astronomical Algorithms" by Jean Meeus.[1]_
References
----------
.. [1] Jean Meeus. Astronomical algorithms. 2nd ed. Willmann-Bell,
1998, pp. 88. isbn: 9780943396613.
"""
julian = np.array([2446895.5])
true_EOE = np.array([-0.2317])
EOE = equation_of_equinoxes(julian)
for i in range(julian.size-1):
with self.subTest(i=i):
self.assertAlmostEqual(EOE[i], true_EOE[i], delta=0.0001)
def test_sun_right_ascension(self):
"""Test `AstronomicalQuantities.sun_right_ascension`.
Notes
-----
The test case is taken from "Astronomical Algorithms" by Jean
Meeus.[1]_
References
----------
.. [1] Jean Meeus. Astronomical algorithms. 2nd ed. Willmann-Bell,
1998, pp. 185. isbn: 9780943396613.
"""
julian = np.array([2448908.5])
ra = np.array([198.38083])
calc_ra = sun_right_ascension(julian)
self.assertAlmostEqual(ra[0], calc_ra[0], delta=0.001)
if __name__ == "__main__":
    # allow running this test module directly: python test_astronomical_quantities.py
    unittest.main()
| 32.638009 | 114 | 0.599612 |
acf81910d06c72c034cc0d0bc84df9c637ca88ac | 10,147 | py | Python | mars/dataframe/groupby/apply.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 2,413 | 2018-12-06T09:37:11.000Z | 2022-03-30T15:47:39.000Z | mars/dataframe/groupby/apply.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 1,335 | 2018-12-07T03:06:18.000Z | 2022-03-31T11:45:57.000Z | mars/dataframe/groupby/apply.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 329 | 2018-12-07T03:12:41.000Z | 2022-03-29T21:49:57.000Z | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from ... import opcodes
from ...core import OutputType
from ...core.custom_log import redirect_custom_log
from ...serialization.serializables import TupleField, DictField, FunctionField
from ...utils import enter_current_session, quiet_stdio
from ..operands import DataFrameOperandMixin, DataFrameOperand
from ..utils import build_empty_df, build_empty_series, parse_index, \
validate_output_types, make_dtypes, make_dtype
class GroupByApply(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = opcodes.APPLY
_op_module_ = 'dataframe.groupby'
_func = FunctionField('func')
_args = TupleField('args')
_kwds = DictField('kwds')
    def __init__(self, func=None, args=None, kwds=None, output_types=None, **kw):
        """Store the user function and its call arguments on serializable fields."""
        super().__init__(_func=func, _args=args, _kwds=kwds, _output_types=output_types, **kw)
    @property
    def func(self):
        # the user-supplied callable applied to each group
        return self._func
    @property
    def args(self):
        # positional args for func; getattr guards ops created without the field set
        return getattr(self, '_args', None) or ()
    @property
    def kwds(self):
        # keyword args for func; getattr guards ops created without the field set
        return getattr(self, '_kwds', None) or dict()
    @classmethod
    @redirect_custom_log
    @enter_current_session
    def execute(cls, ctx, op):
        """Run ``op.func`` over one grouped input chunk and normalize the
        pandas result into the operand's declared output type."""
        in_data = ctx[op.inputs[0].key]
        out = op.outputs[0]
        if not in_data:
            # no groups in this chunk: emit an empty frame/series with the right schema
            if op.output_types[0] == OutputType.dataframe:
                ctx[op.outputs[0].key] = build_empty_df(op.outputs[0].dtypes)
            else:
                ctx[op.outputs[0].key] = build_empty_series(
                    op.outputs[0].dtype, name=out.name)
            return
        applied = in_data.apply(op.func, *op.args, **op.kwds)
        if isinstance(applied, pd.DataFrame):
            # when there is only one group, pandas tend to return a DataFrame, while
            # we need to convert it into a compatible series
            if op.output_types[0] == OutputType.series:
                assert len(applied.index) == 1
                # build a 2-level index: (group key, original column) per value
                applied_idx = pd.MultiIndex.from_arrays(
                    [[applied.index[0]] * len(applied.columns), applied.columns.tolist()])
                applied_idx.names = [applied.index.name, None]
                applied = pd.Series(np.array(applied.iloc[0]), applied_idx,
                                    name=applied.columns.name)
            else:
                applied.columns.name = None
        else:
            applied.name = out.name
        ctx[out.key] = applied
    @classmethod
    def tile(cls, op):
        """Tile the apply over each groupby chunk; result lengths are unknown
        ahead of time, so every chunk gets a nan row count."""
        in_groupby = op.inputs[0]
        out_df = op.outputs[0]
        chunks = []
        for c in in_groupby.chunks:
            inp_chunks = [c]
            new_op = op.copy().reset_key()
            # link chunk op back to the tileable op for custom log redirection
            new_op.tileable_op_key = op.key
            if op.output_types[0] == OutputType.dataframe:
                chunks.append(new_op.new_chunk(
                    inp_chunks, index=c.index, shape=(np.nan, len(out_df.dtypes)), dtypes=out_df.dtypes,
                    columns_value=out_df.columns_value, index_value=out_df.index_value))
            else:
                chunks.append(new_op.new_chunk(
                    inp_chunks, name=out_df.name, index=(c.index[0],), shape=(np.nan,), dtype=out_df.dtype,
                    index_value=out_df.index_value))
        new_op = op.copy()
        kw = out_df.params.copy()
        kw['chunks'] = chunks
        if op.output_types[0] == OutputType.dataframe:
            kw['nsplits'] = ((np.nan,) * len(chunks), (out_df.shape[1],))
        else:
            kw['nsplits'] = ((np.nan,) * len(chunks),)
        return new_op.new_tileables([in_groupby], **kw)
def _infer_df_func_returns(self, in_groupby, in_df, dtypes, dtype=None,
name=None, index=None):
index_value, output_type, new_dtypes = None, None, None
try:
infer_df = in_groupby.op.build_mock_groupby().apply(self.func, *self.args, **self.kwds)
# todo return proper index when sort=True is implemented
index_value = parse_index(infer_df.index[:0], in_df.key, self.func)
# for backward compatibility
dtype = dtype if dtype is not None else dtypes
if isinstance(infer_df, pd.DataFrame):
output_type = output_type or OutputType.dataframe
new_dtypes = new_dtypes or infer_df.dtypes
elif isinstance(infer_df, pd.Series):
output_type = output_type or OutputType.series
new_dtypes = new_dtypes or (name or infer_df.name, dtype or infer_df.dtype)
else:
output_type = OutputType.series
new_dtypes = (name, dtype or pd.Series(infer_df).dtype)
except: # noqa: E722 # nosec
pass
self.output_types = [output_type] if not self.output_types else self.output_types
dtypes = new_dtypes if dtypes is None else dtypes
index_value = index_value if index is None else parse_index(index)
return dtypes, index_value
    def __call__(self, groupby, dtypes=None, dtype=None, name=None, index=None):
        """Build the output tileable for this apply, inferring metadata from a
        mock run when the caller did not specify it explicitly."""
        in_df = groupby
        # walk back to the source dataframe/series the groupby was built from
        while in_df.op.output_types[0] not in (OutputType.dataframe, OutputType.series):
            in_df = in_df.inputs[0]
        with quiet_stdio():
            dtypes, index_value = self._infer_df_func_returns(
                groupby, in_df, dtypes, dtype=dtype, name=name, index=index)
        if index_value is None:
            index_value = parse_index(None, (in_df.key, in_df.index_value.key))
        for arg, desc in zip((self.output_types, dtypes), ('output_types', 'dtypes')):
            if arg is None:
                raise TypeError(f'Cannot determine {desc} by calculating with enumerate data, '
                                'please specify it as arguments')
        if self.output_types[0] == OutputType.dataframe:
            new_shape = (np.nan, len(dtypes))
            return self.new_dataframe([groupby], shape=new_shape, dtypes=dtypes, index_value=index_value,
                                      columns_value=parse_index(dtypes.index, store_data=True))
        else:
            # for series output, dtypes is a (name, dtype) pair from inference
            name = name or dtypes[0]
            dtype = dtype or dtypes[1]
            new_shape = (np.nan,)
            return self.new_series([groupby], name=name, shape=new_shape, dtype=dtype,
                                   index_value=index_value)
def groupby_apply(groupby, func, *args, output_type=None, dtypes=None, dtype=None,
                  name=None, index=None, **kwargs):
    """
    Apply function `func` group-wise and combine the results together.

    The function passed to `apply` must take a dataframe as its first
    argument and return a DataFrame, Series or scalar. `apply` will
    then take care of combining the results back together into a single
    dataframe or series. `apply` is therefore a highly flexible
    grouping method.

    While `apply` is a very flexible method, its downside is that
    using it can be quite a bit slower than using more specific methods
    like `agg` or `transform`. Pandas offers a wide range of method that will
    be much faster than using `apply` for their specific purposes, so try to
    use them before reaching for `apply`.

    Parameters
    ----------
    func : callable
        A callable that takes a dataframe as its first argument, and
        returns a dataframe, a series or a scalar. In addition the
        callable may take positional and keyword arguments.
    output_type : {'dataframe', 'series'}, default None
        Specify type of returned object. See `Notes` for more details.
    dtypes : Series, default None
        Specify dtypes of returned DataFrames. See `Notes` for more details.
    dtype : numpy.dtype, default None
        Specify dtype of returned Series. See `Notes` for more details.
    name : str, default None
        Specify name of returned Series. See `Notes` for more details.
    index : Index, default None
        Specify index of returned object. See `Notes` for more details.
    args, kwargs : tuple and dict
        Optional positional and keyword arguments to pass to `func`.

    Returns
    -------
    applied : Series or DataFrame

    See Also
    --------
    pipe : Apply function to the full GroupBy object instead of to each
        group.
    aggregate : Apply aggregate function to the GroupBy object.
    transform : Apply function column-by-column to the GroupBy object.
    Series.apply : Apply a function to a Series.
    DataFrame.apply : Apply a function to each row or column of a DataFrame.

    Notes
    -----
    When deciding output dtypes and shape of the return value, Mars will
    try applying ``func`` onto a mock grouped object, and the apply call
    may fail. When this happens, you need to specify the type of apply
    call (DataFrame or Series) in output_type.

    * For DataFrame output, you need to specify a list or a pandas Series
      as ``dtypes`` of output DataFrame. ``index`` of output can also be
      specified.
    * For Series output, you need to specify ``dtype`` and ``name`` of
      output Series.
    """
    # accept legacy keyword spellings and normalize them into output_types
    output_types = kwargs.pop('output_types', None)
    object_type = kwargs.pop('object_type', None)
    output_types = validate_output_types(
        output_types=output_types, output_type=output_type, object_type=object_type)
    # coerce user-provided dtype specs into canonical pandas/numpy objects
    dtypes = make_dtypes(dtypes)
    dtype = make_dtype(dtype)
    op = GroupByApply(func=func, args=args, kwds=kwargs, output_types=output_types)
    return op(groupby, dtypes=dtypes, dtype=dtype, name=name, index=index)
| 41.416327 | 107 | 0.64147 |
acf8195074d3ca87156b56701c6b25c8c6b727cc | 173 | py | Python | plugins/examples/example_searchpath_plugin/hydra_plugins/example_searchpath_plugin/__init__.py | andrewjong/hydra | c2faea0f137164721e73d4d0143f9e03554daae4 | [
"MIT"
] | 1 | 2021-07-20T16:14:45.000Z | 2021-07-20T16:14:45.000Z | plugins/examples/example_searchpath_plugin/hydra_plugins/example_searchpath_plugin/__init__.py | andrewjong/hydra | c2faea0f137164721e73d4d0143f9e03554daae4 | [
"MIT"
] | 4 | 2021-10-06T22:51:46.000Z | 2022-02-27T12:53:27.000Z | plugins/examples/example_searchpath_plugin/hydra_plugins/example_searchpath_plugin/__init__.py | andrewjong/hydra | c2faea0f137164721e73d4d0143f9e03554daae4 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .example_searchpath_plugin import ExampleSearchPathPlugin
__all__ = ["ExampleSearchPathPlugin"]
| 34.6 | 70 | 0.820809 |
acf81a9630999762eee661ec3d9064ad8ae6ada0 | 937 | py | Python | db.py | darghex/PARCES-APIREST | 494ab92f7d894cac2d5cdc25092f1a777622ba62 | [
"Apache-2.0"
] | null | null | null | db.py | darghex/PARCES-APIREST | 494ab92f7d894cac2d5cdc25092f1a777622ba62 | [
"Apache-2.0"
] | null | null | null | db.py | darghex/PARCES-APIREST | 494ab92f7d894cac2d5cdc25092f1a777622ba62 | [
"Apache-2.0"
] | null | null | null | from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from main import engine
Base = declarative_base()
# Reflect the already-existing schema so the classes below bind to live
# tables without re-declaring any columns here.
Base.metadata.reflect(engine)
from sqlalchemy.orm import relationship, backref
# Thin declarative wrappers over the reflected tables. All columns come from
# the database via reflect() above. Table names are Spanish (e.g.
# "instancias_curso" ~ course instances) -- domain meanings inferred from the
# names only; confirm against the actual schema.
class instancias_curso(Base):
    __table__ = Base.metadata.tables['instancias_curso']
class actividades(Base):
    __table__ = Base.metadata.tables['actividades']
class asignaciones(Base):
    __table__ = Base.metadata.tables['asignaciones']
class propuestas_matricula(Base):
    __table__ = Base.metadata.tables['propuestas_matricula']
class comentarios_propuesta(Base):
    __table__ = Base.metadata.tables['comentarios_propuesta']
class calificaciones(Base):
    __table__ = Base.metadata.tables['calificaciones']
class comentarios_instancia_curso(Base):
    __table__ = Base.metadata.tables['comentarios_instancia_curso']
class asistencias(Base):
    __table__ = Base.metadata.tables['asistencias']
acf81aa45653db12111ae730b0a4a06a74126ed0 | 888 | py | Python | app.py | jaredbancroft/serious_business | b064b6c5ab6f7fec58f8c0b7a031b017a722a9de | [
"MIT"
] | null | null | null | app.py | jaredbancroft/serious_business | b064b6c5ab6f7fec58f8c0b7a031b017a722a9de | [
"MIT"
] | null | null | null | app.py | jaredbancroft/serious_business | b064b6c5ab6f7fec58f8c0b7a031b017a722a9de | [
"MIT"
] | null | null | null | """
Silly slack app for fun
"""
import os
from slack_bolt import App
# Bolt app configured from the environment so no credentials live in source.
app = App(
    token=os.environ.get("SLACK_BOT_TOKEN"),
    signing_secret=os.environ.get("SLACK_SIGNING_SECRET")
)
@app.command("/sb")
def format_for_serious_business(ack, respond, command):
"""
Takes the /sb text and wraps it in BEGIN/END Serious Business tags.
"""
# Acknowledge command request
ack()
blocks = [{
"type": "section",
"text": {
"type": "mrkdwn",
# pylint: disable=line-too-long
"text": f"```-------------BEGIN Serious Business----------------\n{command['text']}\n-------------END Serious Business------------------```"
}
}]
respond(
blocks = blocks,
response_type = "in_channel"
)
if __name__ == "__main__":
app.start(port=int(os.environ.get("PORT", 3000)))
| 25.371429 | 156 | 0.540541 |
acf81ad6ff7e3382e90783699b9b4b1652533a1b | 9,646 | py | Python | swift3/test/functional/test_multi_delete.py | KoreaCloudObjectStorage/swift3 | 3bd412b33d524bb84bffbe5bd97642faaab71703 | [
"Apache-2.0"
] | null | null | null | swift3/test/functional/test_multi_delete.py | KoreaCloudObjectStorage/swift3 | 3bd412b33d524bb84bffbe5bd97642faaab71703 | [
"Apache-2.0"
] | null | null | null | swift3/test/functional/test_multi_delete.py | KoreaCloudObjectStorage/swift3 | 3bd412b33d524bb84bffbe5bd97642faaab71703 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift3.test.functional.utils import calculate_md5, get_error_code
from swift3.etree import fromstring, tostring, Element, SubElement
from swift3.controllers.multi_delete import MAX_MULTI_DELETE_BODY_SIZE
from swift3.test.functional import Swift3FunctionalTestCase
from swift3.test.functional.s3_test_client import Connection
class TestSwift3MultiDelete(Swift3FunctionalTestCase):
    """Functional tests for the S3 Multi-Object Delete (POST ?delete) API."""
    def setUp(self):
        super(TestSwift3MultiDelete, self).setUp()
    def _prepare_test_delete_multi_objects(self, bucket, objects):
        """Create ``bucket`` and PUT every key in ``objects`` into it."""
        self.conn.make_request('PUT', bucket)
        for obj in objects:
            self.conn.make_request('PUT', bucket, obj)
    def _gen_multi_delete_xml(self, objects, quiet=None):
        """Build a MultiDelete request body listing ``objects``.

        ``quiet`` ('true'/'false'), when given, emits the Quiet element that
        controls response verbosity.
        """
        elem = Element('Delete')
        if quiet:
            SubElement(elem, 'Quiet').text = quiet
        for key in objects:
            obj = SubElement(elem, 'Object')
            SubElement(obj, 'Key').text = key
        return tostring(elem, use_s3ns=False)
    def _gen_invalid_multi_delete_xml(self, hasObjectTag=False):
        """Build an intentionally malformed body (no Object, or an empty Key)."""
        elem = Element('Delete')
        if hasObjectTag:
            obj = SubElement(elem, 'Object')
            SubElement(obj, 'Key').text = ''
        return tostring(elem, use_s3ns=False)
    def test_delete_multi_objects(self):
        """Happy path: delete one, two, partially-missing and fully-missing
        key sets. S3 reports NoSuchKey objects as Deleted too."""
        bucket = 'bucket'
        put_objects = ['obj%s' % var for var in xrange(4)]
        self._prepare_test_delete_multi_objects(bucket, put_objects)
        query = 'delete'
        # Delete an object via MultiDelete API
        req_objects = ['obj0']
        xml = self._gen_multi_delete_xml(req_objects)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEquals(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue(headers['content-type'] is not None)
        self.assertEquals(headers['content-length'], str(len(body)))
        elem = fromstring(body)
        resp_objects = elem.findall('Deleted')
        self.assertEquals(len(resp_objects), len(req_objects))
        for o in resp_objects:
            self.assertTrue(o.find('Key').text in req_objects)
        # Delete 2 objects via MultiDelete API
        req_objects = ['obj1', 'obj2']
        xml = self._gen_multi_delete_xml(req_objects)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEquals(status, 200)
        elem = fromstring(body, 'DeleteResult')
        resp_objects = elem.findall('Deleted')
        self.assertEquals(len(resp_objects), len(req_objects))
        for o in resp_objects:
            self.assertTrue(o.find('Key').text in req_objects)
        # Delete 2 objects via MultiDelete API but one (obj4) doesn't exist.
        req_objects = ['obj3', 'obj4']
        xml = self._gen_multi_delete_xml(req_objects)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEquals(status, 200)
        elem = fromstring(body, 'DeleteResult')
        resp_objects = elem.findall('Deleted')
        # S3 assumes a NoSuchKey object as deleted.
        self.assertEquals(len(resp_objects), len(req_objects))
        for o in resp_objects:
            self.assertTrue(o.find('Key').text in req_objects)
        # Delete 2 objects via MultiDelete API but no objects exist
        req_objects = ['obj4', 'obj5']
        xml = self._gen_multi_delete_xml(req_objects)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEquals(status, 200)
        elem = fromstring(body, 'DeleteResult')
        resp_objects = elem.findall('Deleted')
        self.assertEquals(len(resp_objects), len(req_objects))
        for o in resp_objects:
            self.assertTrue(o.find('Key').text in req_objects)
    def test_delete_multi_objects_error(self):
        """Error cases: bad signature, missing bucket, malformed XML, empty
        Key, and the two request-size limits (object count and body size)."""
        bucket = 'bucket'
        put_objects = ['obj']
        self._prepare_test_delete_multi_objects(bucket, put_objects)
        xml = self._gen_multi_delete_xml(put_objects)
        content_md5 = calculate_md5(xml)
        query = 'delete'
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('POST', bucket, body=xml,
                                         headers={
                                             'Content-MD5': content_md5
                                         },
                                         query=query)
        self.assertEquals(get_error_code(body), 'SignatureDoesNotMatch')
        status, headers, body = \
            self.conn.make_request('POST', 'nothing', body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEquals(get_error_code(body), 'NoSuchBucket')
        # without Object tag
        xml = self._gen_invalid_multi_delete_xml()
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEquals(get_error_code(body), 'MalformedXML')
        # without value of Key tag
        xml = self._gen_invalid_multi_delete_xml(hasObjectTag=True)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEquals(get_error_code(body), 'UserKeyMustBeSpecified')
        # specified number of objects are over CONF.max_multi_delete_objects
        # (Default 1000), but xml size is smaller than 61365 bytes.
        # Fixed: the format placeholder was never filled ('obj%s' repeated
        # 1001 times); the list now holds 1001 distinct keys as intended.
        req_objects = ['obj%s' % var for var in xrange(1001)]
        xml = self._gen_multi_delete_xml(req_objects)
        self.assertTrue(len(xml.encode('utf-8')) <= MAX_MULTI_DELETE_BODY_SIZE)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEquals(get_error_code(body), 'MalformedXML')
        # specified xml size is over 61365 bytes, but number of objects are
        # smaller than CONF.max_multi_delete_objects.
        obj = 'a' * 1024
        req_objects = [obj + str(var) for var in xrange(999)]
        xml = self._gen_multi_delete_xml(req_objects)
        self.assertTrue(len(xml.encode('utf-8')) > MAX_MULTI_DELETE_BODY_SIZE)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEquals(get_error_code(body), 'MalformedXML')
    def test_delete_multi_objects_with_quiet(self):
        """Quiet=true suppresses per-object Deleted entries; Quiet=false
        returns them."""
        bucket = 'bucket'
        put_objects = ['obj']
        query = 'delete'
        # with Quiet true
        quiet = 'true'
        self._prepare_test_delete_multi_objects(bucket, put_objects)
        xml = self._gen_multi_delete_xml(put_objects, quiet)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEquals(status, 200)
        elem = fromstring(body, 'DeleteResult')
        resp_objects = elem.findall('Deleted')
        self.assertEquals(len(resp_objects), 0)
        # with Quiet false
        quiet = 'false'
        self._prepare_test_delete_multi_objects(bucket, put_objects)
        xml = self._gen_multi_delete_xml(put_objects, quiet)
        content_md5 = calculate_md5(xml)
        status, headers, body = \
            self.conn.make_request('POST', bucket, body=xml,
                                   headers={'Content-MD5': content_md5},
                                   query=query)
        self.assertEquals(status, 200)
        elem = fromstring(body, 'DeleteResult')
        resp_objects = elem.findall('Deleted')
        self.assertEquals(len(resp_objects), 1)
acf81b911498071a09d1caf28fe8e8b293db0ee5 | 2,794 | py | Python | utils/preprocess_tuning.py | najafian-lab/em-calibration | 81693ddbf87e642cd66a0b375e25ca378c2752a8 | [
"MIT"
] | 1 | 2021-07-05T12:48:39.000Z | 2021-07-05T12:48:39.000Z | utils/preprocess_tuning.py | najafian-lab/em-calibration | 81693ddbf87e642cd66a0b375e25ca378c2752a8 | [
"MIT"
] | null | null | null | utils/preprocess_tuning.py | najafian-lab/em-calibration | 81693ddbf87e642cd66a0b375e25ca378c2752a8 | [
"MIT"
] | null | null | null | from calibration.grid import GridProcessor
import os
import random
import numpy as np
import threading
import queue
import cv2
IMAGE_FOLDER = 'C:\\Users\\smerk\\Downloads\\images'  # source directory of grid images
IMAGES = os.listdir(IMAGE_FOLDER)
SAMPLES = 8
# NOTE(review): IMAGES_SAMPLE is never used below -- processing iterates
# IMAGES_FULL (every image); confirm whether sampling was intended.
IMAGES_SAMPLE = random.sample(IMAGES, SAMPLES)
IMAGES_FULL = [os.path.join(IMAGE_FOLDER, image) for image in IMAGES]
IM_SIZE = (400, 400)  # preview window size in pixels
THREADS = 5  # worker threads per processing pass
# Shared mutable state: raw grayscale image cache and current tuning params.
image_cache = {}
settings = {}
# let's process them
def process_image(item: str, o_q: queue.Queue):
    """Preprocess one image file and queue the resized preview.

    Loads the grayscale image at path ``item`` (cached in the module-level
    ``image_cache`` so slider tweaks don't re-read the disk), runs the
    current ``settings`` through GridProcessor's preprocessing, and puts
    ``(basename, resized_image)`` onto ``o_q`` for display.
    """
    global settings, image_cache
    if item in image_cache:
        image = image_cache[item].copy()
    else:
        # Bug fix: this previously called cv2.imread(image, ...), but `image`
        # is unbound on a cache miss (NameError); the path to load is `item`.
        image = cv2.imread(item, cv2.IMREAD_GRAYSCALE)
        image_cache[item] = image
    process = GridProcessor(image, 1, settings)
    process.preprocess_image()
    image = process.processed
    name = os.path.basename(item)
    o_q.put((name, cv2.resize(image, IM_SIZE, interpolation=cv2.INTER_LANCZOS4)))
def process_image_queue(q: queue.Queue, o_q: queue.Queue):
    """Worker loop: process paths from ``q`` until a ``None`` sentinel arrives."""
    # iter(q.get, None) keeps pulling items and stops on the None sentinel,
    # exactly like the explicit while/break loop it replaces.
    for item in iter(q.get, None):
        process_image(item, o_q)
        q.task_done()
def process_images():
    """Run one full preprocessing pass over every image and show the results.

    Spins up THREADS workers, feeds them all of IMAGES_FULL, shuts them down
    with None sentinels, then displays each (name, image) result in its own
    OpenCV window. Blocks until the pass completes.
    """
    global IMAGES_FULL
    threads = []
    in_q = queue.Queue()
    out_q = queue.Queue()
    for _ in range(THREADS):
        thread = threading.Thread(target=process_image_queue, args=(in_q, out_q))
        thread.start()
        threads.append(thread)
    # enqueue every image path for the workers
    for im in IMAGES_FULL:
        in_q.put(im)
    # push more sentinels than workers (3x) so every worker is guaranteed
    # to see one and exit
    for _ in range(THREADS * 3):
        in_q.put(None)
    # wait for all workers to finish the pass
    for thread in threads:
        thread.join()
    # drain the results and display them; get_nowait is safe because all
    # producers have already joined
    while True:
        try:
            (name, image) = out_q.get_nowait()
            cv2.imshow(name, image)
        except queue.Empty:
            break
# controls
def change_kernel(val):
    """Trackbar callback: derive an odd kernel size from the slider and refresh."""
    global settings
    # 2*val + 1 guarantees an odd kernel size.
    settings['kernel'] = int(2 * val + 1)
    process_images()
def change_alpha(val):
    """Trackbar callback: update the contrast alpha and refresh the previews."""
    global settings
    settings['contrast_alpha'] = int(val)
    process_images()
def change_canny_low(val):
    """Trackbar callback: update the lower Canny threshold and refresh."""
    global settings
    settings['canny_low'] = int(val)
    process_images()
def change_canny_high(val):
    """Trackbar callback: update the upper Canny threshold and refresh."""
    global settings
    settings['canny_high'] = int(val)
    process_images()
# Build the control window, render an initial pass over every image, then
# hook up the tuning sliders.
try:
    blank = np.zeros((10, 400), np.uint8)
    cv2.imshow('control', blank)
    process_images()
    cv2.createTrackbar('kernel', 'control', 0, 10, change_kernel)
    cv2.createTrackbar('alpha', 'control', 1, 110, change_alpha)
    # Bug fix: both canny sliders were wired to change_alpha (copy-paste),
    # leaving the dedicated change_canny_low/high callbacks unused.
    cv2.createTrackbar('canny_low', 'control', 1, 250, change_canny_low)
    cv2.createTrackbar('canny_high', 'control', 1, 250, change_canny_high)
    cv2.waitKey(0)
finally:
    # Always tear the preview windows down, even on interrupt.
    cv2.destroyAllWindows()
acf81bd0ad27559c9e9e9a66f6116401541a02d0 | 6,224 | py | Python | bof/admin.py | parente/bof | 0ab92a83e8eeca777ba5e2cfd2753da327f8a886 | [
"BSD-2-Clause"
] | null | null | null | bof/admin.py | parente/bof | 0ab92a83e8eeca777ba5e2cfd2753da327f8a886 | [
"BSD-2-Clause"
] | 4 | 2016-08-21T15:46:35.000Z | 2016-08-21T15:47:31.000Z | bof/admin.py | parente/bof | 0ab92a83e8eeca777ba5e2cfd2753da327f8a886 | [
"BSD-2-Clause"
] | null | null | null | """Admin control CLI."""
# Copyright (c) Peter Parente
# Distributed under the terms of the BSD 2-Clause License.
import click
from prettytable import PrettyTable
from . import app
from .model import db, User, Flock, Location
@click.group()
def admin():
    """Admin access to BoF data."""
    pass
@admin.group()
def user():
    """Manage users."""
    pass
@user.command()
def list():
    """List all registered users."""
    # NOTE(review): the function name shadows the builtin `list` (the flock
    # and location groups reuse it too). click registers the command object
    # at decoration time, so the CLI still works, but consider explicit
    # command names (e.g. @user.command('list')) with non-shadowing funcs.
    table = PrettyTable(['id', 'username', 'banned', 'admin'])
    with app.app_context():
        for user in User.query.all():
            table.add_row([user.id, user.username, user.banned, user.admin])
    click.echo(table)
def apply_ban(username, ban):
    # Shared helper for ban/unban: sets the `banned` flag on the named user
    # and returns the updated record as a dict.
    # NOTE(review): assumes the username exists; `first()` returning None
    # would raise AttributeError here -- confirm intended behavior.
    with app.app_context():
        user = User.query.filter_by(username=username).first()
        user.banned = ban
        db.session.commit()
        return user.to_dict()
@user.command()
@click.argument('username')
def ban(username):
    """Ban a user."""
    user = apply_ban(username, True)
    click.echo(user)
@user.command()
@click.argument('username')
def unban(username):
    """Unban a user."""
    user = apply_ban(username, False)
    click.echo(user)
@admin.group()
def flock():
    """Manage flocks."""
    pass
@flock.command()
def list():
    """List all flocks."""
    table = PrettyTable(['id', 'name', 'leader', 'birds'])
    with app.app_context():
        for flock in Flock.query.all():
            table.add_row([flock.id, flock.name, flock.leader.username,
                           len(flock.birds)])
    click.echo(table)
@flock.command()
@click.argument('id')
@click.confirmation_option(prompt='Are you sure you want to delete the flock?')
def remove(id):
    """Remove a flock."""
    # `id` shadows the builtin; it is the flock primary key from the CLI.
    with app.app_context():
        flock = Flock.query.get(id)
        db.session.delete(flock)
        db.session.commit()
@flock.command()
@click.argument('id')
def edit(id):
    """Edit a flock."""
    # Re-prompt each editable field, defaulting to the current value so
    # pressing Enter keeps it unchanged.
    with app.app_context():
        flock = Flock.query.get(id)
        flock.name = click.prompt('Name', default=flock.name)
        flock.description = click.prompt('Description',
                                         default=flock.description)
        flock.when = click.prompt('When', default=flock.when)
        flock.where = click.prompt('Where', default=flock.where)
        db.session.commit()
@admin.group()
def location():
    """Manage location suggestions."""
    pass
@location.command()
def list():
    """List location suggestions."""
    table = PrettyTable(['id', 'name', 'image'])
    with app.app_context():
        for loc in Location.query.all():
            table.add_row([loc.id, loc.name, loc.image_url])
    click.echo(table)
@location.command()
@click.option('--name', '-n', prompt='Location name', help='Location name')
@click.option('--image_url', '-i', prompt='Location image URL', help='Location image URL')
def add(name, image_url):
    """Add a location suggestion."""
    with app.app_context():
        loc = Location(name, image_url)
        db.session.add(loc)
        db.session.commit()
        # Capture the generated primary key while the object is still bound
        # to the session.
        id = loc.id
    click.echo('Created location {}'.format(id))
@location.command()
@click.argument('id')
@click.confirmation_option(prompt='Are you sure you want to delete this location?')
def remove(id):
    """Remove a location suggestion."""
    with app.app_context():
        loc = Location.query.get(id)
        db.session.delete(loc)
        db.session.commit()
@admin.group()
def data():
    """Manage database."""
    pass
@data.command()
def examples():
    """Drop / create tables, and seed examples."""
    # Destructive: wipes the configured database, then seeds three users,
    # two flocks with memberships, and three location suggestions.
    with app.app_context():
        click.confirm('Are you sure you want to reset {}?'.format(db.engine),
                      abort=True)
        db.drop_all()
        db.create_all()
        admin = User('admin', admin=True)
        nobody = User('nobody')
        foobar = User('foobar')
        db.session.add(admin)
        db.session.add(foobar)
        db.session.add(nobody)
        db.session.commit()
        f1 = Flock(name='Jupyter and Drinks',
                   description="Let's chat about all things Jupyter",
                   where='front door',
                   when='7 pm',
                   leader=admin)
        f2 = Flock(name='the life of scipy',
                   description="Where are we going next?",
                   where='back door',
                   when='7 pm',
                   leader=nobody)
        db.session.add(f1)
        db.session.add(f2)
        db.session.commit()
        # Memberships are added after the flocks exist so the association
        # rows reference committed ids.
        f1.birds.append(foobar)
        f1.birds.append(nobody)
        f2.birds.append(foobar)
        db.session.commit()
        db.session.add(Location('front door', 'http://placehold.it/350x150'))
        db.session.add(Location('back door', 'http://placehold.it/350x150'))
        db.session.add(Location('lobby', ''))
        db.session.commit()
@data.command()
def stress():
    """Drop / create tables, and seed 200 card test."""
    # Destructive: wipes the database and creates 200 flocks led by a single
    # admin user, for UI stress testing.
    with app.app_context():
        click.confirm('Are you sure you want to reset {}?'.format(db.engine),
                      abort=True)
        db.drop_all()
        db.create_all()
        admin = User('admin', admin=True)
        db.session.add(admin)
        db.session.commit()
        for i in range(200):
            f = Flock(name='Flock {}'.format(i),
                      description='Description of flock {}'.format(i),
                      where='front door',
                      when='later tonight',
                      leader=admin)
            db.session.add(f)
        db.session.commit()
        db.session.add(Location('front door', 'http://placehold.it/350x150'))
        db.session.commit()
@data.command()
def empty():
    """Create empty database tables."""
    with app.app_context():
        click.confirm('Are you sure you want to create tables in {}?'.format(db.engine),
                      abort=True)
        db.create_all()
@data.command()
def reset():
    """Drop and create empty database tables."""
    with app.app_context():
        click.confirm('Are you sure you want to reset {}?'.format(db.engine),
                      abort=True)
        db.drop_all()
        db.create_all()
if __name__ == '__main__':
    # Allow running the admin CLI directly as a script.
    admin()
| 26.372881 | 90 | 0.579692 |
acf81cc4d180d78a7814e5c73e1fa210f0236b8c | 2,071 | py | Python | osisoft/pidevclub/piwebapi/models/pi_items_substatus.py | jugillar/PI-Web-API-Client-Python | 9652e18384d8c66194c6d561d5ef01f60d820253 | [
"Apache-2.0"
] | 30 | 2019-01-03T03:09:25.000Z | 2022-03-30T17:42:54.000Z | osisoft/pidevclub/piwebapi/models/pi_items_substatus.py | jugillar/PI-Web-API-Client-Python | 9652e18384d8c66194c6d561d5ef01f60d820253 | [
"Apache-2.0"
] | null | null | null | osisoft/pidevclub/piwebapi/models/pi_items_substatus.py | jugillar/PI-Web-API-Client-Python | 9652e18384d8c66194c6d561d5ef01f60d820253 | [
"Apache-2.0"
] | 46 | 2018-11-07T14:46:35.000Z | 2022-03-31T12:23:39.000Z | # coding: utf-8
"""
Copyright 2018 OSIsoft, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
class PIItemsSubstatus(object):
    """Paged collection of PISubstatus entries with pagination links.

    Generated-model style class: ``swagger_types`` maps python attribute
    names to their declared types and ``attribute_map`` maps them to the
    JSON wire names.
    """
    swagger_types = {
        'items': 'list[PISubstatus]',
        'links': 'PIPaginationLinks',
    }
    attribute_map = {
        'items': 'Items',
        'links': 'Links',
    }
    def __init__(self, items=None, links=None):
        self._items = None
        self._links = None
        # Assign through the property setters only when a value was given,
        # so omitted fields stay None.
        if items is not None:
            self.items = items
        if links is not None:
            self.links = links
    @property
    def items(self):
        """The PISubstatus entries in this page."""
        return self._items
    @items.setter
    def items(self, items):
        self._items = items
    @property
    def links(self):
        """Pagination links for the collection."""
        return self._links
    @links.setter
    def links(self, links):
        self._links = links
    def to_dict(self):
        """Return a plain-dict form, recursing into nested model objects."""
        def _plain(value):
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value
        return {name: _plain(getattr(self, name))
                for name in self.swagger_types}
    def to_str(self):
        """Pretty-printed string of the model's dict form."""
        return pformat(self.to_dict())
    def __repr__(self):
        return self.to_str()
    def __ne__(self, other):
        return not self == other
    def __eq__(self, other):
        if not isinstance(other, PIItemsSubstatus):
            return False
        return self.__dict__ == other.__dict__
| 22.51087 | 73 | 0.688556 |
acf81cd0fe008125cc334795ae12a1e2339545f1 | 8,780 | py | Python | Pi/Deploy/Database/MsSql/DeployMsSqlModule.py | abs/Pi.Deploy | 202b0677fefebe43bde7c2b7ea1f30f8c5a34470 | [
"BSD-3-Clause"
] | 2 | 2015-07-16T05:20:29.000Z | 2015-07-18T05:50:49.000Z | Pi/Deploy/Database/MsSql/DeployMsSqlModule.py | abs/Pi.Deploy | 202b0677fefebe43bde7c2b7ea1f30f8c5a34470 | [
"BSD-3-Clause"
] | null | null | null | Pi/Deploy/Database/MsSql/DeployMsSqlModule.py | abs/Pi.Deploy | 202b0677fefebe43bde7c2b7ea1f30f8c5a34470 | [
"BSD-3-Clause"
] | null | null | null | #
# (c) Peralta Informatics 2007
# $Id: DeployMsSqlModule.py 33 2007-12-10 18:42:18Z andrei $
#
import clr
import sys
import os
import shutil
import re
clr.AddReference("System.Data")
clr.AddReference("System.Xml")
import System.Data
import System.Data.SqlClient
import System.Diagnostics
import System.Text
import System.Xml
from Pi.Deploy.Database.DeployDatabaseModule import DeployDatabaseModule
from Pi.Deploy import DeployUtilities
class DeployMsSqlModule(DeployDatabaseModule):
    """Deployment module for Microsoft SQL Server (IronPython / ADO.NET).

    Creates, drops and populates a SQL Server database described by a
    deployment configuration object, using System.Data.SqlClient against
    the server's `master` database with integrated security.
    """
    def __init__(self):
        pass
    def __GetNamespaceUri(self):
        # XML namespace identifying this module's configuration schema.
        return 'http://schemas.peralta-informatics.com/Deploy/Sql/MsSql/2007'
    NamespaceUri = property(__GetNamespaceUri)
    def CreateConnectionString(self, configuration):
        """Build an ADO.NET connection string from the configuration.

        Server and database name are mandatory; IntegratedSecurity,
        TrustedConnection and ApplicationName are appended only when the
        configuration object defines those attributes.
        """
        connectionString = System.String.Format("Server={0};Database={1}", configuration.Server, configuration.Name)
        if hasattr(configuration, 'IntegratedSecurity'):
            connectionString = System.String.Format("{0};Integrated Security='{1}'", connectionString, configuration.IntegratedSecurity)
        if hasattr(configuration, 'TrustedConnection'):
            connectionString = System.String.Format("{0};Trusted_Connection={1}", connectionString, configuration.TrustedConnection)
        if hasattr(configuration, 'ApplicationName'):
            connectionString = System.String.Format("{0};Application Name={1}", connectionString, configuration.ApplicationName)
        connectionString = System.String.Format("{0};", connectionString)
        return connectionString
    def DatabaseExists(self, configuration):
        """Return True if the configured database exists on the server."""
        exists = False
        connection = System.Data.SqlClient.SqlConnection()
        connection.ConnectionString = "Integrated Security=SSPI;Database=master;Server=%s" % (configuration.Server)
        try:
            connection.Open()
            try:
                command = connection.CreateCommand()
                command.CommandText = "select * from sys.databases where name = N'%s'" % (configuration.Name)
                # ExecuteScalar returns None when the query matched nothing.
                exists = command.ExecuteScalar()
                if exists != None:
                    exists = True
            except Exception, detail:
                print detail
                command.Dispose()
                raise
            else:
                command.Dispose()
        except:
            raise
        else:
            connection.Close()
        return exists
    def DropDatabase(self, configuration):
        """Run the BeforeDrop hooks, then drop the database if it exists."""
        for hook in configuration.Hooks:
            if hook.BeforeDrop is True:
                try:
                    # Each hook receives its own arguments plus the target
                    # connection string.
                    arguments = '%s "%s"' % (hook.Arguments, configuration.ConnectionString)
                    DeployUtilities.RunExternalCommand(hook.Executable, arguments)
                except System.ComponentModel.Win32Exception:
                    print 'Could not open "%s".' % (hook.Executable)
                    raise
                else:
                    print 'Ran hook [%s %s "%s"]' % (hook.Executable, hook.Arguments, configuration.ConnectionString)
        connection = System.Data.SqlClient.SqlConnection()
        connection.ConnectionString = "Integrated Security=SSPI;Database=master;Server=%s" % (configuration.Server)
        try:
            connection.Open()
            try:
                command = connection.CreateCommand()
                commandText = []
                commandText.append("if exists (select * from sys.databases where name = N'%s') " % (configuration.Name))
                commandText.append("drop database %s" % (configuration.Name))
                command.CommandText = ''.join(commandText)
                command.ExecuteNonQuery()
                print 'Dropping database %s on %s' % (configuration.Name, configuration.Server)
            except Exception, detail:
                print detail
                command.Dispose()
                raise
            else:
                command.Dispose()
        except System.Exception, e:
            print e
            raise
        else:
            connection.Close()
    def BuildDatabase(self, configuration):
        """Create the database (if missing) and grant db_owner access."""
        connection = System.Data.SqlClient.SqlConnection()
        connection.ConnectionString = "Integrated Security=SSPI;Database=master;Server=%s" % (configuration.Server)
        commands = []
        command = []
        command.append("if not exists (select * from sys.databases where name = N'%s') " % (configuration.Name))
        command.append("create database [%s]" % (configuration.Name))
        commands.append(''.join(command))
        command[:] = []
        command.append("use [%s]" % (configuration.Name))
        commands.append(''.join(command))
        command[:] = []
        builder = System.Text.StringBuilder()
        #
        # builder.AppendFormat("if not exists (select * from sys.server_principals where name = N'{0}')", configuration.Database.UserName)
        # builder.AppendFormat("create login [{0}] with password = N'{1}' else alter login [{0}] with password = N'{1}'", configuration.Database.UserName, configuration.Database.Password)
        # commands.append(builder.ToString())
        #
        # builder.Length = 0
        #
        # builder.AppendFormat("if not exists (select * from [{0}].sys.database_principals where name = N'{1}')", configuration.Database.Name, configuration.Database.UserName)
        # builder.AppendFormat("create user [{0}] for login [{0}]", configuration.Database.UserName)
        # commands.append(builder.ToString())
        #
        builder.Length = 0
        # builder.AppendFormat("execute sp_addrolemember N'db_owner', N'{0}'", configuration.Database.UserName)
        # Grants db_owner to the NETWORK SERVICE account rather than a
        # configured user (see the commented-out alternative above).
        builder.AppendFormat("execute sp_addrolemember N'db_owner', N'{0}'", 'NT AUTHORITY\NETWORK SERVICE')
        commands.append(builder.ToString())
        try:
            connection.Open()
            for command in commands:
                try:
                    dbCommand = connection.CreateCommand()
                    dbCommand.CommandText = command
                    dbCommand.ExecuteNonQuery()
                except Exception, detail:
                    dbCommand.Dispose()
                    raise
                else:
                    dbCommand.Dispose()
        except:
            raise
        else:
            print 'Creating database %s on %s' % (configuration.Name, configuration.Server)
            connection.Close()
    def PopulateDatabase(self, configuration):
        """Run each configured SQL script (split on GO batch separators),
        then the post-drop hooks, then populate any nested sub-modules."""
        connection = System.Data.SqlClient.SqlConnection()
        connection.ConnectionString = "Integrated Security=SSPI;Database=master;Server=%s" % (configuration.Server)
        try:
            connection.Open()
            # 'GO' is a client-side batch separator, not T-SQL, so batches
            # must be split and executed individually.
            goRegexp = re.compile(r'^go[ \t]*\n', re.IGNORECASE | re.MULTILINE | re.UNICODE)
            for encodedScriptPath in configuration.Scripts:
                scriptPath = DeployUtilities.ExpandEnvironmentVariables(encodedScriptPath)
                print 'Running %s ...' % (scriptPath)
                f = open(scriptPath)
                try:
                    normalizedText = goRegexp.sub('go\n', f.read())
                    sql = 'use %s\ngo\n%s' % (configuration.Name, normalizedText)
                    for sqlCommand in goRegexp.split(sql):
                        if len(sqlCommand.strip()) != 0:
                            try:
                                # print 'DEBUG: %s' % (sqlCommand)
                                command = connection.CreateCommand()
                                command.CommandText = sqlCommand
                                command.ExecuteNonQuery()
                            finally:
                                command.Dispose()
                finally:
                    f.close()
            # Hooks with BeforeDrop False run after the scripts.
            for hook in configuration.Hooks:
                if hook.BeforeDrop is False:
                    try:
                        arguments = '%s "%s"' % (hook.Arguments, configuration.ConnectionString)
                        DeployUtilities.RunExternalCommand(hook.Executable, arguments)
                    except System.ComponentModel.Win32Exception:
                        print 'Could not open "%s".' % (hook.Executable)
                        raise
                    else:
                        print 'Ran hook "%s %s %s"' % (hook.Executable, configuration.ConnectionString, hook.Arguments)
            for module in configuration.Modules.values():
                module.PopulateDatabase(configuration)
        except Exception, detail:
            print detail
            raise
        else:
            print 'Populated database %s on %s' % (configuration.Name, configuration.Server)
            connection.Close()
| 33.007519 | 186 | 0.575854 |
acf81d5f97b92c91a20a84bf1525a4b91cbf35c4 | 8,770 | py | Python | forte/processors/tests/allennlp_processors_test.py | atif93/forte | 2b70531abfb54fc5c2429ee26a501ad5c9094f3c | [
"Apache-2.0"
] | null | null | null | forte/processors/tests/allennlp_processors_test.py | atif93/forte | 2b70531abfb54fc5c2429ee26a501ad5c9094f3c | [
"Apache-2.0"
] | 13 | 2019-12-01T04:51:38.000Z | 2020-02-11T23:55:11.000Z | forte/processors/tests/allennlp_processors_test.py | gpengzhi/forte | 2b70531abfb54fc5c2429ee26a501ad5c9094f3c | [
"Apache-2.0"
] | null | null | null | """This module tests LowerCaser processor."""
import unittest
from ddt import ddt, data, unpack
from forte.pipeline import Pipeline
from forte.data.readers import StringReader
from forte.processors.allennlp_processors import AllenNLPProcessor
from forte.processors.spacy_processors import SpacyProcessor
from ft.onto.base_ontology import Sentence, Token, Dependency
from forte.common import ProcessorConfigError
@ddt
class TestAllenNLPProcessor(unittest.TestCase):
def setUp(self):
self.document = "This tool is called Forte. The goal of this project " \
"to help you build NLP pipelines. NLP has never been " \
"made this easy before."
self.tokens = [["This", "tool", "is", "called", "Forte", "."],
["The", "goal", "of", "this", "project", "to", "help",
"you", "build", "NLP", "pipelines", "."],
["NLP", "has", "never", "been", "made", "this", "easy",
"before", "."]]
self.pos = {
'stanford_dependencies': [
['DT', 'NN', 'VBZ', 'VBN', 'NNP', '.'],
['DT', 'NN', 'IN', 'DT', 'NN', 'TO', 'VB', 'PRP', 'VB', 'NNP',
'NNS', '.'],
['NNP', 'VBZ', 'RB', 'VBN', 'VBN', 'DT', 'JJ', 'RB', '.'],
],
'universal_dependencies': [
['DET', 'NOUN', 'AUX', 'VERB', 'PROPN', 'PUNCT'],
['DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'PART', 'VERB', 'PRON',
'VERB', 'PROPN', 'NOUN', 'PUNCT'],
['PROPN', 'AUX', 'ADV', 'AUX', 'VERB', 'DET', 'ADJ', 'ADV',
'PUNCT'],
],
}
self.deps = {
'stanford_dependencies': [
['det', 'nsubjpass', 'auxpass', 'root', 'xcomp', 'punct'],
['det', 'root', 'prep', 'det', 'pobj', 'aux', 'infmod', 'nsubj',
'ccomp', 'nn', 'dobj', 'punct'],
['nsubjpass', 'aux', 'neg', 'auxpass', 'root', 'det', 'xcomp',
'advmod', 'punct'],
],
'universal_dependencies': [
['det', 'nsubj:pass', 'aux:pass', 'root', 'xcomp', 'punct'],
['det', 'root', 'case', 'det', 'nmod', 'mark', 'acl', 'obj',
'xcomp', 'compound', 'obj', 'punct'],
['nsubj:pass', 'aux', 'advmod', 'aux:pass', 'root', 'det',
'obj', 'advmod', 'punct'],
]
}
self.dep_heads = {
'stanford_dependencies': [
[2, 4, 4, 0, 4, 4],
[2, 0, 2, 5, 3, 7, 5, 9, 7, 11, 9, 2],
[5, 5, 5, 5, 0, 7, 5, 5, 5],
],
'universal_dependencies': [
[2, 4, 4, 0, 4, 4],
[2, 0, 5, 5, 2, 7, 2, 7, 7, 11, 9, 2],
[5, 5, 5, 5, 0, 7, 5, 5, 5],
]
}
@data(
"tokenize",
"tokenize,pos",
"tokenize,pos,depparse",
"tokenize,depparse",
"",
"pos", # nothing will be output by processor
"depparse", # nothing will be output by processor
)
def test_allennlp_processor_with_different_processors(self, processors):
nlp = self._create_pipeline({
'processors': processors
})
pack = nlp.process(self.document)
if processors == "":
processors = AllenNLPProcessor.default_configs()['processors']
output_format = AllenNLPProcessor.default_configs()['output_format']
self._check_results(pack, processors, output_format)
@data(
"stanford_dependencies",
"universal_dependencies",
"random_dependencies",
)
def test_allennlp_processor_with_different_output_formats(self, format):
if format == "random_dependencies":
with self.assertRaises(ProcessorConfigError):
self._create_pipeline({'output_format': format})
else:
nlp = self._create_pipeline({'output_format': format})
pack = nlp.process(self.document)
processors = AllenNLPProcessor.default_configs()['processors']
self._check_results(pack, processors, format)
@data(
(True, True),
(True, False),
(False, True),
(False, False),
)
@unpack
def test_allennlp_processor_with_existing_entries(self, overwrite_entries,
allow_parallel_entries):
config = {
'overwrite_entries': overwrite_entries,
'allow_parallel_entries': allow_parallel_entries
}
nlp = self._create_pipeline(config)
# Adding extra processor to have existing tokens and dependencies
nlp.add_processor(processor=AllenNLPProcessor(), config=config)
nlp.initialize()
if not overwrite_entries and not allow_parallel_entries:
# Processor should raise config error when both the flags are False
# and existing entries are found
with self.assertRaises(ProcessorConfigError):
nlp.process(self.document)
else:
pack = nlp.process(self.document)
processors = AllenNLPProcessor.default_configs()['processors']
output_format = AllenNLPProcessor.default_configs()['output_format']
if not overwrite_entries:
if allow_parallel_entries:
# Should raise AssertionError due to duplicate tokens
with self.assertRaises(AssertionError):
self._check_results(pack, processors, output_format)
else:
self._check_results(pack, processors, output_format)
@data(
"This tool is called Forte tool.",
"NLP NLP NLP NLP.",
"AllenNLP does NLP.",
)
def test_allennlp_processor_with_repeating_words(self, sentence):
processors = "tokenize"
nlp = self._create_pipeline({
'processors': processors
})
self.document = sentence
self.tokens = [sentence.replace('.', ' .').split()]
pack = nlp.process(self.document)
output_format = AllenNLPProcessor.default_configs()['output_format']
self._check_results(pack, processors, output_format)
def _check_results(self, pack, processors, output_format):
# checking the whole datapack text
self.assertEqual(pack.text, self.document)
if "tokenize" in processors:
deps = [dep for dep in pack.get(Dependency)]
offset = 0
for i, sentence in enumerate(pack.get(Sentence)):
# checking the tokens and pos
tokens = self._test_tokenizer(pack, sentence, i,
processors, output_format)
if "depparse" in processors:
# checking the dependencies
self._test_dependencies(i, tokens, deps, offset,
output_format)
offset += len(self.tokens[i])
@staticmethod
def _create_pipeline(config):
nlp = Pipeline()
nlp.set_reader(StringReader())
# Using SpacyProcessor to segment the sentences
nlp.add_processor(processor=SpacyProcessor(), config={
'processors': '',
'lang': "en_core_web_sm", # Language code to build the Pipeline
'use_gpu': False
})
nlp.add_processor(processor=AllenNLPProcessor(), config=config)
nlp.initialize()
return nlp
def _test_tokenizer(self, pack, sentence, sent_idx,
processors, output_format):
tokens = []
for j, token in enumerate(
pack.get(entry_type=Token, range_annotation=sentence)):
self.assertEqual(token.text, self.tokens[sent_idx][j])
self._test_pos(sent_idx, token, j, processors, output_format)
tokens.append(token)
return tokens
def _test_pos(self, sent_idx, token, token_idx,
processors, output_format):
exp_pos = self.pos[output_format][sent_idx][token_idx] \
if "pos" in processors else None
self.assertEqual(token.pos, exp_pos)
def _test_dependencies(self, sent_idx, tokens, deps, offset, output_format):
for j, dep in enumerate(deps[offset:offset +
len(self.tokens[sent_idx])]):
self.assertEqual(dep.get_parent(),
tokens[self.dep_heads[output_format][sent_idx][j] - 1])
self.assertEqual(dep.rel_type,
self.deps[output_format][sent_idx][j])
| 39.863636 | 80 | 0.541163 |
acf81e7e2947dfc9d22a1ba29d64d96570e2df5d | 5,517 | py | Python | electrum_redd/constants.py | reddcoin-project/electrum-reddcoin | b40266c18c149392e8c8cc6074d4c35da21004e6 | [
"MIT"
] | 1 | 2022-01-17T23:05:46.000Z | 2022-01-17T23:05:46.000Z | electrum_redd/constants.py | reddcoin-project/electrum-reddcoin | b40266c18c149392e8c8cc6074d4c35da21004e6 | [
"MIT"
] | 6 | 2020-11-03T16:31:13.000Z | 2021-02-16T11:25:53.000Z | electrum_redd/constants.py | reddcoin-project/electrum-reddcoin | b40266c18c149392e8c8cc6074d4c35da21004e6 | [
"MIT"
] | 1 | 2020-09-16T18:56:45.000Z | 2020-09-16T18:56:45.000Z | # -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2018 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import json
from .util import inv_dict
from . import bitcoin
def read_json(filename, default):
    """Load a JSON resource bundled next to this module.

    :param filename: file name relative to this module's directory.
    :param default: value returned when the file is missing or unparsable.
    :return: the decoded JSON content, or ``default`` on failure.
    """
    path = os.path.join(os.path.dirname(__file__), filename)
    try:
        with open(path, 'r') as f:
            return json.load(f)
    except (OSError, ValueError):
        # Missing or malformed bundled data falls back to the default instead
        # of crashing at import time.  The bare ``except:`` this replaces also
        # swallowed SystemExit/KeyboardInterrupt, which was never intended.
        return default
GIT_REPO_URL = "https://github.com/reddcoin-project/electrum-redd"
GIT_REPO_ISSUES_URL = "https://github.com/reddcoin-project/electrum-redd/issues"
BIP39_WALLET_FORMATS = read_json('bip39_wallet_formats.json', [])
class AbstractNet:
    """Base class holding chain parameters shared by all networks."""

    # Height before which lightning channels are ignored; subclasses override.
    BLOCK_HEIGHT_FIRST_LIGHTNING_CHANNELS = 0

    @classmethod
    def max_checkpoint(cls) -> int:
        """Return the highest block height covered by the checkpoints."""
        # Each checkpoint entry spans one 2016-block retargeting period.
        covered = len(cls.CHECKPOINTS) * 2016 - 1
        return covered if covered > 0 else 0

    @classmethod
    def rev_genesis_bytes(cls) -> bytes:
        """Return the genesis hash as byte-reversed raw bytes."""
        reversed_hex = bitcoin.rev_hex(cls.GENESIS)
        return bytes.fromhex(reversed_hex)
class BitcoinMainnet(AbstractNet):
    """Chain parameters for the Reddcoin main network."""

    TESTNET = False
    # Base58 version bytes for WIF keys and legacy addresses.
    WIF_PREFIX = 0xbd
    ADDRTYPE_P2PKH = 61
    ADDRTYPE_P2SH = 5
    # Bech32 human-readable part for segwit addresses.
    SEGWIT_HRP = "rdd"
    # Genesis block hash (big-endian hex).
    GENESIS = "b868e0d95a3c3c0e0dadc67ee587aaf9dc8acbf99e3b4b3110fad4eb74c1decc"
    # Last proof-of-work block; presumably PoSV takes over afterwards -- TODO confirm
    LAST_POW_BLOCK = 260799
    # Default Electrum server ports: 't' = TCP, 's' = SSL.
    DEFAULT_PORTS = {'t': '50001', 's': '50002'}
    DEFAULT_SERVERS = read_json('servers.json', {})
    CHECKPOINTS = read_json('checkpoints.json', [])
    HEADERS_URL = "https://download.reddcoin.com/bin/electrum/headers/blockchain_headers"
    BLOCK_HEIGHT_FIRST_LIGHTNING_CHANNELS = 497000
    # BIP32 extended-key version bytes per script type.
    XPRV_HEADERS = {
        'standard': 0x0488ade4,  # xprv
        'p2wpkh-p2sh': 0x049d7878,  # yprv
        'p2wsh-p2sh': 0x0295b005,  # Yprv
        'p2wpkh': 0x04b2430c,  # zprv
        'p2wsh': 0x02aa7a99,  # Zprv
    }
    XPRV_HEADERS_INV = inv_dict(XPRV_HEADERS)
    XPUB_HEADERS = {
        'standard': 0x0488b21e,  # xpub
        'p2wpkh-p2sh': 0x049d7cb2,  # ypub
        'p2wsh-p2sh': 0x0295b43f,  # Ypub
        'p2wpkh': 0x04b24746,  # zpub
        'p2wsh': 0x02aa7ed3,  # Zpub
    }
    XPUB_HEADERS_INV = inv_dict(XPUB_HEADERS)
    # BIP44 registered coin type for derivation paths.
    BIP44_COIN_TYPE = 4
    LN_REALM_BYTE = 0
    LN_DNS_SEEDS = [
        'nodes.lightning.directory.',
        'lseed.bitcoinstats.com.',
    ]
class BitcoinTestnet(AbstractNet):
    """Chain parameters for the Reddcoin test network."""

    TESTNET = True
    # Base58 version bytes for WIF keys and legacy addresses.
    WIF_PREFIX = 0xef
    ADDRTYPE_P2PKH = 111
    ADDRTYPE_P2SH = 196
    # Bech32 human-readable part for segwit addresses.
    SEGWIT_HRP = "trdd"
    GENESIS = "a12ac9bd4cd26262c53a6277aafc61fe9dfe1e2b05eaa1ca148a5be8b394e35a"
    LAST_POW_BLOCK = 349
    DEFAULT_PORTS = {'t': '51001', 's': '51002'}
    DEFAULT_SERVERS = read_json('servers_testnet.json', {})
    CHECKPOINTS = read_json('checkpoints_testnet.json', [])
    HEADERS_URL = "https://download.reddcoin.com/bin/electrum/headers/testnet/blockchain_headers"
    # BIP32 extended-key version bytes per script type (testnet variants).
    XPRV_HEADERS = {
        'standard': 0x04358394,  # tprv
        'p2wpkh-p2sh': 0x044a4e28,  # uprv
        'p2wsh-p2sh': 0x024285b5,  # Uprv
        'p2wpkh': 0x045f18bc,  # vprv
        'p2wsh': 0x02575048,  # Vprv
    }
    XPRV_HEADERS_INV = inv_dict(XPRV_HEADERS)
    XPUB_HEADERS = {
        'standard': 0x043587cf,  # tpub
        'p2wpkh-p2sh': 0x044a5262,  # upub
        'p2wsh-p2sh': 0x024289ef,  # Upub
        'p2wpkh': 0x045f1cf6,  # vpub
        'p2wsh': 0x02575483,  # Vpub
    }
    XPUB_HEADERS_INV = inv_dict(XPUB_HEADERS)
    # NOTE(review): BIP44 testnets conventionally use coin type 1; this keeps
    # the mainnet value 4 -- confirm this is intentional.
    BIP44_COIN_TYPE = 4
    LN_REALM_BYTE = 1
    LN_DNS_SEEDS = [  # TODO investigate this again
        #'test.nodes.lightning.directory.', # times out.
        #'lseed.bitcoinstats.com.', # ignores REALM byte and returns mainnet peers...
    ]
class BitcoinRegtest(BitcoinTestnet):
    """Regression-test network: testnet parameters with local overrides."""

    SEGWIT_HRP = "rrdd"
    GENESIS = "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"
    DEFAULT_SERVERS = read_json('servers_regtest.json', {})
    # No checkpoints or LN seeds on a throwaway local chain.
    CHECKPOINTS = []
    LN_DNS_SEEDS = []
class BitcoinSimnet(BitcoinTestnet):
    """Simulation network (btcd/lnd-style simnet) parameters."""

    WIF_PREFIX = 0x64
    ADDRTYPE_P2PKH = 0x3f
    ADDRTYPE_P2SH = 0x7b
    SEGWIT_HRP = "sb"
    GENESIS = "683e86bd5c6d110d91b94b97137ba6bfe02dbbdb8e3dff722a669b5d69d77af6"
    DEFAULT_SERVERS = read_json('servers_regtest.json', {})
    # No checkpoints or LN seeds on a simulated chain.
    CHECKPOINTS = []
    LN_DNS_SEEDS = []
# don't import net directly, import the module instead (so that net is singleton)
net = BitcoinMainnet
def set_simnet():
    """Switch the active network singleton to simnet."""
    global net
    net = BitcoinSimnet
def set_mainnet():
    """Switch the active network singleton to mainnet."""
    global net
    net = BitcoinMainnet
def set_testnet():
    """Switch the active network singleton to testnet."""
    global net
    net = BitcoinTestnet
def set_regtest():
    """Switch the active network singleton to regtest."""
    global net
    net = BitcoinRegtest
| 30.994382 | 97 | 0.680805 |
acf81f66ee61f3fd0e63b192b69b00989a1eb1e5 | 690 | py | Python | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/gender_view_service/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/gender_view_service/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/gender_view_service/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import GenderViewServiceClient
__all__ = (
'GenderViewServiceClient',
)
| 32.857143 | 74 | 0.749275 |
acf81fdfcedf3932750603d54c1e38afbc72cde3 | 9,555 | py | Python | devp2p/tests/test_go_handshake.py | vaporyproject/pydevp2p | 084d58bd98e1573ccce82fbb766ff06fb6831fe4 | [
"MIT"
] | null | null | null | devp2p/tests/test_go_handshake.py | vaporyproject/pydevp2p | 084d58bd98e1573ccce82fbb766ff06fb6831fe4 | [
"MIT"
] | null | null | null | devp2p/tests/test_go_handshake.py | vaporyproject/pydevp2p | 084d58bd98e1573ccce82fbb766ff06fb6831fe4 | [
"MIT"
] | null | null | null | # https://gist.github.com/fjl/3a78780d17c755d22df2 # data used here
# https://github.com/ethereum/cpp-ethereum/blob/develop/test/rlpx.cpp#L183
# https://gist.github.com/fjl/6dd7f51f1bf226488e00
from devp2p.rlpxcipher import RLPxSession
from devp2p.crypto import ECCx, privtopub
test_values = \
{
"initiator_private_key": "5e173f6ac3c669587538e7727cf19b782a4f2fda07c1eaa662c593e5e85e3051",
"receiver_private_key": "c45f950382d542169ea207959ee0220ec1491755abe405cd7498d6b16adb6df8",
"initiator_ephemeral_private_key": "19c2185f4f40634926ebed3af09070ca9e029f2edd5fae6253074896205f5f6c",
"receiver_ephemeral_private_key": "d25688cf0ab10afa1a0e2dba7853ed5f1e5bf1c631757ed4e103b593ff3f5620",
"auth_plaintext": "884c36f7ae6b406637c1f61b2f57e1d2cab813d24c6559aaf843c3f48962f32f46662c066d39669b7b2e3ba14781477417600e7728399278b1b5d801a519aa570034fdb5419558137e0d44cd13d319afe5629eeccb47fd9dfe55cc6089426e46cc762dd8a0636e07a54b31169eba0c7a20a1ac1ef68596f1f283b5c676bae4064abfcce24799d09f67e392632d3ffdc12e3d6430dcb0ea19c318343ffa7aae74d4cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb1100",
"authresp_plaintext": "802b052f8b066640bba94a4fc39d63815c377fced6fcb84d27f791c9921ddf3e9bf0108e298f490812847109cbd778fae393e80323fd643209841a3b7f110397f37ec61d84cea03dcc5e8385db93248584e8af4b4d1c832d8c7453c0089687a700",
"auth_ciphertext": "04a0274c5951e32132e7f088c9bdfdc76c9d91f0dc6078e848f8e3361193dbdc43b94351ea3d89e4ff33ddcefbc80070498824857f499656c4f79bbd97b6c51a514251d69fd1785ef8764bd1d262a883f780964cce6a14ff206daf1206aa073a2d35ce2697ebf3514225bef186631b2fd2316a4b7bcdefec8d75a1025ba2c5404a34e7795e1dd4bc01c6113ece07b0df13b69d3ba654a36e35e69ff9d482d88d2f0228e7d96fe11dccbb465a1831c7d4ad3a026924b182fc2bdfe016a6944312021da5cc459713b13b86a686cf34d6fe6615020e4acf26bf0d5b7579ba813e7723eb95b3cef9942f01a58bd61baee7c9bdd438956b426a4ffe238e61746a8c93d5e10680617c82e48d706ac4953f5e1c4c4f7d013c87d34a06626f498f34576dc017fdd3d581e83cfd26cf125b6d2bda1f1d56",
"authresp_ciphertext": "049934a7b2d7f9af8fd9db941d9da281ac9381b5740e1f64f7092f3588d4f87f5ce55191a6653e5e80c1c5dd538169aa123e70dc6ffc5af1827e546c0e958e42dad355bcc1fcb9cdf2cf47ff524d2ad98cbf275e661bf4cf00960e74b5956b799771334f426df007350b46049adb21a6e78ab1408d5e6ccde6fb5e69f0f4c92bb9c725c02f99fa72b9cdc8dd53cff089e0e73317f61cc5abf6152513cb7d833f09d2851603919bf0fbe44d79a09245c6e8338eb502083dc84b846f2fee1cc310d2cc8b1b9334728f97220bb799376233e113",
"ecdhe_shared_secret": "e3f407f83fc012470c26a93fdff534100f2c6f736439ce0ca90e9914f7d1c381",
"initiator_nonce": "cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb11",
"receiver_nonce": "f37ec61d84cea03dcc5e8385db93248584e8af4b4d1c832d8c7453c0089687a7",
"aes_secret": "c0458fa97a5230830e05f4f20b7c755c1d4e54b1ce5cf43260bb191eef4e418d",
"mac_secret": "48c938884d5067a1598272fcddaa4b833cd5e7d92e8228c0ecdfabbe68aef7f1",
"token": "3f9ec2592d1554852b1f54d228f042ed0a9310ea86d038dc2b401ba8cd7fdac4",
"initial_egress_MAC": "09771e93b1a6109e97074cbe2d2b0cf3d3878efafe68f53c41bb60c0ec49097e",
"initial_ingress_MAC": "75823d96e23136c89666ee025fb21a432be906512b3dd4a3049e898adb433847",
"initiator_hello_packet": "6ef23fcf1cec7312df623f9ae701e63b550cdb8517fefd8dd398fc2acd1d935e6e0434a2b96769078477637347b7b01924fff9ff1c06df2f804df3b0402bbb9f87365b3c6856b45e1e2b6470986813c3816a71bff9d69dd297a5dbd935ab578f6e5d7e93e4506a44f307c332d95e8a4b102585fd8ef9fc9e3e055537a5cec2e9",
"receiver_hello_packet": "6ef23fcf1cec7312df623f9ae701e63be36a1cdd1b19179146019984f3625d4a6e0434a2b96769050577657247b7b02bc6c314470eca7e3ef650b98c83e9d7dd4830b3f718ff562349aead2530a8d28a8484604f92e5fced2c6183f304344ab0e7c301a0c05559f4c25db65e36820b4b909a226171a60ac6cb7beea09376d6d8"
}
# Decode all fixture values from hex into raw byte strings.
# NOTE(review): str.decode('hex') is Python 2 only; a Python 3 port would
# need binascii.unhexlify / bytes.fromhex here.
for k, v in test_values.items():
    test_values[k] = v.decode('hex')
# Sanity check: the fixture must contain exactly these keys, grouped by the
# handshake phase they belong to.
keys = ['initiator_private_key',
        'receiver_private_key',
        'initiator_ephemeral_private_key',
        'receiver_ephemeral_private_key',
        'initiator_nonce',
        'receiver_nonce',
        # auth
        'auth_plaintext',
        'auth_ciphertext',
        # auth response
        'authresp_plaintext',
        'authresp_ciphertext',
        # on ack receive
        'ecdhe_shared_secret',
        'aes_secret',
        'mac_secret',
        'token',
        'initial_egress_MAC',
        'initial_ingress_MAC',
        # messages
        'initiator_hello_packet',
        'receiver_hello_packet'
        ]
assert set(keys) == set(test_values.keys())
# see also
# https://github.com/ethereum/cpp-ethereum/blob/develop/test/rlpx.cpp#L183
def test_ecies_decrypt():
    """The receiver's key must decrypt the auth ciphertext to the known plaintext."""
    receiver = ECCx(raw_privkey=test_values['receiver_private_key'])
    plaintext = receiver.ecies_decrypt(test_values['auth_ciphertext'])
    expected = test_values['auth_plaintext']
    assert len(plaintext) == len(expected)
    assert plaintext == expected
def test_handshake():
tv = test_values
initiator = RLPxSession(ECCx(raw_privkey=tv['initiator_private_key']),
is_initiator=True,
ephemeral_privkey=tv['initiator_ephemeral_private_key'])
initiator_pubkey = initiator.ecc.raw_pubkey
responder = RLPxSession(ECCx(raw_privkey=tv['receiver_private_key']),
ephemeral_privkey=tv['receiver_ephemeral_private_key'])
responder_pubkey = responder.ecc.raw_pubkey
# test encryption
_enc = initiator.encrypt_auth_message(tv['auth_plaintext'], responder_pubkey)
assert len(_enc) == len(tv['auth_ciphertext'])
assert len(tv['auth_ciphertext']) == 113 + len(tv['auth_plaintext']) # len
# test auth_msg plain
auth_msg = initiator.create_auth_message(remote_pubkey=responder_pubkey,
ephemeral_privkey=tv[
'initiator_ephemeral_private_key'],
nonce=tv['initiator_nonce'])
# test auth_msg plain
assert len(auth_msg) == len(tv['auth_plaintext']) == 194
assert auth_msg[65:] == tv['auth_plaintext'][65:] # starts with non deterministic k
_auth_msg_cipher = initiator.encrypt_auth_message(auth_msg, responder_pubkey)
# test shared
responder.ecc.get_ecdh_key(initiator_pubkey) == \
initiator.ecc.get_ecdh_key(responder_pubkey)
# test decrypt
assert auth_msg == responder.ecc.ecies_decrypt(_auth_msg_cipher)
# check receive
responder_ephemeral_pubkey = privtopub(tv['receiver_ephemeral_private_key'])
auth_msg_cipher = tv['auth_ciphertext']
auth_msg = responder.ecc.ecies_decrypt(auth_msg_cipher)
assert auth_msg[65:] == tv['auth_plaintext'][65:] # starts with non deterministic k
responder.decode_authentication(auth_msg_cipher)
auth_ack_msg = responder.create_auth_ack_message(responder_ephemeral_pubkey,
tv['receiver_nonce'],
responder.remote_token_found
)
assert auth_ack_msg == tv['authresp_plaintext']
auth_ack_msg_cipher = responder.encrypt_auth_ack_message(auth_ack_msg, responder.remote_pubkey)
# set auth ack msg cipher (needed later for mac calculation)
responder.auth_ack = tv['authresp_ciphertext']
responder.setup_cipher()
assert responder.ecdhe_shared_secret == tv['ecdhe_shared_secret']
assert len(responder.token) == len(tv['token'])
assert responder.token == tv['token']
assert responder.aes_secret == tv['aes_secret']
assert responder.mac_secret == tv['mac_secret']
assert responder.initiator_nonce == tv['initiator_nonce']
assert responder.responder_nonce == tv['receiver_nonce']
assert responder.auth_init == tv['auth_ciphertext']
assert responder.auth_ack == tv['authresp_ciphertext']
# test values are from initiator perspective?
assert responder.ingress_mac.digest() == tv['initial_egress_MAC']
assert responder.ingress_mac.digest() == tv['initial_egress_MAC']
assert responder.egress_mac.digest() == tv['initial_ingress_MAC']
assert responder.egress_mac.digest() == tv['initial_ingress_MAC']
r = responder.decrypt(tv['initiator_hello_packet'])
# unpack hello packet
import struct
import rlp
import rlp.sedes as sedes
from rlp.codec import consume_item
header = r['header']
frame_length = struct.unpack('>I', '\x00' + header[:3])[0]
header_sedes = sedes.List([sedes.big_endian_int, sedes.big_endian_int])
header_data = rlp.decode(header[3:], strict=False, sedes=header_sedes)
print 'header', repr(header_data)
# frame
frame = r['frame']
# normal: rlp(packet-type) [|| rlp(packet-data)] || padding
packet_type, end = consume_item(frame, start=0)
packet_type = rlp.decode(frame, sedes=sedes.big_endian_int, strict=False)
print 'packet_type', repr(packet_type)
# decode hello body
_sedes_capabilites_tuple = sedes.List([sedes.binary, sedes.big_endian_int])
structure = [
('version', sedes.big_endian_int),
('client_version', sedes.big_endian_int),
('capabilities', sedes.CountableList(_sedes_capabilites_tuple)),
('listen_port', sedes.big_endian_int),
('nodeid', sedes.binary)
]
hello_sedes = sedes.List([x[1] for x in structure])
frame_data = rlp.decode(frame[end:], sedes=hello_sedes)
frame_data = dict((structure[i][0], x) for i, x in enumerate(frame_data))
print 'frame', frame_data
| 53.083333 | 644 | 0.757405 |
acf82173a6fbbaa279afecb41f5b95b174c27cbe | 1,565 | py | Python | src/pycontw2016/urls.py | peihsuan/pycon.tw | 4d75e629295b3eef92eff78b3604ab034bd406b0 | [
"MIT"
] | null | null | null | src/pycontw2016/urls.py | peihsuan/pycon.tw | 4d75e629295b3eef92eff78b3604ab034bd406b0 | [
"MIT"
] | null | null | null | src/pycontw2016/urls.py | peihsuan/pycon.tw | 4d75e629295b3eef92eff78b3604ab034bd406b0 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.conf.urls.static import static
from django.contrib import admin
from django.views.i18n import set_language
from core.views import error_page, flat_page, index
from users.views import user_dashboard
# Language-prefixed URLs (e.g. /en/..., /zh-hant/...).
urlpatterns = i18n_patterns(
    # Add top-level URL patterns here.
    url(r'^$', index, name='index'),
    url(r'^dashboard/$', user_dashboard, name='user_dashboard'),
    url(r'^accounts/', include('users.urls')),
    url(r'^events/', include('events.urls')),
    url(r'^proposals/', include('proposals.urls')),
    url(r'^reviews/', include('reviews.urls')),
    url(r'^sponsors/', include('sponsors.urls')),
    # Match everything except admin, media, static, and error pages.
    # The negative lookahead excludes reserved prefixes so flat pages can
    # claim any remaining path.
    url(r'^(?!admin|{media}|{static}|404|500/)(?P<path>.*)/$'.format(
        media=settings.MEDIA_URL.strip('/'),
        static=settings.STATIC_URL.strip('/')),
        flat_page, name='page'),
    url(r'^(?P<code>404|500)/$', error_page),
)
# These should not be prefixed with language.
urlpatterns += [
    url(r'^ccip/', include('ccip.urls')),
    url(r'^set-language/$', set_language, name='set_language'),
    url(r'^admin/', admin.site.urls),
]
# User-uploaded files like profile pics need to be served in development.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Debug Toolbar's URL.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += [url(r'^__debug__/', include(debug_toolbar.urls))]
| 34.021739 | 76 | 0.692652 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.