Dataset columns (dtype, with observed value lengths, ranges, or distinct-class counts; ⌀ marks nullable columns):

| Column | Dtype | Observed range / classes |
|---|---|---|
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 to 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] (⌀) | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns] (⌀) | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
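To make the schema concrete, here is a minimal, hypothetical Python sketch of how rows with these columns might be filtered, for example keeping permissive-licensed, non-vendored, non-generated Python files under a size cap. The sample `rows` simply reuse metadata from two of the records shown below; the `keep` helper and its `max_bytes` threshold are illustrative assumptions, not part of the dataset.

```python
# Sketch: filter rows that follow the schema above.
# Each row is assumed to be a plain dict keyed by the column names;
# the two sample rows reuse metadata from records in this dump.

rows = [
    {
        "repo_name": "totalhack/zillion",
        "path": "/zillion/model.py",
        "language": "Python",
        "license_type": "permissive",
        "detected_licenses": ["MIT"],
        "is_vendor": False,
        "is_generated": False,
        "length_bytes": 1718,
        "star_events_count": 113,
    },
    {
        "repo_name": "PaddlePaddle/Paddle",
        "path": "/test/collective/fleet/test_fleet_checkpoint.py",
        "language": "Python",
        "license_type": "permissive",
        "detected_licenses": ["Apache-2.0"],
        "is_vendor": False,
        "is_generated": False,
        "length_bytes": 4435,
        "star_events_count": 20414,
    },
]


def keep(row, max_bytes=100_000):
    """Keep permissive-licensed, hand-written Python files below a size cap."""
    return (
        row["language"] == "Python"
        and row["license_type"] == "permissive"
        and not row["is_vendor"]
        and not row["is_generated"]
        and row["length_bytes"] <= max_bytes
    )


selected = [r for r in rows if keep(r)]
for r in selected:
    print(f'{r["repo_name"]}{r["path"]} ({r["length_bytes"]} bytes)')
```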

**Record 1**

- repo_name: totalhack/zillion · path: /zillion/model.py · filename: model.py · extension: py
- blob_id: a4a2676f4a768fd3892fd3b055e4e6c17d730790 · directory_id: e42cf94d74deff0422df545014092c080d34bf7b · content_id: 857a3099619c6ef1fa056b6a0e72a795c3bd5db2
- snapshot_id: 13e9af40caab2922ecf358833d28af74ae088f02 · revision_id: 0fd52dae55acf002083e31b7cf656c698e3a5a60 · branch_name: refs/heads/master
- detected_licenses: ["MIT"] · license_type: permissive · gha_license_id: NOASSERTION
- visit_date: 2023-09-01T13:01:45.357604 · revision_date: 2023-08-28T13:31:26 · committer_date: 2023-08-28T13:31:26
- github_id: 175,296,188 · star_events_count: 113 · fork_events_count: 7
- gha_event_created_at: 2022-11-22T16:31:48 · gha_created_at: 2019-03-12T21:05:24 · gha_language: Python
- language: Python · src_encoding: UTF-8 · is_vendor: false · is_generated: false · length_bytes: 1,718

content:

nlp_installed = False
import sqlalchemy as sa
from zillion.core import zillion_config, nlp_installed
zillion_engine = sa.create_engine(zillion_config["DB_URL"], pool_pre_ping=True)
zillion_metadata = sa.MetaData()
zillion_metadata.bind = zillion_engine
Warehouses = sa.Table(
"warehouses",
zillion_metadata,
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("name", sa.String(128), nullable=False, unique=True),
sa.Column("params", sa.Text, nullable=False),
sa.Column("meta", sa.Text),
sa.Column("created_at", sa.DateTime, server_default=sa.func.NOW()),
)
ReportSpecs = sa.Table(
"report_specs",
zillion_metadata,
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("warehouse_id", sa.Integer, nullable=False),
sa.Column("params", sa.Text, nullable=False),
sa.Column("meta", sa.Text),
sa.Column("created_at", sa.DateTime, server_default=sa.func.NOW()),
)
DimensionValues = sa.Table(
"dimension_values",
zillion_metadata,
sa.Column("name", sa.String(100), primary_key=True),
sa.Column("warehouse_id", sa.Integer, primary_key=True),
sa.Column("values", sa.Text, nullable=False),
sa.Column("created_at", sa.DateTime, server_default=sa.func.NOW()),
)
if nlp_installed:
EmbeddingsCache = sa.Table(
"embeddings_cache",
zillion_metadata,
sa.Column("text_hash", sa.String(100), primary_key=True),
sa.Column("model", sa.String(100), primary_key=True),
sa.Column("text", sa.Text, nullable=False),
sa.Column("vector", sa.LargeBinary, nullable=False),
sa.Column("created_at", sa.DateTime, server_default=sa.func.NOW()),
)
zillion_metadata.create_all(zillion_engine)

**Record 2**

- repo_name: NVIDIA/hpc-container-maker · path: /test/test_Stage.py · filename: test_Stage.py · extension: py
- blob_id: 0848ca9cc0470f47118071afcaf31aad2a5cd202 · directory_id: c140ad38b1463024e289ceb0d5d6d44a45c91724 · content_id: 172aae72c35b5a39c124f86e53955127fff794ef
- snapshot_id: 3a333526decbd18352ef8d1fb3bec0033be221e8 · revision_id: 60fd2a51c171258a6b3f93c2523101cb7018ba1b · branch_name: refs/heads/master
- detected_licenses: ["Apache-2.0"] · license_type: permissive · gha_license_id: Apache-2.0
- visit_date: 2023-08-21T13:32:27.132476 · revision_date: 2023-06-12T21:12:40 · committer_date: 2023-06-12T21:12:40
- github_id: 126,385,168 · star_events_count: 419 · fork_events_count: 88
- gha_event_created_at: 2023-09-11T18:33:26 · gha_created_at: 2018-03-22T19:26:41 · gha_language: Python
- language: Python · src_encoding: UTF-8 · is_vendor: false · is_generated: false · length_bytes: 5,515

content:

# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Test cases for the Stage module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, singularity32
from hpccm.building_blocks import boost
from hpccm.building_blocks import gnu
from hpccm.primitives.baseimage import baseimage
from hpccm.primitives.shell import shell
from hpccm.Stage import Stage
class Test_Stage(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
def test_value(self):
"""Single layer"""
s = Stage()
self.assertFalse(len(s))
s += 1
self.assertTrue(len(s))
def test_list(self):
"""List of layers"""
s = Stage()
self.assertEqual(len(s), 0)
s += [1, 2]
self.assertEqual(len(s), 2)
@docker
def test_baseimage(self):
"""Base image specification"""
s = Stage()
s.name = 'bar'
s.baseimage('foo')
self.assertEqual(str(s), 'FROM foo AS bar')
@docker
def test_baseimage_first(self):
"""Base image is always first"""
s = Stage()
s += shell(commands=['abc'])
s.name = 'bar'
s.baseimage('foo')
self.assertEqual(str(s), 'FROM foo AS bar\n\nRUN abc')
@centos
@docker
def test_runtime(self):
"""Runtime from a previous stage"""
s0 = Stage()
s0 += gnu()
s0 += shell(commands=['gcc -o hello hello.c'])
s1 = Stage()
s1 += s0.runtime()
self.assertEqual(str(s1),
r'''# GNU compiler runtime
RUN yum install -y \
libgfortran \
libgomp && \
rm -rf /var/cache/yum/*''')
@centos
@docker
def test_runtime_exclude(self):
"""Runtime from a previous stage with exclude"""
s0 = Stage()
s0 += gnu()
s0 += boost()
s1 = Stage()
s1 += s0.runtime(exclude=['boost'])
self.assertEqual(str(s1),
r'''# GNU compiler runtime
RUN yum install -y \
libgfortran \
libgomp && \
rm -rf /var/cache/yum/*''')
@docker
def test_multistage_noas_docker(self):
"""Multistage naming"""
s0 = Stage()
s0 += baseimage(image='centos:7')
s0 += boost()
s1 = Stage()
s1 += s0.runtime()
self.assertEqual(str(s1),
r'''# Boost
COPY --from=0 /usr/local/boost /usr/local/boost
ENV LD_LIBRARY_PATH=/usr/local/boost/lib:$LD_LIBRARY_PATH''')
@singularity32
def test_multistage_noas_singularity(self):
"""Multistage naming"""
s0 = Stage()
s0 += baseimage(image='centos:7')
s0 += boost()
s1 = Stage()
s1 += s0.runtime()
self.assertEqual(str(s1),
r'''# Boost
%files from 0
/usr/local/boost /usr/local/boost
%environment
export LD_LIBRARY_PATH=/usr/local/boost/lib:$LD_LIBRARY_PATH
%post
export LD_LIBRARY_PATH=/usr/local/boost/lib:$LD_LIBRARY_PATH''')
@docker
def test_multistage_as_docker(self):
"""Multistage naming"""
s0 = Stage()
s0 += baseimage(image='centos:7', _as='devel')
s0 += boost()
s1 = Stage()
s1 += s0.runtime()
self.assertEqual(str(s1),
r'''# Boost
COPY --from=devel /usr/local/boost /usr/local/boost
ENV LD_LIBRARY_PATH=/usr/local/boost/lib:$LD_LIBRARY_PATH''')
@singularity32
def test_multistage_as_singularity(self):
"""Multistage naming"""
s0 = Stage()
s0 += baseimage(image='centos:7', _as='devel')
s0 += boost()
s1 = Stage()
s1 += s0.runtime()
self.assertEqual(str(s1),
r'''# Boost
%files from devel
/usr/local/boost /usr/local/boost
%environment
export LD_LIBRARY_PATH=/usr/local/boost/lib:$LD_LIBRARY_PATH
%post
export LD_LIBRARY_PATH=/usr/local/boost/lib:$LD_LIBRARY_PATH''')
@docker
def test_multistage_as_override_docker(self):
"""Multistage naming"""
s0 = Stage()
s0 += baseimage(image='centos:7', _as='devel')
s0 += boost()
s1 = Stage()
s1 += s0.runtime(_from='build')
self.assertEqual(str(s1),
r'''# Boost
COPY --from=build /usr/local/boost /usr/local/boost
ENV LD_LIBRARY_PATH=/usr/local/boost/lib:$LD_LIBRARY_PATH''')
@singularity32
def test_multistage_as_override_singularity(self):
"""Multistage naming"""
s0 = Stage()
s0 += baseimage(image='centos:7', _as='devel')
s0 += boost()
s1 = Stage()
s1 += s0.runtime(_from='build')
self.assertEqual(str(s1),
r'''# Boost
%files from build
/usr/local/boost /usr/local/boost
%environment
export LD_LIBRARY_PATH=/usr/local/boost/lib:$LD_LIBRARY_PATH
%post
export LD_LIBRARY_PATH=/usr/local/boost/lib:$LD_LIBRARY_PATH''')

**Record 3**

- repo_name: getsentry/zeus · path: /tests/zeus/providers/travis/test_webhook.py · filename: test_webhook.py · extension: py
- blob_id: 90783a17d74499edbd5c4efe40eda54361a02ee1 · directory_id: 64ab5b65afdf8d950c4b56ad2259133b95fc2fec · content_id: ccc0a2afcbf8bd79a131b36279cb2a21bb7fdc86
- snapshot_id: 3e88895443b23278fdb4c25121422ee214630512 · revision_id: 6d4a490c19ebe406b551641a022ca08f26c21fcb · branch_name: refs/heads/master
- detected_licenses: ["Apache-2.0"] · license_type: permissive · gha_license_id: Apache-2.0
- visit_date: 2023-09-01T14:20:11.396306 · revision_date: 2021-04-30T17:08:33 · committer_date: 2021-04-30T17:08:33
- github_id: 96,131,433 · star_events_count: 222 · fork_events_count: 27
- gha_event_created_at: 2022-06-01T03:17:16 · gha_created_at: 2017-07-03T16:39:35 · gha_language: Python
- language: Python · src_encoding: UTF-8 · is_vendor: false · is_generated: false · length_bytes: 2,385

content:

import json
from base64 import b64encode
CONFIG_RESPONSE = b"""
{
"config": {
"host": "travis-ci.org",
"shorten_host": "trvs.io",
"assets": {
"host": "travis-ci.org"
},
"pusher": {
"key": "5df8ac576dcccf4fd076"
},
"github": {
"api_url": "https://api.github.com",
"scopes": [
"read:org", "user:email",
"repo_deployment", "repo:status",
"write:repo_hook"
]
},
"notifications": {
"webhook": {
"public_key": "%(public_key)s"
}
}
}
}
"""
UNSET = object()
def make_signature(payload, private_key) -> bytes:
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
return private_key.sign(payload, padding.PKCS1v15(), hashes.SHA1())
def get_config_response(public_key_bytes):
return CONFIG_RESPONSE % {b"public_key": public_key_bytes.replace(b"\n", b"\\n")}
def post_request(client, hook, payload, public_key, signature):
path = "/hooks/{}/public/provider/travis/webhook".format(hook.id)
return client.post(
path, data={"payload": payload}, headers={"Signature": b64encode(signature)}
)
def test_missing_payload(client, default_repo, default_hook):
path = "/hooks/{}/public/provider/travis/webhook".format(default_hook.id)
resp = client.post(path)
assert resp.status_code == 400, repr(resp.data)
def test_missing_signature(client, default_repo, default_hook):
path = "/hooks/{}/public/provider/travis/webhook".format(default_hook.id)
resp = client.post(path)
assert resp.status_code == 400, repr(resp.data)
def test_queued_build(
client,
default_repo,
default_hook,
default_revision,
private_key,
public_key_bytes,
mocker,
responses,
sample_travis_build_commit,
):
responses.add(
responses.GET,
"https://api.travis-ci.org/config",
get_config_response(public_key_bytes),
)
payload = json.dumps(sample_travis_build_commit).encode("utf-8")
resp = post_request(
client,
default_hook,
payload,
public_key_bytes,
make_signature(payload, private_key),
)
assert resp.status_code == 202, repr(resp.data)

**Record 4**

- repo_name: apache/airflow · path: /tests/dag_processing/test_processor.py · filename: test_processor.py · extension: py
- blob_id: ff856cc54ffad18dfe9f71ed35824e344f0af671 · directory_id: b8bbdfc593b6d816e67a344f720f90ec05236778 · content_id: 5d3a4cbd30a34cea693bfd09e777f3dc814f8803
- snapshot_id: ed78db0a8bab7e096990e143926e52f518e288ab · revision_id: 1b122c15030e99cef9d4ff26d3781a7a9d6949bc · branch_name: refs/heads/main
- detected_licenses: ["Apache-2.0", "BSD-3-Clause", "MIT"] · license_type: permissive · gha_license_id: Apache-2.0
- visit_date: 2023-09-01T08:37:34.556097 · revision_date: 2023-09-01T06:49:05 · committer_date: 2023-09-01T06:49:05
- github_id: 33,884,891 · star_events_count: 22,756 · fork_events_count: 11,558
- gha_event_created_at: 2023-09-14T20:12:36 · gha_created_at: 2015-04-13T18:04:58 · gha_language: Python
- language: Python · src_encoding: UTF-8 · is_vendor: false · is_generated: false · length_bytes: 44,582

content:

#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import os
from unittest import mock
from unittest.mock import MagicMock, patch
from zipfile import ZipFile
import pytest
from airflow import PY311, settings
from airflow.callbacks.callback_requests import TaskCallbackRequest
from airflow.configuration import TEST_DAGS_FOLDER, conf
from airflow.dag_processing.manager import DagFileProcessorAgent
from airflow.dag_processing.processor import DagFileProcessor, DagFileProcessorProcess
from airflow.models import DagBag, DagModel, SlaMiss, TaskInstance, errors
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.operators.empty import EmptyOperator
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from tests.test_utils.config import conf_vars, env_vars
from tests.test_utils.db import (
clear_db_dags,
clear_db_import_errors,
clear_db_jobs,
clear_db_pools,
clear_db_runs,
clear_db_serialized_dags,
clear_db_sla_miss,
)
from tests.test_utils.mock_executor import MockExecutor
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = "airflow DAG"
INVALID_DAG_WITH_DEPTH_FILE_CONTENTS = "def something():\n return airflow_DAG\nsomething()"
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
@pytest.fixture(scope="class")
def disable_load_example():
with conf_vars({("core", "load_examples"): "false"}):
with env_vars({"AIRFLOW__CORE__LOAD_EXAMPLES": "false"}):
yield
@pytest.mark.usefixtures("disable_load_example")
class TestDagFileProcessor:
@staticmethod
def clean_db():
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_import_errors()
clear_db_jobs()
clear_db_serialized_dags()
def setup_class(self):
self.clean_db()
def setup_method(self):
# Speed up some tests by not running the tasks, just look at what we
# enqueue!
self.null_exec = MockExecutor()
self.scheduler_job = None
def teardown_method(self) -> None:
if self.scheduler_job and self.scheduler_job.job_runner.processor_agent:
self.scheduler_job.job_runner.processor_agent.end()
self.scheduler_job = None
self.clean_db()
def _process_file(self, file_path, dag_directory, session):
dag_file_processor = DagFileProcessor(
dag_ids=[], dag_directory=str(dag_directory), log=mock.MagicMock()
)
dag_file_processor.process_file(file_path, [], False, session)
@mock.patch("airflow.dag_processing.processor.DagFileProcessor._get_dagbag")
def test_dag_file_processor_sla_miss_callback(self, mock_get_dagbag, create_dummy_dag, get_test_dag):
"""
Test that the dag file processor calls the sla miss callback
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but a sla of 0, so we'll already have a sla_miss on the books.
test_start_date = timezone.utcnow() - datetime.timedelta(days=1)
dag, task = create_dummy_dag(
dag_id="test_sla_miss",
task_id="dummy",
sla_miss_callback=sla_callback,
default_args={"start_date": test_start_date, "sla": datetime.timedelta()},
)
session.merge(TaskInstance(task=task, execution_date=test_start_date, state="success"))
session.merge(SlaMiss(task_id="dummy", dag_id="test_sla_miss", execution_date=test_start_date))
mock_dagbag = mock.Mock()
mock_dagbag.get_dag.return_value = dag
mock_get_dagbag.return_value = mock_dagbag
DagFileProcessor.manage_slas(dag_folder=dag.fileloc, dag_id="test_sla_miss", session=session)
assert sla_callback.called
@mock.patch("airflow.dag_processing.processor.DagFileProcessor._get_dagbag")
def test_dag_file_processor_sla_miss_callback_invalid_sla(self, mock_get_dagbag, create_dummy_dag):
"""
Test that the dag file processor does not call the sla miss callback when
given an invalid sla
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but an sla of 0
# so we'll already have an sla_miss on the books.
# Pass anything besides a timedelta object to the sla argument.
test_start_date = timezone.utcnow() - datetime.timedelta(days=1)
dag, task = create_dummy_dag(
dag_id="test_sla_miss",
task_id="dummy",
sla_miss_callback=sla_callback,
default_args={"start_date": test_start_date, "sla": None},
)
session.merge(TaskInstance(task=task, execution_date=test_start_date, state="success"))
session.merge(SlaMiss(task_id="dummy", dag_id="test_sla_miss", execution_date=test_start_date))
mock_dagbag = mock.Mock()
mock_dagbag.get_dag.return_value = dag
mock_get_dagbag.return_value = mock_dagbag
DagFileProcessor.manage_slas(dag_folder=dag.fileloc, dag_id="test_sla_miss", session=session)
sla_callback.assert_not_called()
@mock.patch("airflow.dag_processing.processor.DagFileProcessor._get_dagbag")
def test_dag_file_processor_sla_miss_callback_sent_notification(self, mock_get_dagbag, create_dummy_dag):
"""
Test that the dag file processor does not call the sla_miss_callback when a
notification has already been sent
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
sla_callback = MagicMock()
# Create dag with a start of 2 days ago, but an sla of 1 day
# ago so we'll already have an sla_miss on the books
test_start_date = timezone.utcnow() - datetime.timedelta(days=2)
dag, task = create_dummy_dag(
dag_id="test_sla_miss",
task_id="dummy",
sla_miss_callback=sla_callback,
default_args={"start_date": test_start_date, "sla": datetime.timedelta(days=1)},
)
# Create a TaskInstance for two days ago
session.merge(TaskInstance(task=task, execution_date=test_start_date, state="success"))
# Create an SlaMiss where notification was sent, but email was not
session.merge(
SlaMiss(
task_id="dummy",
dag_id="test_sla_miss",
execution_date=test_start_date,
email_sent=False,
notification_sent=True,
)
)
mock_dagbag = mock.Mock()
mock_dagbag.get_dag.return_value = dag
mock_get_dagbag.return_value = mock_dagbag
# Now call manage_slas and see if the sla_miss callback gets called
DagFileProcessor.manage_slas(dag_folder=dag.fileloc, dag_id="test_sla_miss", session=session)
sla_callback.assert_not_called()
@mock.patch("airflow.dag_processing.processor.Stats.incr")
@mock.patch("airflow.dag_processing.processor.DagFileProcessor._get_dagbag")
def test_dag_file_processor_sla_miss_doesnot_raise_integrity_error(
self, mock_get_dagbag, mock_stats_incr, dag_maker
):
"""
Test that the dag file processor does not try to insert already existing item into the database
"""
session = settings.Session()
# Create dag with a start of 2 days ago, but an sla of 1 day
# ago so we'll already have an sla_miss on the books
test_start_date = timezone.utcnow() - datetime.timedelta(days=2)
with dag_maker(
dag_id="test_sla_miss",
default_args={"start_date": test_start_date, "sla": datetime.timedelta(days=1)},
) as dag:
task = EmptyOperator(task_id="dummy")
dag_maker.create_dagrun(execution_date=test_start_date, state=State.SUCCESS)
# Create a TaskInstance for two days ago
ti = TaskInstance(task=task, execution_date=test_start_date, state="success")
session.merge(ti)
session.flush()
mock_dagbag = mock.Mock()
mock_dagbag.get_dag.return_value = dag
mock_get_dagbag.return_value = mock_dagbag
DagFileProcessor.manage_slas(dag_folder=dag.fileloc, dag_id="test_sla_miss", session=session)
sla_miss_count = (
session.query(SlaMiss)
.filter(
SlaMiss.dag_id == dag.dag_id,
SlaMiss.task_id == task.task_id,
)
.count()
)
assert sla_miss_count == 1
mock_stats_incr.assert_called_with("sla_missed", tags={"dag_id": "test_sla_miss", "task_id": "dummy"})
# Now call manage_slas and see that it runs without errors
# because of existing SlaMiss above.
# Since this is run often, it's possible that it runs before another
# ti is successful thereby trying to insert a duplicate record.
DagFileProcessor.manage_slas(dag_folder=dag.fileloc, dag_id="test_sla_miss", session=session)
@mock.patch("airflow.dag_processing.processor.Stats.incr")
@mock.patch("airflow.dag_processing.processor.DagFileProcessor._get_dagbag")
def test_dag_file_processor_sla_miss_continue_checking_the_task_instances_after_recording_missing_sla(
self, mock_get_dagbag, mock_stats_incr, dag_maker
):
"""
Test that the dag file processor continue checking subsequent task instances
even if the preceding task instance misses the sla ahead
"""
session = settings.Session()
# Create a dag with a start of 3 days ago and sla of 1 day,
# so we have 2 missing slas
now = timezone.utcnow()
test_start_date = now - datetime.timedelta(days=3)
with dag_maker(
dag_id="test_sla_miss",
default_args={"start_date": test_start_date, "sla": datetime.timedelta(days=1)},
) as dag:
task = EmptyOperator(task_id="dummy")
dag_maker.create_dagrun(execution_date=test_start_date, state=State.SUCCESS)
session.merge(TaskInstance(task=task, execution_date=test_start_date, state="success"))
session.merge(
SlaMiss(task_id=task.task_id, dag_id=dag.dag_id, execution_date=now - datetime.timedelta(days=2))
)
session.flush()
mock_dagbag = mock.Mock()
mock_dagbag.get_dag.return_value = dag
mock_get_dagbag.return_value = mock_dagbag
DagFileProcessor.manage_slas(dag_folder=dag.fileloc, dag_id="test_sla_miss", session=session)
sla_miss_count = (
session.query(SlaMiss)
.filter(
SlaMiss.dag_id == dag.dag_id,
SlaMiss.task_id == task.task_id,
)
.count()
)
assert sla_miss_count == 2
mock_stats_incr.assert_called_with("sla_missed", tags={"dag_id": "test_sla_miss", "task_id": "dummy"})
@patch.object(DagFileProcessor, "logger")
@mock.patch("airflow.dag_processing.processor.Stats.incr")
@mock.patch("airflow.dag_processing.processor.DagFileProcessor._get_dagbag")
def test_dag_file_processor_sla_miss_callback_exception(
self, mock_get_dagbag, mock_stats_incr, mock_get_log, create_dummy_dag
):
"""
Test that the dag file processor gracefully logs an exception if there is a problem
calling the sla_miss_callback
"""
session = settings.Session()
sla_callback = MagicMock(
__name__="function_name", side_effect=RuntimeError("Could not call function")
)
test_start_date = timezone.utcnow() - datetime.timedelta(days=1)
for i, callback in enumerate([[sla_callback], sla_callback]):
dag, task = create_dummy_dag(
dag_id=f"test_sla_miss_{i}",
task_id="dummy",
sla_miss_callback=callback,
default_args={"start_date": test_start_date, "sla": datetime.timedelta(hours=1)},
)
mock_stats_incr.reset_mock()
session.merge(TaskInstance(task=task, execution_date=test_start_date, state="Success"))
# Create an SlaMiss where notification was sent, but email was not
session.merge(
SlaMiss(task_id="dummy", dag_id=f"test_sla_miss_{i}", execution_date=test_start_date)
)
# Now call manage_slas and see if the sla_miss callback gets called
mock_log = mock.Mock()
mock_get_log.return_value = mock_log
mock_dagbag = mock.Mock()
mock_dagbag.get_dag.return_value = dag
mock_get_dagbag.return_value = mock_dagbag
DagFileProcessor.manage_slas(dag_folder=dag.fileloc, dag_id="test_sla_miss", session=session)
assert sla_callback.called
mock_log.exception.assert_called_once_with(
"Could not call sla_miss_callback(%s) for DAG %s",
sla_callback.__name__,
f"test_sla_miss_{i}",
)
mock_stats_incr.assert_called_once_with(
"sla_callback_notification_failure",
tags={"dag_id": f"test_sla_miss_{i}", "func_name": sla_callback.__name__},
)
@mock.patch("airflow.dag_processing.processor.send_email")
@mock.patch("airflow.dag_processing.processor.DagFileProcessor._get_dagbag")
def test_dag_file_processor_only_collect_emails_from_sla_missed_tasks(
self, mock_get_dagbag, mock_send_email, create_dummy_dag
):
session = settings.Session()
test_start_date = timezone.utcnow() - datetime.timedelta(days=1)
email1 = "test1@test.com"
dag, task = create_dummy_dag(
dag_id="test_sla_miss",
task_id="sla_missed",
email=email1,
default_args={"start_date": test_start_date, "sla": datetime.timedelta(hours=1)},
)
session.merge(TaskInstance(task=task, execution_date=test_start_date, state="Success"))
email2 = "test2@test.com"
EmptyOperator(task_id="sla_not_missed", dag=dag, owner="airflow", email=email2)
session.merge(SlaMiss(task_id="sla_missed", dag_id="test_sla_miss", execution_date=test_start_date))
mock_dagbag = mock.Mock()
mock_dagbag.get_dag.return_value = dag
mock_get_dagbag.return_value = mock_dagbag
DagFileProcessor.manage_slas(dag_folder=dag.fileloc, dag_id="test_sla_miss", session=session)
assert len(mock_send_email.call_args_list) == 1
send_email_to = mock_send_email.call_args_list[0][0][0]
assert email1 in send_email_to
assert email2 not in send_email_to
@patch.object(DagFileProcessor, "logger")
@mock.patch("airflow.dag_processing.processor.Stats.incr")
@mock.patch("airflow.utils.email.send_email")
@mock.patch("airflow.dag_processing.processor.DagFileProcessor._get_dagbag")
def test_dag_file_processor_sla_miss_email_exception(
self, mock_get_dagbag, mock_send_email, mock_stats_incr, mock_get_log, create_dummy_dag
):
"""
Test that the dag file processor gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
dag_id = "test_sla_miss"
task_id = "test_ti"
email = "test@test.com"
# Mock the callback function so we can verify that it was not called
mock_send_email.side_effect = RuntimeError("Could not send an email")
test_start_date = timezone.utcnow() - datetime.timedelta(days=1)
dag, task = create_dummy_dag(
dag_id=dag_id,
task_id=task_id,
email=email,
default_args={"start_date": test_start_date, "sla": datetime.timedelta(hours=1)},
)
mock_stats_incr.reset_mock()
session.merge(TaskInstance(task=task, execution_date=test_start_date, state="Success"))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id=task_id, dag_id=dag_id, execution_date=test_start_date))
mock_log = mock.Mock()
mock_get_log.return_value = mock_log
mock_dagbag = mock.Mock()
mock_dagbag.get_dag.return_value = dag
mock_get_dagbag.return_value = mock_dagbag
DagFileProcessor.manage_slas(dag_folder=dag.fileloc, dag_id=dag_id, session=session)
mock_log.exception.assert_called_once_with(
"Could not send SLA Miss email notification for DAG %s", dag_id
)
mock_stats_incr.assert_called_once_with("sla_email_notification_failure", tags={"dag_id": dag_id})
@mock.patch("airflow.dag_processing.processor.DagFileProcessor._get_dagbag")
def test_dag_file_processor_sla_miss_deleted_task(self, mock_get_dagbag, create_dummy_dag):
"""
Test that the dag file processor will not crash when trying to send
sla miss notification for a deleted task
"""
session = settings.Session()
test_start_date = timezone.utcnow() - datetime.timedelta(days=1)
dag, task = create_dummy_dag(
dag_id="test_sla_miss",
task_id="dummy",
email="test@test.com",
default_args={"start_date": test_start_date, "sla": datetime.timedelta(hours=1)},
)
session.merge(TaskInstance(task=task, execution_date=test_start_date, state="Success"))
# Create an SlaMiss where notification was sent, but email was not
session.merge(
SlaMiss(task_id="dummy_deleted", dag_id="test_sla_miss", execution_date=test_start_date)
)
mock_dagbag = mock.Mock()
mock_dagbag.get_dag.return_value = dag
mock_get_dagbag.return_value = mock_dagbag
DagFileProcessor.manage_slas(dag_folder=dag.fileloc, dag_id="test_sla_miss", session=session)
@patch.object(TaskInstance, "handle_failure")
def test_execute_on_failure_callbacks(self, mock_ti_handle_failure):
dagbag = DagBag(dag_folder="/dev/null", include_examples=True, read_dags_from_db=False)
dag_file_processor = DagFileProcessor(
dag_ids=[], dag_directory=TEST_DAGS_FOLDER, log=mock.MagicMock()
)
with create_session() as session:
session.query(TaskInstance).delete()
dag = dagbag.get_dag("example_branch_operator")
dagrun = dag.create_dagrun(
state=State.RUNNING,
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
session=session,
)
task = dag.get_task(task_id="run_this_first")
ti = TaskInstance(task, run_id=dagrun.run_id, state=State.RUNNING)
session.add(ti)
requests = [
TaskCallbackRequest(
full_filepath="A", simple_task_instance=SimpleTaskInstance.from_ti(ti), msg="Message"
)
]
dag_file_processor.execute_callbacks(dagbag, requests, session)
mock_ti_handle_failure.assert_called_once_with(
error="Message", test_mode=conf.getboolean("core", "unit_test_mode"), session=session
)
@pytest.mark.parametrize(
["has_serialized_dag"],
[pytest.param(True, id="dag_in_db"), pytest.param(False, id="no_dag_found")],
)
@patch.object(TaskInstance, "handle_failure")
def test_execute_on_failure_callbacks_without_dag(self, mock_ti_handle_failure, has_serialized_dag):
dagbag = DagBag(dag_folder="/dev/null", include_examples=True, read_dags_from_db=False)
dag_file_processor = DagFileProcessor(
dag_ids=[], dag_directory=TEST_DAGS_FOLDER, log=mock.MagicMock()
)
with create_session() as session:
session.query(TaskInstance).delete()
dag = dagbag.get_dag("example_branch_operator")
dagrun = dag.create_dagrun(
state=State.RUNNING,
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
session=session,
)
task = dag.get_task(task_id="run_this_first")
ti = TaskInstance(task, run_id=dagrun.run_id, state=State.QUEUED)
session.add(ti)
if has_serialized_dag:
assert SerializedDagModel.write_dag(dag, session=session) is True
session.flush()
requests = [
TaskCallbackRequest(
full_filepath="A", simple_task_instance=SimpleTaskInstance.from_ti(ti), msg="Message"
)
]
dag_file_processor.execute_callbacks_without_dag(requests, session)
mock_ti_handle_failure.assert_called_once_with(
error="Message", test_mode=conf.getboolean("core", "unit_test_mode"), session=session
)
def test_failure_callbacks_should_not_drop_hostname(self):
dagbag = DagBag(dag_folder="/dev/null", include_examples=True, read_dags_from_db=False)
dag_file_processor = DagFileProcessor(
dag_ids=[], dag_directory=TEST_DAGS_FOLDER, log=mock.MagicMock()
)
dag_file_processor.UNIT_TEST_MODE = False
with create_session() as session:
dag = dagbag.get_dag("example_branch_operator")
task = dag.get_task(task_id="run_this_first")
dagrun = dag.create_dagrun(
state=State.RUNNING,
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
session=session,
)
ti = TaskInstance(task, run_id=dagrun.run_id, state=State.RUNNING)
ti.hostname = "test_hostname"
session.add(ti)
requests = [
TaskCallbackRequest(
full_filepath="A", simple_task_instance=SimpleTaskInstance.from_ti(ti), msg="Message"
)
]
dag_file_processor.execute_callbacks(dagbag, requests)
with create_session() as session:
tis = session.query(TaskInstance)
assert tis[0].hostname == "test_hostname"
def test_process_file_should_failure_callback(self, monkeypatch, tmp_path, get_test_dag):
callback_file = tmp_path.joinpath("callback.txt")
callback_file.touch()
monkeypatch.setenv("AIRFLOW_CALLBACK_FILE", str(callback_file))
dag_file_processor = DagFileProcessor(
dag_ids=[], dag_directory=TEST_DAGS_FOLDER, log=mock.MagicMock()
)
dag = get_test_dag("test_on_failure_callback")
task = dag.get_task(task_id="test_on_failure_callback_task")
with create_session() as session:
dagrun = dag.create_dagrun(
state=State.RUNNING,
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
session=session,
)
ti = dagrun.get_task_instance(task.task_id)
ti.refresh_from_task(task)
requests = [
TaskCallbackRequest(
full_filepath=dag.fileloc,
simple_task_instance=SimpleTaskInstance.from_ti(ti),
msg="Message",
)
]
dag_file_processor.process_file(dag.fileloc, requests, session=session)
ti.refresh_from_db()
msg = " ".join([str(k) for k in ti.key.primary]) + " fired callback"
assert msg in callback_file.read_text()
@conf_vars({("core", "dagbag_import_error_tracebacks"): "False"})
def test_add_unparseable_file_before_sched_start_creates_import_error(self, tmpdir):
unparseable_filename = os.path.join(tmpdir, TEMP_DAG_FILENAME)
with open(unparseable_filename, "w") as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
with create_session() as session:
self._process_file(unparseable_filename, dag_directory=tmpdir, session=session)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 1
import_error = import_errors[0]
assert import_error.filename == unparseable_filename
assert import_error.stacktrace == f"invalid syntax ({TEMP_DAG_FILENAME}, line 1)"
session.rollback()
@conf_vars({("core", "dagbag_import_error_tracebacks"): "False"})
def test_add_unparseable_zip_file_creates_import_error(self, tmpdir):
zip_filename = os.path.join(tmpdir, "test_zip.zip")
invalid_dag_filename = os.path.join(zip_filename, TEMP_DAG_FILENAME)
with ZipFile(zip_filename, "w") as zip_file:
zip_file.writestr(TEMP_DAG_FILENAME, UNPARSEABLE_DAG_FILE_CONTENTS)
with create_session() as session:
self._process_file(zip_filename, dag_directory=tmpdir, session=session)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 1
import_error = import_errors[0]
assert import_error.filename == invalid_dag_filename
assert import_error.stacktrace == f"invalid syntax ({TEMP_DAG_FILENAME}, line 1)"
session.rollback()
@conf_vars({("core", "dagbag_import_error_tracebacks"): "False"})
def test_dag_model_has_import_error_is_true_when_import_error_exists(self, tmpdir, session):
dag_file = os.path.join(TEST_DAGS_FOLDER, "test_example_bash_operator.py")
temp_dagfile = os.path.join(tmpdir, TEMP_DAG_FILENAME)
with open(dag_file) as main_dag, open(temp_dagfile, "w") as next_dag:
for line in main_dag:
next_dag.write(line)
# first we parse the dag
self._process_file(temp_dagfile, dag_directory=tmpdir, session=session)
# assert DagModel.has_import_errors is false
dm = session.query(DagModel).filter(DagModel.fileloc == temp_dagfile).first()
assert not dm.has_import_errors
# corrupt the file
with open(temp_dagfile, "a") as file:
file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self._process_file(temp_dagfile, dag_directory=tmpdir, session=session)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 1
import_error = import_errors[0]
assert import_error.filename == temp_dagfile
assert import_error.stacktrace
dm = session.query(DagModel).filter(DagModel.fileloc == temp_dagfile).first()
assert dm.has_import_errors
def test_no_import_errors_with_parseable_dag(self, tmpdir):
parseable_filename = os.path.join(tmpdir, TEMP_DAG_FILENAME)
with open(parseable_filename, "w") as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
with create_session() as session:
self._process_file(parseable_filename, dag_directory=tmpdir, session=session)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 0
session.rollback()
def test_no_import_errors_with_parseable_dag_in_zip(self, tmpdir):
zip_filename = os.path.join(tmpdir, "test_zip.zip")
with ZipFile(zip_filename, "w") as zip_file:
zip_file.writestr(TEMP_DAG_FILENAME, PARSEABLE_DAG_FILE_CONTENTS)
with create_session() as session:
self._process_file(zip_filename, dag_directory=tmpdir, session=session)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 0
session.rollback()
@conf_vars({("core", "dagbag_import_error_tracebacks"): "False"})
def test_new_import_error_replaces_old(self, tmpdir):
unparseable_filename = os.path.join(tmpdir, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, "w") as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
session = settings.Session()
self._process_file(unparseable_filename, dag_directory=tmpdir, session=session)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, "w") as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS + os.linesep + UNPARSEABLE_DAG_FILE_CONTENTS
)
self._process_file(unparseable_filename, dag_directory=tmpdir, session=session)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 1
import_error = import_errors[0]
assert import_error.filename == unparseable_filename
assert import_error.stacktrace == f"invalid syntax ({TEMP_DAG_FILENAME}, line 2)"
session.rollback()
def test_import_error_record_is_updated_not_deleted_and_recreated(self, tmpdir):
"""
Test that existing import error is updated and new record not created
for a dag with the same filename
"""
filename_to_parse = os.path.join(tmpdir, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, "w") as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
session = settings.Session()
self._process_file(filename_to_parse, dag_directory=tmpdir, session=session)
import_error_1 = (
session.query(errors.ImportError).filter(errors.ImportError.filename == filename_to_parse).one()
)
# process the file multiple times
for _ in range(10):
self._process_file(filename_to_parse, dag_directory=tmpdir, session=session)
import_error_2 = (
session.query(errors.ImportError).filter(errors.ImportError.filename == filename_to_parse).one()
)
# assert that the ID of the import error did not change
assert import_error_1.id == import_error_2.id
def test_remove_error_clears_import_error(self, tmpdir):
filename_to_parse = os.path.join(tmpdir, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, "w") as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
session = settings.Session()
self._process_file(filename_to_parse, dag_directory=tmpdir, session=session)
# Remove the import error from the file
with open(filename_to_parse, "w") as file_to_parse:
file_to_parse.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self._process_file(filename_to_parse, dag_directory=tmpdir, session=session)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 0
session.rollback()
def test_remove_error_clears_import_error_zip(self, tmpdir):
session = settings.Session()
# Generate original import error
zip_filename = os.path.join(tmpdir, "test_zip.zip")
with ZipFile(zip_filename, "w") as zip_file:
zip_file.writestr(TEMP_DAG_FILENAME, UNPARSEABLE_DAG_FILE_CONTENTS)
self._process_file(zip_filename, dag_directory=tmpdir, session=session)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 1
# Remove the import error from the file
with ZipFile(zip_filename, "w") as zip_file:
zip_file.writestr(TEMP_DAG_FILENAME, "import os # airflow DAG")
self._process_file(zip_filename, dag_directory=tmpdir, session=session)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 0
session.rollback()
def test_import_error_tracebacks(self, tmpdir):
unparseable_filename = os.path.join(tmpdir, TEMP_DAG_FILENAME)
with open(unparseable_filename, "w") as unparseable_file:
unparseable_file.writelines(INVALID_DAG_WITH_DEPTH_FILE_CONTENTS)
with create_session() as session:
self._process_file(unparseable_filename, dag_directory=tmpdir, session=session)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 1
import_error = import_errors[0]
assert import_error.filename == unparseable_filename
if PY311:
expected_stacktrace = (
"Traceback (most recent call last):\n"
' File "{}", line 3, in <module>\n'
" something()\n"
' File "{}", line 2, in something\n'
" return airflow_DAG\n"
" ^^^^^^^^^^^\n"
"NameError: name 'airflow_DAG' is not defined\n"
)
else:
expected_stacktrace = (
"Traceback (most recent call last):\n"
' File "{}", line 3, in <module>\n'
" something()\n"
' File "{}", line 2, in something\n'
" return airflow_DAG\n"
"NameError: name 'airflow_DAG' is not defined\n"
)
assert import_error.stacktrace == expected_stacktrace.format(
unparseable_filename, unparseable_filename
)
session.rollback()
@conf_vars({("core", "dagbag_import_error_traceback_depth"): "1"})
def test_import_error_traceback_depth(self, tmpdir):
unparseable_filename = os.path.join(tmpdir, TEMP_DAG_FILENAME)
with open(unparseable_filename, "w") as unparseable_file:
unparseable_file.writelines(INVALID_DAG_WITH_DEPTH_FILE_CONTENTS)
with create_session() as session:
self._process_file(unparseable_filename, dag_directory=tmpdir, session=session)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 1
import_error = import_errors[0]
assert import_error.filename == unparseable_filename
if PY311:
expected_stacktrace = (
"Traceback (most recent call last):\n"
' File "{}", line 2, in something\n'
" return airflow_DAG\n"
" ^^^^^^^^^^^\n"
"NameError: name 'airflow_DAG' is not defined\n"
)
else:
expected_stacktrace = (
"Traceback (most recent call last):\n"
' File "{}", line 2, in something\n'
" return airflow_DAG\n"
"NameError: name 'airflow_DAG' is not defined\n"
)
assert import_error.stacktrace == expected_stacktrace.format(unparseable_filename)
session.rollback()
def test_import_error_tracebacks_zip(self, tmpdir):
invalid_zip_filename = os.path.join(tmpdir, "test_zip_invalid.zip")
invalid_dag_filename = os.path.join(invalid_zip_filename, TEMP_DAG_FILENAME)
with ZipFile(invalid_zip_filename, "w") as invalid_zip_file:
invalid_zip_file.writestr(TEMP_DAG_FILENAME, INVALID_DAG_WITH_DEPTH_FILE_CONTENTS)
with create_session() as session:
self._process_file(invalid_zip_filename, dag_directory=tmpdir, session=session)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 1
import_error = import_errors[0]
assert import_error.filename == invalid_dag_filename
if PY311:
expected_stacktrace = (
"Traceback (most recent call last):\n"
' File "{}", line 3, in <module>\n'
" something()\n"
' File "{}", line 2, in something\n'
" return airflow_DAG\n"
" ^^^^^^^^^^^\n"
"NameError: name 'airflow_DAG' is not defined\n"
)
else:
expected_stacktrace = (
"Traceback (most recent call last):\n"
' File "{}", line 3, in <module>\n'
" something()\n"
' File "{}", line 2, in something\n'
" return airflow_DAG\n"
"NameError: name 'airflow_DAG' is not defined\n"
)
assert import_error.stacktrace == expected_stacktrace.format(
invalid_dag_filename, invalid_dag_filename
)
session.rollback()
@conf_vars({("core", "dagbag_import_error_traceback_depth"): "1"})
def test_import_error_tracebacks_zip_depth(self, tmpdir):
invalid_zip_filename = os.path.join(tmpdir, "test_zip_invalid.zip")
invalid_dag_filename = os.path.join(invalid_zip_filename, TEMP_DAG_FILENAME)
with ZipFile(invalid_zip_filename, "w") as invalid_zip_file:
invalid_zip_file.writestr(TEMP_DAG_FILENAME, INVALID_DAG_WITH_DEPTH_FILE_CONTENTS)
with create_session() as session:
self._process_file(invalid_zip_filename, dag_directory=tmpdir, session=session)
import_errors = session.query(errors.ImportError).all()
assert len(import_errors) == 1
import_error = import_errors[0]
assert import_error.filename == invalid_dag_filename
if PY311:
expected_stacktrace = (
"Traceback (most recent call last):\n"
' File "{}", line 2, in something\n'
" return airflow_DAG\n"
" ^^^^^^^^^^^\n"
"NameError: name 'airflow_DAG' is not defined\n"
)
else:
expected_stacktrace = (
"Traceback (most recent call last):\n"
' File "{}", line 2, in something\n'
" return airflow_DAG\n"
"NameError: name 'airflow_DAG' is not defined\n"
)
assert import_error.stacktrace == expected_stacktrace.format(invalid_dag_filename)
session.rollback()
@conf_vars({("logging", "dag_processor_log_target"): "stdout"})
@mock.patch("airflow.dag_processing.processor.settings.dispose_orm", MagicMock)
@mock.patch("airflow.dag_processing.processor.redirect_stdout")
def test_dag_parser_output_when_logging_to_stdout(self, mock_redirect_stdout_for_file):
processor = DagFileProcessorProcess(
file_path="abc.txt",
pickle_dags=False,
dag_ids=[],
dag_directory=[],
callback_requests=[],
)
processor._run_file_processor(
result_channel=MagicMock(),
parent_channel=MagicMock(),
file_path="fake_file_path",
pickle_dags=False,
dag_ids=[],
thread_name="fake_thread_name",
callback_requests=[],
dag_directory=[],
)
mock_redirect_stdout_for_file.assert_not_called()
@conf_vars({("logging", "dag_processor_log_target"): "file"})
@mock.patch("airflow.dag_processing.processor.settings.dispose_orm", MagicMock)
@mock.patch("airflow.dag_processing.processor.redirect_stdout")
def test_dag_parser_output_when_logging_to_file(self, mock_redirect_stdout_for_file):
processor = DagFileProcessorProcess(
file_path="abc.txt",
pickle_dags=False,
dag_ids=[],
dag_directory=[],
callback_requests=[],
)
processor._run_file_processor(
result_channel=MagicMock(),
parent_channel=MagicMock(),
file_path="fake_file_path",
pickle_dags=False,
dag_ids=[],
thread_name="fake_thread_name",
callback_requests=[],
dag_directory=[],
)
mock_redirect_stdout_for_file.assert_called_once()
@mock.patch("airflow.dag_processing.processor.settings.dispose_orm", MagicMock)
@mock.patch.object(DagFileProcessorProcess, "_get_multiprocessing_context")
def test_no_valueerror_with_parseable_dag_in_zip(self, mock_context, tmpdir):
mock_context.return_value.Pipe.return_value = (MagicMock(), MagicMock())
zip_filename = os.path.join(tmpdir, "test_zip.zip")
with ZipFile(zip_filename, "w") as zip_file:
zip_file.writestr(TEMP_DAG_FILENAME, PARSEABLE_DAG_FILE_CONTENTS)
processor = DagFileProcessorProcess(
file_path=zip_filename,
pickle_dags=False,
dag_ids=[],
dag_directory=[],
callback_requests=[],
)
processor.start()
@mock.patch("airflow.dag_processing.processor.settings.dispose_orm", MagicMock)
@mock.patch.object(DagFileProcessorProcess, "_get_multiprocessing_context")
def test_nullbyte_exception_handling_when_preimporting_airflow(self, mock_context, tmpdir):
mock_context.return_value.Pipe.return_value = (MagicMock(), MagicMock())
dag_filename = os.path.join(tmpdir, "test_dag.py")
with open(dag_filename, "wb") as file:
file.write(b"hello\x00world")
processor = DagFileProcessorProcess(
file_path=dag_filename,
pickle_dags=False,
dag_ids=[],
dag_directory=[],
callback_requests=[],
)
processor.start()
class TestProcessorAgent:
@pytest.fixture(autouse=True)
def per_test(self):
self.processor_agent = None
yield
if self.processor_agent:
self.processor_agent.end()
def test_error_when_waiting_in_async_mode(self, tmp_path):
self.processor_agent = DagFileProcessorAgent(
dag_directory=tmp_path,
max_runs=1,
processor_timeout=datetime.timedelta(1),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
self.processor_agent.start()
with pytest.raises(RuntimeError, match="wait_until_finished should only be called in sync_mode"):
self.processor_agent.wait_until_finished()
def test_default_multiprocessing_behaviour(self, tmp_path):
self.processor_agent = DagFileProcessorAgent(
dag_directory=tmp_path,
max_runs=1,
processor_timeout=datetime.timedelta(1),
dag_ids=[],
pickle_dags=False,
async_mode=False,
)
self.processor_agent.start()
self.processor_agent.run_single_parsing_loop()
self.processor_agent.wait_until_finished()
@conf_vars({("core", "mp_start_method"): "spawn"})
def test_spawn_multiprocessing_behaviour(self, tmp_path):
self.processor_agent = DagFileProcessorAgent(
dag_directory=tmp_path,
max_runs=1,
processor_timeout=datetime.timedelta(1),
dag_ids=[],
pickle_dags=False,
async_mode=False,
)
self.processor_agent.start()
self.processor_agent.run_single_parsing_loop()
self.processor_agent.wait_until_finished()

**Record 5**

- repo_name: PaddlePaddle/Paddle · path: /test/collective/fleet/test_fleet_checkpoint.py · filename: test_fleet_checkpoint.py · extension: py
- blob_id: b8f6b3a07c59dd714528e5474d00ea2baf2a8204 · directory_id: 974d04d2ea27b1bba1c01015a98112d2afb78fe5 · content_id: 4b86e6d57fd7a47e074447353ec3141f5f384a86
- snapshot_id: b3d2583119082c8e4b74331dacc4d39ed4d7cff0 · revision_id: 22a11a60e0e3d10a3cf610077a3d9942a6f964cb · branch_name: refs/heads/develop
- detected_licenses: ["Apache-2.0"] · license_type: permissive · gha_license_id: Apache-2.0
- visit_date: 2023-08-17T21:27:30.568889 · revision_date: 2023-08-17T12:38:22 · committer_date: 2023-08-17T12:38:22
- github_id: 65,711,522 · star_events_count: 20,414 · fork_events_count: 5,891
- gha_event_created_at: 2023-09-14T19:20:51 · gha_created_at: 2016-08-15T06:59:08 · gha_language: C++
- language: Python · src_encoding: UTF-8 · is_vendor: false · is_generated: false · length_bytes: 4,435

content:

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import paddle
from paddle import fluid
from paddle.distributed.fleet.utils.fs import HDFSClient, LocalFS
from paddle.fluid.incubate.checkpoint.auto_checkpoint import ExeTrainStatus
from paddle.fluid.incubate.checkpoint.checkpoint_saver import CheckpointSaver
from paddle.incubate.distributed.fleet import role_maker
from paddle.incubate.distributed.fleet.collective import fleet
class FleetTest(unittest.TestCase):
def _test_checkpoint(self, fs, dir_path):
file_name = "persistables"
os.environ["TRAINING_ROLE"] = "TRAINER"
os.environ["PADDLE_TRAINER_ID"] = "0"
os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:6070"
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
image = paddle.static.data(
name='img', shape=[None, 28, 28], dtype='float32'
)
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
feeder = fluid.DataFeeder(
feed_list=[image, label], place=fluid.CPUPlace()
)
predict = paddle.static.nn.fc(x=image, size=10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(
input=predict, label=label, reduction='none', use_softmax=False
)
avg_loss = paddle.mean(loss)
optimizer = paddle.optimizer.Adam(learning_rate=0.001)
dist_optimizer = fleet.distributed_optimizer(optimizer)
dist_optimizer.minimize(avg_loss)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
status = ExeTrainStatus()
status.epoch_no = 2
_, n1 = fleet.save_checkpoint(
exe, dir_path, trainer_id=0, train_status=status, fs=fs
)
status2 = ExeTrainStatus()
fleet.load_checkpoint(
exe, dir_path, trainer_id=0, fs=fs, train_status=status2
)
self.assertEqual(status2, status)
_, n2 = fleet.save_checkpoint(
exe,
dir_path,
trainer_id=0,
train_status=status,
fs=fs,
remain_all_checkpoint=False,
)
self.assertEqual(n2, n1 + 1)
c = CheckpointSaver(fs)
cp_nos = c.get_checkpoint_no(dir_path)
assert len(cp_nos) == 1 # cleanup all others
# unnormal
# test remain_all_checkpoint
fleet.save_checkpoint(
exe,
dir_path,
trainer_id=0,
train_status=status,
fs=fs,
remain_all_checkpoint=False,
)
# can't save under a file
fs = LocalFS()
cache_path = "./.load_cache"
fs.touch(cache_path)
try:
fleet.save_checkpoint(
exe,
dir_path,
trainer_id=0,
train_status=status,
fs=fs,
cache_path=cache_path,
)
self.assertFalse(True)
except:
pass
# can't load under a file
try:
fleet.load_checkpoint(
exe,
dir_path,
trainer_id=0,
train_status=status2,
fs=fs,
cache_path=cache_path,
)
self.assertFalse(True)
except:
pass
fs.delete(cache_path)
def test_hdfs_checkpoint(self):
fs = HDFSClient("/usr/local/hadoop-2.7.7", None)
dir_path = "./checkpoint_test_hdfs"
self._test_checkpoint(fs, os.path.abspath(dir_path))
def test_local_checkpoint(self):
fs = LocalFS()
dir_path = "./checkpoint_test_local"
self._test_checkpoint(fs, dir_path)
if __name__ == '__main__':
paddle.enable_static()
unittest.main()

**Record 6**

- repo_name: pannal/Sub-Zero.bundle · path: /Contents/Libraries/Shared/subliminal_patch/providers/argenteam.py · filename: argenteam.py · extension: py
- blob_id: 7f8dc30e3e42f2213c43b5313468c3b45a2a2116 · directory_id: f8dee139258b7d971bd1cfa16bd16e356537bbac · content_id: 5fe3d28cc0b303dcc27e4bfb5216783ef0438ea0
- snapshot_id: 79673016ae68d1f2e9886fd30b8763b73a8f6cf8 · revision_id: 4ced7d8c8f9f5fb47d12410f87fa33d782e9f0f4 · branch_name: refs/heads/master
- detected_licenses: ["MIT"] · license_type: permissive · gha_license_id: NOASSERTION
- visit_date: 2023-07-27T23:04:32.925845 · revision_date: 2023-07-09T13:07:38 · committer_date: 2023-07-09T13:08:04
- github_id: 21,959,699 · star_events_count: 1,820 · fork_events_count: 178
- gha_event_created_at: 2022-11-28T03:23:13 · gha_created_at: 2014-07-17T22:19:13 · gha_language: Python
- language: Python · src_encoding: UTF-8 · is_vendor: false · is_generated: false · length_bytes: 10,895

content:

# coding=utf-8
import logging
import os
import io
import time
from zipfile import ZipFile
from guessit import guessit
from requests import Session
from subliminal import Episode, Movie
from subliminal.score import get_equivalent_release_groups
from subliminal.utils import sanitize_release_group, sanitize
from subliminal_patch.providers import Provider
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subzero.language import Language
logger = logging.getLogger(__name__)
class ArgenteamSubtitle(Subtitle):
provider_name = 'argenteam'
hearing_impaired_verifiable = False
_release_info = None
def __init__(self, language, page_link, download_link, movie_kind, title, season, episode, year, release, version, source,
video_codec, tvdb_id, imdb_id, asked_for_episode=None, asked_for_release_group=None, *args, **kwargs):
super(ArgenteamSubtitle, self).__init__(language, page_link=page_link, *args, **kwargs)
self.page_link = page_link
self.download_link = download_link
self.movie_kind = movie_kind
self.title = title
self.year = year
self.season = season
self.episode = episode
self.release = release
self.version = version
self.asked_for_release_group = asked_for_release_group
self.asked_for_episode = asked_for_episode
self.matches = None
self.format = source
self.video_codec = video_codec
self.tvdb_id = tvdb_id
self.imdb_id = "tt" + imdb_id if imdb_id else None
self.releases = self.release_info
@property
def id(self):
return self.download_link
@property
def release_info(self):
if self._release_info:
return self._release_info
combine = []
for attr in ("format", "version", "video_codec"):
value = getattr(self, attr)
if value:
combine.append(value)
self._release_info = u".".join(combine) + (u"-"+self.release if self.release else "")
return self._release_info
def __repr__(self):
ep_addon = (" S%02dE%02d" % (self.season, self.episode)) if self.episode else ""
return '<%s %r [%s]>' % (
self.__class__.__name__, u"%s%s%s." % (self.title, " (%s)" % self.year if self.year else "", ep_addon) +
self.release_info, self.language)
def get_matches(self, video):
matches = set()
# series
if isinstance(video, Episode) and self.movie_kind == 'episode':
if video.series and (sanitize(self.title) in (
sanitize(name) for name in [video.series] + video.alternative_series)):
matches.add('series')
# season
if video.season and self.season == video.season:
matches.add('season')
# episode
if video.episode and self.episode == video.episode:
matches.add('episode')
# tvdb_id
if video.tvdb_id and str(self.tvdb_id) == str(video.tvdb_id):
matches.add('tvdb_id')
elif isinstance(video, Movie) and self.movie_kind == 'movie':
# title
if video.title and (sanitize(self.title) in (
sanitize(name) for name in [video.title] + video.alternative_titles)):
matches.add('title')
# imdb_id
if video.imdb_id and self.imdb_id and str(self.imdb_id) == str(video.imdb_id):
matches.add('imdb_id')
# year
if video.year and self.year == video.year:
matches.add('year')
else:
logger.info('%r is not a valid movie_kind', self.movie_kind)
return matches
# release_group
if video.release_group and self.release:
rg = sanitize_release_group(video.release_group)
if any(r in sanitize_release_group(self.release) for r in get_equivalent_release_groups(rg)):
matches.add('release_group')
# blatantly assume we've got a matching format if the release group matches
# fixme: smart?
#matches.add('format')
# resolution
if video.resolution and self.version and str(video.resolution) in self.version.lower():
matches.add('resolution')
# format
if video.format and self.format:
formats = [video.format]
if video.format == "WEB-DL":
formats.append("WEB")
for fmt in formats:
if fmt.lower() in self.format.lower():
matches.add('format')
break
matches |= guess_matches(video, guessit(self.release_info), partial=True)
self.matches = matches
return matches
class ArgenteamProvider(Provider, ProviderSubtitleArchiveMixin):
provider_name = 'argenteam'
languages = {Language.fromalpha2(l) for l in ['es']}
video_types = (Episode, Movie)
BASE_URL = "https://argenteam.net/"
API_URL = BASE_URL + "api/v1/"
subtitle_class = ArgenteamSubtitle
hearing_impaired_verifiable = False
language_list = list(languages)
multi_result_throttle = 2 # seconds
def __init__(self):
self.session = None
def initialize(self):
self.session = Session()
self.session.headers = {'User-Agent': os.environ.get("SZ_USER_AGENT", "Sub-Zero/2")}
def terminate(self):
self.session.close()
def search_ids(self, title, year=None, imdb_id=None, season=None, episode=None, titles=None):
"""Search movie or episode id from the `title`, `season` and `episode`.
:param imdb_id: imdb id of the given movie
:param titles: all titles of the given series or movie
:param year: release year of the given movie
        :param str title: name of the series (for episodes) or of the movie
:param int season: season of the episode.
:param int episode: episode number.
:return: list of ids
:rtype: list
"""
# make the search
query = title
titles = titles or []
is_episode = False
if season and episode:
is_episode = True
query = '%s S%#02dE%#02d' % (title, season, episode)
logger.info(u'Searching %s ID for %r', "episode" if is_episode else "movie", query)
r = self.session.get(self.API_URL + 'search', params={'q': query}, timeout=10)
r.raise_for_status()
results = r.json()
match_ids = []
if results['total'] >= 1:
for result in results["results"]:
if (result['type'] == "episode" and not is_episode) or (result['type'] == "movie" and is_episode):
continue
# shortcut in case of matching imdb id
if not is_episode and imdb_id and "imdb" in result and "tt%s" % result["imdb"] == str(imdb_id):
logger.debug("Movie matched by IMDB ID %s, taking shortcut", imdb_id)
match_ids = [result['id']]
break
# advanced title check in case of multiple movie results
if results['total'] > 1:
if not is_episode and year:
if result["title"] and not (sanitize(result["title"]) in (u"%s %s" % (sanitize(name), year)
for name in titles)):
continue
match_ids.append(result['id'])
else:
logger.error(u'No episode ID found for %r', query)
if match_ids:
logger.debug(u"Found matching IDs: %s", ", ".join(str(id) for id in match_ids))
return match_ids
def query(self, title, video, titles=None):
is_episode = isinstance(video, Episode)
season = episode = None
url = self.API_URL + 'movie'
if is_episode:
season = video.season
episode = video.episode
url = self.API_URL + 'episode'
argenteam_ids = self.search_ids(title, season=season, episode=episode, titles=titles)
else:
argenteam_ids = self.search_ids(title, year=video.year, imdb_id=video.imdb_id, titles=titles)
if not argenteam_ids:
return []
language = self.language_list[0]
subtitles = []
has_multiple_ids = len(argenteam_ids) > 1
for aid in argenteam_ids:
response = self.session.get(url, params={'id': aid}, timeout=10)
response.raise_for_status()
content = response.json()
imdb_id = year = None
returned_title = title
if not is_episode and "info" in content:
imdb_id = content["info"].get("imdb")
year = content["info"].get("year")
returned_title = content["info"].get("title", title)
for r in content['releases']:
for s in r['subtitles']:
movie_kind = "episode" if is_episode else "movie"
page_link = self.BASE_URL + movie_kind + "/" + str(aid)
# use https and new domain
download_link = s['uri'].replace('http://www.argenteam.net/', self.BASE_URL)
sub = ArgenteamSubtitle(language, page_link, download_link, movie_kind, returned_title,
season, episode, year, r.get('team'), r.get('tags'),
r.get('source'), r.get('codec'), content.get("tvdb"), imdb_id,
asked_for_release_group=video.release_group,
asked_for_episode=episode)
subtitles.append(sub)
if has_multiple_ids:
time.sleep(self.multi_result_throttle)
return subtitles
def list_subtitles(self, video, languages):
if isinstance(video, Episode):
titles = [video.series] + video.alternative_series
else:
titles = [video.title] + video.alternative_titles
for title in titles:
subs = self.query(title, video, titles=titles)
if subs:
return subs
time.sleep(self.multi_result_throttle)
return []
def download_subtitle(self, subtitle):
# download as a zip
logger.info('Downloading subtitle %r', subtitle)
r = self.session.get(subtitle.download_link, timeout=10)
r.raise_for_status()
# open the zip
with ZipFile(io.BytesIO(r.content)) as zf:
subtitle.content = self.get_subtitle_from_archive(subtitle, zf)
|
10e5dce38b02542dd541ad7d4f4e1f6994010992
|
820b6af9fd43b270749224bb278e5f714f655ac9
|
/Rendering/OpenGL2/Testing/Python/TestUserShader2.py
|
a40bd504432415b95b6cd2abbc10c6f6b3f4a0d4
|
[
"BSD-3-Clause"
] |
permissive
|
Kitware/VTK
|
49dee7d4f83401efce8826f1759cd5d9caa281d1
|
dd4138e17f1ed5dfe6ef1eab0ff6643fdc07e271
|
refs/heads/master
| 2023-09-01T10:21:57.496189
| 2023-09-01T08:20:15
| 2023-09-01T08:21:05
| 631,615
| 2,253
| 1,243
|
NOASSERTION
| 2023-09-14T07:53:03
| 2010-04-27T15:12:58
|
C++
|
UTF-8
|
Python
| false
| false
| 3,034
|
py
|
TestUserShader2.py
|
#!/usr/bin/env python
import sys
from vtkmodules.vtkCommonCore import (
VTK_OBJECT,
vtkCommand,
)
from vtkmodules.vtkFiltersCore import vtkTriangleMeshPointNormals
from vtkmodules.vtkIOPLY import vtkPLYReader
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkRenderer,
)
from vtkmodules.vtkRenderingOpenGL2 import vtkOpenGLPolyDataMapper
from vtkmodules.util.misc import calldata_type
import vtkmodules.vtkInteractionStyle
import vtkmodules.vtkRenderingFreeType
import vtkmodules.vtkRenderingOpenGL2
from vtkmodules.util.misc import vtkGetDataRoot
'''
Prevent .pyc files from being created.
Stops the vtk source being polluted
by .pyc files.
'''
sys.dont_write_bytecode = True
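# The mapper fires UpdateShaderEvent each time the shader program is bound;
# this callback receives the program and sets the diffuse color uniform.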
@calldata_type(VTK_OBJECT)
def vtkShaderCallback(caller, event, calldata):
program = calldata
if program is not None:
diffuseColor = [0.4, 0.7, 0.6]
program.SetUniform3f("diffuseColorUniform", diffuseColor)
renWin = vtkRenderWindow()
renWin.SetSize(400, 400)
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren = vtkRenderer()
ren.SetBackground(0.0, 0.0, 0.0)
ren.GradientBackgroundOn()
renWin.AddRenderer(ren)
actor = vtkActor()
ren.AddActor(actor)
reader = vtkPLYReader()
reader.SetFileName(vtkGetDataRoot() + "/Data/dragon.ply")
norms = vtkTriangleMeshPointNormals()
norms.SetInputConnection(reader.GetOutputPort())
mapper = vtkOpenGLPolyDataMapper()
mapper.SetInputConnection(norms.GetOutputPort())
actor.SetMapper(mapper)
actor.GetProperty().SetAmbientColor(0.2, 0.2, 1.0)
actor.GetProperty().SetDiffuseColor(1.0, 0.65, 0.7)
actor.GetProperty().SetSpecularColor(1.0, 1.0, 1.0)
actor.GetProperty().SetSpecular(0.5)
actor.GetProperty().SetDiffuse(0.7)
actor.GetProperty().SetAmbient(0.5)
actor.GetProperty().SetSpecularPower(20.0)
actor.GetProperty().SetOpacity(1.0)
sp = actor.GetShaderProperty()
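# Override the default shaders: the vertex shader warps gl_Position based on
# the clip-space x/y magnitude, and the fragment shader combines the normal
# with a simple diffuse/specular term.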
sp.SetVertexShaderCode(
"//VTK::System::Dec\n"
"in vec4 vertexMC;\n"
"//VTK::Normal::Dec\n"
"uniform mat4 MCDCMatrix;\n"
"void main () {\n"
" normalVCVSOutput = normalMatrix * normalMC;\n"
" vec4 tmpPos = MCDCMatrix * vertexMC;\n"
" gl_Position = tmpPos*vec4(0.2+0.8*abs(tmpPos.x),0.2+0.8*abs(tmpPos.y),1.0,1.0);\n"
"}\n"
)
sp.SetFragmentShaderCode(
"//VTK::System::Dec\n"
"//VTK::Output::Dec\n"
"in vec3 normalVCVSOutput;\n"
"uniform vec3 diffuseColorUniform;\n"
"void main () {\n"
" float df = max(0.0, normalVCVSOutput.z);\n"
" float sf = pow(df, 20.0);\n"
" vec3 diffuse = df * diffuseColorUniform;\n"
" vec3 specular = sf * vec3(0.4,0.4,0.4);\n"
" gl_FragData[0] = vec4(0.3*abs(normalVCVSOutput) + 0.7*diffuse + specular, 1.0);\n"
"}\n"
)
mapper.AddObserver(vtkCommand.UpdateShaderEvent, vtkShaderCallback)
renWin.Render()
ren.GetActiveCamera().SetPosition(-0.2, 0.4, 1)
ren.GetActiveCamera().SetFocalPoint(0, 0, 0)
ren.GetActiveCamera().SetViewUp(0, 1, 0)
ren.ResetCamera()
ren.GetActiveCamera().Zoom(2.0)
renWin.Render()
iren.Start()
|
d0b7c64323ac25a82bf4ac0d58c87f0f77086d5d
|
f3dfbfb9c128ac5bc7c0098f7eff91a2119d6183
|
/tests/sequence/test_gff.py
|
5c6ee77b43ad822488a0de112a5cb17b910d318a
|
[
"BSD-3-Clause"
] |
permissive
|
biotite-dev/biotite
|
2c2afafc6c4dad51af023c50c156c8f19a20154d
|
67d801683bfe79087a8e67e82de7333e79c827bb
|
refs/heads/master
| 2023-09-06T00:03:24.761607
| 2023-09-03T14:28:27
| 2023-09-03T14:28:27
| 98,795,444
| 463
| 80
|
BSD-3-Clause
| 2023-09-09T16:47:12
| 2017-07-30T12:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 6,481
|
py
|
test_gff.py
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
from tempfile import TemporaryFile
from os.path import join
import biotite.sequence as seq
import biotite.sequence.io.gff as gff
import biotite.sequence.io.genbank as gb
import numpy as np
import pytest
from ..util import data_dir
@pytest.mark.parametrize(
"path",
["bt_lysozyme.gff3", "gg_avidin.gff3", "ec_bl21.gff3", "sc_chrom1.gff3"]
)
def test_conversion_lowlevel(path):
"""
    Test whether the low-level GFF3 interface can properly read
    a GFF3 file and write it back, without the data changing.
"""
gff_file = gff.GFFFile.read(join(data_dir("sequence"), path))
ref_entries = [entry for entry in gff_file]
gff_file = gff.GFFFile()
for entry in ref_entries:
gff_file.append(*entry)
temp = TemporaryFile("w+")
gff_file.write(temp)
temp.seek(0)
gff_file = gff.GFFFile.read(temp)
temp.close()
test_entries = [field for field in gff_file]
assert test_entries == ref_entries
@pytest.mark.parametrize(
"path",
["bt_lysozyme.gff3", "gg_avidin.gff3", "ec_bl21.gff3", "sc_chrom1.gff3"]
)
def test_conversion_highlevel(path):
"""
    Test whether the high-level GFF3 interface can properly read
    the features from a GFF3 file and write them back to a file
    without the data changing.
The 'phase' is tested additionally, since it is not part of a
`Feature` object.
"""
gff_file = gff.GFFFile.read(join(data_dir("sequence"), path))
ref_annot = gff.get_annotation(gff_file)
ref_phases = []
for _, _, type, _, _, _, _, phase, _ in gff_file:
if type == "CDS":
ref_phases.append(phase)
gff_file = gff.GFFFile()
gff.set_annotation(gff_file, ref_annot)
temp = TemporaryFile("w+")
gff_file.write(temp)
temp.seek(0)
gff_file = gff.GFFFile.read(temp)
temp.close()
test_annot = gff.get_annotation(gff_file)
test_phases = []
for _, _, type, _, _, _, _, phase, _ in gff_file:
if type == "CDS":
test_phases.append(phase)
assert ref_annot == test_annot
assert test_phases == ref_phases
@pytest.mark.parametrize(
"path", ["bt_lysozyme.gp", "gg_avidin.gb", "ec_bl21.gb", "sc_chrom1.gb"]
)
def test_genbank_consistency(path):
"""
Test whether the same annotation (if reasonable) can be read from a
GFF3 file and a GenBank file.
"""
gb_file = gb.GenBankFile.read(join(data_dir("sequence"), path))
ref_annot = gb.get_annotation(gb_file)
gff_file = gff.GFFFile.read(join(data_dir("sequence"), path[:-3] + ".gff3"))
test_annot = gff.get_annotation(gff_file)
# Remove qualifiers, since they will be different
# in GFF3 and GenBank
ref_annot = seq.Annotation(
[seq.Feature(feature.key, feature.locs) for feature in ref_annot]
)
test_annot = seq.Annotation(
[seq.Feature(feature.key, feature.locs) for feature in test_annot]
)
for feature in test_annot:
# Only CDS, gene, intron and exon should be equal
# in GenBank and GFF3
if feature.key in ["CDS", "gene", "intron", "exon"]:
try:
assert feature in test_annot
except AssertionError:
print(feature.key)
for loc in feature.locs:
print(loc)
raise
def test_file_access():
"""
Test getting, setting, deleting and inserting entries in a GFF3
file.
"""
file = gff.GFFFile()
entry_scaffold = ("ab", "cd", 1, 2, None, None, None, {"Id":"foo"})
entry = ("a",) + entry_scaffold
file.append(*entry)
assert file[0] == entry
file.append(*(("b",) + entry_scaffold))
file.insert(1, *(("c",) + entry_scaffold))
file[1] = ("d",) + entry_scaffold
file.insert(3, *(("e",) + entry_scaffold))
del file[2]
assert [seqid for seqid, _, _, _, _, _, _, _, _ in file] \
== ["a", "d", "e", ]
def test_entry_indexing():
"""
Test whether a GFF3 file is indexed correctly based on an artificial
test file with multiple directives, including '##FASTA'.
"""
with pytest.warns(UserWarning):
file = gff.GFFFile.read(
join(data_dir("sequence"), "indexing_test.gff3")
)
assert file._directives == [
("directive 1", 1),
("directive 2", 2),
("directive 3", 7),
("FASTA", 8),
]
assert file._entries == [3,4,6]
def test_percent_encoding():
"""
Test whether percent encoding is working correctly based on an
artificial test file.
"""
file = gff.GFFFile.read(join(data_dir("sequence"), "percent_test.gff3"))
seqid, source, type, start, end, score, strand, phase, attrib \
= file[0]
assert seqid == "123,456"
assert source == "ääh"
assert type == "regi&n"
assert attrib == {
"ID" : "AnID;AnotherID",
"Name" : "Ångström",
"c$l$r": "red\tgreen\tblue"
}
    file2 = gff.GFFFile()
    file2.append(seqid, source, type, start, end, score, strand, phase, attrib)
    assert (seqid, source, type, start, end, score, strand, phase, attrib) \
        == file2[0]
def test_error():
"""
Assert that certain exceptions are raised
"""
file = gff.GFFFile()
with pytest.raises(ValueError):
# 'seqid' beginning with '>' is not legal
file.append(">xyz", "ab", "cd", 1, 2, None, None, None, {"Id":"foo"})
with pytest.raises(ValueError):
# String fields must not be empty
file.append("", "ab", "cd", 1, 2, None, None, None, {"Id":"foo"})
with pytest.raises(ValueError):
# String fields must not be empty
file.append("xyz", "", "cd", 1, 2, None, None, None, {"Id":"foo"})
with pytest.raises(ValueError):
# String fields must not be empty
file.append("xyz", "ab", "", 1, 2, None, None, None, {"Id":"foo"})
def test_feature_without_id():
"""
A feature without 'ID' should raise an error if it has multiple
locations and consequently multiple entries in the GFF3 file.
"""
annot = seq.Annotation(
[seq.Feature(
key = "CDS",
locs = [seq.Location(1,2), seq.Location(4,5)],
qual = {"some" : "qualifiers"}
)]
)
file = gff.GFFFile()
with pytest.raises(ValueError):
gff.set_annotation(file, annot)
|
e5bdb1d3f47446f6ce98e98508bba2f691a6cc46
|
88d555a009f9075e59177fac70036892f397b439
|
/bin/save_model.py
|
5c02af7c0a257fe658113f8eca93aa7a348d7a50
|
[
"Apache-2.0"
] |
permissive
|
calico/basenji
|
f9f406971d355dda81821dcf274696a7d27e332d
|
615b9eec8a591783b16d959029ddad08edae853d
|
refs/heads/master
| 2023-09-04T11:14:15.620786
| 2023-07-27T00:05:13
| 2023-07-27T00:05:13
| 96,346,574
| 326
| 143
|
Apache-2.0
| 2023-08-16T00:36:32
| 2017-07-05T17:54:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,206
|
py
|
save_model.py
|
#!/usr/bin/env python
# Copyright 2020 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import os
import json
import tensorflow as tf
if tf.__version__[0] == '1':
tf.compat.v1.enable_eager_execution()
from basenji import seqnn
"""
save_model.py
Restore a model, and then re-save in a different format and/or with the trunk only.
"""
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <in_model_file> <out_model_file>'
parser = OptionParser(usage)
parser.add_option('-t','--trunk', dest='trunk',
default=False, action='store_true',
help='Save only trunk [Default: %default]')
(options, args) = parser.parse_args()
if len(args) != 3:
parser.error('Must provide parameters, input model, and output model')
else:
params_file = args[0]
in_model_file = args[1]
out_model_file = args[2]
# read model parameters
with open(params_file) as params_open:
params = json.load(params_open)
params_model = params['model']
# restore model
seqnn_model = seqnn.SeqNN(params_model)
if os.path.isfile(in_model_file):
seqnn_model.restore(in_model_file)
# save
seqnn_model.save(out_model_file, trunk=options.trunk)
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
|
86eb6f54be39b5c72be192882d2affe850049aad
|
d5a3aa96b30a5a6a355b4e004e494a6ef41a339c
|
/d3/constitutionaljews.py
|
47137d55c348f52acbb9c9f892602fb641f73c55
|
[
"MIT"
] |
permissive
|
Udzu/pudzu
|
4c1c134503f62fd1cc08a56e257b864033b38561
|
df5019802bc32064870f31cda8397ad14868cda0
|
refs/heads/master
| 2023-07-10T06:16:35.342990
| 2023-07-04T06:28:00
| 2023-07-04T06:28:00
| 97,936,607
| 120
| 28
|
MIT
| 2021-02-21T16:15:31
| 2017-07-21T10:34:16
|
Roff
|
UTF-8
|
Python
| false
| false
| 1,576
|
py
|
constitutionaljews.py
|
from pudzu.charts import *
from generate import *
FONT = sans
PALETTE = {
"j": "#2166ac",
"f": "#92c5de",
"i": "#9970ab",
}
DESCRIPTIONS = {
"j": "explicitly recognises Jewish community",
"i": """indirectly mentions Jews
- Russia: "Jewish Autonomous Oblast"
- South Africa: "Hebrew for religious purposes"
- Syria: "Zionist enemy\"""",
"f": """formerly mentioned Jews (//incomplete//)
- Ireland: "the Jewish Congregations" (1937-72)
- Norway: "Jews are excluded" (1814-51)""",
}
colormap = {}
for x in ["Egypt", "Iran", "Morocco", "Croatia", "North Macedonia"]: colormap[x] = "j"
for x in ["Ireland", "Norway"]: colormap[x] = "f"
for x in ["South Africa", "Syria", "Russia"] : colormap[x] = "i"
generate_datamap("constituionaljews", colormap, palette=PALETTE, codifier=partial(codify_countries, dependencies=False))
chart = Image.open("temp/constituionaljews.png")
legend = generate_legend(
[PALETTE[c] for c in DESCRIPTIONS],
[DESCRIPTIONS.get(c,c) for c in DESCRIPTIONS],
(40,...), partial(sans, 16), header="Written constitution...".upper())
chart = chart.place(legend, align=(0,1), padding=100)
title = Image.from_column([
Image.from_text("JEWS IN THE CONSTITUTION", sans(72, bold=True)),
Image.from_text("countries with written constitutions that mention Jews or Judaism", sans(36, italics=True))
], bg="white")
img = Image.from_column([title, chart], bg="white", padding=5)
img.place(Image.from_text("/u/Udzu", sans(16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/constituionaljews.png")
|
a22587b53aa1dd370bf91bb003976b6dfe1a26ac
|
c4b8e1e09dedbccd37ca008ecaaca4438610bbaf
|
/cpmpy/set_covering.py
|
88562d3ad9b1f030ca6f7378e18cb2d384343089
|
[
"MIT"
] |
permissive
|
hakank/hakank
|
4806598b98cb36dd51b24b0ab688f52dadfe9626
|
c337aaf8187f15dcdc4d5b09cd2ed0dbdb2e72c2
|
refs/heads/master
| 2023-08-15T00:21:52.750270
| 2023-07-27T16:21:40
| 2023-07-27T16:21:40
| 11,933,517
| 336
| 97
|
MIT
| 2023-07-27T11:19:42
| 2013-08-06T20:12:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,075
|
py
|
set_covering.py
|
"""
Set covering in cpmpy.
Placement of fire stations, from Winston 'Operations Research', page 486.
This cpmpy model was written by Hakan Kjellerstrand (hakank@gmail.com)
See also my cpmpy page: http://hakank.org/cpmpy/
"""
from cpmpy import *
import cpmpy.solvers
import numpy as np
from cpmpy_hakank import *
def set_covering():
# data
min_distance = 15
num_cities = 6
distance = [
[0, 10, 20, 30, 30, 20],
[10, 0, 25, 35, 20, 10],
[20, 25, 0, 15, 30, 20],
[30, 35, 15, 0, 15, 25],
[30, 20, 30, 15, 0, 14],
[20, 10, 20, 25, 14, 0]
]
# declare variables
x = boolvar(shape=num_cities,name="x")
# objective to minimize
z = intvar(0,10,name="z")
model = Model(minimize=z)
# constraints
model += [z == sum(x) ]
# ensure that all cities are covered
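    # (for every city i, at least one selected location j must lie within
    #  min_distance of i)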
for i in range(num_cities):
model += [sum([x[j] for j in range(num_cities) if distance[i][j] <= min_distance]) >= 1]
ss = CPM_ortools(model)
if ss.solve():
print("z:", z.value())
print("x:", x.value())
print()
set_covering()
|
90b42554cf39999061ef7913899c6d714a98cf80
|
1c024dfbca35f4c829d4b47bdfe900d1b1a2303b
|
/build/fuses/build.py
|
f5c9b89417a60a1e4f0e48f36798df41b0c37f20
|
[
"MIT"
] |
permissive
|
electron/electron
|
1ba6e31cf40f876044bff644ec49f8ec4923ca6a
|
0b0707145b157343c42266d2586ed9413a1d54f5
|
refs/heads/main
| 2023-09-01T04:28:11.016383
| 2023-08-31T14:36:43
| 2023-08-31T14:36:43
| 9,384,267
| 99,768
| 18,388
|
MIT
| 2023-09-14T19:50:49
| 2013-04-12T01:47:36
|
C++
|
UTF-8
|
Python
| false
| false
| 2,356
|
py
|
build.py
|
#!/usr/bin/env python3
from collections import OrderedDict
import json
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
SENTINEL = "dL7pKGdnNz796PbbjQWNKmHXBZaB9tsX"
TEMPLATE_H = """
#ifndef ELECTRON_FUSES_H_
#define ELECTRON_FUSES_H_
#if defined(WIN32)
#define FUSE_EXPORT __declspec(dllexport)
#else
#define FUSE_EXPORT __attribute__((visibility("default")))
#endif
namespace electron::fuses {
extern const volatile char kFuseWire[];
{getters}
} // namespace electron::fuses
#endif // ELECTRON_FUSES_H_
"""
TEMPLATE_CC = """
#include "electron/fuses.h"
namespace electron::fuses {
const volatile char kFuseWire[] = { /* sentinel */ {sentinel}, /* fuse_version */ {fuse_version}, /* fuse_wire_length */ {fuse_wire_length}, /* fuse_wire */ {initial_config}};
{getters}
} // namespace electron::fuses
"""
with open(os.path.join(dir_path, "fuses.json5"), 'r') as f:
fuse_defaults = json.loads(''.join(line for line in f.readlines() if not line.strip()[0] == "/"), object_pairs_hook=OrderedDict)
fuse_version = fuse_defaults['_version']
del fuse_defaults['_version']
del fuse_defaults['_schema']
del fuse_defaults['_comment']
if fuse_version >= pow(2, 8):
raise Exception("Fuse version can not exceed one byte in size")
fuses = fuse_defaults.keys()
initial_config = ""
getters_h = ""
getters_cc = ""
index = len(SENTINEL) + 1
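# kFuseWire layout: sentinel bytes, version byte, wire-length byte, then one
# byte per fuse, so the first fuse value sits at index len(SENTINEL) + 2.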
for fuse in fuses:
index += 1
initial_config += fuse_defaults[fuse]
name = ''.join(word.title() for word in fuse.split('_'))
getters_h += "FUSE_EXPORT bool Is{name}Enabled();\n".replace("{name}", name)
getters_cc += """
bool Is{name}Enabled() {
return kFuseWire[{index}] == '1';
}
""".replace("{name}", name).replace("{index}", str(index))
def c_hex(n):
s = hex(n)[2:]
return "0x" + s.rjust(2, '0')
def hex_arr(s):
arr = []
for char in s:
arr.append(c_hex(ord(char)))
return ",".join(arr)
header = TEMPLATE_H.replace("{getters}", getters_h.strip())
impl = TEMPLATE_CC.replace("{sentinel}", hex_arr(SENTINEL))
impl = impl.replace("{fuse_version}", c_hex(fuse_version))
impl = impl.replace("{fuse_wire_length}", c_hex(len(fuses)))
impl = impl.replace("{initial_config}", hex_arr(initial_config))
impl = impl.replace("{getters}", getters_cc.strip())
with open(sys.argv[1], 'w') as f:
f.write(header)
with open(sys.argv[2], 'w') as f:
f.write(impl)
|
9744069c9ab0415f92832b0391ec620221284f1a
|
6b43d43b8c0dbdb94fbd9146a24c91997bd3897b
|
/steam/protobufs/offline_ticket_pb2.py
|
4988f22d9ee567937460de15e44a580e8d3b0b89
|
[
"MIT"
] |
permissive
|
ValvePython/steam
|
9690390ec174e868635d0de9d4868d5af1f57ab7
|
26166e047b66a7be10bdf3c90e2e14de9283ab5a
|
refs/heads/master
| 2023-07-09T03:35:42.455132
| 2023-05-05T23:19:39
| 2023-05-05T23:25:11
| 38,554,391
| 1,001
| 194
|
MIT
| 2023-06-13T22:51:03
| 2015-07-05T02:41:30
|
Python
|
UTF-8
|
Python
| false
| true
| 4,208
|
py
|
offline_ticket_pb2.py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: offline_ticket.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='offline_ticket.proto',
package='',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x14offline_ticket.proto\"w\n\x0eOffline_Ticket\x12\x18\n\x10\x65ncrypted_ticket\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c\x12\x0c\n\x04kdf1\x18\x03 \x01(\x05\x12\r\n\x05salt1\x18\x04 \x01(\x0c\x12\x0c\n\x04kdf2\x18\x05 \x01(\x05\x12\r\n\x05salt2\x18\x06 \x01(\x0c'
)
_OFFLINE_TICKET = _descriptor.Descriptor(
name='Offline_Ticket',
full_name='Offline_Ticket',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='encrypted_ticket', full_name='Offline_Ticket.encrypted_ticket', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature', full_name='Offline_Ticket.signature', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='kdf1', full_name='Offline_Ticket.kdf1', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='salt1', full_name='Offline_Ticket.salt1', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='kdf2', full_name='Offline_Ticket.kdf2', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='salt2', full_name='Offline_Ticket.salt2', index=5,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=24,
serialized_end=143,
)
DESCRIPTOR.message_types_by_name['Offline_Ticket'] = _OFFLINE_TICKET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Offline_Ticket = _reflection.GeneratedProtocolMessageType('Offline_Ticket', (_message.Message,), {
'DESCRIPTOR' : _OFFLINE_TICKET,
'__module__' : 'offline_ticket_pb2'
# @@protoc_insertion_point(class_scope:Offline_Ticket)
})
_sym_db.RegisterMessage(Offline_Ticket)
# @@protoc_insertion_point(module_scope)
|
000f5c643295fe555d2812df2a9f0fa595d99283
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-core/Tools/comparebench.py
|
2db24bf49ec794d91839fcc1b46d2d7a0fbad8b0
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,446
|
py
|
comparebench.py
|
#!/usr/bin/env python3
import pathlib
def readinfo(path):
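    """Parse a single pyobjcbench results file: lines starting with '@' are
    metadata key/value pairs (e.g. the python and objc versions), all other
    non-empty lines are "name: value" benchmark entries with a float value.
    """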
result = {}
bench = result["bench"] = {}
for line in path.open():
if line.startswith("@"):
item = line.strip()[1:].split(None, 1)
result[item[0]] = item[1]
else:
if not line.strip():
continue
item = line.split(":", 1)
bench[item[0].strip()] = float(item[1].strip())
return result
def main():
curdir = pathlib.Path(__file__).parent
results = []
for p in (curdir / "results").glob("pyobjcbench*.txt"):
results.append(readinfo(p))
results.sort(key=lambda item: (item["python"], item["objc"]))
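    # the first file (after sorting by python/objc version) is the baseline
    # that the percentage deltas below are computed against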
keys = list(results[0]["bench"].keys())
headers = ("test name",) + tuple(r["objc"] for r in results)
fmt = " | ".join(["{:40s}"] + ["{:16s}"] * len(results))
print(fmt.format(*headers))
print("+".join(["-" * 41] + ["-" * 18] * len(results)))
for key in keys:
base = results[0]["bench"][key]
row = [key]
for r in results:
try:
cur = r["bench"][key]
except KeyError as exc:
print(f"{key}: {exc!r}")
continue
if cur == base:
row.append(f"{cur:.3f}")
else:
row.append(f"{cur:.3f} ({((cur-base)/base)*100:+.1f}%)")
print(fmt.format(*row))
if __name__ == "__main__":
main()
|
9dae09ddcfe3a12a64e2b203495687ec3e9fd2db
|
52e1e441e7dddca4edb73a56ca67be8c1a173a0f
|
/zhihurss/util/config.py
|
cf40a8bfd66298d70cef5d4482bd020dc6d85c46
|
[] |
no_license
|
SimplyY/zhihu-rss
|
cd05ea6101108d2ee40f1fbdfd17fba8850e472e
|
fe338f3d8421be2a9c32eada8ac988ed90b6acc9
|
refs/heads/master
| 2021-01-15T07:54:14.306023
| 2019-08-05T02:02:54
| 2019-08-05T02:02:54
| 38,658,837
| 153
| 43
| null | 2015-11-02T12:30:58
| 2015-07-07T02:27:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,311
|
py
|
config.py
|
#!/usr/bin/env python
__author__ = 'yuwei'
from .fs import ensure_dir
from .const import CONFIG_PATH, COOKIES_PATH
from ..model.noticer import Noticer
from ..model.feeds_list import FeedsList
from zhihu import ZhihuClient
import json
def set_config(my_app):
    ensure_dir(CONFIG_PATH)
    data = None
    config = {}
    try:
        with open(CONFIG_PATH, 'r') as f:
            data = f.readline()
        if data:
            try:
                config = json.loads(data)
            except ValueError:
                return
    except FileNotFoundError:  # first run or the config file was deleted
        with open(CONFIG_PATH, 'w'):
            pass
    if data:
        if config.get("is_sign"):
            try:
                # log in with the saved cookies if the cookies file still exists
                with open(COOKIES_PATH, 'r'):
                    pass
                FeedsList.client = ZhihuClient(COOKIES_PATH)
                Noticer.client = ZhihuClient(COOKIES_PATH)
            except FileNotFoundError:  # cookies file was deleted
                FeedsList.client = ZhihuClient()
                Noticer.client = ZhihuClient()
        else:
            FeedsList.client = ZhihuClient()
            Noticer.client = ZhihuClient()
        if config.get("proxy"):
            FeedsList.client.set_proxy(config["proxy"])
            Noticer.client.set_proxy(config["proxy"])
|
7288f00d2e47b0ee4c50f4a024e09ea8ff477cf7
|
8d44e796eaf0c8e11bbc2a27ef093e97a25b6f4a
|
/test/nodes/test_join_answers.py
|
67567ceabdfb0613c57c61b09980384287d7a5d4
|
[
"Apache-2.0"
] |
permissive
|
deepset-ai/haystack
|
caa5287051d1771395ea624b58097000825bad81
|
5f1256ac7e5734c2ea481e72cb7e02c34baf8c43
|
refs/heads/main
| 2023-09-01T02:41:23.490526
| 2023-08-31T15:33:12
| 2023-08-31T15:33:12
| 221,654,678
| 10,599
| 1,558
|
Apache-2.0
| 2023-09-14T17:09:42
| 2019-11-14T09:05:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,103
|
py
|
test_join_answers.py
|
import pytest
from haystack.schema import Answer
from haystack.nodes import JoinAnswers
@pytest.mark.unit
@pytest.mark.parametrize("join_mode", ["concatenate", "merge"])
def test_joinanswers(join_mode):
inputs = [{"answers": [Answer(answer="answer 1", score=0.7)]}, {"answers": [Answer(answer="answer 2", score=0.8)]}]
join_answers = JoinAnswers(join_mode=join_mode)
result, _ = join_answers.run(inputs)
assert len(result["answers"]) == 2
assert result["answers"] == sorted(result["answers"], reverse=True)
result, _ = join_answers.run(inputs, top_k_join=1)
assert len(result["answers"]) == 1
assert result["answers"][0].answer == "answer 2"
@pytest.mark.unit
def test_joinanswers_preserves_root_node():
# https://github.com/deepset-ai/haystack-private/issues/51
inputs = [
{"answers": [Answer(answer="answer 1", score=0.7)], "root_node": "Query"},
{"answers": [Answer(answer="answer 2", score=0.8)], "root_node": "Query"},
]
join_docs = JoinAnswers()
result, _ = join_docs.run(inputs)
assert result["root_node"] == "Query"
|
14ead28134ae98361f6109cf6c5752b702632470
|
308c5e295f95f71a01282b75bee0769dc10e906b
|
/Lung Segmentation/Prepare_data.py
|
9ad3e9eaf21a7b1c6a4fa0979809a53347f0c563
|
[] |
no_license
|
rezazad68/BCDU-Net
|
fe9164db2495820201d5fadef16a6ed34efc2aac
|
993543d16dd37bd0f910a150e03c89297d8a9c5d
|
refs/heads/master
| 2023-08-15T23:14:41.029932
| 2022-02-26T07:37:18
| 2022-02-26T07:37:18
| 193,223,873
| 863
| 256
| null | 2023-01-30T15:15:22
| 2019-06-22T10:59:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,255
|
py
|
Prepare_data.py
|
from __future__ import division
import nibabel as nib
import numpy as np
import Reza_functions as RF
import glob
import os
# Define Train data and mask
Data_train = []
Mask_train = []
Maska_train = []
FOV_train = []
idx_count =1
Tr_add = '3d_images'
Tr_list = glob.glob(Tr_add+'/*.gz')
for idx in range(len(Tr_list)):
b = Tr_list[idx]
a = b[len(Tr_add)+1:len(Tr_add)+4]
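    # volumes come in IMG_* / MASK_* pairs; when an IMG file is found, load
    # it together with its matching MASK file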
if a=='IMG':
print(idx_count)
a = b[len(Tr_add)+5:len(b)]
add = (Tr_add+'/MASK_' + a)
vol = nib.load(Tr_list[idx])
seg = nib.load(add)
# Get the axials images and corresponding masks
vol_ims, lung, around_lung, FOV = RF.return_axials(vol, seg)
segmentation = seg.get_data()
# Insert samples to the Train data, which has the segmentation label
for idx in range(vol.shape[0]):
if ~( np.sum(np.sum(np.sum(segmentation[idx, :, :]))) == 0):
Data_train.append(vol_ims [idx, :, :])
Mask_train.append(lung[idx, :, :])
Maska_train.append(around_lung[idx, :, :])
FOV_train.append(FOV[idx, :, :])
idx_count += 1
Data_train = np.array(Data_train)
Mask_train = np.array(Mask_train)
Maska_train = np.array(Maska_train)
FOV_train = np.array(FOV_train)
# We use 70% of the data for training and 30% for test
alpha = np.int16(np.floor(Data_train.shape[0]* 0.7))
en_d = Data_train.shape[0]
Train_img = Data_train[0:alpha,:,:]
Test_img = Data_train[alpha:en_d,:,:]
Train_mask = Mask_train[0:alpha,:,:]
Test_mask = Mask_train[alpha:en_d,:,:]
Train_maska = Maska_train[0:alpha,:,:]
Test_maska = Maska_train[alpha:en_d,:,:]
FOV_tr = FOV_train[0:alpha,:,:]
FOV_te = FOV_train[alpha:en_d,:,:]
folder = './processed_data/'
if not os.path.exists(folder):
os.makedirs(folder)
np.save(folder+'data_train' , Train_img)
np.save(folder+'data_test' , Test_img)
np.save(folder+'mask_train' , Train_mask)
np.save(folder+'mask_test' , Test_mask)
np.save(folder+'Train_maska' , Train_maska)
np.save(folder+'Test_maska' , Test_maska)
np.save(folder+'FOV_tr' , FOV_tr)
np.save(folder+'FOV_te' , FOV_te)
|
d03a04754e54034459760bc30efe729f0969048c
|
450916eee7580beb928ed8f387db4f0a8c1aa508
|
/examples/syllabus/stellar_minimal.py
|
b658769b8a8ffcaffe8712bae9443ecb5f18df35
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
amusecode/amuse
|
42095545893f5a86ea79c2a52ce54d3ce8eb204f
|
b57c1e2fda1457d5025307be105c2aa59b19b574
|
refs/heads/main
| 2023-08-31T04:50:48.880044
| 2023-08-30T12:00:20
| 2023-08-30T12:00:20
| 18,516,331
| 158
| 118
|
Apache-2.0
| 2023-08-30T12:00:22
| 2014-04-07T12:35:07
|
AMPL
|
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
stellar_minimal.py
|
"""
Minimalistic routine for running a stellar evolution code
"""
from amuse.lab import *
def main(M, z, model_time):
# stellar = MESA()
stellar = SSE()
stellar.parameters.metallicity = z
stellar.particles.add_particle(Particle(mass=M))
stellar.commit_particles()
initial_luminosity = stellar.particles.luminosity
dt = 0.1 | units.Myr
time = 0 | units.Myr
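    # evolve the star in 0.1 Myr steps until it reaches the requested age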
while stellar.particles[0].age<model_time:
time+=dt
stellar.evolve_model(time)
final_luminosity = stellar.particles.luminosity
print("L(t=0)=", initial_luminosity, \
", L (t=", stellar.particles.age, ")=", \
final_luminosity, stellar.particles.radius, stellar.particles.mass.in_(units.MSun))
stellar.stop()
def new_option_parser():
from amuse.units.optparse import OptionParser
result = OptionParser()
result.add_option("-M", unit= units.MSun,
dest="M", type="float",default = 1.0 | units.MSun,
help="stellar mass [%default]")
result.add_option("-t", unit = units.Myr,
dest="model_time", type="float",
default = 4700.0|units.Myr,
help="end time of the simulation [%default]")
result.add_option("-z", dest="z", type="float",
                      default = 0.02, help="metallicity [%default]")
return result
if __name__ in ('__main__', '__plot__'):
o, arguments = new_option_parser().parse_args()
main(**o.__dict__)
|
8f3044d8d7b583a65174adb9c1013d8130e8a848
|
1b364500b756c5096d94358d3ad745e248c20dc4
|
/wradlib/comp.py
|
7c47b20d78f056eaf36ab4504c5ba35825856705
|
[
"MIT"
] |
permissive
|
wradlib/wradlib
|
fdf3b3670aa8b2ea6ddf4bb6083321992eb361a9
|
17f876c2c6257171888d6e04f5cbb86f0ac46f90
|
refs/heads/main
| 2023-08-26T09:07:45.866267
| 2023-05-31T06:12:50
| 2023-05-31T06:12:50
| 52,089,638
| 228
| 89
|
MIT
| 2023-09-11T23:29:48
| 2016-02-19T13:32:22
|
Python
|
UTF-8
|
Python
| false
| false
| 6,063
|
py
|
comp.py
|
#!/usr/bin/env python
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Composition
^^^^^^^^^^^
Combine data from different radar locations on one common set of locations
.. autosummary::
:nosignatures:
:toctree: generated/
{}
"""
__all__ = ["extract_circle", "togrid", "compose_ko", "compose_weighted"]
__doc__ = __doc__.format("\n ".join(__all__))
import numpy as np
def extract_circle(center, radius, coords):
"""Extract the indices of ``coords`` which fall within a circle \
defined by ``center`` and ``radius``.
Parameters
----------
center : float
radius : float
coords : :class:`numpy:numpy.ndarray`
array of float with shape (numpoints, 2)
Returns
-------
output : :class:`numpy:numpy.ndarray`
1-darray of integers, index array referring to the ``coords`` array
"""
return np.where(((coords - center) ** 2).sum(axis=-1) < radius**2)[0]
def togrid(src, trg, radius, center, data, interpol, *args, **kwargs):
"""Interpolate data from a radar location to the composite grid or set of \
locations
Parameters
----------
src : :class:`numpy:numpy.ndarray`
array of float of shape (numpoints, ndim),
cartesian x / y coordinates of the radar bins
trg : :class:`numpy:numpy.ndarray`
array of float of shape (numpoints, ndim),
cartesian x / y coordinates of the composite
radius : float
the radius of the radar circle (same units as src and trg)
center : :class:`numpy:numpy.ndarray`
array of float, the location coordinates of the radar
data : :class:`numpy:numpy.ndarray`
array of float, the data that should be transferred to composite
interpol : :class:`~wradlib.ipol.IpolBase`
an interpolation class name from :mod:`wradlib.ipol`
e.g. :class:`~wradlib.ipol.Nearest` or :class:`~wradlib.ipol.Idw`
Other Parameters
----------------
*args : dict
arguments of Interpolator (see class documentation)
Keyword Arguments
-----------------
**kwargs : dict
keyword arguments of Interpolator (see class documentation)
Returns
-------
output : :class:`numpy:numpy.ndarray`
array of float, data of the radar circle which is interpolated on
the composite grid
Note
----
Keyword arguments to be used while calling the interpolator can be issued as
`call_kwargs`, eg. togrid(..., call_kwargs=dict(maxdist=10))
Examples
--------
See :ref:`/notebooks/basics/wradlib_workflow.ipynb#Gridding`.
"""
# get indices to select the subgrid from the composite grid
ix = extract_circle(center, radius, trg)
call_kwargs = kwargs.pop("call_kwargs", {})
# interpolate on subgrid
ip = interpol(src, trg[ix], *args, **kwargs)
data_on_subgrid = ip(data, **call_kwargs).reshape(len(ix))
# create container for entire grid
composegridshape = [len(trg)]
composegridshape.extend(data.shape[1:])
compose_grid = np.repeat(np.nan, len(trg) * np.prod(data.shape[1:])).reshape(
composegridshape
)
# push subgrid results into the large grid
compose_grid[ix] = data_on_subgrid
return compose_grid
def compose_ko(radargrids, qualitygrids):
"""Composes grids according to quality information using quality \
information as a knockout criterion.
The value of the composed pixel is taken from the radargrid whose
quality grid has the highest value.
Parameters
----------
radargrids : list
radar data to be composited. Each item in the list corresponds to the
data of one radar location. All items must have the same shape.
qualitygrids : list
quality data to decide upon which radar site will contribute its pixel
to the composite. Then length of this list must be the same as that
of `radargrids`. All items must have the same shape and be aligned with
the items in `radargrids`.
Returns
-------
composite : :class:`numpy:numpy.ndarray`
"""
# first add a fallback array for all pixels having missing values in all
# radargrids
radarfallback = np.repeat(np.nan, np.prod(radargrids[0].shape)).reshape(
radargrids[0].shape
)
radargrids.append(radarfallback)
radarinfo = np.array(radargrids)
# then do the same for the quality grids
qualityfallback = np.repeat(-np.inf, np.prod(radargrids[0].shape)).reshape(
radargrids[0].shape
)
qualitygrids.append(qualityfallback)
qualityinfo = np.array(qualitygrids)
select = np.nanargmax(qualityinfo, axis=0)
composite = radarinfo.reshape((radarinfo.shape[0], -1))[
select.ravel(), np.arange(np.prod(radarinfo.shape[1:]))
].reshape(radarinfo.shape[1:])
radargrids.pop()
qualitygrids.pop()
return composite
def compose_weighted(radargrids, qualitygrids):
"""Composes grids according to quality information using a weighted \
averaging approach.
The value of the composed pixel is the weighted average of all radar
pixels with the quality values being the weights.
Parameters
----------
radargrids : list
list of arrays
qualitygrids : list
list of arrays
Returns
-------
composite : :class:`numpy:numpy.ndarray`
Examples
--------
See :ref:`/notebooks/workflow/recipe1.ipynb`.
See Also
--------
:func:`~wradlib.comp.compose_ko`
"""
radarinfo = np.array(radargrids)
qualityinfo = np.array(qualitygrids)
# overall nanmask
nanmask = np.all(np.isnan(radarinfo), axis=0)
# quality grids must contain values only where radarinfo does
qualityinfo[np.isnan(radarinfo)] = np.nan
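    # normalize the weights so that the qualities at each pixel sum to one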
qualityinfo /= np.nansum(qualityinfo, axis=0)
composite = np.nansum(radarinfo * qualityinfo, axis=0)
composite[nanmask] = np.nan
return composite
if __name__ == "__main__":
print("wradlib: Calling module <comp> as main...")
|
82ede46c28550fee8485f34c4acb937edb758263
|
ca593f5a272ce0478ba6f52d2670cb9dd8564b00
|
/mycroft/skills/fallback_skill.py
|
a616accbb83e4ead97175f6b53d805d3067869a8
|
[
"Apache-2.0"
] |
permissive
|
MycroftAI/mycroft-core
|
d41ce0fccfe4c29d8d802dcc6bcf583dc356d9ce
|
8051e4e1f89d5ed1f63f06db5d3570371ae92e5d
|
refs/heads/master
| 2023-08-23T17:45:10.569985
| 2021-12-10T04:51:59
| 2021-12-10T04:51:59
| 59,299,524
| 6,838
| 1,719
|
Apache-2.0
| 2023-08-15T10:25:32
| 2016-05-20T14:11:07
|
Python
|
UTF-8
|
Python
| false
| false
| 7,989
|
py
|
fallback_skill.py
|
# Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The fallback skill implements a special type of skill handling
utterances not handled by the intent system.
"""
import operator
from mycroft.metrics import report_timing, Stopwatch
from mycroft.util.log import LOG
from .mycroft_skill import MycroftSkill, get_handler_name
class FallbackSkill(MycroftSkill):
"""Fallbacks come into play when no skill matches an Adapt or closely with
a Padatious intent. All Fallback skills work together to give them a
view of the user's utterance. Fallback handlers are called in an order
determined the priority provided when the the handler is registered.
======== ======== ================================================
Priority Who? Purpose
======== ======== ================================================
1-4 RESERVED Unused for now, slot for pre-Padatious if needed
5 MYCROFT Padatious near match (conf > 0.8)
6-88 USER General
89 MYCROFT Padatious loose match (conf > 0.5)
90-99 USER Uncaught intents
100+ MYCROFT Fallback Unknown or other future use
======== ======== ================================================
Handlers with the numerically lowest priority are invoked first.
Multiple fallbacks can exist at the same priority, but no order is
guaranteed.
A Fallback can either observe or consume an utterance. A consumed
    utterance will not be seen by any other Fallback handlers.
"""
fallback_handlers = {}
wrapper_map = [] # Map containing (handler, wrapper) tuples
def __init__(self, name=None, bus=None, use_settings=True):
super().__init__(name, bus, use_settings)
# list of fallback handlers registered by this instance
self.instance_fallback_handlers = []
@classmethod
def make_intent_failure_handler(cls, bus):
"""Goes through all fallback handlers until one returns True"""
def handler(message):
start, stop = message.data.get('fallback_range', (0, 101))
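            # 'fallback_range' lets the caller restrict which priority band
            # is tried; the default (0, 101) covers every registered handler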
# indicate fallback handling start
LOG.debug('Checking fallbacks in range '
'{} - {}'.format(start, stop))
bus.emit(message.forward("mycroft.skill.handler.start",
data={'handler': "fallback"}))
stopwatch = Stopwatch()
handler_name = None
with stopwatch:
sorted_handlers = sorted(cls.fallback_handlers.items(),
key=operator.itemgetter(0))
handlers = [f[1] for f in sorted_handlers
if start <= f[0] < stop]
for handler in handlers:
try:
if handler(message):
# indicate completion
status = True
handler_name = get_handler_name(handler)
bus.emit(message.forward(
'mycroft.skill.handler.complete',
data={'handler': "fallback",
"fallback_handler": handler_name}))
break
except Exception:
LOG.exception('Exception in fallback.')
else:
status = False
# indicate completion with exception
warning = 'No fallback could handle intent.'
bus.emit(message.forward('mycroft.skill.handler.complete',
data={'handler': "fallback",
'exception': warning}))
# return if the utterance was handled to the caller
bus.emit(message.response(data={'handled': status}))
# Send timing metric
if message.context.get('ident'):
ident = message.context['ident']
report_timing(ident, 'fallback_handler', stopwatch,
{'handler': handler_name})
return handler
@classmethod
def _register_fallback(cls, handler, wrapper, priority):
"""Register a function to be called as a general info fallback
Fallback should receive message and return
a boolean (True if succeeded or False if failed)
Lower priority gets run first
0 for high priority 100 for low priority
Args:
handler (callable): original handler, used as a reference when
removing
wrapper (callable): wrapped version of handler
priority (int): fallback priority
"""
while priority in cls.fallback_handlers:
priority += 1
cls.fallback_handlers[priority] = wrapper
cls.wrapper_map.append((handler, wrapper))
def register_fallback(self, handler, priority):
"""Register a fallback with the list of fallback handlers and with the
list of handlers registered by this instance
"""
def wrapper(*args, **kwargs):
if handler(*args, **kwargs):
self.make_active()
return True
return False
self.instance_fallback_handlers.append(handler)
self._register_fallback(handler, wrapper, priority)
@classmethod
def _remove_registered_handler(cls, wrapper_to_del):
"""Remove a registered wrapper.
Args:
wrapper_to_del (callable): wrapped handler to be removed
Returns:
(bool) True if one or more handlers were removed, otherwise False.
"""
found_handler = False
for priority, handler in list(cls.fallback_handlers.items()):
if handler == wrapper_to_del:
found_handler = True
del cls.fallback_handlers[priority]
if not found_handler:
LOG.warning('No fallback matching {}'.format(wrapper_to_del))
return found_handler
@classmethod
def remove_fallback(cls, handler_to_del):
"""Remove a fallback handler.
Args:
handler_to_del: reference to handler
Returns:
(bool) True if at least one handler was removed, otherwise False
"""
# Find wrapper from handler or wrapper
wrapper_to_del = None
for h, w in cls.wrapper_map:
if handler_to_del in (h, w):
wrapper_to_del = w
break
if wrapper_to_del:
cls.wrapper_map.remove((h, w))
remove_ok = cls._remove_registered_handler(wrapper_to_del)
else:
LOG.warning('Could not find matching fallback handler')
remove_ok = False
return remove_ok
def remove_instance_handlers(self):
"""Remove all fallback handlers registered by the fallback skill."""
self.log.info('Removing all handlers...')
while len(self.instance_fallback_handlers):
handler = self.instance_fallback_handlers.pop()
self.remove_fallback(handler)
def default_shutdown(self):
"""Remove all registered handlers and perform skill shutdown."""
self.remove_instance_handlers()
super(FallbackSkill, self).default_shutdown()
|
dd7bbebc86de325dcb2c35407ed8b48fda8cb7b6
|
8d281aaa134e3505397ec5b07306288964e6df35
|
/samples/SampleTorchSegmentation.py
|
2e2a236b93ffdf1b5ce00b31c440a99781124374
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/VideoProcessingFramework
|
eeb5b53cd214f1f37dad8f5a0e56598c45b17be9
|
82b51e7c29cb1c8259721170f39e95f3e95b4ad4
|
refs/heads/master
| 2023-08-31T06:13:07.858428
| 2023-08-15T10:19:19
| 2023-08-15T10:19:19
| 227,658,982
| 1,176
| 230
|
Apache-2.0
| 2023-08-27T04:45:36
| 2019-12-12T17:17:45
|
C++
|
UTF-8
|
Python
| false
| false
| 8,070
|
py
|
SampleTorchSegmentation.py
|
#
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
import cv2
import numpy as np
import torch
import torchvision
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import PyNvCodec as nvc
try:
import PytorchNvCodec as pnvc
except ImportError as err:
raise (f"""Could not import `PytorchNvCodec`: {err}.
Please make sure it is installed! Run
`pip install git+https://github.com/NVIDIA/VideoProcessingFramework#subdirectory=src/PytorchNvCodec` or
`pip install src/PytorchNvCodec` if using a local copy of the VideoProcessingFramework repository""") # noqa
coco_names = [
"__background__",
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"N/A",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"N/A",
"backpack",
"umbrella",
"N/A",
"N/A",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"N/A",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted plant",
"bed",
"N/A",
"dining table",
"N/A",
"N/A",
"toilet",
"N/A",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"N/A",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
]
def tensor_to_mat(img_tensor: torch.tensor):
"""Convert planar RGB cuda float tensor to OpenCV uint8 rgb Mat"""
img_r = img_tensor[0].cpu().numpy()
img_g = img_tensor[1].cpu().numpy()
img_b = img_tensor[2].cpu().numpy()
img_rgb = np.empty((img_r.shape[0], img_r.shape[1], 3), "uint8")
img_rgb[..., 0] = img_r * 255
img_rgb[..., 1] = img_g * 255
img_rgb[..., 2] = img_b * 255
return img_rgb
COLORS = np.random.uniform(0, 255, size=(len(coco_names), 3))
def draw_boxes(boxes, classes, labels, image):
"""
Draws the bounding box around a detected object.
"""
out_image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
for i, box in enumerate(boxes):
color = COLORS[labels[i]]
cv2.rectangle(
out_image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), color, 2
)
cv2.putText(
out_image,
classes[i],
(int(box[0]), int(box[1] + 15)),
cv2.FONT_HERSHEY_SIMPLEX,
0.8,
color,
2,
lineType=cv2.LINE_AA,
)
return out_image
def run_inference_on_video(gpu_id: int, input_video: str):
    # Init the SSD300 VGG16 detection model
model = torchvision.models.detection.ssd300_vgg16(pretrained=True)
model.eval()
model.to("cuda")
# Init HW decoder
nvDec = nvc.PyNvDecoder(input_video, gpu_id)
# NN expects images to be 3 channel planar RGB.
# No requirements for input image resolution, it will be rescaled internally.
target_w, target_h = nvDec.Width(), nvDec.Height()
    # Converter from NV12, which is Nvdec's native pixel format.
to_rgb = nvc.PySurfaceConverter(
target_w, target_h, nvc.PixelFormat.NV12, nvc.PixelFormat.RGB, gpu_id
)
# Converter from RGB to planar RGB because that's the way
    # pytorch likes to store the data in its tensors.
to_pln = nvc.PySurfaceConverter(
target_w, target_h, nvc.PixelFormat.RGB, nvc.PixelFormat.RGB_PLANAR, gpu_id
)
# Use bt709 and jpeg just for illustration purposes.
cc_ctx = nvc.ColorspaceConversionContext(nvc.ColorSpace.BT_709, nvc.ColorRange.JPEG)
# Decoding cycle + inference on video frames.
while True:
# Decode 1 compressed video frame to CUDA memory.
nv12_surface = nvDec.DecodeSingleSurface()
if nv12_surface.Empty():
print("Can not decode frame")
break
# Convert NV12 > RGB.
rgb24_small = to_rgb.Execute(nv12_surface, cc_ctx)
if rgb24_small.Empty():
print("Can not convert nv12 -> rgb")
break
# Convert RGB > planar RGB.
rgb24_planar = to_pln.Execute(rgb24_small, cc_ctx)
if rgb24_planar.Empty():
print("Can not convert rgb -> rgb planar")
break
# Export to PyTorch tensor.
surf_plane = rgb24_planar.PlanePtr()
img_tensor = pnvc.makefromDevicePtrUint8(
surf_plane.GpuMem(),
surf_plane.Width(),
surf_plane.Height(),
surf_plane.Pitch(),
surf_plane.ElemSize(),
)
# This step is essential because rgb24_planar.PlanePtr() returns a simple
# 2D CUDA pitched memory allocation. Here we convert it the way
        # pytorch expects its tensor data to be arranged.
img_tensor.resize_(3, target_h, target_w)
img_tensor = img_tensor.type(dtype=torch.cuda.FloatTensor)
img_tensor = torch.divide(img_tensor, 255.0)
data_transforms = torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
surface_tensor = data_transforms(img_tensor)
input_batch = surface_tensor.unsqueeze(0).to("cuda")
# Run inference.
with torch.no_grad():
outputs = model(input_batch)
        # Collect detection results.
pred_classes = [coco_names[i] for i in outputs[0]["labels"].cpu().numpy()]
pred_scores = outputs[0]["scores"].detach().cpu().numpy()
pred_bboxes = outputs[0]["boxes"].detach().cpu().numpy()
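        # Keep only detections with a confidence score of at least 0.5.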
boxes = pred_bboxes[pred_scores >= 0.5].astype(np.int32)
# Convert tensor to OpenCV Mat, draw labels and boxes.
img_rgb = tensor_to_mat(img_tensor)
image = draw_boxes(boxes, pred_classes, outputs[0]["labels"], img_rgb)
# Show in GUI.
cv2.imshow("Decode image", image)
k = cv2.waitKey(1000 // 30)
if k == 27:
print("ESC")
cv2.destroyAllWindows()
break
if cv2.getWindowProperty("Decode image", cv2.WND_PROP_VISIBLE) == -1:
break
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Provide gpu ID, paths to input video file.")
exit
gpu_id = int(sys.argv[1])
input_video = sys.argv[2]
run_inference_on_video(gpu_id, input_video)
|
581f7fda1189d0eb4b244b03ad7da74beacb3bf5
|
8d402df39c18eba7e1c86c762f205c944357c5df
|
/www/tests/modtest/__init__.py
|
3df49c14d2e843712cab8dda5a2bc5338b5cc67c
|
[
"BSD-3-Clause"
] |
permissive
|
brython-dev/brython
|
87cc023e25550dec9ce459ba68774189f33712b6
|
b33958bff0e8c7a280babc30232dc389a2500a7a
|
refs/heads/master
| 2023-09-04T04:49:29.156209
| 2023-09-01T06:36:08
| 2023-09-01T06:36:08
| 24,046,239
| 6,569
| 625
|
BSD-3-Clause
| 2023-07-05T06:13:32
| 2014-09-15T06:58:21
|
Python
|
UTF-8
|
Python
| false
| false
| 73
|
py
|
__init__.py
|
from . import hello
from . import bye
print('main modtest was imported')
|
ab055fd3e3a0d05e823c1026016ba05ceceb3408
|
0eb78414767c4dd1d49127f44b1204abe08115a6
|
/inst2vec/inst2vec_embedding.py
|
155d697450bda2329f9523ef44a3aa7c0afaad20
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
spcl/ncc
|
9b688bb80d247d6b718fda2bec3f07e2257a1459
|
45c3619ba193585579c7feedbe046bac3ff9e7b5
|
refs/heads/master
| 2023-08-29T00:35:57.669037
| 2021-08-12T22:21:37
| 2021-08-12T22:21:37
| 138,141,023
| 201
| 60
|
BSD-3-Clause
| 2023-08-14T22:15:12
| 2018-06-21T08:20:41
|
Python
|
UTF-8
|
Python
| false
| false
| 28,250
|
py
|
inst2vec_embedding.py
|
# NCC: Neural Code Comprehension
# https://github.com/spcl/ncc
# Copyright 2018 ETH Zurich
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==============================================================================
"""inst2vec embedding training"""
from inst2vec import inst2vec_evaluate as i2v_eval
from inst2vec import inst2vec_appflags
from inst2vec import inst2vec_utils as i2v_utils
import numpy as np
import pickle
import os
import subprocess
import datetime
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow.python.client import timeline
from datetime import datetime
import random
import sys
import math
from absl import flags
FLAGS = flags.FLAGS
########################################################################################################################
# Reading, writing and dumping files
########################################################################################################################
def get_data_pair_files(folders, context_width):
"""
Given a data set composed of several raw folders, return a list of the files containing the binary records
:param folders: list of sub-folders containing pre-processed LLVM IR code
:param context_width
:return:
"""
assert len(folders) > 1, "Expected combineable dataset"
data_pairs_strings_filenames = list()
for folder in folders:
folder_dataset = folder + '_dataset' + '_cw_' + str(context_width)
file = os.path.join(folder_dataset, 'data_pairs' + '_cw_' + str(context_width) + '.rec')
assert os.path.exists(file), 'File ' + file + ' does not exist'
data_pairs_strings_filenames.append(file)
# Return
return data_pairs_strings_filenames
def record_parser(record):
"""
Read the bytes of a string as a vector of numbers
:return pair of integers (target-context indices)
"""
return tf.decode_raw(record, tf.int32)
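# Illustration added for clarity (an assumption about the on-disk format, inferred from
# record_bytes=8 in the reader below and the "filesize_bytes / 2 (pairs) / 4 (32-bit integers)"
# comment further down): each fixed-length record holds two int32 values in native byte
# order, forming a (target, context) pair of statement indices, e.g.
#   np.frombuffer(b"\x05\x00\x00\x00\x11\x00\x00\x00", dtype=np.int32)  # -> array([ 5, 17])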
########################################################################################################################
# Helper functions for training and evaluation
########################################################################################################################
def print_neighbors(op, examples, top_k, reverse_dictionary):
"""
Print the nearest neighbours of certain statements
:param op: "nearest-neighbour" tensorflow operation
:param examples: list of statements indices to evaluate on
:param top_k: number of nearest neighbours to print
:param reverse_dictionary: [keys=statement index, values=statement]
"""
# compute cosine similarity
sim = op.eval()
# search for nearest neighbor and print
for i, ex in enumerate(examples):
valid_word = reverse_dictionary[ex] # get dictionary index
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:\n ' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s\n ' % (log_str, close_word)
print(log_str)
########################################################################################################################
# Training embeddings
########################################################################################################################
def train_skip_gram(V, data_folder, data_folders, dataset_size, reverse_dictionary,
param, valid_examples, log_dir, vocab_metada_file, embeddings_pickle,
ckpt_saver_file, ckpt_saver_file_init, ckpt_saver_file_final,
restore_variables):
"""
Train embeddings (Skip-Gram model)
:param V: vocabulary size
:param data_folder: string containing the path to the parent directory of raw data sub-folders
:param data_folders: list of sub-folders containing pre-processed LLVM IR code
:param dataset_size: number of data pairs in total in the training data set
:param reverse_dictionary: [keys=statement index, values=statement]
:param param: parameters of the inst2vec training
:param valid_examples: statements to be used as validation examples (list of indices)
:param log_dir: logging directory for Tensorboard output
:param vocab_metada_file: vocabulary metadata file for Tensorboard
:param embeddings_pickle: file in which to pickle embeddings
:param ckpt_saver_file: checkpoint saver file (intermediate states of training)
:param ckpt_saver_file_init: checkpoint saver file (initial state of training)
:param ckpt_saver_file_final: checkpoint saver file (final state of training)
:param restore_variables: boolean: whether to restore variables from a previous training
:return: embeddings matrix
"""
####################################################################################################################
# Extract parameters from dictionary "param"
N = param['embedding_size']
mini_batch_size = param['mini_batch_size']
num_sampled = param['num_sampled']
num_epochs = param['num_epochs']
learning_rate = param['learning_rate']
l2_reg_scale = param['beta']
freq_print_loss = param['freq_print_loss']
step_print_neighbors = param['step_print_neighbors']
context_width = param['context_width']
####################################################################################################################
# Set up for analogies
analogies, analogy_types, n_questions_total, n_questions_relevant = i2v_eval.load_analogies(data_folder)
folder_evaluation = embeddings_pickle.replace('.p', '') + 'eval'
if not os.path.exists(folder_evaluation):
os.makedirs(folder_evaluation)
analogy_evaluation_file = os.path.join(folder_evaluation, "analogy_results")
config = None
options = None
metadata = None
if FLAGS.profile:
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
metadata = tf.RunMetadata()
if FLAGS.xla:
config = tf.ConfigProto()
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
####################################################################################################################
# Read data using Tensorflow's data API
data_files = get_data_pair_files(data_folders, context_width)
print('\ttraining with data from files:', data_files)
with tf.name_scope("Reader") as scope:
random.shuffle(data_files)
dataset_raw = tf.data.FixedLengthRecordDataset(filenames=data_files,
record_bytes=8) # <TFRecordDataset shapes: (), types: tf.string>
dataset = dataset_raw.map(record_parser)
dataset = dataset.shuffle(int(1e5))
dataset_batched = dataset.apply(tf.contrib.data.batch_and_drop_remainder(mini_batch_size))
dataset_batched = dataset_batched.prefetch(int(100000000))
iterator = dataset_batched.make_initializable_iterator()
saveable_iterator = tf.contrib.data.make_saveable_from_iterator(iterator)
next_batch = iterator.get_next() # Tensor("Shape:0", shape=(2,), dtype=int32)
####################################################################################################################
# Tensorflow computational graph
# Placeholders for inputs
with tf.name_scope("Input_Data") as scope:
train_inputs = next_batch[:, 0]
train_labels = tf.reshape(next_batch[:, 1], shape=[mini_batch_size, 1], name="training_labels")
# (input) Embedding matrix
with tf.name_scope("Input_Layer") as scope:
W_in = tf.Variable(tf.random_uniform([V, N], -1.0, 1.0), name="input-embeddings")
# Look up the vector representing each source word in the batch (fetches rows of the embedding matrix)
h = tf.nn.embedding_lookup(W_in, train_inputs, name="input_embedding_vectors")
# Normalized embedding matrix
with tf.name_scope("Embeddings_Normalized") as scope:
normalized_embeddings = tf.nn.l2_normalize(W_in, name="embeddings_normalized")
# (output) Embedding matrix ("output weights")
with tf.name_scope("Output_Layer") as scope:
if FLAGS.softmax:
W_out = tf.Variable(tf.truncated_normal([N, V], stddev=1.0 / math.sqrt(N)), name="output_embeddings")
else:
W_out = tf.Variable(tf.truncated_normal([V, N], stddev=1.0 / math.sqrt(N)), name="output_embeddings")
# Biases between hidden layer and output layer
b_out = tf.Variable(tf.zeros([V]), name="nce_bias")
# Optimization
with tf.name_scope("Optimization_Block") as scope:
# Loss function
if FLAGS.softmax:
logits = tf.layers.dense(inputs=h, units=V)
onehot = tf.one_hot(train_labels, V)
loss_tensor = tf.nn.softmax_cross_entropy_with_logits_v2(labels=onehot, logits=logits)
else:
loss_tensor = tf.nn.nce_loss(weights=W_out,
biases=b_out,
labels=train_labels,
inputs=h,
num_sampled=num_sampled,
num_classes=V)
train_loss = tf.reduce_mean(loss_tensor, name="nce_loss")
# Regularization (optional)
if l2_reg_scale > 0:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, W_in)
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, W_out)
regularizer = tf.contrib.layers.l2_regularizer(l2_reg_scale)
reg_variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
reg_term = tf.contrib.layers.apply_regularization(regularizer, reg_variables)
loss = train_loss + reg_term
else:
loss = train_loss
# Optimizer
if FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
elif FLAGS.optimizer == 'nadam':
optimizer = tf.contrib.opt.NadamOptimizer(learning_rate=learning_rate).minimize(loss)
elif FLAGS.optimizer == 'momentum':
global_train_step = tf.Variable(0, trainable=False, dtype=tf.int32, name="global_step")
# Passing global_step to minimize() will increment it at each step.
optimizer = (
tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(loss, global_step=global_train_step)
)
else:
raise ValueError('Unrecognized optimizer ' + FLAGS.optimizer)
if FLAGS.optimizer != 'momentum':
global_train_step = tf.Variable(0, trainable=False, dtype=tf.int32, name="global_step")
####################################################################################################################
# Validation block
with tf.name_scope("Validation_Block") as scope:
valid_dataset = tf.constant(valid_examples, dtype=tf.int32, name="validation_data_size")
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
cosine_similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
####################################################################################################################
# Summaries
with tf.name_scope("Summaries") as scope:
tf.summary.histogram("input_embeddings", W_in)
tf.summary.histogram("input_embeddings_normalized", normalized_embeddings)
tf.summary.histogram("output_embeddings", W_out)
tf.summary.scalar("nce_loss", loss)
analogy_score_tensor = tf.Variable(0, trainable=False, dtype=tf.int32, name="analogy_score")
tf.summary.scalar("analogy_score", analogy_score_tensor)
####################################################################################################################
# Misc.
restore_completed = False
init = tf.global_variables_initializer() # variables initializer
summary_op = tf.summary.merge_all() # merge summaries into one operation
####################################################################################################################
# Training
with tf.Session(config=config) as sess:
# Add TensorBoard components
writer = tf.summary.FileWriter(log_dir) # create summary writer
writer.add_graph(sess.graph)
gvars = [gvar for gvar in tf.global_variables() if 'analogy_score' not in gvar.name]
saver = tf.train.Saver(gvars, max_to_keep=5) # create checkpoint saver
config = projector.ProjectorConfig() # create projector config
embedding = config.embeddings.add() # add embeddings visualizer
embedding.tensor_name = W_in.name
embedding.metadata_path = vocab_metada_file # link metadata
projector.visualize_embeddings(writer, config) # add writer and config to projector
# Set up variables
if restore_variables: # restore variables from disk
restore_file = tf.train.latest_checkpoint(log_dir)
assert restore_file is not None, "No restore file found in folder " + log_dir
assert os.path.exists(restore_file + ".index"), \
"Trying to restore Tensorflow session from non-existing file: " + restore_file + ".index"
init.run()
saver.restore(sess, restore_file)
print("\tVariables restored from file", ckpt_saver_file, "in TensorFlow ")
else: # save the computational graph to file and initialize variables
graph_saver = tf.train.Saver(allow_empty=True)
init.run()
graph_saver.save(sess, ckpt_saver_file_init, global_step=0, write_meta_graph=True)
tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable_iterator)
print("\tVariables initialized in TensorFlow")
# Compute the necessary number of steps for this epoch as well as how often to print the avg loss
num_steps = int(math.ceil(dataset_size / mini_batch_size))
step_print_loss = int(math.ceil(num_steps / freq_print_loss))
print('\tPrinting loss every ', step_print_loss, 'steps, i.e.', freq_print_loss, 'times per epoch')
################################################################################################################
# Epoch loop
epoch = 0
global_step = 0
while epoch < int(num_epochs):
print('\n\tStarting epoch ', epoch)
sess.run(iterator.initializer) # initialize iterator
# If restoring a previous training session, set the right training epoch
if restore_variables and not restore_completed:
epoch = int(math.floor(global_train_step.eval() / (dataset_size / mini_batch_size)))
global_step = global_train_step.eval()
print('Starting from epoch', epoch)
############################################################################################################
# Loop over steps (mini batches) inside of epoch
step = 0
avg_loss = 0
while True:
try:
# Print average loss every x steps
if step_print_loss > 0 and step % int(step_print_loss) == 0: # update step with logging
# If restoring a previous training session, set the right training epoch
if restore_variables and not restore_completed:
restore_completed = True
# Write global step
if FLAGS.optimizer != 'momentum':
global_train_step.assign(global_step).eval()
# Perform an update
# print('\tStarting local step {:>6}'.format(step)) # un-comment for debugging
[_, loss_val, train_loss_val, global_step] = sess.run(
[optimizer, loss, train_loss, global_train_step], options=options,
run_metadata=metadata)
assert not np.isnan(loss_val), "Loss at step " + str(step) + " is nan"
assert not np.isinf(loss_val), "Loss at step " + str(step) + " is inf"
avg_loss += loss_val
if step > 0:
avg_loss /= step_print_loss
analogy_score = i2v_eval.evaluate_analogies(W_in.eval(), reverse_dictionary, analogies,
analogy_types, analogy_evaluation_file,
session=sess, print=i2v_eval.nop)
total_analogy_score = sum([a[0] for a in analogy_score])
analogy_score_tensor.assign(total_analogy_score).eval() # for tf.summary
[summary, W_in_val] = sess.run([summary_op, W_in])
if FLAGS.savebest is not None:
filelist = [f for f in os.listdir(FLAGS.savebest)]
scorelist = [int(s.split('-')[1]) for s in filelist]
if len(scorelist) == 0 or total_analogy_score > sorted(scorelist)[-1]:
i2v_utils.safe_pickle(W_in_val, FLAGS.savebest + '/' + 'score-' +
str(total_analogy_score) + '-w.p')
# Display average loss
print('{} Avg. loss at epoch {:>6,d}, step {:>12,d} of {:>12,d}, global step {:>15} : {:>12.3f}, analogies: {})'.format(
str(datetime.now()), epoch, step, num_steps, global_step, avg_loss, str(analogy_score)))
avg_loss = 0
# Pickle intermediate embeddings
i2v_utils.safe_pickle(W_in_val, embeddings_pickle)
# Write to TensorBoard
saver.save(sess, ckpt_saver_file, global_step=global_step, write_meta_graph=False)
writer.add_summary(summary, global_step=global_step)
if FLAGS.profile:
fetched_timeline = timeline.Timeline(metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
with open('timeline_step_%d.json' % step, 'w') as f:
f.write(chrome_trace)
if step > 0 and FLAGS.extreme:
sys.exit(22)
else: # ordinary update step
[_, loss_val] = sess.run([optimizer, loss])
avg_loss += loss_val
# Compute and print nearest neighbors every x steps
if step_print_neighbors > 0 and step % int(step_print_neighbors) == 0:
print_neighbors(op=cosine_similarity, examples=valid_examples, top_k=6,
reverse_dictionary=reverse_dictionary)
# Update loop index (steps in epoch)
step += 1
global_step += 1
except tf.errors.OutOfRangeError:
# We reached the end of the epoch
print('\n\t Writing embeddings to file ', embeddings_pickle)
i2v_utils.safe_pickle([W_in.eval()], embeddings_pickle) # WEIRD!
epoch += 1 # update loop index (epochs)
break # from this inner loop
################################################################################################################
# End of training:
# Print the nearest neighbors at the end of the run
if step_print_neighbors == -1:
print_neighbors(op=cosine_similarity, examples=valid_examples, top_k=6,
reverse_dictionary=reverse_dictionary)
# Save state of training and close the TensorBoard summary writer
save_path = saver.save(sess, ckpt_saver_file_final, global_step)
writer.add_summary(summary, global_step)
writer.close()
return W_in.eval()
########################################################################################################################
# Main function for embedding training workflow
########################################################################################################################
def train_embeddings(data_folder, data_folders):
"""
Main function for embedding training workflow
:param data_folder: string containing the path to the parent directory of raw data sub-folders
:param data_folders: list of sub-folders containing pre-processed LLVM IR code
:return embedding matrix
Folders produced:
data_folder/FLAGS.embeddings_folder/emb_cw_X_embeddings
data_folder/FLAGS.embeddings_folder/emb_cw_X_train
"""
# Get flag values
restore_tf_variables_from_ckpt = FLAGS.restore
context_width = FLAGS.context_width
outfolder = FLAGS.embeddings_folder
param = {k: FLAGS[k].value for k in FLAGS}
# Set file signature
file_signature = i2v_utils.set_file_signature(param, data_folder)
# Print model parameters
out_ = '\n--- Data files: '
print(out_)
out = out_ + '\n'
num_data_pairs = 0
data_pair_files = get_data_pair_files(data_folders, context_width)
for data_pair_file in data_pair_files:
filesize_bytes = os.path.getsize(data_pair_file) # num pairs = filesize_bytes / 2 (pairs) / 4 (32-bit integers)
file_pairs = int(filesize_bytes / 8)
num_data_pairs += file_pairs
out_ = '\t{:<60}: {:>12,d} pairs'.format(data_pair_file, file_pairs)
print(out_)
out += out_ + '\n'
out_ = '\t{:<60}: {:>12,d} pairs'.format('total', num_data_pairs)
print(out_)
out += out_ + '\n'
# Get dictionary and vocabulary
print('\n\tGetting dictionary ...')
folder_vocabulary = os.path.join(data_folder, 'vocabulary')
dictionary_pickle = os.path.join(folder_vocabulary, 'dic_pickle')
with open(dictionary_pickle, 'rb') as f:
dictionary = pickle.load(f)
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
del dictionary
vocabulary_size = len(reverse_dictionary.keys())
# Print Skip-Gram model parameters
out_ = '\n--- Skip Gram model parameters'
print(out_)
out += out_ + '\n'
out_ = '\tData folder : {:<}'.format(data_folder)
print(out_)
out += out_ + '\n'
out_ = '\tNumber of data pairs : {:>15,d}'.format(num_data_pairs)
print(out_)
out += out_ + '\n'
out_ = '\tVocabulary size : {:>15,d}'.format(vocabulary_size)
print(out_)
out += out_ + '\n'
out_ = '\tEmbedding size : {:>15,d}'.format(param['embedding_size'])
print(out_)
out += out_ + '\n'
out_ = '\tContext width : {:>15,d}'.format(param['context_width'])
print(out_)
out += out_ + '\n'
out_ = '\tMini-batch size : {:>15,d}'.format(param['mini_batch_size'])
print(out_)
out += out_ + '\n'
out_ = '\tNegative samples in NCE : {:>15,d}'.format(param['num_sampled'])
print(out_)
out += out_ + '\n'
out_ = '\tL2 regularization scale : {:>15,e}'.format(param['beta'])
print(out_)
out += out_ + '\n'
out_ = '\tNumber of epochs : {:>15,d}'.format(param['num_epochs'])
print(out_)
out += out_ + '\n'
out_ = '\tRestoring a prev. train : {}'.format(restore_tf_variables_from_ckpt)
print(out_)
out += out_ + '\n'
# Print training information to file
log_dir_ = os.path.join(outfolder, 'emb_cw_' + str(context_width) + '_train/')
log_dir = os.path.join(log_dir_, file_signature[1:])
if not os.path.exists(log_dir):
os.makedirs(log_dir)
train_info_file = os.path.join(log_dir, 'training_info.txt')
with open(train_info_file, 'w') as f:
f.write(out)
# Validation set used to sample nearest neighbors
# Limit to the words that have a low numeric ID,
# which by construction are also the most frequent.
valid_size = 30 # Random set of words to evaluate similarity on.
valid_window = 50 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
# Copy metadata file into TensorBoard folder
vocab_metada_file_ = os.path.join(folder_vocabulary, 'vocabulary_metadata_for_tboard')
v_metadata_file_name = 'vocab_metada_' + file_signature
vocab_metada_file = os.path.join(log_dir, v_metadata_file_name)
ckpt_saver_file = os.path.join(log_dir, "inst2vec.ckpt")
ckpt_saver_file_init = os.path.join(log_dir, "inst2vec-init.ckpt")
ckpt_saver_file_final = os.path.join(log_dir, "inst2vec-final.ckpt")
os.makedirs(os.path.dirname(vocab_metada_file), exist_ok=True)
subprocess.call('cp ' + vocab_metada_file_ + ' ' + vocab_metada_file, shell=True)
# Train the embeddings (Skip-Gram model)
print('\n--- Setup completed, starting to train the embeddings')
folder_embeddings = os.path.join(outfolder, 'emb_cw_' + str(context_width) + '_embeddings')
if not os.path.exists(folder_embeddings):
os.makedirs(folder_embeddings)
embeddings_pickle = os.path.join(folder_embeddings, "emb_" + file_signature + ".p")
embeddings = train_skip_gram(vocabulary_size, data_folder, data_folders, num_data_pairs, reverse_dictionary,
param, valid_examples, log_dir, v_metadata_file_name, embeddings_pickle,
ckpt_saver_file, ckpt_saver_file_init, ckpt_saver_file_final,
restore_tf_variables_from_ckpt)
# Save the embeddings and dictionaries in an external file to be reused later
print('\n\tWriting embeddings to file', embeddings_pickle)
i2v_utils.safe_pickle(embeddings, embeddings_pickle)
# Write the embeddings to CSV file
embeddings_csv = os.path.join(folder_embeddings, "emb_" + file_signature + ".csv")
print('\t Writing embeddings to file ', embeddings_csv)
np.savetxt(embeddings_csv, embeddings, delimiter=',',
header='Embeddings matrix, rows correspond to the embedding vector of statements')
return embeddings, embeddings_pickle
|
d5512c83d6ca2271294bdd97732694ca2c51a933
|
e00cf9d99e5ec3d5eb6dc991989068a1a9dc5d23
|
/daemon/core/scripts/player.py
|
07728939cf540f1b51d6ade7b1ab3d9bd1c4df75
|
[
"BSD-2-Clause"
] |
permissive
|
coreemu/core
|
dee8e2b34aa596654b373733ea0a3e29c69b79d9
|
20071eed2e73a2287aa385698dd604f4933ae7ff
|
refs/heads/master
| 2023-08-10T13:59:51.420075
| 2023-08-01T17:18:11
| 2023-08-01T17:18:11
| 39,257,883
| 606
| 201
|
BSD-2-Clause
| 2023-08-17T01:41:48
| 2015-07-17T14:23:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,212
|
py
|
player.py
|
import argparse
import logging
import sys
from pathlib import Path
from core.player import CorePlayer
logger = logging.getLogger(__name__)
def path_type(value: str) -> Path:
file_path = Path(value)
if not file_path.is_file():
raise argparse.ArgumentTypeError(f"file does not exist: {value}")
return file_path
def parse_args() -> argparse.Namespace:
"""
Setup and parse command line arguments.
:return: parsed arguments
"""
parser = argparse.ArgumentParser(
description="core player runs files that can move nodes and send commands",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"-f", "--file", required=True, type=path_type, help="core file to play"
)
parser.add_argument(
"-s",
"--session",
type=int,
help="session to play to, first found session otherwise",
)
return parser.parse_args()
def main() -> None:
logging.basicConfig(level=logging.INFO)
args = parse_args()
player = CorePlayer(args.file)
result = player.init(args.session)
if not result:
sys.exit(1)
player.start()
if __name__ == "__main__":
main()
|
e62e541f689f6fe8c222b572b1a19f0ebd235d10
|
e22fd36933c9114a9df1694e7a6274bf059de2a6
|
/selfdrive/controls/lib/tests/test_alertmanager.py
|
dbd42858a0797742bfc6c30a3b6cd3e95501554d
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
commaai/openpilot
|
66dfb7f31290bc8f58c9ead95d56697a52b45afb
|
a0b49d54222c52ff0112c402bc0e0d9262e77a66
|
refs/heads/master
| 2023-09-05T21:34:14.076796
| 2023-09-05T21:15:18
| 2023-09-05T21:15:18
| 74,627,617
| 46,071
| 9,878
|
MIT
| 2023-09-14T21:51:23
| 2016-11-24T01:33:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,423
|
py
|
test_alertmanager.py
|
#!/usr/bin/env python3
import random
import unittest
from openpilot.selfdrive.controls.lib.events import Alert, EVENTS
from openpilot.selfdrive.controls.lib.alertmanager import AlertManager
class TestAlertManager(unittest.TestCase):
def test_duration(self):
"""
Enforce that an alert lasts for max(alert duration, duration the alert is added)
"""
for duration in range(1, 100):
alert = None
while not isinstance(alert, Alert):
event = random.choice([e for e in EVENTS.values() if len(e)])
alert = random.choice(list(event.values()))
alert.duration = duration
# check two cases:
# - alert is added to AM for <= the alert's duration
# - alert is added to AM for > alert's duration
for greater in (True, False):
if greater:
add_duration = duration + random.randint(1, 10)
else:
add_duration = random.randint(1, duration)
show_duration = max(duration, add_duration)
AM = AlertManager()
for frame in range(duration+10):
if frame < add_duration:
AM.add_many(frame, [alert, ])
current_alert = AM.process_alerts(frame, {})
shown = current_alert is not None
should_show = frame <= show_duration
self.assertEqual(shown, should_show, msg=f"{frame=} {add_duration=} {duration=}")
if __name__ == "__main__":
unittest.main()
|
fcc4619e3cee862d71c8e34ee87f968eeeae70db
|
5db0fab37c2b8a618d85d3b60fab9f806c416474
|
/src/python/pants/backend/cue/goals/fix.py
|
20d35139e8bbe203a21b1115fe2f4b57505e94e9
|
[
"Apache-2.0"
] |
permissive
|
pantsbuild/pants
|
4988d1ac5474ec95f94ce2218aeb759401e4b011
|
98cbda8545f0d58c586ed2daa76fefd729d5e0d5
|
refs/heads/main
| 2023-09-05T03:44:17.646899
| 2023-09-01T19:52:09
| 2023-09-01T19:52:09
| 7,209,075
| 2,708
| 593
|
Apache-2.0
| 2023-09-14T19:33:33
| 2012-12-17T17:39:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,215
|
py
|
fix.py
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from pants.backend.cue.rules import _run_cue
from pants.backend.cue.subsystem import Cue
from pants.backend.cue.target_types import CueFieldSet
from pants.core.goals.fmt import FmtResult, FmtTargetsRequest
from pants.core.util_rules.partitions import PartitionerType
from pants.engine.platform import Platform
from pants.engine.rules import collect_rules, rule
from pants.util.logging import LogLevel
class CueFmtRequest(FmtTargetsRequest):
field_set_type = CueFieldSet
tool_subsystem = Cue
partitioner_type = PartitionerType.DEFAULT_SINGLE_PARTITION
@rule(desc="Format with cue", level=LogLevel.DEBUG)
async def run_cue_fmt(request: CueFmtRequest.Batch, cue: Cue, platform: Platform) -> FmtResult:
process_result = await _run_cue(
"fmt",
cue=cue,
snapshot=request.snapshot,
platform=platform,
output_files=request.snapshot.files,
)
return await FmtResult.create(request, process_result)
def rules():
return [
*collect_rules(),
*CueFmtRequest.rules(),
]
|
1f69a9a1e7bd398a65fb9e42bf051b83926c9a60
|
2ac03b8c24df220ea32ea525e1d65aeb294cd1a4
|
/custom_components/waste_collection_schedule/waste_collection_schedule/source/croydon_gov_uk.py
|
fdcabb75b35a7b7667df6a90048360cc9c202fe3
|
[
"MIT"
] |
permissive
|
mampfes/hacs_waste_collection_schedule
|
a7b98319a7483dedc8cf78b724f93932934c1702
|
1dc9476efef9963a141b9ac987e2708224b9eaaf
|
refs/heads/master
| 2023-08-16T21:14:46.088962
| 2023-08-16T10:05:24
| 2023-08-16T10:05:24
| 254,347,436
| 495
| 428
|
MIT
| 2023-09-12T18:59:07
| 2020-04-09T11:02:16
|
Python
|
UTF-8
|
Python
| false
| false
| 8,467
|
py
|
croydon_gov_uk.py
|
# Credit where it's due:
# This is predominantly a refactoring of the Croydon Council script from the UKBinCollectionData repo
# https://github.com/robbrad/UKBinCollectionData
import json
import re
import requests
from bs4 import BeautifulSoup
from datetime import datetime
from waste_collection_schedule import Collection # type: ignore[attr-defined]
TITLE = "Croydon Council"
DESCRIPTION = "Source for croydon.gov.uk services for Croydon Council, UK."
URL = "https://croydon.gov.uk"
# Website stops responding if repeated queries are made in quick succession.
# Shouldn't be an issue in normal use where 1 query/day is made, but repeated HA restarts might cause the query to fail.
# When testing, it may be worth testing them individually by commenting out two of the test cases.
TEST_CASES = {
"Test_001": {"postcode": "CR0 6LN", "houseID": "64 Coniston Road"},
"Test_002": {"postcode": "SE25 5BU", "houseID": "23B Howard Road"},
"Test_003": {"postcode": "CR0 6EG", "houseID": "48 Exeter Road"},
}
ICON_MAP = {
"Food waste": "mdi:food",
"General rubbish": "mdi:trash-can",
"Paper and card recycling": "mdi:newspaper",
"Glass, plastics, cans and cartons recycling": "mdi:bottle-wine",
}
API_URLS = {
"BASE": "https://service.croydon.gov.uk",
"CSRF": "/wasteservices/w/webpage/bin-day-enter-address",
"SEARCH": "/wasteservices/w/webpage/bin-day-enter-address?webpage_subpage_id=PAG0000898EECEC1&webpage_token=faab02e1f62a58f7bad4c2ae5b8622e19846b97dde2a76f546c4bb1230cee044&widget_action=fragment_action",
"SCHEDULE": "/wasteservices/w/webpage/bin-day-enter-address?webpage_subpage_id=PAG0000898EECEC1&webpage_token=faab02e1f62a58f7bad4c2ae5b8622e19846b97dde2a76f546c4bb1230cee044",
}
HEADER_COMPONENTS = {
"BASE": {
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
"Cache-Control": "max-age=0",
"Connection": "keep-alive",
"Host": "service.croydon.gov.uk",
"Origin": API_URLS["BASE"],
"sec-ch-ua": '"Not_A Brand";v="99", "Google Chrome";v="109", "Chromium";v="109"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "Windows",
"Sec-Fetch-Dest": "document",
"Sec-Fetch-User": "?1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
},
"GET": {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-Mode": "none",
},
"POST": {
"Accept": "application/json, text/javascript, */*; q=0.01",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Mode": "same-origin",
"X-Requested-With": "XMLHttpRequest",
}
}
SESSION_STORAGE = {
"destination_stack": [
"w/webpage/bin-day-enter-address",
"w/webpage/your-bin-collection-details?context_record_id=86086077"
"&webpage_token=5c047b2c10b4aad66bef2054aac6bea52ad7a5e185ffdf7090b01f8ddc96728f",
"w/webpage/bin-day-enter-address",
"w/webpage/your-bin-collection-details?context_record_id=86085229"
"&webpage_token=cf1b8fd6213f4823277d98c1dd8a992e6ebef1fabc7d892714e5d9dade448c37",
"w/webpage/bin-day-enter-address",
"w/webpage/your-bin-collection-details?context_record_id=86084221"
"&webpage_token=7f52fb51019bf0e6bfe9647b1b31000124bd92a9d95781f1557f58b3ed40da52",
"w/webpage/bin-day-enter-address",
"w/webpage/your-bin-collection-details?context_record_id=86083209"
"&webpage_token=de50c265da927336f526d9d9a44947595c3aa38965aa8c495ac2fb73d272ece8",
"w/webpage/bin-day-enter-address",
],
"last_context_record_id": "86086077",
}
class Source:
def __init__(self, postcode, houseID):
self._postcode = str(postcode).upper()
self._houseID = str(houseID)
def fetch(self):
s = requests.Session()
### Get token
csrf_token = ""
url = API_URLS["BASE"] + API_URLS["CSRF"]
headers = {**HEADER_COMPONENTS["BASE"],**HEADER_COMPONENTS["GET"]}
r0 = s.get(url, headers=headers)
soup = BeautifulSoup(r0.text, features="html.parser")
app_body = soup.find("div", {"class": "app-body"})
script = app_body.find("script", {"type": "text/javascript"}).string
p = re.compile("var CSRF = ('|\")(.*?)('|\");")
m = p.search(script)
csrf_token = m.groups()[1]
# print(csrf_token)
### Use postcode and houseID to find address
addressID = "0"
url = API_URLS["BASE"] + API_URLS["SEARCH"]
headers = {**HEADER_COMPONENTS["BASE"], **HEADER_COMPONENTS["POST"],}
form_data = {
"code_action": "search",
"code_params": '{"search_item":"' + self._postcode + '","is_ss":true}',
"fragment_action": "handle_event",
"fragment_id": "PCF0020408EECEC1",
"fragment_collection_class": "formtable",
"fragment_collection_editable_values": '{"PCF0021449EECEC1":"1"}',
"_session_storage": json.dumps(
{
"/wasteservices/w/webpage/bin-day-enter-address": {},
"_global": SESSION_STORAGE,
}
),
"action_cell_id": "PCL0005629EECEC1",
"action_page_id": "PAG0000898EECEC1",
"form_check_ajax": csrf_token,
}
r1 = s.post(url, headers=headers, data=form_data)
addresses = json.loads(r1.text)["response"]["items"]
for address in addresses:
# print(address)
if self._houseID in str(address["address_single_line"]):
addressID = str(address["id"])
# print(addressID)
### Use addressID to get schedule
collection_data = ""
url = API_URLS["BASE"] + API_URLS["SCHEDULE"]
headers = {**HEADER_COMPONENTS["BASE"], **HEADER_COMPONENTS["POST"]}
form_data = {
"form_check": csrf_token,
"submitted_page_id": "PAG0000898EECEC1",
"submitted_widget_group_id": "PWG0002644EECEC1",
"submitted_widget_group_type": "modify",
"submission_token": "63e9126bacd815.12997577",
"payload[PAG0000898EECEC1][PWG0002644EECEC1][PCL0005629EECEC1][formtable]"
"[C_63e9126bacfb3][PCF0020408EECEC1]": addressID,
"payload[PAG0000898EECEC1][PWG0002644EECEC1][PCL0005629EECEC1][formtable]"
"[C_63e9126bacfb3][PCF0021449EECEC1]": "1",
"payload[PAG0000898EECEC1][PWG0002644EECEC1][PCL0005629EECEC1][formtable]"
"[C_63e9126bacfb3][PCF0020072EECEC1]": "Next",
"submit_fragment_id": "PCF0020072EECEC1",
"_session_storage": json.dumps({"_global": SESSION_STORAGE}),
"_update_page_content_request": 1,
"form_check_ajax": csrf_token,
}
r2 = s.post(url, headers=headers, data=form_data)
json_response = json.loads(r2.text)
url = API_URLS["BASE"] + json_response["redirect_url"]
headers = {**HEADER_COMPONENTS["BASE"], **HEADER_COMPONENTS["POST"]}
form_data = {
"_dummy": 1,
"_session_storage": json.dumps(
{"_global": SESSION_STORAGE}
),
"_update_page_content_request": 1,
"form_check_ajax": csrf_token,
}
r3 = s.post(url, headers=headers, data=form_data)
json_response = json.loads(r3.text)
collection_data = json_response["data"]
soup = BeautifulSoup(collection_data, features="html.parser")
schedule = soup.find_all("div", {"class": "listing_template_record"})
entries = []
for pickup in schedule:
waste_type = pickup.find_all("div", {"class": "fragment_presenter_template_show"})[0].text.strip()
waste_date = pickup.find("div", {"class": "bin-collection-next"}).attrs["data-current_value"].strip()
entries.append(
Collection(
date=datetime.strptime(waste_date, "%d/%m/%Y %H:%M").date(),
t=waste_type,
icon=ICON_MAP.get(waste_type),
)
)
return entries
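# Usage sketch (illustrative only; the values mirror Test_001 from TEST_CASES above, and
# attribute access on the returned Collection objects is omitted because that API is not
# shown in this snippet):
#   source = Source(postcode="CR0 6LN", houseID="64 Coniston Road")
#   print(source.fetch())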
|
dc0151ad829eb199571ed7cae0f1482b17a791a8
|
3ca67d69abd4e74b7145b340cdda65532f90053b
|
/programmers/난이도별/level04.징검다리/6047198844.py
|
15b036780b82cbec21421accbbb72c3a0820c357
|
[] |
no_license
|
DKU-STUDY/Algorithm
|
19549516984b52a1c5cd73e1ed1e58f774d6d30e
|
6f78efdbefd8eedab24e43d74c7dae7f95c2893b
|
refs/heads/master
| 2023-02-18T06:48:39.309641
| 2023-02-09T07:16:14
| 2023-02-09T07:16:14
| 258,455,710
| 175
| 49
| null | 2023-02-09T07:16:16
| 2020-04-24T08:42:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,591
|
py
|
6047198844.py
|
def solution(distance, rocks, n):
    answer = 0
    rocks.sort()
    # range of possible answers
    begin = 0
    end = distance
    # print(rocks)
    while begin <= end:
        # the target minimum gap we are testing
        mid = (begin + end) // 2
        # positions of the rocks currently kept
        stack = [0]
        for rock in rocks:
            # compare the gap between this rock and the last kept rock
            between = rock - stack[-1]
            if between < mid:
                # must be removed -> do not push it onto the stack
                continue
            stack.append(rock)
        # compute the gap between the last kept rock and the destination;
        # if that gap is smaller than the target gap, remove that rock as well
        last_distance = distance - stack[-1]
        if last_distance < mid:
            stack.pop()
        stack.append(distance)
        min_distance = distance
        # minimum gap between consecutive kept rocks
        for idx in range(1, len(stack)):
            min_distance = min(min_distance, stack[idx] - stack[idx - 1])
        # number of removed rocks (excluding the starting point)
        del_rock_n = len(rocks) - (len(stack) - 2)
        if del_rock_n <= n:
            # fewer than n rocks removed -> the gap is too small, so widen it
            # exactly n rocks removed -> update the answer
            answer = max(answer, min_distance)
            begin = mid + 1
        # more than n rocks removed -> the gap is too large, so narrow it
        else:
            end = mid - 1
    return answer
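# Usage sketch (the values below are assumed to be the sample case from the original
# problem statement and may need verification):
#   print(solution(25, [2, 14, 11, 21, 17], 2))  # expected: 4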
|
98dad1e8c7ef69677381bd055d79936c7af9c6ce
|
1822440a663916fc4b38ba09edb91f7b0d313c6f
|
/tests/test_decorations.py
|
5e7b04998187fe48557a93e4d09c5009672201d3
|
[
"BSD-2-Clause"
] |
permissive
|
herbstluftwm/herbstluftwm
|
09fb4318ed1a00bf96335a93cb7d3ebba58b3f2d
|
d6be49251ccaf0fed6edc30ee80017d40b5eeccf
|
refs/heads/master
| 2023-08-11T00:40:40.568941
| 2023-06-06T09:51:17
| 2023-06-06T09:51:17
| 24,884,178
| 1,167
| 131
|
NOASSERTION
| 2023-08-30T13:00:19
| 2014-10-07T09:27:20
|
C++
|
UTF-8
|
Python
| false
| false
| 21,560
|
py
|
test_decorations.py
|
from herbstluftwm.types import Point
from conftest import RawImage
from conftest import HlwmBridge
import itertools
import pytest
font_pool = [
'-*-fixed-medium-r-*-*-13-*-*-*-*-*-*-*',
'Dejavu Sans:pixelsize=14:bold'
]
@pytest.mark.parametrize("xvfb", [{'xrender': v} for v in [True, False]], indirect=True)
@pytest.mark.parametrize("hlwm_process", [{'transparency': v} for v in [True, False]], indirect=True)
def test_window_border_plain(hlwm, x11):
color = (0x9f, 0xbc, 0x12)
bw = 5 # border width
handle, _ = x11.create_client()
hlwm.attr.theme.color = RawImage.rgb2string(color)
hlwm.attr.theme.border_width = bw
img = x11.decoration_screenshot(handle)
assert img.pixel(0, 0) == color
expected_count = 2 * bw * img.width # horizontal border line
expected_count += 2 * bw * img.height # vertical border
expected_count -= 4 * bw * bw # we counted each twice
assert img.color_count(color) == expected_count
@pytest.mark.parametrize("xvfb", [{'xrender': v} for v in [True, False]], indirect=True)
@pytest.mark.parametrize("hlwm_process", [{'transparency': v} for v in [True, False]], indirect=True)
def test_window_border_inner(hlwm, x11):
color = (239, 2, 190)
bw = 5 # border width
inner_color = (48, 225, 26)
inner_bw = 2
hlwm.attr.theme.color = RawImage.rgb2string(color)
hlwm.attr.theme.border_width = bw
hlwm.attr.theme.inner_color = RawImage.rgb2string(inner_color)
hlwm.attr.theme.inner_width = inner_bw
handle, _ = x11.create_client()
img = x11.decoration_screenshot(handle)
# we check the inner border color in the upper left corner
for x in range(0, bw):
for y in range(0, bw):
threshold = bw - inner_bw
expected_color = inner_color if x >= threshold and y >= threshold else color
assert img.pixel(x, y) == expected_color
@pytest.mark.parametrize("xvfb", [{'xrender': v} for v in [True, False]], indirect=True)
@pytest.mark.parametrize("hlwm_process", [{'transparency': v} for v in [True, False]], indirect=True)
def test_window_border_outer(hlwm, x11):
color = (239, 2, 190)
bw = 6 # border width
outer_color = (48, 225, 26)
outer_bw = 3
hlwm.attr.theme.color = RawImage.rgb2string(color)
hlwm.attr.theme.border_width = bw
hlwm.attr.theme.outer_color = RawImage.rgb2string(outer_color)
hlwm.attr.theme.outer_width = outer_bw
handle, _ = x11.create_client()
img = x11.decoration_screenshot(handle)
# check the upper left corner
for x in range(0, bw):
for y in range(0, bw):
threshold = outer_bw
expected_color = outer_color if x < threshold or y < threshold else color
assert img.pixel(x, y) == expected_color
def screenshot_with_title(x11, win_handle, title):
""" set the win_handle's window title and then
take a screenshot
"""
x11.set_window_title(win_handle, title)
# double check that hlwm has updated the client's title:
winid = x11.winid_str(win_handle)
hlwm = HlwmBridge.INSTANCE
assert hlwm.attr.clients[winid].title() == title
# then, take the screenshot:
return x11.decoration_screenshot(win_handle)
@pytest.mark.parametrize("font", font_pool)
def test_title_every_letter_is_drawn(hlwm, x11, font):
"""the number of letters has some effect"""
font_color = (255, 0, 0) # a color available everywhere
hlwm.attr.theme.color = 'black'
hlwm.attr.theme.title_color = RawImage.rgb2string(font_color)
hlwm.attr.theme.title_height = 14
hlwm.attr.theme.padding_top = 4
hlwm.attr.theme.title_font = font
handle, _ = x11.create_client()
# set the window title to some word
count1 = screenshot_with_title(x11, handle, 'test').color_count(font_color)
# duplicate the word in the title
count2 = screenshot_with_title(x11, handle, 'test test').color_count(font_color)
# then the number of pixels of the font_color should have doubled:
assert count1 != 0
assert count1 * 2 == count2
@pytest.mark.parametrize("font", font_pool)
def test_title_different_letters_are_drawn(hlwm, x11, font):
"""changing letters changes the image"""
font_color = (255, 0, 0) # a color available everywhere
hlwm.attr.theme.color = 'black'
hlwm.attr.theme.title_color = RawImage.rgb2string(font_color)
hlwm.attr.theme.title_height = 14
hlwm.attr.theme.padding_top = 4
hlwm.attr.theme.title_font = font
handle, _ = x11.create_client()
# put some characters in the title that take only few pixels
count1 = screenshot_with_title(x11, handle, ',.b').color_count(font_color)
# alter characters to others taking more pixels
count2 = screenshot_with_title(x11, handle, ';:B').color_count(font_color)
# then the number of pixels should have increased
assert count1 < count2
@pytest.mark.parametrize("font", font_pool)
@pytest.mark.parametrize("ellipsis", [
'',
'...',
'…',
10 * 'a_very_long_string_that_takes_all_the_available_space',
])
def test_title_does_not_exceed_width(hlwm, x11, font, ellipsis):
font_color = (255, 0, 0) # a color available everywhere
bw = 30
hlwm.attr.theme.color = 'black'
hlwm.attr.theme.title_color = RawImage.rgb2string(font_color)
hlwm.attr.theme.title_height = 14
hlwm.attr.theme.padding_top = 0
hlwm.attr.theme.padding_left = 0
hlwm.attr.theme.padding_right = 0
hlwm.attr.theme.border_width = bw
hlwm.attr.theme.title_font = font
hlwm.attr.settings.ellipsis = ellipsis
handle, winid = x11.create_client()
# set a title that is too wide to be displayed in its entirety:
w = hlwm.attr.clients[winid].decoration_geometry().width
if font[0] != '-':
three_bytes_per_glyph = 'ヘールブストルフト'
assert len(three_bytes_per_glyph.encode('UTF-8')) == 3 * len(three_bytes_per_glyph)
# for xft fonts, also test utf8 window titles
utf8titles = [
w * '♥',
(w // 3) * 'äüöß',
(w // len(three_bytes_per_glyph)) * three_bytes_per_glyph,
]
else:
# for plain X fonts, it does not seem to work in tox/pytest
# (but strangely, it works in a manual Xephyr session)
utf8titles = []
for title in [w * '=', w * '|'] + utf8titles:
img = screenshot_with_title(x11, handle, title)
# verify that the title does not span too wide to the
# left or to the right:
# find leftmost non-black pixel:
leftmost_font_x = None
for x in range(0, w):
for y in range(0, 14): # only verify top `title_height`-many pixels
if img.pixel(x, y) != (0, 0, 0):
leftmost_font_x = x
break
if leftmost_font_x is not None:
break
# find rightmost non-black pixel:
rightmost_font_x = None
for x in range(w - 1, 0, -1):
for y in range(0, 14): # only verify top `title_height`-many pixels
if img.pixel(x, y) != (0, 0, 0):
rightmost_font_x = x
break
if rightmost_font_x is not None:
break
assert leftmost_font_x >= bw
assert rightmost_font_x < bw + hlwm.attr.clients[winid].content_geometry().width
@pytest.mark.parametrize("font", font_pool)
def test_title_ellipsis_is_used(hlwm, x11, font):
font_color = (255, 0, 0) # a color available everywhere
bw = 30
hlwm.attr.theme.color = 'black'
hlwm.attr.theme.title_color = RawImage.rgb2string(font_color)
hlwm.attr.theme.title_height = 14
hlwm.attr.theme.border_width = bw
hlwm.attr.theme.title_font = font
hlwm.attr.settings.ellipsis = 'abc'
handle, winid = x11.create_client()
assert screenshot_with_title(x11, handle, ' ').color_count(font_color) == 0
# set a title that is too wide to be displayed in its entirety:
w = hlwm.attr.clients[winid].decoration_geometry().width
count1 = screenshot_with_title(x11, handle, w * ' ').color_count(font_color)
assert count1 > 0
hlwm.attr.settings.ellipsis = 'abcabc'
count2 = screenshot_with_title(x11, handle, w * ' ').color_count(font_color)
assert count2 > 0
assert count2 == count1 * 2
@pytest.mark.parametrize("frame_bg_transparent", ['on', 'off'])
def test_frame_bg_transparent(hlwm, x11, frame_bg_transparent):
hlwm.attr.settings.show_frame_decorations = 'all'
hlwm.attr.settings.frame_gap = 24 # should not matter
hlwm.attr.settings.frame_border_width = 0
hlwm.attr.settings.frame_bg_active_color = '#ef0000'
hlwm.attr.settings.frame_bg_transparent = frame_bg_transparent
tw = 8
hlwm.attr.settings.frame_transparent_width = tw
[frame_win] = x11.get_hlwm_frames()
img = x11.screenshot(frame_win)
w = img.width
h = img.height
for x, y in [(2, 2), (4, 2), (2, 8), (3, 4), (7, 7), (w - 1, h - 1), (w - tw, h - tw)]:
assert img.pixel(x, y) == (0xef, 0, 0), \
f"pixel at {x}, {y}"
# if there is a hole in the frame decoration, it seems that black is used
# (either as a default value or because that's the color of the root window)
color_expected = (0, 0, 0) if frame_bg_transparent == 'on' else (0xef, 0, 0)
for x, y in [(tw, tw), (tw, tw + 2), (w - tw - 1, h - tw - 1), (50, h - tw - 1), (w // 2, h // 2)]:
assert img.pixel(x, y) == color_expected, \
f"pixel at {x}, {y}"
@pytest.mark.parametrize("frame_bg_transparent", ['on', 'off'])
def test_frame_holes_for_tiled_client(hlwm, x11, frame_bg_transparent):
hlwm.attr.settings.show_frame_decorations = 'all'
hlwm.attr.settings.frame_bg_active_color = '#efcd32'
hlwm.attr.settings.frame_bg_transparent = frame_bg_transparent
hlwm.attr.settings.frame_transparent_width = 8
def expect_frame_bg_color(winid, expected_color):
img = x11.screenshot(frame_win)
w = img.width
h = img.height
for x, y in [(0, 0), (0, h - 1), (w - 1, 0), (w - 1, h - 1)]:
assert img.pixel(x, y) == expected_color, \
f"pixel at {x}, {y}"
[frame_win] = x11.get_hlwm_frames()
expect_frame_bg_color(frame_win, (0xef, 0xcd, 0x32))
x11.create_client()
# one big tiled client should hide all of the frames bg color:
expect_frame_bg_color(frame_win, (0, 0, 0))
@pytest.mark.parametrize("frame_bg_transparent", ['on', 'off'])
def test_frame_holes_for_pseudotiled_client(hlwm, x11, frame_bg_transparent):
hlwm.attr.settings.show_frame_decorations = 'all'
bgcol = (0xef, 0xcd, 0x32)
hlwm.attr.settings.frame_bg_active_color = RawImage.rgb2string(bgcol)
hlwm.attr.settings.frame_bg_transparent = frame_bg_transparent
hlwm.attr.settings.frame_transparent_width = 8
[frame_win] = x11.get_hlwm_frames()
geo = frame_win.get_geometry()
w = geo.width
h = geo.height
# create a pseudotiled client that is very wide but not very high:
winhandle, winid = x11.create_client(geometry=(0, 0, w + 10, h // 3 - 10))
hlwm.attr.clients[winid].pseudotile = 'on'
img = x11.screenshot(frame_win)
assert (img.width, img.height) == (w, h)
# the frame is visible on the top and bottom
    assert img.pixel(0, 0) == bgcol
    assert img.pixel(0, h - 1) == bgcol
    assert img.pixel(w // 2, 0) == bgcol
    assert img.pixel(w // 2, h - 1) == bgcol
    # but the frame is not visible on the left and right
    black = (0, 0, 0)
    assert img.pixel(0, h // 2) == black
    assert img.pixel(w - 1, h // 2) == black
    assert img.pixel(w // 2, h // 2) == black
@pytest.mark.parametrize("method", ['tab_*-attributes', 'other scheme'])
@pytest.mark.parametrize("running_clients_num", [3])
def test_decoration_tab_colors(hlwm, x11, method, running_clients, running_clients_num):
active_color = (200, 23, 0) # something unique
normal_color = (23, 200, 0) # something unique
hlwm.attr.theme.active.color = RawImage.rgb2string(active_color)
hlwm.attr.theme.active.title_color = RawImage.rgb2string(active_color)
if method == 'tab_*-attributes':
hlwm.attr.theme.active.tab_color = RawImage.rgb2string(normal_color)
hlwm.attr.theme.active.tab_title_color = RawImage.rgb2string(normal_color)
hlwm.attr.theme.active.tab_outer_color = RawImage.rgb2string(normal_color)
hlwm.attr.theme.active.tab_outer_width = 1
if method == 'other scheme':
hlwm.attr.theme.normal.color = RawImage.rgb2string(normal_color)
hlwm.attr.theme.normal.title_color = RawImage.rgb2string(normal_color)
hlwm.attr.theme.title_height = 20
hlwm.call(['set_layout', 'max'])
# split twice to make tab area smaller and screenshots faster :-)
hlwm.call(['split', 'bottom'])
hlwm.call(['split', 'bottom'])
winhandle = x11.window(hlwm.attr.clients.focus.winid())
img = x11.decoration_screenshot(winhandle)
color_count = img.color_count_dict()
# we have three tabs, and one of them should have the active color:
assert color_count[active_color] * (running_clients_num - 1) == color_count[normal_color]
# if we disable tabs, then the 'normal_color' should disappear:
hlwm.attr.settings.tabbed_max = False
winhandle = x11.window(hlwm.attr.clients.focus.winid())
img = x11.decoration_screenshot(winhandle)
new_color_count = img.color_count_dict()
assert normal_color not in new_color_count
assert new_color_count[active_color] == running_clients_num * color_count[active_color]
@pytest.mark.parametrize("running_clients_num", [3])
def test_decoration_tab_urgent(hlwm, x11, running_clients, running_clients_num):
active_color = (200, 23, 0) # something unique
normal_color = (23, 200, 0) # something unique
urgent_color = (17, 2, 189) # something unique
hlwm.attr.theme.active.color = RawImage.rgb2string(active_color)
hlwm.attr.theme.active.title_color = RawImage.rgb2string(active_color)
hlwm.attr.theme.normal.color = RawImage.rgb2string(normal_color)
hlwm.attr.theme.normal.title_color = RawImage.rgb2string(normal_color)
hlwm.attr.theme.urgent.color = RawImage.rgb2string(urgent_color)
hlwm.attr.theme.urgent.title_color = RawImage.rgb2string(urgent_color)
hlwm.attr.theme.title_height = 20
hlwm.call(['load', '(clients max:0 {})'.format(' '.join(running_clients))])
# split twice to make tab area smaller and screenshots faster :-)
hlwm.call(['split', 'bottom'])
hlwm.call(['split', 'bottom'])
winhandle = x11.window(running_clients[0])
img = x11.decoration_screenshot(winhandle)
color_count = img.color_count_dict()
assert urgent_color not in color_count
# make one of the unfocused tabs urgent
x11.make_window_urgent(x11.window(running_clients[2]))
img = x11.decoration_screenshot(winhandle)
new_color_count = img.color_count_dict()
assert urgent_color in new_color_count
# there is one 'normal' and one 'urgent' tab, so the colors should
# appear similarly often:
assert new_color_count[urgent_color] == new_color_count[normal_color]
assert new_color_count[normal_color] == color_count[normal_color] / 2
def test_decoration_tab_title_update(hlwm, x11):
text_color = (212, 189, 140)
hlwm.attr.theme.title_color = RawImage.rgb2string(text_color)
hlwm.attr.theme.title_height = 20
hlwm.call(['set_layout', 'max'])
# split twice to make tab area smaller and screenshots faster :-)
hlwm.call(['split', 'bottom'])
hlwm.call(['split', 'bottom'])
count = 5
win_handles = [x11.create_client()[0] for _ in range(0, count)]
# empty all window titles:
for wh in win_handles:
x11.set_window_title(wh, '')
# focus handle 0:
hlwm.call(['jumpto', x11.winid_str(win_handles[0])])
# take a screenshot, it should not contain the text_color:
assert x11.decoration_screenshot(win_handles[0]).color_count(text_color) == 0
# change the title of an unfocused window:
x11.set_window_title(win_handles[2], 'SOMETHING')
# this change should now be visible in the tab bar, at least 5 pixels
# should have this color now:
assert x11.decoration_screenshot(win_handles[0]).color_count(text_color) > 5
@pytest.mark.parametrize("running_clients_num", [4])
def test_decoration_click_changes_tab(hlwm, mouse, running_clients, running_clients_num):
hlwm.call(['load', '(clients max:0 {})'.format(' '.join(running_clients))])
hlwm.attr.settings.tabbed_max = True
hlwm.attr.theme.title_height = 10
geo = hlwm.attr.clients.focus.decoration_geometry()
tabbar_top_left = geo.topleft()
tabbar_bottom_right = geo.topleft() + Point(geo.width, int(hlwm.attr.theme.title_height()))
for idx in reversed(range(0, running_clients_num)):
# pick a point between top left corner of the title bar
# and the bottom right corner of the title bar:
# the extra 0.5 makes that we click in the middle of the tab
ratio = (idx + 0.5) / running_clients_num
cursor = tabbar_top_left * (1 - ratio) + tabbar_bottom_right * ratio
mouse.move_to(cursor.x, cursor.y)
mouse.click('1')
assert hlwm.attr.clients.focus.winid() == running_clients[idx]
def test_decoration_click_into_window_does_not_change_tab(hlwm, mouse):
wins = hlwm.create_clients(2)
hlwm.call(['load', '(clients max:1 {})'.format(' '.join(wins))])
hlwm.attr.settings.tabbed_max = True
hlwm.attr.theme.title_height = 10
assert hlwm.attr.clients.focus.winid() == wins[1]
# move into the window and click:
mouse.move_into(wins[0], x=4, y=4)
mouse.click('1')
# this does not change the focus:
assert hlwm.attr.clients.focus.winid() == wins[1]
# to double check:
# if the cursor was 8px further up, the click
# however would change the tab
mouse.move_relative(0, -8)
mouse.click('1')
assert hlwm.attr.clients.focus.winid() == wins[0]
def test_textalign_completion(hlwm):
"""Test the TextAlign converter"""
assert hlwm.complete(['attr', 'theme.title_align']) \
== sorted(['left', 'right', 'center'])
for k in hlwm.complete(['attr', 'theme.title_align']):
hlwm.attr.theme.title_align = k
assert hlwm.attr.theme.title_align() == k
def test_title_position_remains(hlwm, x11):
active_color = (212, 189, 140)
normal_color = (221, 198, 104)
hlwm.attr.theme.active.title_color = RawImage.rgb2string(active_color)
hlwm.attr.theme.normal.title_color = RawImage.rgb2string(normal_color)
hlwm.attr.settings.tabbed_max = True
hlwm.attr.theme.title_height = 10
hlwm.attr.theme.outer_width = 3
hlwm.attr.tags.focus.tiling.focused_frame.algorithm = 'max'
handle1, win1 = x11.create_client()
x11.set_window_title(handle1, 'client 1')
handle2, win2 = x11.create_client()
    x11.set_window_title(handle2, 'client 2')
for align in ['left', 'center', 'right']:
hlwm.attr.theme.title_align = align
hlwm.call(['jumpto', win1])
focus1 = x11.decoration_screenshot(handle1)
hlwm.call(['jumpto', win2])
focus2 = x11.decoration_screenshot(handle2)
assert focus1.height == focus2.height
assert focus1.width == focus2.width
titlebar_height = 10
for x, y in itertools.product(range(0, focus1.width), range(0, titlebar_height)):
assert (focus1.pixel(x, y) == active_color) == (focus2.pixel(x, y) == normal_color), \
f'mismatch at pixel ({x}, {y})'
assert (focus1.pixel(x, y) == normal_color) == (focus2.pixel(x, y) == active_color), \
f'mismatch at pixel ({x}, {y})'
@pytest.mark.parametrize("client_count", [1, 2])
def test_decoration_title_align(hlwm, x11, client_count):
"""test the title_align attribute,
by computing the 'average' position of the title
"""
text_color = (212, 189, 140)
hlwm.attr.theme.title_color = RawImage.rgb2string(text_color)
hlwm.attr.settings.tabbed_max = True
hlwm.attr.theme.title_height = 10
win_handle, winid = x11.create_client()
hlwm.attr.tags.focus.tiling.focused_frame.algorithm = 'max'
while hlwm.attr.tags.focus.client_count() < client_count:
x11.create_client()
assert hlwm.attr.clients.focus.winid() == winid
x11.set_window_title(win_handle, '-')
# compute the 'average' title position
align_to_title_pos = {}
for align in ['left', 'right', 'center']:
hlwm.attr.theme.title_align = align
img = x11.decoration_screenshot(win_handle)
point_sum = Point(0, 0)
point_count = 0
for x, y in itertools.product(range(0, img.width), range(0, img.height)):
if img.pixel(x, y) == text_color:
point_sum.x += x
point_sum.y += y
point_count += 1
# compute the average point:
align_to_title_pos[align] = point_sum // point_count
# all titles should be on the same height:
assert align_to_title_pos['left'].y == align_to_title_pos['center'].y
assert align_to_title_pos['center'].y == align_to_title_pos['right'].y
# the x coordinate should be different by at least this:
# the width of the decoration, divided by the number of tabs
# and divided by roughly 3 :-)
x_diff = hlwm.attr.clients.focus.decoration_geometry().width / client_count / 3
assert align_to_title_pos['left'].x + x_diff < align_to_title_pos['center'].x
assert align_to_title_pos['center'].x + x_diff < align_to_title_pos['right'].x
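# --- Hedged note (not part of the original test module) ---
# The tab-click test above relies on plain linear interpolation along the title
# bar: with n tabs, tab i occupies the segment [i/n, (i+1)/n] of the bar, so
# ratio = (i + 0.5) / n places the cursor in the middle of tab i, which is
# exactly what the line
#   cursor = tabbar_top_left * (1 - ratio) + tabbar_bottom_right * ratio
# computes.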
|
76c5339a4e98607ecb7a846e580809fb704ec45b
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/nuki/const.py
|
680454c3edc7e4c494559d0fdcbba33fe99e7e7f
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 400
|
py
|
const.py
|
"""Constants for Nuki."""
DOMAIN = "nuki"
# Attributes
ATTR_BATTERY_CRITICAL = "battery_critical"
ATTR_NUKI_ID = "nuki_id"
ATTR_ENABLE = "enable"
ATTR_UNLATCH = "unlatch"
# Data
DATA_BRIDGE = "nuki_bridge_data"
DATA_LOCKS = "nuki_locks_data"
DATA_OPENERS = "nuki_openers_data"
DATA_COORDINATOR = "nuki_coordinator"
# Defaults
DEFAULT_PORT = 8080
DEFAULT_TIMEOUT = 20
ERROR_STATES = (0, 254, 255)
|
29d3095e2dab22bfad7ebf2397fda24893b5fa5c
|
3f5f778f973e229037007b1eb00e5171cbe2560e
|
/abcpy/continuousmodels.py
|
baa9e98a9b7733848d69a13a8db78e0e2ff54540
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] |
permissive
|
eth-cscs/abcpy
|
9f568b66f66ebc0b835c837dac481c9c2ef199fe
|
caf0fd899424da69c0ef0bcd499696c5a077cdb1
|
refs/heads/master
| 2023-03-16T12:34:34.650734
| 2023-03-13T16:07:26
| 2023-03-13T16:07:26
| 79,544,398
| 107
| 43
|
BSD-3-Clause-Clear
| 2023-08-16T11:04:51
| 2017-01-20T09:15:25
|
Python
|
UTF-8
|
Python
| false
| false
| 28,289
|
py
|
continuousmodels.py
|
import numpy as np
from scipy.special import gamma
from scipy.stats import multivariate_normal, norm, lognorm, expon
from abcpy.probabilisticmodels import ProbabilisticModel, Continuous, InputConnector
class Uniform(ProbabilisticModel, Continuous):
def __init__(self, parameters, name='Uniform'):
"""
This class implements a probabilistic model following a uniform distribution.
Parameters
----------
parameters: list
Contains two lists. The first list specifies the probabilistic models and hyperparameters from which the
lower bound of the uniform distribution derives. The second list specifies the probabilistic models and
hyperparameters from which the upper bound derives.
name: string, optional
The name that should be given to the probabilistic model in the journal file.
"""
if not isinstance(parameters, list):
raise TypeError('Input for Uniform has to be of type list.')
if len(parameters) < 2:
raise ValueError('Input for Uniform has to be of length 2.')
if not isinstance(parameters[0], list):
raise TypeError('Each boundary for Uniform has to be of type list.')
if not isinstance(parameters[1], list):
raise TypeError('Each boundary for Uniform has to be of type list.')
if len(parameters[0]) != len(parameters[1]):
raise ValueError('Length of upper and lower bound have to be equal.')
self._dimension = len(parameters[0])
input_parameters = InputConnector.from_list(parameters)
super(Uniform, self).__init__(input_parameters, name)
self.visited = False
def _check_input(self, input_values):
"""
Checks parameter values sampled from the parents.
"""
if len(input_values) % 2 != 0:
return False
# test whether lower bound is not greater than upper bound
for j in range(self.get_output_dimension()):
if input_values[j] > input_values[j + self.get_output_dimension()]:
return False
return True
def _check_output(self, parameters):
"""
Checks parameter values given as fixed values. Returns False iff a parameter value lies outside the
interval given by the corresponding lower and upper bounds.
"""
for i in range(self.get_output_dimension()):
lower_value = self.get_input_connector()[i]
upper_value = self.get_input_connector()[i + self.get_output_dimension()]
if parameters[i] < lower_value or parameters[i] > upper_value:
return False
return True
def forward_simulate(self, input_values, k, rng=np.random.RandomState(), mpi_comm=None):
"""
Samples from a uniform distribution using the current values for each probabilistic model from which the model derives.
Parameters
----------
input_values: list
List of input parameters, in the same order as specified in the InputConnector passed to the init function
k: integer
The number of samples that should be drawn.
rng: Random number generator
Defines the random number generator to be used. The default value uses a random seed to initialize the generator.
Returns
-------
list: [np.ndarray]
A list containing the sampled values as np-array.
"""
samples = np.zeros(shape=(k, self.get_output_dimension()))
for j in range(0, self.get_output_dimension()):
samples[:, j] = rng.uniform(input_values[j], input_values[j + self.get_output_dimension()], k)
return [np.array(x).reshape(-1, ) for x in samples]
def get_output_dimension(self):
return self._dimension
def pdf(self, input_values, x):
"""
Calculates the probability density function at point x.
Commonly used to determine whether perturbed parameters are still valid according to the pdf.
Parameters
----------
input_values: list
List of input parameters, in the same order as specified in the InputConnector passed to the init function
x: list
The point at which the pdf should be evaluated.
Returns
-------
Float:
The evaluated pdf at point x.
"""
lower_bound = input_values[:self.get_output_dimension()]
upper_bound = input_values[self.get_output_dimension():]
if np.product(np.greater_equal(x, np.array(lower_bound)) * np.less_equal(x, np.array(upper_bound))):
pdf_value = 1. / np.product(np.array(upper_bound) - np.array(lower_bound))
else:
pdf_value = 0.
self.calculated_pdf = pdf_value
return pdf_value
class Normal(ProbabilisticModel, Continuous):
def __init__(self, parameters, name='Normal'):
"""
This class implements a probabilistic model following a normal distribution with mean mu and variance sigma.
Parameters
----------
parameters: list
Contains the probabilistic models and hyperparameters from which the model derives.
The list has two entries: from the first entry mean of the distribution and from the second entry variance is derived.
Note that the second value of the list is strictly greater than 0.
name: string
The name that should be given to the probabilistic model in the journal file.
"""
if not isinstance(parameters, list):
raise TypeError('Input for Normal has to be of type list.')
if len(parameters) < 2:
raise ValueError('Input for Normal has to be of length 2.')
input_parameters = InputConnector.from_list(parameters)
super(Normal, self).__init__(input_parameters, name)
self.visited = False
def _check_input(self, input_values):
"""
Returns False if the standard deviation is not strictly positive.
"""
if len(input_values) != 2:
return False
if input_values[1] <= 0:
return False
return True
def _check_output(self, parameters):
"""
Checks parameter values that are given as fixed values.
"""
return True
def forward_simulate(self, input_values, k, rng=np.random.RandomState(), mpi_comm=None):
"""
Samples from a normal distribution using the current values for each probabilistic model from which the model derives.
Parameters
----------
input_values: list
List of input parameters, in the same order as specified in the InputConnector passed to the init function
k: integer
The number of samples that should be drawn.
rng: Random number generator
Defines the random number generator to be used. The default value uses a random seed to initialize the generator.
Returns
-------
list: [np.ndarray]
A list containing the sampled values as np-array.
"""
mu = input_values[0]
sigma = input_values[1]
result = np.array(rng.normal(mu, sigma, k))
return [np.array([x]).reshape(-1, ) for x in result]
def get_output_dimension(self):
return 1
# Why does the following not work here?
# return self._dimension
def pdf(self, input_values, x):
"""
Calculates the probability density function at point x.
Commonly used to determine whether perturbed parameters are still valid according to the pdf.
Parameters
----------
input_values: list
List of input parameters of the from [mu, sigma]
x: list
The point at which the pdf should be evaluated.
Returns
-------
Float:
The evaluated pdf at point x.
"""
mu = input_values[0]
sigma = input_values[1]
pdf = norm(mu, sigma).pdf(x)
self.calculated_pdf = pdf
return pdf
class StudentT(ProbabilisticModel, Continuous):
def __init__(self, parameters, name='StudentT'):
"""
This class implements a probabilistic model following the Student's T-distribution.
Parameters
----------
parameters: list
Contains the probabilistic models and hyperparameters from which the model derives.
The list has two entries: from the first entry mean of the distribution and from the second entry degrees of freedom is derived.
Note that the second value of the list is strictly greater than 0.
name: string
The name that should be given to the probabilistic model in the journal file.
"""
if not isinstance(parameters, list):
raise TypeError('Input for StudentT has to be of type list.')
if len(parameters) < 2:
raise ValueError('Input for StudentT has to be of length 2.')
input_parameters = InputConnector.from_list(parameters)
super(StudentT, self).__init__(input_parameters, name)
self.visited = False
def forward_simulate(self, input_values, k, rng=np.random.RandomState(), mpi_comm=None):
"""
Samples from a Student's T-distribution using the current values for each probabilistic model from which the model derives.
Parameters
----------
input_values: list
List of input parameters, in the same order as specified in the InputConnector passed to the init function
k: integer
The number of samples that should be drawn.
rng: Random number generator
Defines the random number generator to be used. The default value uses a random seed to initialize the generator.
Returns
-------
list: [np.ndarray]
A list containing the sampled values as np-array.
"""
mean = input_values[0]
df = input_values[1]
result = np.array((rng.standard_t(df, k) + mean))
return [np.array([x]).reshape(-1, ) for x in result]
def _check_input(self, input_values):
"""
Checks parameter values sampled from the parents of the probabilistic model. Returns False iff the degrees of freedom are smaller than or equal to 0.
"""
if len(input_values) != 2:
return False
if input_values[1] <= 0:
return False
return True
def _check_output(self, parameters):
"""
Checks parameter values given as fixed values.
"""
return True
def get_output_dimension(self):
return 1
# Why does the following not work here?
# return self._dimension
def pdf(self, input_values, x):
"""
Calculates the probability density function at point x.
Commonly used to determine whether perturbed parameters are still valid according to the pdf.
Parameters
----------
input_values: list
List of input parameters
x: list
The point at which the pdf should be evaluated.
Returns
-------
Float:
The evaluated pdf at point x.
"""
df = input_values[1]
x -= input_values[0] # divide by std dev if we include that
pdf = gamma((df + 1) / 2) / (np.sqrt(df * np.pi) * gamma(df / 2) * (1 + x ** 2 / df) ** ((df + 1) / 2))
self.calculated_pdf = pdf
return pdf
class MultivariateNormal(ProbabilisticModel, Continuous):
def __init__(self, parameters, name='Multivariate Normal'):
"""
This class implements a probabilistic model following a multivariate normal distribution with mean and
covariance matrix.
Parameters
----------
parameters: list of at length 2
Contains the probabilistic models and hyperparameters from which the model derives. The first entry defines
the mean, while the second entry defines the Covariance matrix. Note that if the mean is n dimensional, the
covariance matrix is required to be of dimension nxn, symmetric and
positive-definite.
name: string
The name that should be given to the probabilistic model in the journal file.
"""
# convert user input to InputConnector object
if not isinstance(parameters, list):
raise TypeError('Input for Multivariate Normal has to be of type list.')
if len(parameters) < 2:
raise ValueError('Input for Multivariate Normal has to be of length 2.')
mean = parameters[0]
if isinstance(mean, list):
self._dimension = len(mean)
elif isinstance(mean, ProbabilisticModel):
self._dimension = mean.get_output_dimension()
input_parameters = InputConnector.from_list(parameters)
super(MultivariateNormal, self).__init__(input_parameters, name)
self.visited = False
def _check_input(self, input_values):
"""
Checks parameter values sampled from the parents at initialization. Returns False iff the covariance matrix is
not symmetric or not positive definite.
"""
# Test whether input is compatible
dim = self._dimension
param_ctn = len(input_values)
if param_ctn != dim + dim ** 2:
return False
cov = np.array(input_values[dim:dim + dim ** 2]).reshape((dim, dim))
# Check whether the covariance matrix is symmetric
if not np.allclose(cov, cov.T, atol=1e-3):
return False
# Check whether the covariance matrix is positive definite
try:
is_pos = np.linalg.cholesky(cov)
except np.linalg.LinAlgError:
return False
return True
def _check_output(self, parameters):
"""
Checks parameter values that are given as fixed values.
"""
return True
def forward_simulate(self, input_values, k, rng=np.random.RandomState(), mpi_comm=None):
"""
Samples from a multivariate normal distribution using the current values for each probabilistic model from which the
model derives.
Parameters
----------
input_values: list
List of input parameters, in the same order as specified in the InputConnector passed to the init function
k: integer
The number of samples that should be drawn.
rng: Random number generator
Defines the random number generator to be used. The default value uses a random seed to initialize the generator.
Returns
-------
list: [np.ndarray]
A list containing the sampled values as np-array.
"""
dim = self.get_output_dimension()
mean = np.array(input_values[0:dim])
cov = np.array(input_values[dim:dim + dim ** 2]).reshape((dim, dim))
result = rng.multivariate_normal(mean, cov, k)
return [np.array([result[i, :]]).reshape(-1, ) for i in range(k)]
def get_output_dimension(self):
return self._dimension
def pdf(self, input_values, x):
"""
Calculates the probability density function at point x. Commonly used to determine whether perturbed parameters
are still valid according to the pdf.
Parameters
----------
input_values: list
List of input parameters
x: list
The point at which the pdf should be evaluated.
Returns
-------
Float:
The evaluated pdf at point x.
"""
dim = self._dimension
# Extract parameters
mean = np.array(input_values[0:dim])
cov = np.array(input_values[dim:dim + dim ** 2]).reshape((dim, dim))
pdf = multivariate_normal(mean, cov).pdf(x)
self.calculated_pdf = pdf
return pdf
class MultiStudentT(ProbabilisticModel, Continuous):
def __init__(self, parameters, name='MultiStudentT'):
"""
This class implements a probabilistic model following the multivariate Student-T distribution.
Parameters
----------
parameters: list
All but the last two entries contain the probabilistic models and hyperparameters from which the model
derives. The second to last entry contains the covariance matrix. If the mean is of dimension n, the
covariance matrix is required to be nxn dimensional. The last entry contains the degrees of freedom.
name: string
The name that should be given to the probabilistic model in the journal file.
"""
if not isinstance(parameters, list):
raise TypeError('Input for Multivariate StudentT has to be of type list.')
if len(parameters) < 3:
raise ValueError('Input for Multivariate Student T has to be of length 3.')
if not isinstance(parameters[0], list):
raise TypeError('Input for mean of Multivariate Student T has to be of type list.')
if not isinstance(parameters[1], list):
raise TypeError('Input for covariance of Multivariate Student T has to be of type list.')
mean = parameters[0]
if isinstance(mean, list):
self._dimension = len(mean)
input_parameters = InputConnector.from_list(parameters)
elif isinstance(mean, ProbabilisticModel):
self._dimension = mean.get_output_dimension()
input_parameters = parameters
super(MultiStudentT, self).__init__(input_parameters, name)
self.visited = False
def _check_input(self, input_values):
"""
Returns False iff the degrees of freedom are less than or equal to 0, the covariance matrix is not symmetric or
the covariance matrix is not positive definite.
"""
dim = self._dimension
param_ctn = len(input_values)
if param_ctn != dim + dim ** 2 + 1:
return False
# Extract parameters
mean = np.array(input_values[0:dim])
cov = np.array(input_values[dim:dim + dim ** 2]).reshape((dim, dim))
df = input_values[-1]
# Check whether the covariance matrix is symmetric
if not np.allclose(cov, cov.T, atol=1e-3):
return False
# Check whether the covariance matrix is positive definite
try:
is_pos = np.linalg.cholesky(cov)
except np.linalg.LinAlgError:
return False
# Check whether the degrees of freedom are <=0
if df <= 0:
return False
return True
def _check_output(self, parameters):
"""
Checks parameter values given as fixed values.
"""
return True
def forward_simulate(self, input_values, k, rng=np.random.RandomState(), mpi_comm=None):
"""
Samples from a multivariate Student's T-distribution using the current values for each probabilistic model from
which the model derives.
Parameters
----------
input_values: list
List of input parameters, in the same order as specified in the InputConnector passed to the init function
k: integer
The number of samples that should be drawn.
rng: Random number generator
Defines the random number generator to be used. The default value uses a random seed to initialize the
generator.
Returns
-------
list: [np.ndarray]
A list containing the sampled values as np-array.
"""
# Extract input_parameters
dim = self.get_output_dimension()
mean = np.array(input_values[0:dim])
cov = np.array(input_values[dim:dim + dim ** 2]).reshape((dim, dim))
df = input_values[-1]
if df == np.inf:
chisq = 1.0
else:
chisq = rng.chisquare(df, k) / df
chisq = chisq.reshape(-1, 1).repeat(dim, axis=1)
mvn = rng.multivariate_normal(np.zeros(dim), cov, k)
result = (mean + np.divide(mvn, np.sqrt(chisq)))
return [np.array([result[i, :]]).reshape(-1, ) for i in range(k)]
def get_output_dimension(self):
return self._dimension
def pdf(self, input_values, x):
"""
Calculates the probability density function at point x.
Commonly used to determine whether perturbed parameters are still valid according to the pdf.
Parameters
----------
input_values: list
List of input parameters
x: list
The point at which the pdf should be evaluated.
Returns
-------
Float:
The evaluated pdf at point x.
"""
dim = self.get_output_dimension()
# Extract parameters
mean = np.array(input_values[0:dim])
cov = np.array(input_values[dim:dim + dim ** 2]).reshape((dim, dim))
df = input_values[-1]
p = len(mean)
numerator = gamma((df + p) / 2)
denominator = gamma(df / 2) * pow(df * np.pi, p / 2.) * np.sqrt(abs(np.linalg.det(cov)))
normalizing_const = numerator / denominator
tmp = 1 + 1 / df * np.dot(np.dot(np.transpose(x - mean), np.linalg.inv(cov)), (x - mean))
density = normalizing_const * pow(tmp, -((df + p) / 2.))
self.calculated_pdf = density
return density
class LogNormal(ProbabilisticModel, Continuous):
def __init__(self, parameters, name='LogNormal'):
"""
This class implements a probabilistic model following a Lognormal distribution with mean mu and variance sigma.
Parameters
----------
parameters: list
Contains the probabilistic models and hyperparameters from which the model derives.
The list has two entries: from the first entry mean of the underlying normal distribution and from the second entry variance of the underlying normal
distribution is derived.
Note that the second value of the list is strictly greater than 0.
name: string
The name that should be given to the probabilistic model in the journal file.
"""
if not isinstance(parameters, list):
raise TypeError('Input for LogNormal has to be of type list.')
if len(parameters) < 2:
raise ValueError('Input for LogNormal has to be of length 2.')
input_parameters = InputConnector.from_list(parameters)
super(LogNormal, self).__init__(input_parameters, name)
self.visited = False
def _check_input(self, input_values):
"""
Returns False if the standard deviation is not strictly positive.
"""
if len(input_values) != 2:
return False
if input_values[1] <= 0:
return False
return True
def _check_output(self, parameters):
"""
Checks parameter values that are given as fixed values.
"""
return True
def forward_simulate(self, input_values, k, rng=np.random.RandomState(), mpi_comm=None):
"""
Samples from a lognormal distribution using the current values for each probabilistic model from which the model derives.
Parameters
----------
input_values: list
List of input parameters, in the same order as specified in the InputConnector passed to the init function
k: integer
The number of samples that should be drawn.
rng: Random number generator
Defines the random number generator to be used. The default value uses a random seed to initialize the generator.
Returns
-------
list: [np.ndarray]
A list containing the sampled values as np-array.
"""
mu = input_values[0]
sigma = input_values[1]
result = np.array(rng.lognormal(mu, sigma, k))
return [np.array([x]).reshape(-1, ) for x in result]
def get_output_dimension(self):
return 1
# Why does the following not work here?
# return self._dimension
def pdf(self, input_values, x):
"""
Calculates the probability density function at point x.
Commonly used to determine whether perturbed parameters are still valid according to the pdf.
Parameters
----------
input_values: list
List of input parameters of the from [mu, sigma]
x: list
The point at which the pdf should be evaluated.
Returns
-------
Float:
The evaluated pdf at point x.
"""
mu = input_values[0]
sigma = input_values[1]
pdf = lognorm(scale=np.exp(mu), s=sigma).pdf(x)
self.calculated_pdf = pdf
return pdf
class Exponential(ProbabilisticModel, Continuous):
def __init__(self, parameters, name='Exponential'):
"""
This class implements a probabilistic model following an exponential distribution with rate lambda.
Parameters
----------
parameters: list
Contains the probabilistic models and hyperparameters from which the model derives.
The list has one entry: the rate :math:`\lambda` of the exponential distribution, that has therefore pdf:
:math:`f(x; \lambda) = \lambda \exp(-\lambda x )`
name: string
The name that should be given to the probabilistic model in the journal file.
"""
if not isinstance(parameters, list):
raise TypeError('Input for Exponential has to be of type list.')
if len(parameters) != 1:
raise ValueError('Input for Exponential has to be of length 1.')
input_parameters = InputConnector.from_list(parameters)
super(Exponential, self).__init__(input_parameters, name)
self.visited = False
def _check_input(self, input_values):
"""
Returns False if the rate is not strictly positive.
"""
if len(input_values) != 1:
return False
if input_values[0] <= 0:
return False
return True
def _check_output(self, parameters):
"""
Checks parameter values that are given as fixed values.
"""
return True
def forward_simulate(self, input_values, k, rng=np.random.RandomState(), mpi_comm=None):
"""
Samples from an exponential distribution using the current values for each probabilistic model from which the model derives.
Parameters
----------
input_values: list
List of input parameters, in the same order as specified in the InputConnector passed to the init function
k: integer
The number of samples that should be drawn.
rng: Random number generator
Defines the random number generator to be used. The default value uses a random seed to initialize the generator.
Returns
-------
list: [np.ndarray]
A list containing the sampled values as np-array.
"""
rate = input_values[0]
scale = 1 / rate
result = np.array(rng.exponential(scale, k))
return [np.array([x]).reshape(-1, ) for x in result]
def get_output_dimension(self):
return 1
# Why does the following not work here?
# return self._dimension
def pdf(self, input_values, x):
"""
Calculates the probability density function at point x.
Commonly used to determine whether perturbed parameters are still valid according to the pdf.
Parameters
----------
input_values: list
List of input parameters of the from [rate]
x: list
The point at which the pdf should be evaluated.
Returns
-------
Float:
The evaluated pdf at point x.
"""
rate = input_values[0]
scale = 1 / rate
pdf = expon(scale=scale).pdf(x)
self.calculated_pdf = pdf
return pdf
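# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of how the models above are typically composed in ABCpy:
# a Uniform prior feeding the mean of a Normal. The variable names and the
# direct forward_simulate call are illustrative assumptions, not library canon.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    mu = Uniform([[-5.0], [5.0]], name="mu")      # prior on the mean
    model = Normal([mu, 1.0], name="observable")  # Normal with random mean, sigma = 1.0
    # forward_simulate takes the already-resolved input values (here: mean, sigma)
    # and returns a list of k one-dimensional numpy arrays.
    draws = model.forward_simulate([0.5, 1.0], 3, rng=rng)
    print(draws)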
|
6b3b88de43e4375aa0e13ff135b44cbe70cd1fff
|
db85e6659af346ceaffa328c3b790fa77c513d6f
|
/arxiv_vanity/papers/migrations/0017_auto_20180619_1657.py
|
01d79e38fdece857707c6377bdbf5724cdc47cc2
|
[
"Apache-2.0"
] |
permissive
|
arxiv-vanity/arxiv-vanity
|
d51ca32eeed22853c07eb2c81b5d5ac4649241fc
|
f7eb2f19ee2c3120a3084cfce2ba96fb766fbf8e
|
refs/heads/master
| 2023-07-31T14:39:04.566729
| 2022-01-18T22:55:31
| 2022-01-18T22:55:42
| 100,118,170
| 1,567
| 105
|
Apache-2.0
| 2023-02-13T05:30:09
| 2017-08-12T14:41:41
|
Python
|
UTF-8
|
Python
| false
| false
| 499
|
py
|
0017_auto_20180619_1657.py
|
# Generated by Django 2.0.6 on 2018-06-19 16:57
from django.db import migrations
def generate_arxiv_ids(apps, schema_editor):
SourceFile = apps.get_model("papers", "SourceFile")
for sf in SourceFile.objects.iterator():
if not sf.arxiv_id:
sf.arxiv_id = sf.file.name.rsplit(".", 1)[0]
sf.save()
class Migration(migrations.Migration):
dependencies = [("papers", "0016_auto_20180619_1655")]
operations = [migrations.RunPython(generate_arxiv_ids)]
|
89268c705d2ca9800e0c1d64350b3fdfb5754438
|
532ad1aedff8528b2e8af4e4e752f32d58b92b0d
|
/doc/compile/modes_solution_1.py
|
1bcf05da45a681f274168c5dae8d29126daaec1b
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
aesara-devs/aesara
|
ebaa204159d4ddb94ede10580c5b8e39d114713f
|
b5a3cf45f0f6762bb4bb0e2c657f7d3822c74595
|
refs/heads/main
| 2023-08-09T10:56:56.528283
| 2023-07-15T06:15:49
| 2023-07-15T13:28:29
| 221,231,590
| 861
| 142
|
NOASSERTION
| 2023-09-05T03:16:16
| 2019-11-12T14:02:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,881
|
py
|
modes_solution_1.py
|
#!/usr/bin/env python
# Aesara tutorial
# Solution to Exercise in section 'Configuration Settings and Compiling Modes'
import numpy as np
import aesara
import aesara.tensor as at
aesara.config.floatX = "float32"
rng = np.random.default_rng(428)
N = 400
feats = 784
D = (
rng.standard_normal((N, feats)).astype(aesara.config.floatX),
rng.integers(size=N, low=0, high=2).astype(aesara.config.floatX),
)
training_steps = 10000
# Declare Aesara symbolic variables
x = at.matrix("x")
y = at.vector("y")
w = aesara.shared(rng.standard_normal(feats).astype(aesara.config.floatX), name="w")
b = aesara.shared(np.asarray(0.0, dtype=aesara.config.floatX), name="b")
x.tag.test_value = D[0]
y.tag.test_value = D[1]
# print "Initial model:"
# print w.get_value(), b.get_value()
# Construct Aesara expression graph
p_1 = 1 / (1 + at.exp(-at.dot(x, w) - b)) # Probability of having a one
prediction = p_1 > 0.5 # The prediction that is done: 0 or 1
xent = -y * at.log(p_1) - (1 - y) * at.log(1 - p_1) # Cross-entropy
cost = at.cast(xent.mean(), "float32") + 0.01 * (w**2).sum() # The cost to optimize
gw, gb = at.grad(cost, [w, b])
# Compile expressions to functions
train = aesara.function(
inputs=[x, y],
outputs=[prediction, xent],
updates={w: w - 0.01 * gw, b: b - 0.01 * gb},
name="train",
)
predict = aesara.function(inputs=[x], outputs=prediction, name="predict")
if any(
x.op.__class__.__name__ in ("Gemv", "CGemv", "Gemm", "CGemm")
for x in train.maker.fgraph.toposort()
):
print("Used the cpu")
else:
print("ERROR, not able to tell if aesara used the cpu or another device")
print(train.maker.fgraph.toposort())
for i in range(training_steps):
pred, err = train(D[0], D[1])
# print "Final model:"
# print w.get_value(), b.get_value()
print("target values for D")
print(D[1])
print("prediction on D")
print(predict(D[0]))
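# --- Hedged note (not part of the original solution) ---
# The exercise this file solves is about compilation modes; an assumed way to
# compare them is to re-run the script with the mode overridden, either from
# the environment:
#   AESARA_FLAGS='mode=FAST_COMPILE' python modes_solution_1.py
#   AESARA_FLAGS='mode=FAST_RUN'     python modes_solution_1.py
# or programmatically, before any aesara.function() call:
#   aesara.config.mode = "FAST_RUN"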
|
59ed74abf1f3e36d7f27af291db5dc04e6d7c37b
|
7d90019c8f480a4dd65202a901b37dae1c1f6064
|
/vctk_preprocess/prepare_vctk_labels.py
|
c715f31413b29cd89cb81ca91b11a15aea7e9a02
|
[
"MIT"
] |
permissive
|
r9y9/deepvoice3_pytorch
|
d3e85f54d46e809f6fffc0d619e0b4a9d1b13488
|
f90255c96177c344cd18b5a52651b420a4d8062d
|
refs/heads/master
| 2023-08-23T08:00:32.174896
| 2023-06-29T18:32:26
| 2023-06-29T18:32:26
| 108,992,863
| 1,964
| 511
|
NOASSERTION
| 2023-08-11T16:51:15
| 2017-10-31T12:31:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,412
|
py
|
prepare_vctk_labels.py
|
# coding: utf-8
"""
Prepare HTS alignments for VCTK.
usage: prepare_vctk_labels.py [options] <data_root> <out_dir>
options:
-h, --help Show help message.
"""
from docopt import docopt
import os
from nnmnkwii.datasets import vctk
from os.path import join, exists, splitext, basename
import sys
from glob import glob
from subprocess import Popen, PIPE
from tqdm import tqdm
def do(cmd):
print(cmd)
p = Popen(cmd, shell=True)
p.wait()
if __name__ == "__main__":
args = docopt(__doc__)
data_root = args["<data_root>"]
out_dir = args["<out_dir>"]
for idx in tqdm(range(len(vctk.available_speakers))):
speaker = vctk.available_speakers[idx]
wav_root = join(data_root, "wav48/p{}".format(speaker))
txt_root = join(data_root, "txt/p{}".format(speaker))
assert exists(wav_root)
assert exists(txt_root)
print(wav_root, txt_root)
# Do alignments
cmd = "python ./extract_feats.py -w {} -t {}".format(wav_root, txt_root)
do(cmd)
# Copy
lab_dir = join(out_dir, "p{}".format(speaker))
if not exists(lab_dir):
os.makedirs(lab_dir)
cmd = "cp ./latest_features/merlin/misc/scripts/alignment/phone_align/full-context-labels/mono/*.lab {}".format(
lab_dir)
do(cmd)
# Remove
do("rm -rf ./latest_features")
sys.exit(0)
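# --- Hedged usage note (not part of the original script) ---
# Example invocation, assuming the VCTK corpus was extracted so that wav48/ and
# txt/ sit directly under ./VCTK-Corpus, and labels should be written to ./labels:
#   python prepare_vctk_labels.py ./VCTK-Corpus ./labels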
|
1660c01b8404107d1d542b4421cc1eb48633599c
|
8eccea9f715a2a0ce602f1944ed3e812adcacb4d
|
/tests/api/v2/test_knowledge.py
|
70ccbf4cb5498931247e26cb6a9786df50cf4a43
|
[
"Apache-2.0"
] |
permissive
|
mitre/caldera
|
c466cde25bb0191880984cfdf3af84efc8a7c9f4
|
3140411d4b96d8d5607b2b50476f7bf3d506de00
|
refs/heads/master
| 2023-08-23T02:14:23.360314
| 2023-08-21T18:55:29
| 2023-08-21T18:55:29
| 112,409,981
| 4,685
| 1,046
|
Apache-2.0
| 2023-09-13T16:36:05
| 2017-11-29T01:25:10
|
Python
|
UTF-8
|
Python
| false
| false
| 13,295
|
py
|
test_knowledge.py
|
import pytest
from aiohttp import web
from app.service.app_svc import AppService
from app.service.auth_svc import AuthService, CONFIG_API_KEY_RED
from app.service.file_svc import FileSvc
from app.service.data_svc import DataService
from app.service.event_svc import EventService
from app.service.contact_svc import ContactService
from app.utility.base_service import BaseService
from app.utility.base_world import BaseWorld
from app.api.v2.handlers.fact_api import FactApi
from app.api.v2.responses import json_request_validation_middleware
from app.api.v2.security import authentication_required_middleware_factory
from app.objects.secondclass.c_fact import WILDCARD_STRING
from app.service.knowledge_svc import KnowledgeService
cakr = 'abc123'
headers = {'key': cakr, 'Content-Type': 'application/json'}
@pytest.fixture
def base_world():
BaseWorld.apply_config(
name='main',
config={
CONFIG_API_KEY_RED: cakr,
'users': {
'red': {'reduser': 'redpass'},
'blue': {'blueuser': 'bluepass'}
},
'crypt_salt': 'thisisdefinitelynotkosher', # Salt for file service instantiation
'encryption_key': 'andneitheristhis', # fake encryption key for file service instantiation
}
)
yield BaseWorld
BaseWorld.clear_config()
@pytest.fixture
async def knowledge_webapp(event_loop, base_world, data_svc):
app_svc = AppService(web.Application())
app_svc.add_service('auth_svc', AuthService())
app_svc.add_service('knowledge_svc', KnowledgeService())
app_svc.add_service('data_svc', DataService())
app_svc.add_service('event_svc', EventService())
app_svc.add_service('contact_svc', ContactService())
app_svc.add_service('file_svc', FileSvc())  # This needs to be done this way, or we won't have a valid BaseWorld
services = app_svc.get_services()
app = web.Application(
middlewares=[
authentication_required_middleware_factory(services['auth_svc']),
json_request_validation_middleware
]
)
FactApi(services).add_routes(app)
await app_svc.register_contacts()
return app
async def test_display_facts(knowledge_webapp, aiohttp_client, fire_event_mock):
client = await aiohttp_client(knowledge_webapp)
fact_data = {
'trait': 'demo',
'value': 'test'
}
await client.post('/facts', json=fact_data, headers=headers)
resp = await client.get('/facts', json=fact_data, headers=headers)
data = await resp.json()
response = data['found']
assert len(response) == 1
assert response[0]['trait'] == 'demo'
assert response[0]['value'] == 'test'
assert response[0]['source'] == WILDCARD_STRING
async def test_display_operation_facts(knowledge_webapp, aiohttp_client, fire_event_mock):
client = await aiohttp_client(knowledge_webapp)
op_id_test = 'this_is_a_valid_operation_id'
fact_data = {
'trait': 'demo',
'value': 'test',
'source': op_id_test
}
await client.post('/facts', json=fact_data, headers=headers)
resp = await client.get(f'/facts/{op_id_test}', headers=headers)
data = await resp.json()
response = data['found']
assert len(response) == 1
assert response[0]['trait'] == 'demo'
assert response[0]['value'] == 'test'
assert response[0]['source'] == op_id_test
async def test_display_relationships(knowledge_webapp, aiohttp_client, fire_event_mock):
client = await aiohttp_client(knowledge_webapp)
op_id_test = 'this_is_a_valid_operation_id'
fact_data_a = {
'trait': 'a',
'value': '1',
}
fact_data_b = {
'trait': 'b',
'value': '2'
}
relationship_data = {
'source': fact_data_a,
'edge': 'gamma',
'target': fact_data_b,
'origin': op_id_test
}
await client.post('/relationships', json=relationship_data, headers=headers)
resp = await client.get('/relationships', json=relationship_data, headers=headers)
data = await resp.json()
response = data['found']
assert len(response) == 1
assert response[0]['source']['trait'] == 'a'
assert response[0]['source']['value'] == '1'
assert response[0]['edge'] == 'gamma'
assert response[0]['origin'] == 'this_is_a_valid_operation_id'
assert response[0]['source']['source'] == 'this_is_a_valid_operation_id'
async def test_display_operation_relationships(knowledge_webapp, aiohttp_client, fire_event_mock):
client = await aiohttp_client(knowledge_webapp)
op_id_test = 'this_is_a_valid_operation_id'
fact_data_a = {
'trait': 'a',
'value': '1',
'source': op_id_test
}
fact_data_b = {
'trait': 'b',
'value': '2',
'source': op_id_test
}
relationship_data = {
'source': fact_data_a,
'edge': 'gamma',
'target': fact_data_b,
'origin': op_id_test
}
await client.post('/relationships', json=relationship_data, headers=headers)
resp = await client.get(f'/relationships/{op_id_test}', headers=headers)
data = await resp.json()
response = data['found']
assert len(response) == 1
assert response[0]['source']['trait'] == fact_data_a['trait']
assert response[0]['source']['value'] == fact_data_a['value']
assert response[0]['target']['trait'] == fact_data_b['trait']
assert response[0]['target']['value'] == fact_data_b['value']
assert response[0]['edge'] == relationship_data['edge']
assert response[0]['origin'] == op_id_test
assert response[0]['source']['source'] == op_id_test
assert response[0]['target']['source'] == op_id_test
async def test_remove_fact(knowledge_webapp, aiohttp_client, fire_event_mock):
client = await aiohttp_client(knowledge_webapp)
fact_data = {
'trait': 'demo',
'value': 'test'
}
init = await client.post('/facts', json=fact_data, headers=headers)
pre = await init.json()
subs = await client.delete('/facts', json=fact_data, headers=headers)
post = await subs.json()
tmp = await client.get('/facts', json=fact_data, headers=headers)
cur = await tmp.json()
current = cur['found']
start = pre['added']
end = post['removed']
assert len(start) == 1
assert len(end) == 1
assert len(current) == 0
assert start == end
async def test_remove_relationship(knowledge_webapp, aiohttp_client, fire_event_mock):
client = await aiohttp_client(knowledge_webapp)
op_id_test = 'this_is_a_valid_operation_id'
fact_data_a = {
'trait': 'a',
'value': '1',
}
fact_data_b = {
'trait': 'b',
'value': '2'
}
relationship_data = {
'source': fact_data_a,
'edge': 'alpha',
'target': fact_data_b,
'origin': op_id_test
}
init = await client.post('/relationships', json=relationship_data, headers=headers)
pre = await init.json()
subs = await client.delete('/relationships', json=dict(edge='alpha'), headers=headers)
post = await subs.json()
resp = await client.get('/relationships', json=relationship_data, headers=headers)
cur = await resp.json()
start = pre['added']
end = post['removed']
current = cur['found']
assert len(start) == 1
assert len(end) == 1
assert len(current) == 0
assert start == end
async def test_add_fact(knowledge_webapp, aiohttp_client, fire_event_mock):
client = await aiohttp_client(knowledge_webapp)
fact_data = {
'trait': 'demo',
'value': 'test'
}
resp = await client.post('/facts', json=fact_data, headers=headers)
data = await resp.json()
response = data['added']
assert len(response) == 1
assert response[0]['trait'] == 'demo'
assert response[0]['value'] == 'test'
tmp = await client.get('/facts', json=fact_data, headers=headers)
cur = await tmp.json()
current = cur['found']
assert current == response
async def test_add_fact_to_operation(knowledge_webapp, aiohttp_client, test_operation, setup_empty_operation, fire_event_mock):
client = await aiohttp_client(knowledge_webapp)
fact_data = {
'trait': 'demo',
'value': 'test',
'source': test_operation['id']
}
resp = await client.post('/facts', json=fact_data, headers=headers)
data = await resp.json()
response = data['added']
assert len(response) == 1
assert response[0]['trait'] == 'demo'
assert response[0]['value'] == 'test'
assert response[0]['source'] == test_operation['id']
tmp = await client.get('/facts', json=fact_data, headers=headers)
cur = await tmp.json()
current = cur['found']
assert current == response
data_svc = BaseService.get_service('data_svc')
file_svc = BaseService.get_service('file_svc')
matched_operations = await data_svc.locate('operations', {'id': test_operation['id']})
report = await matched_operations[0].report(file_svc, data_svc)
assert response[0] in report['facts']
async def test_add_fact_to_finished_operation(knowledge_webapp, aiohttp_client, setup_finished_operation,
finished_operation_payload, fire_event_mock):
client = await aiohttp_client(knowledge_webapp)
op_id = finished_operation_payload['id']
matched_operations = await BaseService.get_service('data_svc').locate('operations', {'id': op_id})
assert await matched_operations[0].is_finished()
fact_data = {
'trait': 'demo',
'value': 'test',
'source': op_id
}
resp = await client.post('/facts', json=fact_data, headers=headers)
data = await resp.json()
assert 'error' in data
assert 'Cannot add fact to finished operation.' in data['error']
async def test_add_relationship(knowledge_webapp, aiohttp_client, fire_event_mock):
client = await aiohttp_client(knowledge_webapp)
fact_data_a = {
'trait': 'a',
'value': '1',
}
fact_data_b = {
'trait': 'b',
'value': '2'
}
relationship_data = {
'source': fact_data_a,
'edge': 'tango',
'target': fact_data_b
}
expected_response = f"{fact_data_a['trait']}({fact_data_a['value']}) : " \
f"tango : {fact_data_b['trait']}({fact_data_b['value']})"
resp = await client.post('/relationships', json=relationship_data, headers=headers)
data = await resp.json()
response = data['added']
assert len(response) == 1
assert response[0]['source']['trait'] == fact_data_a['trait']
assert response[0]['target']['value'] == fact_data_b['value']
assert response[0]['edge'] == 'tango'
assert response[0]['source']['relationships'] == response[0]['target']['relationships']
assert response[0]['source']['relationships'][0] == expected_response
resp = await client.get('/relationships', json=relationship_data, headers=headers)
cur = await resp.json()
current = cur['found']
assert current == response
async def test_patch_fact(knowledge_webapp, aiohttp_client, fire_event_mock):
client = await aiohttp_client(knowledge_webapp)
fact_data = {
'trait': 'domain.user.name',
'value': 'thomas'
}
patch_data = {
"criteria": {
"trait": "domain.user.name",
"value": "thomas"},
"updates": {
"value": "jacobson"
}
}
await client.post('/facts', json=fact_data, headers=headers)
resp = await client.patch('/facts', json=patch_data, headers=headers)
message = await resp.json()
patched = message['updated']
assert len(patched) == 1
assert patched[0]['value'] == 'jacobson'
tmp = await client.get('/facts', json=dict(trait='domain.user.name'), headers=headers)
cur = await tmp.json()
current = cur['found']
assert len(current) == 1
assert patched == current
async def test_patch_relationship(knowledge_webapp, aiohttp_client, fire_event_mock):
client = await aiohttp_client(knowledge_webapp)
relationship_data = {
"source": {
"trait": "domain.user.name",
"value": "bobross"
},
"edge": "has_password",
"target": {
"trait": "domain.user.password",
"value": "12345"
}
}
patch_data = {
"criteria": {
"edge": "has_password",
"source": {
"value": "bobross"
}
},
"updates": {
"target": {
"value": "54321"
},
"edge": "has_admin_password"
}
}
await client.post('/relationships', json=relationship_data, headers=headers)
resp = await client.patch('/relationships', json=patch_data, headers=headers)
message = await resp.json()
patched = message['updated']
assert len(patched) == 1
assert patched[0]['target']['value'] == '54321'
assert patched[0]['source']['value'] == 'bobross'
assert patched[0]['edge'] == 'has_admin_password'
tmp = await client.get('/relationships', json=dict(edge='has_admin_password'), headers=headers)
cur = await tmp.json()
current = cur['found']
assert len(current) == 1
assert patched == current
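# --- Hedged summary (not part of the original tests) ---
# The request pattern shared by every test above, taken directly from the calls
# they make (nothing beyond that is assumed):
#   headers: {'key': <red API key>, 'Content-Type': 'application/json'}
#   POST   /facts          {"trait": ..., "value": ..., optional "source": <op id>} -> {"added": [...]}
#   GET    /facts          -> {"found": [...]}
#   DELETE /facts          {"trait": ..., "value": ...} -> {"removed": [...]}
#   PATCH  /facts          {"criteria": {...}, "updates": {...}} -> {"updated": [...]}
#   /relationships supports the same verbs with source/edge/target payloads.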
|
ea7f2621012da8d223f14ba46e84528c4415538c
|
63ace5832d453e325681d02f6496a0999b72edcb
|
/bip_utils/utils/crypto/hmac.py
|
1aa49275e96e9e2e87d96ff80fb30cf5acd48656
|
[
"MIT"
] |
permissive
|
ebellocchia/bip_utils
|
c9ec04c687f4247e57434319e36b2abab78f0b32
|
d15c75ddd74e4838c396a0d036ef6faf11b06a4b
|
refs/heads/master
| 2023-09-01T13:38:55.567370
| 2023-08-16T17:04:14
| 2023-08-16T17:04:14
| 251,130,186
| 244
| 88
|
MIT
| 2023-08-23T13:46:19
| 2020-03-29T20:42:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,704
|
py
|
hmac.py
|
# Copyright (c) 2022 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Module for SHA-2 algorithms."""
# Imports
import hashlib
import hmac
from typing import Tuple, Union
from bip_utils.utils.misc import AlgoUtils
HMAC_USE_DIGEST: bool = hasattr(hmac, "digest")
class HmacSha256:
"""
HMAC-SHA256 class.
It computes digests using HMAC-SHA256 algorithm.
"""
@staticmethod
def QuickDigest(key: Union[bytes, str],
data: Union[bytes, str]) -> bytes:
"""
Compute the digest (quick version).
Args:
key (str or bytes) : Key
data (str or bytes): Data
Returns:
bytes: Computed digest
"""
# Use digest if available
if HMAC_USE_DIGEST:
return hmac.digest(AlgoUtils.Encode(key), AlgoUtils.Encode(data), "sha256")
return hmac.new(AlgoUtils.Encode(key), AlgoUtils.Encode(data), hashlib.sha256).digest()
@staticmethod
def DigestSize() -> int:
"""
Get the digest size in bytes.
Returns:
int: Digest size in bytes
"""
return hashlib.sha256().digest_size
class HmacSha512:
"""
HMAC-SHA512 class.
It computes digests using HMAC-SHA512 algorithm.
"""
@staticmethod
def QuickDigest(key: Union[bytes, str],
data: Union[bytes, str]) -> bytes:
"""
Compute the digest (quick version).
Args:
key (str or bytes) : Key
data (str or bytes): Data
Returns:
bytes: Computed digest
"""
# Use digest if available
if HMAC_USE_DIGEST:
return hmac.digest(AlgoUtils.Encode(key), AlgoUtils.Encode(data), "sha512")
return hmac.new(AlgoUtils.Encode(key), AlgoUtils.Encode(data), hashlib.sha512).digest()
@staticmethod
def QuickDigestHalves(key: Union[bytes, str],
data: Union[bytes, str]) -> Tuple[bytes, bytes]:
"""
Compute the digest and return it split into two halves (quick version).
Args:
key (str or bytes) : Key
data (str or bytes): Data
Returns:
tuple[bytes, bytes]: Computed digest left part (index 0) and right part (index 1)
"""
digest_bytes = HmacSha512.QuickDigest(key, data)
return digest_bytes[:HmacSha512.DigestSize() // 2], digest_bytes[HmacSha512.DigestSize() // 2:]
@staticmethod
def DigestSize() -> int:
"""
Get the digest size in bytes.
Returns:
int: Digest size in bytes
"""
return hashlib.sha512().digest_size
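# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the helpers above; key and data values are made up.
if __name__ == "__main__":
    digest = HmacSha256.QuickDigest(b"key", b"message")
    assert len(digest) == HmacSha256.DigestSize()                  # 32 bytes
    left, right = HmacSha512.QuickDigestHalves(b"key", b"message")
    assert len(left) == len(right) == HmacSha512.DigestSize() // 2  # 32 + 32 bytes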
|
738503de84b19570780da5ba3b0a2f1ef89fcb22
|
68c060c440882c127cde9f723e001e736a3e08e6
|
/setup.py
|
e95eb13ebb5df44b758c2aa98a2ca1b31aaf1bb4
|
[
"MIT"
] |
permissive
|
Edinburgh-Genome-Foundry/Flametree
|
06074673c70c13d804e156768a1fc35d51adc383
|
7fa48ab2effd96b210c10372f9d3757ebe1162aa
|
refs/heads/master
| 2023-07-31T22:23:33.977231
| 2022-05-04T13:24:34
| 2022-05-04T13:24:34
| 80,863,792
| 166
| 19
|
NOASSERTION
| 2020-09-12T19:12:28
| 2017-02-03T19:50:06
|
Python
|
UTF-8
|
Python
| false
| false
| 529
|
py
|
setup.py
|
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
exec(open("flametree/version.py").read()) # loads __version__
setup(
name="flametree",
version=__version__,
author="Zulko",
description="Python file and zip operations made easy",
url="https://github.com/Edinburgh-Genome-Foundry/Flametree",
long_description=open("pypi-readme.rst").read(),
license="MIT",
keywords="file system, zip, archive, file, directory",
packages=find_packages(exclude="docs"),
)
|
dc840455d2bc44a06e1934eb7b7b5cc6d918fb13
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/ipykernel/py2/ipykernel/ipkernel.py
|
6304131f9ac24f2d34159ead1c4f6d6e59cfb588
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 15,354
|
py
|
ipkernel.py
|
"""The IPython kernel implementation"""
import getpass
import sys
from IPython.core import release
from ipython_genutils.py3compat import builtin_mod, PY3, unicode_type, safe_unicode
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
from traitlets import Instance, Type, Any, List, Bool
from .comm import CommManager
from .kernelbase import Kernel as KernelBase
from .zmqshell import ZMQInteractiveShell
try:
from IPython.core.completer import rectify_completions as _rectify_completions, provisionalcompleter as _provisionalcompleter
_use_experimental_60_completion = True
except ImportError:
_use_experimental_60_completion = False
_EXPERIMENTAL_KEY_NAME = '_jupyter_types_experimental'
class IPythonKernel(KernelBase):
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
shell_class = Type(ZMQInteractiveShell)
use_experimental_completions = Bool(True,
help="Set this flag to False to deactivate the use of experimental IPython completion APIs.",
).tag(config=True)
user_module = Any()
def _user_module_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_module = new
user_ns = Instance(dict, args=None, allow_none=True)
def _user_ns_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_ns = new
self.shell.init_user_ns()
# A reference to the Python builtin 'raw_input' function.
# (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
_sys_raw_input = Any()
_sys_eval_input = Any()
def __init__(self, **kwargs):
super(IPythonKernel, self).__init__(**kwargs)
# Initialize the InteractiveShell subclass
self.shell = self.shell_class.instance(parent=self,
profile_dir = self.profile_dir,
user_module = self.user_module,
user_ns = self.user_ns,
kernel = self,
)
self.shell.displayhook.session = self.session
self.shell.displayhook.pub_socket = self.iopub_socket
self.shell.displayhook.topic = self._topic('execute_result')
self.shell.display_pub.session = self.session
self.shell.display_pub.pub_socket = self.iopub_socket
self.comm_manager = CommManager(parent=self, kernel=self)
self.shell.configurables.append(self.comm_manager)
comm_msg_types = [ 'comm_open', 'comm_msg', 'comm_close' ]
for msg_type in comm_msg_types:
self.shell_handlers[msg_type] = getattr(self.comm_manager, msg_type)
help_links = List([
{
'text': "Python Reference",
'url': "https://docs.python.org/%i.%i" % sys.version_info[:2],
},
{
'text': "IPython Reference",
'url': "https://ipython.org/documentation.html",
},
{
'text': "NumPy Reference",
'url': "https://docs.scipy.org/doc/numpy/reference/",
},
{
'text': "SciPy Reference",
'url': "https://docs.scipy.org/doc/scipy/reference/",
},
{
'text': "Matplotlib Reference",
'url': "https://matplotlib.org/contents.html",
},
{
'text': "SymPy Reference",
'url': "http://docs.sympy.org/latest/index.html",
},
{
'text': "pandas Reference",
'url': "https://pandas.pydata.org/pandas-docs/stable/",
},
]).tag(config=True)
# Kernel info fields
implementation = 'ipython'
implementation_version = release.version
language_info = {
'name': 'python',
'version': sys.version.split()[0],
'mimetype': 'text/x-python',
'codemirror_mode': {
'name': 'ipython',
'version': sys.version_info[0]
},
'pygments_lexer': 'ipython%d' % (3 if PY3 else 2),
'nbconvert_exporter': 'python',
'file_extension': '.py'
}
@property
def banner(self):
return self.shell.banner
def start(self):
self.shell.exit_now = False
super(IPythonKernel, self).start()
def set_parent(self, ident, parent):
"""Overridden from parent to tell the display hook and output streams
about the parent message.
"""
super(IPythonKernel, self).set_parent(ident, parent)
self.shell.set_parent(parent)
def init_metadata(self, parent):
"""Initialize metadata.
Run at the beginning of each execution request.
"""
md = super(IPythonKernel, self).init_metadata(parent)
# FIXME: remove deprecated ipyparallel-specific code
# This is required for ipyparallel < 5.0
md.update({
'dependencies_met' : True,
'engine' : self.ident,
})
return md
def finish_metadata(self, parent, metadata, reply_content):
"""Finish populating metadata.
Run after completing an execution request.
"""
# FIXME: remove deprecated ipyparallel-specific code
# This is required by ipyparallel < 5.0
metadata['status'] = reply_content['status']
if reply_content['status'] == 'error' and reply_content['ename'] == 'UnmetDependency':
metadata['dependencies_met'] = False
return metadata
def _forward_input(self, allow_stdin=False):
"""Forward raw_input and getpass to the current frontend.
via input_request
"""
self._allow_stdin = allow_stdin
if PY3:
self._sys_raw_input = builtin_mod.input
builtin_mod.input = self.raw_input
else:
self._sys_raw_input = builtin_mod.raw_input
self._sys_eval_input = builtin_mod.input
builtin_mod.raw_input = self.raw_input
builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt))
self._save_getpass = getpass.getpass
getpass.getpass = self.getpass
def _restore_input(self):
"""Restore raw_input, getpass"""
if PY3:
builtin_mod.input = self._sys_raw_input
else:
builtin_mod.raw_input = self._sys_raw_input
builtin_mod.input = self._sys_eval_input
getpass.getpass = self._save_getpass
@property
def execution_count(self):
return self.shell.execution_count
@execution_count.setter
def execution_count(self, value):
# Ignore the incrementing done by KernelBase, in favour of our shell's
# execution counter.
pass
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
shell = self.shell # we'll need this a lot here
self._forward_input(allow_stdin)
reply_content = {}
try:
res = shell.run_cell(code, store_history=store_history, silent=silent)
finally:
self._restore_input()
if res.error_before_exec is not None:
err = res.error_before_exec
else:
err = res.error_in_exec
if res.success:
reply_content[u'status'] = u'ok'
else:
reply_content[u'status'] = u'error'
reply_content.update({
u'traceback': shell._last_traceback or [],
u'ename': unicode_type(type(err).__name__),
u'evalue': safe_unicode(err),
})
# FIXME: deprecated piece for ipyparallel (remove in 5.0):
e_info = dict(engine_uuid=self.ident, engine_id=self.int_id,
method='execute')
reply_content['engine_info'] = e_info
# Return the execution counter so clients can display prompts
reply_content['execution_count'] = shell.execution_count - 1
if 'traceback' in reply_content:
self.log.info("Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))
# At this point, we can tell whether the main code execution succeeded
# or not. If it did, we proceed to evaluate user_expressions
if reply_content['status'] == 'ok':
reply_content[u'user_expressions'] = \
shell.user_expressions(user_expressions or {})
else:
# If there was an error, don't even try to compute expressions
reply_content[u'user_expressions'] = {}
# Payloads should be retrieved regardless of outcome, so we can both
# recover partial output (that could have been generated early in a
# block, before an error) and always clear the payload system.
reply_content[u'payload'] = shell.payload_manager.read_payload()
# Be aggressive about clearing the payload because we don't want
# it to sit in memory until the next execute_request comes in.
shell.payload_manager.clear_payload()
return reply_content
def do_complete(self, code, cursor_pos):
if _use_experimental_60_completion and self.use_experimental_completions:
return self._experimental_do_complete(code, cursor_pos)
# FIXME: IPython completers currently assume single line,
# but completion messages give multi-line context
# For now, extract line from cell, based on cursor_pos:
if cursor_pos is None:
cursor_pos = len(code)
line, offset = line_at_cursor(code, cursor_pos)
line_cursor = cursor_pos - offset
txt, matches = self.shell.complete('', line, line_cursor)
return {'matches' : matches,
'cursor_end' : cursor_pos,
'cursor_start' : cursor_pos - len(txt),
'metadata' : {},
'status' : 'ok'}
def _experimental_do_complete(self, code, cursor_pos):
"""
Experimental completions from IPython, using Jedi.
"""
if cursor_pos is None:
cursor_pos = len(code)
with _provisionalcompleter():
raw_completions = self.shell.Completer.completions(code, cursor_pos)
completions = list(_rectify_completions(code, raw_completions))
comps = []
for comp in completions:
comps.append(dict(
start=comp.start,
end=comp.end,
text=comp.text,
type=comp.type,
))
if completions:
s = completions[0].start
e = completions[0].end
matches = [c.text for c in completions]
else:
s = cursor_pos
e = cursor_pos
matches = []
return {'matches': matches,
'cursor_end': e,
'cursor_start': s,
'metadata': {_EXPERIMENTAL_KEY_NAME: comps},
'status': 'ok'}
def do_inspect(self, code, cursor_pos, detail_level=0):
name = token_at_cursor(code, cursor_pos)
info = self.shell.object_inspect(name)
reply_content = {'status' : 'ok'}
reply_content['data'] = data = {}
reply_content['metadata'] = {}
reply_content['found'] = info['found']
if info['found']:
info_text = self.shell.object_inspect_text(
name,
detail_level=detail_level,
)
data['text/plain'] = info_text
return reply_content
def do_history(self, hist_access_type, output, raw, session=0, start=0,
stop=None, n=None, pattern=None, unique=False):
if hist_access_type == 'tail':
hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
include_latest=True)
elif hist_access_type == 'range':
hist = self.shell.history_manager.get_range(session, start, stop,
raw=raw, output=output)
elif hist_access_type == 'search':
hist = self.shell.history_manager.search(
pattern, raw=raw, output=output, n=n, unique=unique)
else:
hist = []
return {
'status': 'ok',
'history' : list(hist),
}
def do_shutdown(self, restart):
self.shell.exit_now = True
return dict(status='ok', restart=restart)
def do_is_complete(self, code):
status, indent_spaces = self.shell.input_splitter.check_complete(code)
r = {'status': status}
if status == 'incomplete':
r['indent'] = ' ' * indent_spaces
return r
def do_apply(self, content, bufs, msg_id, reply_metadata):
from .serialize import serialize_object, unpack_apply_message
shell = self.shell
try:
working = shell.user_ns
prefix = "_"+str(msg_id).replace("-","")+"_"
f,args,kwargs = unpack_apply_message(bufs, working, copy=False)
fname = getattr(f, '__name__', 'f')
fname = prefix+"f"
argname = prefix+"args"
kwargname = prefix+"kwargs"
resultname = prefix+"result"
ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
# print ns
working.update(ns)
code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname)
try:
exec(code, shell.user_global_ns, shell.user_ns)
result = working.get(resultname)
finally:
for key in ns:
working.pop(key)
result_buf = serialize_object(result,
buffer_threshold=self.session.buffer_threshold,
item_threshold=self.session.item_threshold,
)
except BaseException as e:
# invoke IPython traceback formatting
shell.showtraceback()
reply_content = {
u'traceback': shell._last_traceback or [],
u'ename': unicode_type(type(e).__name__),
u'evalue': safe_unicode(e),
}
# FIXME: deprecated piece for ipyparallel (remove in 5.0):
e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply')
reply_content['engine_info'] = e_info
self.send_response(self.iopub_socket, u'error', reply_content,
ident=self._topic('error'))
self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
result_buf = []
reply_content['status'] = 'error'
else:
reply_content = {'status' : 'ok'}
return reply_content, result_buf
def do_clear(self):
self.shell.reset(False)
return dict(status='ok')
# This exists only for backwards compatibility - use IPythonKernel instead
class Kernel(IPythonKernel):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn('Kernel is a deprecated alias of ipykernel.ipkernel.IPythonKernel',
DeprecationWarning)
super(Kernel, self).__init__(*args, **kwargs)
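# Illustrative only (added): the shape of a successful execute_reply as assembled
# by IPythonKernel.do_execute above. Values are placeholders, not real output.
_EXAMPLE_EXECUTE_REPLY = {
    'status': 'ok',
    'execution_count': 3,     # shell.execution_count - 1
    'user_expressions': {},   # shell.user_expressions(...)
    'payload': [],            # shell.payload_manager.read_payload()
}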
|
f91b025f0934e81ffdc6c28d0a3ffba850fae349
|
bbd69601912a3361d788efd03a47f9d4e3bac09e
|
/docs/sphinx/rest_substitutions/snippets/python/converted/wx.MemoryFSHandler.1.py
|
6ad3ee1251e74b927316a773e768cbc3ef3dbf1d
|
[] |
no_license
|
wxWidgets/Phoenix
|
56929484460a0399a8f1d9582bc77c20aa14748d
|
a1184286703cf24c4b88e5bc14cf2979c1b1ea00
|
refs/heads/master
| 2023-09-01T07:10:17.437093
| 2023-08-31T05:38:01
| 2023-08-31T05:38:01
| 5,078,061
| 2,268
| 677
| null | 2023-09-09T17:06:59
| 2012-07-17T06:22:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,357
|
py
|
wx.MemoryFSHandler.1.py
|
def OnAbout(self, event):
bcur = wx.BeginBusyCursor()
    wx.FileSystem.AddHandler(wx.MemoryFSHandler())
wx.MemoryFSHandler.AddFile("logo.pcx", wx.Bitmap("logo.pcx", wx.BITMAP_TYPE_PCX))
wx.MemoryFSHandler.AddFile("about.htm",
"<html><body>About: "
"<img src=\"memory:logo.pcx\"></body></html>")
dlg = wx.Dialog(self, -1, _("About"))
topsizer = wx.BoxSizer(wx.VERTICAL)
html = wx.html.HtmlWindow(dlg, size=wx.Size(380, 160), style=wx.HW_SCROLLBAR_NEVER)
html.SetBorders(0)
html.LoadPage("memory:about.htm")
html.SetSize(html.GetInternalRepresentation().GetWidth(),
html.GetInternalRepresentation().GetHeight())
topsizer.Add(html, 1, wx.ALL, 10)
topsizer.Add(wx.StaticLine(dlg, -1), 0, wx.EXPAND | wx.LEFT | wx.RIGHT, 10)
topsizer.Add(wx.Button(dlg, wx.ID_OK, "Ok"),
0, wx.ALL | wx.ALIGN_RIGHT, 15)
dlg.SetAutoLayout(True)
dlg.SetSizer(topsizer)
topsizer.Fit(dlg)
dlg.Centre()
dlg.ShowModal()
wx.MemoryFSHandler.RemoveFile("logo.pcx")
wx.MemoryFSHandler.RemoveFile("about.htm")
|
4ac3cf0a96a614431d679339cf5a6293b3641e5a
|
6633e6d880c109a6dafd8849bdc9b5845926ba87
|
/tests/test_errors.py
|
f6810c9194432146d3cd43e92213b14745a4e22a
|
[
"MIT"
] |
permissive
|
camelot-dev/camelot
|
4527c41268bf177ddd2edff54d6f557ab1d7b737
|
0f96c0025108260d4e4d23d90c3fbc7db9286f00
|
refs/heads/master
| 2023-08-04T23:44:38.518967
| 2023-07-15T06:01:34
| 2023-07-15T06:01:34
| 194,679,925
| 2,308
| 415
|
MIT
| 2023-09-14T21:35:16
| 2019-07-01T13:39:33
|
Python
|
UTF-8
|
Python
| false
| false
| 5,252
|
py
|
test_errors.py
|
# -*- coding: utf-8 -*-
import os
import sys
import warnings
import pytest
import camelot
testdir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(testdir, "files")
filename = os.path.join(testdir, "foo.pdf")
skip_on_windows = pytest.mark.skipif(
sys.platform.startswith("win"),
reason="Ghostscript not installed in Windows test environment",
)
def test_unknown_flavor():
message = "Unknown flavor specified." " Use either 'lattice' or 'stream'"
with pytest.raises(NotImplementedError, match=message):
tables = camelot.read_pdf(filename, flavor="chocolate")
def test_input_kwargs():
message = "columns cannot be used with flavor='lattice'"
with pytest.raises(ValueError, match=message):
tables = camelot.read_pdf(filename, columns=["10,20,30,40"])
def test_unsupported_format():
message = "File format not supported"
filename = os.path.join(testdir, "foo.csv")
with pytest.raises(NotImplementedError, match=message):
tables = camelot.read_pdf(filename)
@skip_on_windows
def test_no_tables_found_logs_suppressed():
filename = os.path.join(testdir, "foo.pdf")
with warnings.catch_warnings():
# the test should fail if any warning is thrown
warnings.simplefilter("error")
try:
tables = camelot.read_pdf(filename, suppress_stdout=True)
except Warning as e:
warning_text = str(e)
pytest.fail(f"Unexpected warning: {warning_text}")
def test_no_tables_found_warnings_suppressed():
filename = os.path.join(testdir, "empty.pdf")
with warnings.catch_warnings():
# the test should fail if any warning is thrown
warnings.simplefilter("error")
try:
tables = camelot.read_pdf(filename, suppress_stdout=True)
except Warning as e:
warning_text = str(e)
pytest.fail(f"Unexpected warning: {warning_text}")
def test_no_password():
filename = os.path.join(testdir, "health_protected.pdf")
message = "File has not been decrypted"
with pytest.raises(Exception, match=message):
tables = camelot.read_pdf(filename)
def test_bad_password():
filename = os.path.join(testdir, "health_protected.pdf")
message = "File has not been decrypted"
with pytest.raises(Exception, match=message):
tables = camelot.read_pdf(filename, password="wrongpass")
def test_stream_equal_length():
message = "Length of table_areas and columns" " should be equal"
with pytest.raises(ValueError, match=message):
tables = camelot.read_pdf(
filename,
flavor="stream",
table_areas=["10,20,30,40"],
columns=["10,20,30,40", "10,20,30,40"],
)
def test_image_warning():
filename = os.path.join(testdir, "image.pdf")
with warnings.catch_warnings():
warnings.simplefilter("error", category=UserWarning)
with pytest.raises(UserWarning) as e:
tables = camelot.read_pdf(filename)
assert (
str(e.value)
== "page-1 is image-based, camelot only works on text-based pages."
)
def test_stream_no_tables_on_page():
filename = os.path.join(testdir, "empty.pdf")
with warnings.catch_warnings():
warnings.simplefilter("error")
with pytest.raises(UserWarning) as e:
tables = camelot.read_pdf(filename, flavor="stream")
assert str(e.value) == "No tables found on page-1"
def test_stream_no_tables_in_area():
filename = os.path.join(testdir, "only_page_number.pdf")
with warnings.catch_warnings():
warnings.simplefilter("error")
with pytest.raises(UserWarning) as e:
tables = camelot.read_pdf(filename, flavor="stream")
assert str(e.value) == "No tables found in table area 1"
def test_lattice_no_tables_on_page():
filename = os.path.join(testdir, "empty.pdf")
with warnings.catch_warnings():
warnings.simplefilter("error", category=UserWarning)
with pytest.raises(UserWarning) as e:
tables = camelot.read_pdf(filename, flavor="lattice")
assert str(e.value) == "No tables found on page-1"
def test_lattice_unknown_backend():
message = "Unknown backend 'mupdf' specified. Please use either 'poppler' or 'ghostscript'."
with pytest.raises(NotImplementedError, match=message):
tables = camelot.read_pdf(filename, backend="mupdf")
def test_lattice_no_convert_method():
class ConversionBackend(object):
pass
message = "must implement a 'convert' method"
with pytest.raises(NotImplementedError, match=message):
tables = camelot.read_pdf(filename, backend=ConversionBackend())
def test_lattice_ghostscript_deprecation_warning():
ghostscript_deprecation_warning = (
"'ghostscript' will be replaced by 'poppler' as the default image conversion"
" backend in v0.12.0. You can try out 'poppler' with backend='poppler'."
)
with warnings.catch_warnings():
warnings.simplefilter("error")
with pytest.raises(DeprecationWarning) as e:
tables = camelot.read_pdf(filename)
assert str(e.value) == ghostscript_deprecation_warning
|
f64a0d2c3b64205d534b96e4e71020db40e374a9
|
8da41ffa2ccb09e04f95db0f211e0ed69a42a352
|
/blogs/gcp_forecasting/scalable_time_series.py
|
4542af90992d2b288c080754abf042c2e3866381
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/training-data-analyst
|
808af9b09a0e5f5657c4ca76cdd205f808d76d89
|
975a95032ce5b7012d1772c7f1f5cfe606eae839
|
refs/heads/master
| 2023-09-05T19:50:59.722334
| 2023-09-04T14:25:33
| 2023-09-04T14:25:33
| 56,459,948
| 7,311
| 5,917
|
Apache-2.0
| 2023-09-13T21:45:54
| 2016-04-17T21:39:27
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 11,188
|
py
|
scalable_time_series.py
|
"""Queries for scalable time-series modeling."""
def create_date_range(project, dataset_name, table_name):
sql_date_range = """
WITH
# Get min and max dates so we can enumerate the range next
CTE_date_limits AS (
SELECT
CAST(MIN(sale_date) AS DATE) AS min_sale_date,
CAST(MAX(sale_date) AS DATE) AS max_sale_date
FROM
`{0}.{1}.{2}` ),
# Expand date range using date bounds
CTE_date_range AS (
SELECT
UNIX_DATE(calendar_date) AS unix_date
FROM
UNNEST(GENERATE_DATE_ARRAY((
SELECT
min_sale_date
FROM
CTE_date_limits), (
SELECT
max_sale_date
FROM
CTE_date_limits), INTERVAL 1 DAY) ) AS calendar_date ),
# Create start and end date ranges for timescale
CTE_start_end_timescale_date_range AS (
SELECT
ROW_NUMBER() OVER (ORDER BY unix_date) - 1 AS timescale_idx,
unix_date AS unix_timescale_start_date,
LEAD(unix_date, 6) OVER (ORDER BY unix_date) AS unix_timescale_end_date
FROM
CTE_date_range )
SELECT
timescale_idx,
unix_timescale_start_date,
unix_timescale_end_date,
DATE_FROM_UNIX_DATE(unix_timescale_start_date) AS timescale_start_date,
DATE_FROM_UNIX_DATE(unix_timescale_end_date) AS timescale_end_date
FROM
CTE_start_end_timescale_date_range
""".format(project, dataset_name, table_name)
return sql_date_range
def bq_create_rolling_features_label(project, dataset, date_range_table, sales_dataset_table, value_name, downsample_size, window_size, horizon, labels_size=1):
feature_pivot_list = ["""SUM(CASE WHEN timestep_idx = {time} - 1 THEN {value_name} ELSE 0.0 END) AS price_ago_{time}""".format(time=time,value_name=value_name) for time in range(window_size, 0, -1)]
label_pivot_list = ["""SUM(CASE WHEN timestep_idx = {time} - 1 THEN {value_name} ELSE 0.0 END) AS price_ahead_{time}""".format(time=time,value_name=value_name) for time in range(1, labels_size + 1)]
feature_list = ["price_ago_{time}".format(time=time) for time in range(window_size, 0, -1)]
label_list = ["price_ahead_{time}".format(time=time) for time in range(1, labels_size + 1)]
new_line = ",\n "
sql_bqml_sub_sequences = """
WITH
# Create sequence date ranges
CTE_seq_date_ranges AS (
SELECT
A.timescale_idx AS seq_idx,
ROW_NUMBER() OVER (PARTITION BY A.timescale_idx ORDER BY B.unix_timescale_start_date) - 1 AS timestep_idx,
B.timescale_idx AS timescale_idx,
A.unix_timescale_start_date AS seq_unix_start_date,
B.unix_timescale_start_date AS timestep_unix_start_date,
B.unix_timescale_end_date AS timestep_unix_end_date
FROM
`{project}.{dataset}.{date_range_table}` AS A
INNER JOIN
`{project}.{dataset}.{date_range_table}` AS B
ON
MOD(B.unix_timescale_start_date - A.unix_timescale_start_date, {downsample_size}) = 0
AND B.unix_timescale_start_date >= A.unix_timescale_start_date
WHERE
B.unix_timescale_end_date IS NOT NULL),
# Create sequence date ranges for features data
CTE_seq_features_date_range AS (
SELECT
*
FROM
CTE_seq_date_ranges
WHERE
timestep_idx < {window_size}),
# Create sequence date ranges for labels data
CTE_seq_labels_date_range AS (
SELECT
*
FROM
CTE_seq_date_ranges
WHERE
timestep_idx < {labels_size}),
# Join timescale information with data to be aggregated over timescale
CTE_timescale_joined_data AS (
SELECT
timescale_idx,
unix_timescale_start_date,
unix_timescale_end_date,
all_sales.sale_price
FROM
`{project}.{dataset}.{date_range_table}` AS start_end_timescale_date_range
INNER JOIN
`{project}.{sales_dataset_table}` AS all_sales
ON
start_end_timescale_date_range.unix_timescale_start_date <= UNIX_DATE(CAST(all_sales.sale_date AS DATE))
AND UNIX_DATE(CAST(all_sales.sale_date AS DATE)) <= start_end_timescale_date_range.unix_timescale_end_date),
# Group data we want aggregated over timescale
CTE_grouped_data AS (
SELECT
timescale_idx,
unix_timescale_start_date,
unix_timescale_end_date,
APPROX_QUANTILES(sale_price, 100)[
OFFSET
(50)] AS {value_name}
FROM
CTE_timescale_joined_data
GROUP BY
timescale_idx,
unix_timescale_start_date,
unix_timescale_end_date),
# Join features data to features date ranges
CTE_features AS (
SELECT
A.seq_idx,
A.timestep_idx,
A.seq_unix_start_date,
IFNULL(B.{value_name},0) AS {value_name}
FROM
CTE_seq_features_date_range AS A
INNER JOIN
CTE_grouped_data AS B
ON
A.timescale_idx = B.timescale_idx),
# Aggregate features data into sequences
CTE_seq_features AS (
SELECT
seq_idx,
seq_unix_start_date,
{feature_pivot_list}
FROM
CTE_features
GROUP BY
seq_idx,
seq_unix_start_date),
# Join labels data to labels date ranges
CTE_labels AS (
SELECT
A.seq_idx,
A.timestep_idx,
A.seq_unix_start_date,
IFNULL(B.{value_name},0) AS {value_name}
FROM
CTE_seq_labels_date_range AS A
INNER JOIN
CTE_grouped_data AS B
ON
A.timescale_idx = B.timescale_idx),
# Aggregate labels data into sequences
CTE_seq_labels AS (
SELECT
seq_idx,
seq_unix_start_date,
{label_pivot_list}
FROM
CTE_labels
GROUP BY
seq_idx,
seq_unix_start_date)
# Join features with labels with horizon in between
SELECT
DATE_FROM_UNIX_DATE(A.seq_unix_start_date) AS feat_seq_start_date,
DATE_FROM_UNIX_DATE(A.seq_unix_start_date + {window_size} * {downsample_size} - 1) AS feat_seq_end_date,
DATE_FROM_UNIX_DATE(B.seq_unix_start_date) AS lab_seq_start_date,
DATE_FROM_UNIX_DATE(B.seq_unix_start_date + {labels_size} * {downsample_size} - 1) AS lab_seq_end_date,
{feature_list},
{label_list}
FROM
CTE_seq_features AS A
INNER JOIN
CTE_seq_labels AS B
ON
A.seq_unix_start_date + ({window_size} * {downsample_size} - 1) + ({downsample_size} * {horizon}) = B.seq_unix_start_date
ORDER BY
A.seq_idx
""".format(project=project,
dataset=dataset,
date_range_table=date_range_table,
sales_dataset_table=sales_dataset_table,
value_name=value_name,
downsample_size=downsample_size,
window_size=window_size,
horizon=horizon,
labels_size=labels_size,
feature_pivot_list=new_line.join(feature_pivot_list),
label_pivot_list=new_line.join(label_pivot_list),
feature_list=new_line.join(feature_list),
label_list=new_line.join(label_list))
return sql_bqml_sub_sequences
def csv_create_rolling_features_label(project, dataset, date_range_table, sales_dataset_table, value_name, downsample_size, window_size, horizon, labels_size=1):
sql_csv_sub_sequences = """
WITH
# Create sequence date ranges
CTE_seq_date_ranges AS (
SELECT
A.timescale_idx AS seq_idx,
ROW_NUMBER() OVER (PARTITION BY A.timescale_idx ORDER BY B.unix_timescale_start_date) - 1 AS timestep_idx,
B.timescale_idx AS timescale_idx,
A.unix_timescale_start_date AS seq_unix_start_date,
B.unix_timescale_start_date AS timestep_unix_start_date,
B.unix_timescale_end_date AS timestep_unix_end_date
FROM
`{project}.{dataset}.{date_range_table}` AS A
INNER JOIN
`{project}.{dataset}.{date_range_table}` AS B
ON
MOD(B.unix_timescale_start_date - A.unix_timescale_start_date, {downsample_size}) = 0
AND B.unix_timescale_start_date >= A.unix_timescale_start_date
WHERE
B.unix_timescale_end_date IS NOT NULL),
# Create sequence date ranges for features data
CTE_seq_features_date_range AS (
SELECT
*
FROM
CTE_seq_date_ranges
WHERE
timestep_idx < {window_size}),
# Create sequence date ranges for labels data
CTE_seq_labels_date_range AS (
SELECT
*
FROM
CTE_seq_date_ranges
WHERE
timestep_idx < {labels_size}),
# Join timescale information with data to be aggregated over timescale
CTE_timescale_joined_data AS (
SELECT
timescale_idx,
unix_timescale_start_date,
unix_timescale_end_date,
all_sales.sale_price
FROM
`{project}.{dataset}.{date_range_table}` AS start_end_timescale_date_range
INNER JOIN
`{project}.{sales_dataset_table}` AS all_sales
ON
start_end_timescale_date_range.unix_timescale_start_date <= UNIX_DATE(CAST(all_sales.sale_date AS DATE))
AND UNIX_DATE(CAST(all_sales.sale_date AS DATE)) <= start_end_timescale_date_range.unix_timescale_end_date),
# Group data we want aggregated over timescale
CTE_grouped_data AS (
SELECT
timescale_idx,
unix_timescale_start_date,
unix_timescale_end_date,
APPROX_QUANTILES(sale_price, 100)[
OFFSET
(50)] AS {value_name}
FROM
CTE_timescale_joined_data
GROUP BY
timescale_idx,
unix_timescale_start_date,
unix_timescale_end_date),
# Join features data to features date ranges
CTE_features AS (
SELECT
A.seq_idx,
A.timestep_idx,
A.seq_unix_start_date,
IFNULL(B.{value_name},
0) AS {value_name}
FROM
CTE_seq_features_date_range AS A
INNER JOIN
CTE_grouped_data AS B
ON
A.timescale_idx = B.timescale_idx),
# Aggregate features data into sequences
CTE_seq_features AS (
SELECT
seq_idx,
seq_unix_start_date,
STRING_AGG(CAST({value_name} AS STRING), ';'
ORDER BY
timestep_idx) AS {value_name}_agg
FROM
CTE_features
GROUP BY
seq_idx,
seq_unix_start_date),
# Join labels data to labels date ranges
CTE_labels AS (
SELECT
A.seq_idx,
A.timestep_idx,
A.seq_unix_start_date,
IFNULL(B.{value_name},
0) AS {value_name}
FROM
CTE_seq_labels_date_range AS A
INNER JOIN
CTE_grouped_data AS B
ON
A.timescale_idx = B.timescale_idx),
# Aggregate labels data into sequences
CTE_seq_labels AS (
SELECT
seq_idx,
seq_unix_start_date,
STRING_AGG(CAST({value_name} AS STRING), ';'
ORDER BY
timestep_idx) AS labels_agg
FROM
CTE_labels
GROUP BY
seq_idx,
seq_unix_start_date)
# Join features with labels with horizon in between
SELECT
DATE_FROM_UNIX_DATE(A.seq_unix_start_date) AS feat_seq_start_date,
DATE_FROM_UNIX_DATE(A.seq_unix_start_date + {window_size} * {downsample_size} - 1) AS feat_seq_end_date,
DATE_FROM_UNIX_DATE(B.seq_unix_start_date) AS lab_seq_start_date,
DATE_FROM_UNIX_DATE(B.seq_unix_start_date + {labels_size} * {downsample_size} - 1) AS lab_seq_end_date,
{value_name}_agg,
labels_agg
FROM
CTE_seq_features AS A
INNER JOIN
CTE_seq_labels AS B
ON
A.seq_unix_start_date + ({window_size} * {downsample_size} - 1) + ({downsample_size} * {horizon}) = B.seq_unix_start_date
ORDER BY
A.seq_idx
""".format(project=project,
dataset=dataset,
date_range_table=date_range_table,
sales_dataset_table=sales_dataset_table,
value_name=value_name,
downsample_size=downsample_size,
window_size=window_size,
horizon=horizon,
labels_size=labels_size)
return sql_csv_sub_sequences
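# Illustrative usage sketch (added): the project/dataset/table names below are
# placeholders, and executing the generated SQL would require a BigQuery client
# (e.g. google.cloud.bigquery), which this module does not import.
if __name__ == '__main__':
    date_range_sql = create_date_range('my-project', 'sales', 'transactions')
    rolling_sql = csv_create_rolling_features_label(
        project='my-project',
        dataset='sales',
        date_range_table='date_range',
        sales_dataset_table='sales.transactions',
        value_name='med_sales_price',
        downsample_size=7,   # 7-day buckets
        window_size=8,       # 8 feature timesteps
        horizon=1,           # predict 1 timestep ahead
        labels_size=1)
    print(date_range_sql)
    print(rolling_sql)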
|
bf180fd0a7137fbf4be601a78708a0a013038ee6
|
7949abb6e441f4981561cf1e8ea840e94a0eeb3f
|
/src/RusPython.py
|
05b0621871fef25d14f5af7fd72be7b72752cc97
|
[
"MIT"
] |
permissive
|
Eleirbag89/RusPython
|
eb909ac2c6001f7f09b9085f3a404693c178e408
|
c4656708faaa7ae14054b93c57c04f668dc0e6f1
|
refs/heads/master
| 2021-07-05T22:05:49.376354
| 2021-05-14T13:07:04
| 2021-05-14T13:07:04
| 50,372,579
| 118
| 5
| null | 2017-11-23T07:33:23
| 2016-01-25T18:46:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
RusPython.py
|
# coding: latin-1
# -----------------------------------------------------------------------------
# calc.py
#
# A simple calculator with variables.
# -----------------------------------------------------------------------------
import ply.yacc as yacc
import ruspylex
import ruspyparser
import util
import sys
def main(argv=None):
if argv is None:
argv = sys.argv
debug = False
if argv[1] == "-d":
debug = True
argv.remove("-d")
try:
in_file = open(argv[1],"r")
s = in_file.read()
in_file.close()
#s = raw_input('calc > ') # use input() on Python 3
except EOFError:
print("Eccezione")
params = argv[2:]
header=util.addInputParamers(params)
header = header+ util.import_extra(s)
s = util.caseInsensitivize_and_fix(s)
util.first_pass(s)
modu = header + ruspyparser.parse(s)
if debug:
print "Programma"
print modu
print "Execution"
try:
exec(compile(modu, filename="<string>", mode="exec"))
except Exception,e:
if debug:
print str(e)
else:
print "Tutto è andato bene, non preoccuparti"
if debug:
print "END"
if __name__ == "__main__":
main()
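# Illustrative usage note (added): invoked as a Python 2 script, e.g.
#     python RusPython.py [-d] program.rus arg1 arg2 ...
# where '-d' enables debug output (translated module and traceback details);
# "program.rus" and the extra args are placeholders for the input program and
# its parameters.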
|
22af59ca4d2c8882e7b3da8034490e7a8f09339a
|
05fe579c12f0013ce83a106083ddb66ace5e8f47
|
/tests/ut/datavisual/data_transform/test_summary_parser.py
|
45c346a64e5065e0c2644119e250a2d12f9687a3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause"
] |
permissive
|
mindspore-ai/mindinsight
|
59d3f47144ada9a12d2c82d9826ad5f5288aed78
|
a774d893fb2f21dbc3edb5cd89f9e6eec274ebf1
|
refs/heads/master
| 2023-07-22T22:46:43.075617
| 2023-07-17T11:26:58
| 2023-07-17T11:26:58
| 250,692,948
| 224
| 24
|
Apache-2.0
| 2020-12-29T12:22:51
| 2020-03-28T01:58:56
|
Python
|
UTF-8
|
Python
| false
| false
| 5,076
|
py
|
test_summary_parser.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Function:
    Test mindinsight.datavisual.data_transform.summary_parser.
Usage:
pytest tests/ut/datavisual
"""
import os
import csv
import time
import shutil
import tempfile
from urllib.parse import quote
from mindinsight.datavisual.data_transform.summary_parser.event_parser import EventParser
from tests.utils.log_generators.images_log_generator import ImagesLogGenerator
from tests.utils.log_generators.scalars_log_generator import ScalarsLogGenerator
ROUND_NUM = 4
class TestSummaryParser:
"""Test ms_data_loader."""
def setup_class(self):
"""Run before test this class."""
self.base_summary_dir = tempfile.mkdtemp(suffix='summary')
def teardown_class(self):
"""Run after test this class."""
if os.path.exists(self.base_summary_dir):
shutil.rmtree(self.base_summary_dir)
def test_parse_and_export_save_csv_file(self):
"""Test parse summary file and save scalar to csv file."""
summary_dir = tempfile.mkdtemp(dir=self.base_summary_dir)
test_file_name = '%s/%s.%s.%s' % (summary_dir, 'scalar', 'summary', str(time.time()))
metadata, _ = TestSummaryParser.prepare_scalar_summary_file(test_file_name)
event_parse = EventParser(test_file_name, summary_dir)
event_parse.parse()
result = TestSummaryParser.parse_csv_file(summary_dir)
expect_value = TestSummaryParser.get_expect_value(metadata)
shutil.rmtree(summary_dir)
assert result == expect_value
def test_parse_and_export_png_file(self):
"""Test parse summary file and save image to png files."""
summary_dir = tempfile.mkdtemp(dir=self.base_summary_dir)
image_dir = os.path.join(summary_dir, 'image')
os.makedirs(image_dir, mode=0o700)
test_file_name = '%s/%s.%s.%s' % (summary_dir, 'image', 'summary', str(time.time()))
expect_names = TestSummaryParser.prepare_image_summary_file(test_file_name)
event_parse = EventParser(test_file_name, summary_dir)
event_parse.parse()
result = sorted(os.listdir(image_dir))
shutil.rmtree(summary_dir)
assert result == expect_names
@staticmethod
def prepare_scalar_summary_file(test_file_name):
"""Prepare the summary file with scalar data."""
scalars_log_generator = ScalarsLogGenerator()
test_steps = [1, 3, 5]
test_tag = "test_scalar_tag_name"
return scalars_log_generator.generate_log(test_file_name, test_steps, test_tag)
@staticmethod
def prepare_image_summary_file(test_file_name):
"""Prepare the summary file with image data."""
images_log_generator = ImagesLogGenerator()
test_steps = [1, 3, 5]
test_tags = "test_image_tag_name"
images_log_generator.generate_log(test_file_name, test_steps, test_tags)
return TestSummaryParser.get_expect_image_names(test_tags, test_steps)
@staticmethod
def parse_csv_file(summary_dir):
"""parse csv file to compare the result with expect value."""
export_path = os.path.join(summary_dir, "scalar.csv")
results = []
with open(export_path, 'r', encoding='utf-8') as file:
csv_reader = csv.reader(file, dialect='excel')
for line in csv_reader:
results.append(line)
# The first line is title, so no need to round the value
for result in results[1:]:
# The result[3] is the value of scalar, we want to compare the rounded value.
result[3] = str(round(float(result[3]), ROUND_NUM))
return results
@staticmethod
def get_expect_value(metadata):
"""change the format of expect value to compare with result."""
expect_value = [['tag', 'step', 'wall_time (unit: seconds)', 'value']]
for line in metadata:
expect_value.append(
[line.get('tag'), str(line.get('step')), str(line.get('wall_time')),
str(round(line.get('value'), ROUND_NUM))])
return expect_value
@staticmethod
def get_expect_image_names(test_tags, test_steps):
"""get the names of expected images to compare with result."""
expect_names = []
tag = quote(test_tags, safe="")
for step in test_steps:
expect_names.append("{}_{}.png".format(tag, step))
return sorted(expect_names)
|
d01fef3d8842edd54fed9db64d7ccb4389e4ffc1
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/torch/nn/quantized/modules/__init__.py
|
aad319312f6ae979de943357ed51ad6faecbf49b
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,433
|
py
|
__init__.py
|
r"""Quantized Modules
Note::
The `torch.nn.quantized` namespace is in the process of being deprecated.
Please, use `torch.ao.nn.quantized` instead.
"""
from torch.ao.nn.quantized.modules.activation import ReLU6, Hardswish, ELU, LeakyReLU, Sigmoid, Softmax, MultiheadAttention, PReLU
from torch.ao.nn.quantized.modules.batchnorm import BatchNorm2d, BatchNorm3d
from torch.ao.nn.quantized.modules.conv import Conv1d, Conv2d, Conv3d
from torch.ao.nn.quantized.modules.conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
from torch.ao.nn.quantized.modules.dropout import Dropout
from torch.ao.nn.quantized.modules.embedding_ops import Embedding, EmbeddingBag
from torch.ao.nn.quantized.modules.functional_modules import FloatFunctional, FXFloatFunctional, QFunctional
from torch.ao.nn.quantized.modules.linear import Linear
from torch.ao.nn.quantized.modules.normalization import LayerNorm, GroupNorm, InstanceNorm1d, InstanceNorm2d, InstanceNorm3d
from torch.ao.nn.quantized.modules.rnn import LSTM
from torch.ao.nn.quantized.modules import MaxPool2d
from torch.ao.nn.quantized.modules import Quantize, DeQuantize
# The following imports are needed in case the user decides
# to import the files directly,
# s.a. `from torch.nn.quantized.modules.conv import ...`.
# No need to add them to the `__all__`.
from torch.ao.nn.quantized.modules import activation
from torch.ao.nn.quantized.modules import batchnorm
from torch.ao.nn.quantized.modules import conv
from torch.ao.nn.quantized.modules import dropout
from torch.ao.nn.quantized.modules import embedding_ops
from torch.ao.nn.quantized.modules import functional_modules
from torch.ao.nn.quantized.modules import linear
from torch.ao.nn.quantized.modules import normalization
from torch.ao.nn.quantized.modules import rnn
from torch.ao.nn.quantized.modules import utils
__all__ = [
'BatchNorm2d',
'BatchNorm3d',
'Conv1d',
'Conv2d',
'Conv3d',
'ConvTranspose1d',
'ConvTranspose2d',
'ConvTranspose3d',
'DeQuantize',
'ELU',
'Embedding',
'EmbeddingBag',
'GroupNorm',
'Hardswish',
'InstanceNorm1d',
'InstanceNorm2d',
'InstanceNorm3d',
'LayerNorm',
'LeakyReLU',
'Linear',
'LSTM',
'MultiheadAttention',
'Quantize',
'ReLU6',
'Sigmoid',
'Softmax',
'Dropout',
'PReLU',
# Wrapper modules
'FloatFunctional',
'FXFloatFunctional',
'QFunctional',
]
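# Illustrative note (added): per the deprecation notice above, new code should
# import from the ``torch.ao`` namespace directly; this module only re-exports
# the same classes, e.g.:
#
#     from torch.ao.nn.quantized import Linear    # preferred
#     from torch.nn.quantized import Linear       # deprecated alias kept here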
|
8d4127b7de526d52dede0f67b6a52cfeae332f04
|
d4b77f464596746d68e7b45f1ce1339800752b88
|
/sounddevice_build.py
|
7befb06e4e2e20a0adbb08dec03ed1ae33e462f5
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
spatialaudio/python-sounddevice
|
dff9d517db9790a9165e3f9ae9ddd91bd62eee13
|
91171852b9d955e3abcb58f9b78f2868db40c8be
|
refs/heads/master
| 2023-07-09T03:43:19.314165
| 2023-03-12T18:07:36
| 2023-03-12T18:35:56
| 37,774,726
| 890
| 157
|
MIT
| 2023-09-07T17:51:54
| 2015-06-20T15:34:00
|
Python
|
UTF-8
|
Python
| false
| false
| 10,116
|
py
|
sounddevice_build.py
|
from cffi import FFI
ffibuilder = FFI()
ffibuilder.set_source('_sounddevice', None)
ffibuilder.cdef("""
int Pa_GetVersion( void );
const char* Pa_GetVersionText( void );
typedef int PaError;
typedef enum PaErrorCode
{
paNoError = 0,
paNotInitialized = -10000,
paUnanticipatedHostError,
paInvalidChannelCount,
paInvalidSampleRate,
paInvalidDevice,
paInvalidFlag,
paSampleFormatNotSupported,
paBadIODeviceCombination,
paInsufficientMemory,
paBufferTooBig,
paBufferTooSmall,
paNullCallback,
paBadStreamPtr,
paTimedOut,
paInternalError,
paDeviceUnavailable,
paIncompatibleHostApiSpecificStreamInfo,
paStreamIsStopped,
paStreamIsNotStopped,
paInputOverflowed,
paOutputUnderflowed,
paHostApiNotFound,
paInvalidHostApi,
paCanNotReadFromACallbackStream,
paCanNotWriteToACallbackStream,
paCanNotReadFromAnOutputOnlyStream,
paCanNotWriteToAnInputOnlyStream,
paIncompatibleStreamHostApi,
paBadBufferPtr
} PaErrorCode;
const char *Pa_GetErrorText( PaError errorCode );
PaError Pa_Initialize( void );
PaError Pa_Terminate( void );
typedef int PaDeviceIndex;
#define paNoDevice -1
#define paUseHostApiSpecificDeviceSpecification -2
typedef int PaHostApiIndex;
PaHostApiIndex Pa_GetHostApiCount( void );
PaHostApiIndex Pa_GetDefaultHostApi( void );
typedef enum PaHostApiTypeId
{
paInDevelopment=0,
paDirectSound=1,
paMME=2,
paASIO=3,
paSoundManager=4,
paCoreAudio=5,
paOSS=7,
paALSA=8,
paAL=9,
paBeOS=10,
paWDMKS=11,
paJACK=12,
paWASAPI=13,
paAudioScienceHPI=14
} PaHostApiTypeId;
typedef struct PaHostApiInfo
{
int structVersion;
PaHostApiTypeId type;
const char *name;
int deviceCount;
PaDeviceIndex defaultInputDevice;
PaDeviceIndex defaultOutputDevice;
} PaHostApiInfo;
const PaHostApiInfo * Pa_GetHostApiInfo( PaHostApiIndex hostApi );
PaHostApiIndex Pa_HostApiTypeIdToHostApiIndex( PaHostApiTypeId type );
PaDeviceIndex Pa_HostApiDeviceIndexToDeviceIndex( PaHostApiIndex hostApi,
int hostApiDeviceIndex );
typedef struct PaHostErrorInfo{
PaHostApiTypeId hostApiType;
long errorCode;
const char *errorText;
}PaHostErrorInfo;
const PaHostErrorInfo* Pa_GetLastHostErrorInfo( void );
PaDeviceIndex Pa_GetDeviceCount( void );
PaDeviceIndex Pa_GetDefaultInputDevice( void );
PaDeviceIndex Pa_GetDefaultOutputDevice( void );
typedef double PaTime;
typedef unsigned long PaSampleFormat;
#define paFloat32 0x00000001
#define paInt32 0x00000002
#define paInt24 0x00000004
#define paInt16 0x00000008
#define paInt8 0x00000010
#define paUInt8 0x00000020
#define paCustomFormat 0x00010000
#define paNonInterleaved 0x80000000
typedef struct PaDeviceInfo
{
int structVersion;
const char *name;
PaHostApiIndex hostApi;
int maxInputChannels;
int maxOutputChannels;
PaTime defaultLowInputLatency;
PaTime defaultLowOutputLatency;
PaTime defaultHighInputLatency;
PaTime defaultHighOutputLatency;
double defaultSampleRate;
} PaDeviceInfo;
const PaDeviceInfo* Pa_GetDeviceInfo( PaDeviceIndex device );
typedef struct PaStreamParameters
{
PaDeviceIndex device;
int channelCount;
PaSampleFormat sampleFormat;
PaTime suggestedLatency;
void *hostApiSpecificStreamInfo;
} PaStreamParameters;
#define paFormatIsSupported 0
PaError Pa_IsFormatSupported( const PaStreamParameters *inputParameters,
const PaStreamParameters *outputParameters,
double sampleRate );
typedef void PaStream;
#define paFramesPerBufferUnspecified 0
typedef unsigned long PaStreamFlags;
#define paNoFlag 0
#define paClipOff 0x00000001
#define paDitherOff 0x00000002
#define paNeverDropInput 0x00000004
#define paPrimeOutputBuffersUsingStreamCallback 0x00000008
#define paPlatformSpecificFlags 0xFFFF0000
typedef struct PaStreamCallbackTimeInfo{
PaTime inputBufferAdcTime;
PaTime currentTime;
PaTime outputBufferDacTime;
} PaStreamCallbackTimeInfo;
typedef unsigned long PaStreamCallbackFlags;
#define paInputUnderflow 0x00000001
#define paInputOverflow 0x00000002
#define paOutputUnderflow 0x00000004
#define paOutputOverflow 0x00000008
#define paPrimingOutput 0x00000010
typedef enum PaStreamCallbackResult
{
paContinue=0,
paComplete=1,
paAbort=2
} PaStreamCallbackResult;
typedef int PaStreamCallback(
const void *input, void *output,
unsigned long frameCount,
const PaStreamCallbackTimeInfo* timeInfo,
PaStreamCallbackFlags statusFlags,
void *userData );
PaError Pa_OpenStream( PaStream** stream,
const PaStreamParameters *inputParameters,
const PaStreamParameters *outputParameters,
double sampleRate,
unsigned long framesPerBuffer,
PaStreamFlags streamFlags,
PaStreamCallback *streamCallback,
void *userData );
PaError Pa_OpenDefaultStream( PaStream** stream,
int numInputChannels,
int numOutputChannels,
PaSampleFormat sampleFormat,
double sampleRate,
unsigned long framesPerBuffer,
PaStreamCallback *streamCallback,
void *userData );
PaError Pa_CloseStream( PaStream *stream );
typedef void PaStreamFinishedCallback( void *userData );
PaError Pa_SetStreamFinishedCallback( PaStream *stream,
PaStreamFinishedCallback* streamFinishedCallback );
PaError Pa_StartStream( PaStream *stream );
PaError Pa_StopStream( PaStream *stream );
PaError Pa_AbortStream( PaStream *stream );
PaError Pa_IsStreamStopped( PaStream *stream );
PaError Pa_IsStreamActive( PaStream *stream );
typedef struct PaStreamInfo
{
int structVersion;
PaTime inputLatency;
PaTime outputLatency;
double sampleRate;
} PaStreamInfo;
const PaStreamInfo* Pa_GetStreamInfo( PaStream *stream );
PaTime Pa_GetStreamTime( PaStream *stream );
double Pa_GetStreamCpuLoad( PaStream* stream );
PaError Pa_ReadStream( PaStream* stream,
void *buffer,
unsigned long frames );
PaError Pa_WriteStream( PaStream* stream,
const void *buffer,
unsigned long frames );
signed long Pa_GetStreamReadAvailable( PaStream* stream );
signed long Pa_GetStreamWriteAvailable( PaStream* stream );
PaHostApiTypeId Pa_GetStreamHostApiType( PaStream* stream );
PaError Pa_GetSampleSize( PaSampleFormat format );
void Pa_Sleep( long msec );
/* pa_mac_core.h */
typedef int32_t SInt32;
typedef struct
{
unsigned long size;
PaHostApiTypeId hostApiType;
unsigned long version;
unsigned long flags;
SInt32 const * channelMap;
unsigned long channelMapSize;
} PaMacCoreStreamInfo;
void PaMacCore_SetupStreamInfo( PaMacCoreStreamInfo *data, unsigned long flags );
void PaMacCore_SetupChannelMap( PaMacCoreStreamInfo *data, const SInt32 * const channelMap, unsigned long channelMapSize );
const char *PaMacCore_GetChannelName( int device, int channelIndex, bool input );
#define paMacCoreChangeDeviceParameters 0x01
#define paMacCoreFailIfConversionRequired 0x02
#define paMacCoreConversionQualityMin 0x0100
#define paMacCoreConversionQualityMedium 0x0200
#define paMacCoreConversionQualityLow 0x0300
#define paMacCoreConversionQualityHigh 0x0400
#define paMacCoreConversionQualityMax 0x0000
#define paMacCorePlayNice 0x00
#define paMacCorePro 0x01
#define paMacCoreMinimizeCPUButPlayNice 0x0100
#define paMacCoreMinimizeCPU 0x0101
/* pa_win_waveformat.h */
typedef unsigned long PaWinWaveFormatChannelMask;
/* pa_asio.h */
#define paAsioUseChannelSelectors 0x01
typedef struct PaAsioStreamInfo
{
unsigned long size;
PaHostApiTypeId hostApiType;
unsigned long version;
unsigned long flags;
int *channelSelectors;
} PaAsioStreamInfo;
/* pa_win_wasapi.h */
typedef enum PaWasapiFlags
{
paWinWasapiExclusive = 1,
paWinWasapiRedirectHostProcessor = 2,
paWinWasapiUseChannelMask = 4,
paWinWasapiPolling = 8,
paWinWasapiThreadPriority = 16
} PaWasapiFlags;
typedef void (*PaWasapiHostProcessorCallback) (
void *inputBuffer, long inputFrames,
void *outputBuffer, long outputFrames, void *userData);
typedef enum PaWasapiThreadPriority
{
eThreadPriorityNone = 0,
eThreadPriorityAudio,
eThreadPriorityCapture,
eThreadPriorityDistribution,
eThreadPriorityGames,
eThreadPriorityPlayback,
eThreadPriorityProAudio,
eThreadPriorityWindowManager
} PaWasapiThreadPriority;
typedef enum PaWasapiStreamCategory
{
eAudioCategoryOther = 0,
eAudioCategoryCommunications = 3,
eAudioCategoryAlerts = 4,
eAudioCategorySoundEffects = 5,
eAudioCategoryGameEffects = 6,
eAudioCategoryGameMedia = 7,
eAudioCategoryGameChat = 8,
eAudioCategorySpeech = 9,
eAudioCategoryMovie = 10,
eAudioCategoryMedia = 11
} PaWasapiStreamCategory;
typedef enum PaWasapiStreamOption
{
eStreamOptionNone = 0,
eStreamOptionRaw = 1,
eStreamOptionMatchFormat = 2
} PaWasapiStreamOption;
typedef struct PaWasapiStreamInfo
{
unsigned long size;
PaHostApiTypeId hostApiType;
unsigned long version;
unsigned long flags;
PaWinWaveFormatChannelMask channelMask;
PaWasapiHostProcessorCallback hostProcessorOutput;
PaWasapiHostProcessorCallback hostProcessorInput;
PaWasapiThreadPriority threadPriority;
PaWasapiStreamCategory streamCategory;
PaWasapiStreamOption streamOption;
} PaWasapiStreamInfo;
int PaWasapi_IsLoopback( PaDeviceIndex device );
""")
if __name__ == '__main__':
ffibuilder.compile(verbose=True)
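# Illustrative usage sketch (added): running ``python sounddevice_build.py`` emits
# an ABI-mode ``_sounddevice.py`` module (``set_source(..., None)`` selects ABI
# mode). The generated module could then be consumed roughly as below; the
# PortAudio library name is platform-dependent and only an example here.
#
#     from _sounddevice import ffi
#     pa = ffi.dlopen('portaudio')   # e.g. 'libportaudio.so.2' on Linux
#     print(ffi.string(pa.Pa_GetVersionText()).decode())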
|
28a40a46ab90623de30d4ce8e09b62b6482b2c2b
|
bbd69601912a3361d788efd03a47f9d4e3bac09e
|
/docs/sphinx/rest_substitutions/snippets/python/converted/wx.StopWatch.1.py
|
f25a8b96ccf7acbf8e6af6b6cd2c640b6e2e33fb
|
[] |
no_license
|
wxWidgets/Phoenix
|
56929484460a0399a8f1d9582bc77c20aa14748d
|
a1184286703cf24c4b88e5bc14cf2979c1b1ea00
|
refs/heads/master
| 2023-09-01T07:10:17.437093
| 2023-08-31T05:38:01
| 2023-08-31T05:38:01
| 5,078,061
| 2,268
| 677
| null | 2023-09-09T17:06:59
| 2012-07-17T06:22:25
|
Python
|
UTF-8
|
Python
| false
| false
| 311
|
py
|
wx.StopWatch.1.py
|
sw = wx.StopWatch()
CallLongRunningFunction()
wx.LogMessage("The long running function took %dms to execute", sw.Time())
sw.Pause()
# stopwatch is stopped now ...
sw.Resume()
CallLongRunningFunction()
wx.LogMessage("And calling it twice took %dms in all", sw.Time())
|
9dacf3a16f2b68b52b6f9dbddce2f5b6db489608
|
d2e75f67fbb9815a63b82082fafc173d8ae78ea3
|
/lib/utils/kitti_utils.py
|
c9a3e9160ab363e032d2d1e088abd512fa6d6db1
|
[
"MIT"
] |
permissive
|
happinesslz/EPNet
|
adb931478de38c4e9eed837fc34922e55b5953c9
|
0123c341243846aa3b412addcb9e2c07fd305237
|
refs/heads/master
| 2022-12-18T00:15:23.719957
| 2020-08-25T09:49:46
| 2020-08-25T09:49:46
| 276,774,529
| 231
| 43
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,639
|
py
|
kitti_utils.py
|
import numpy as np
from scipy.spatial import Delaunay
import scipy
import lib.utils.object3d as object3d
import torch
def get_objects_from_label(label_file):
with open(label_file, 'r') as f:
lines = f.readlines()
objects = [object3d.Object3d(line) for line in lines]
return objects
def dist_to_plane(plane, points):
"""
Calculates the signed distance from a 3D plane to each point in a list of points
:param plane: (a, b, c, d)
:param points: (N, 3)
:return: (N), signed distance of each point to the plane
"""
a, b, c, d = plane
points = np.array(points)
x = points[:, 0]
y = points[:, 1]
z = points[:, 2]
return (a * x + b * y + c * z + d) / np.sqrt(a ** 2 + b ** 2 + c ** 2)
def rotate_pc_along_y(pc, rot_angle):
"""
params pc: (N, 3+C), (N, 3) is in the rectified camera coordinate
params rot_angle: rad scalar
Output pc: updated pc with XYZ rotated
"""
cosval = np.cos(rot_angle)
sinval = np.sin(rot_angle)
rotmat = np.array([[cosval, -sinval], [sinval, cosval]])
pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], np.transpose(rotmat))
return pc
def rotate_pc_along_y_torch(pc, rot_angle):
"""
:param pc: (N, 512, 3 + C)
:param rot_angle: (N)
:return:
TODO: merge with rotate_pc_along_y_torch in bbox_transform.py
"""
cosa = torch.cos(rot_angle).view(-1, 1) # (N, 1)
sina = torch.sin(rot_angle).view(-1, 1) # (N, 1)
raw_1 = torch.cat([cosa, -sina], dim = 1) # (N, 2)
raw_2 = torch.cat([sina, cosa], dim = 1) # (N, 2)
R = torch.cat((raw_1.unsqueeze(dim = 1), raw_2.unsqueeze(dim = 1)), dim = 1) # (N, 2, 2)
pc_temp = pc[:, :, [0, 2]] # (N, 512, 2)
pc[:, :, [0, 2]] = torch.matmul(pc_temp, R.permute(0, 2, 1)) # (N, 512, 2)
return pc
def boxes3d_to_corners3d(boxes3d, rotate = True):
"""
:param boxes3d: (N, 7) [x, y, z, h, w, l, ry]
:param rotate:
:return: corners3d: (N, 8, 3)
"""
boxes_num = boxes3d.shape[0]
h, w, l = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5]
x_corners = np.array([l / 2., l / 2., -l / 2., -l / 2., l / 2., l / 2., -l / 2., -l / 2.],
dtype = np.float32).T # (N, 8)
z_corners = np.array([w / 2., -w / 2., -w / 2., w / 2., w / 2., -w / 2., -w / 2., w / 2.],
dtype = np.float32).T # (N, 8)
y_corners = np.zeros((boxes_num, 8), dtype = np.float32)
y_corners[:, 4:8] = -h.reshape(boxes_num, 1).repeat(4, axis = 1) # (N, 8)
if rotate:
ry = boxes3d[:, 6]
zeros, ones = np.zeros(ry.size, dtype = np.float32), np.ones(ry.size, dtype = np.float32)
rot_list = np.array([[np.cos(ry), zeros, -np.sin(ry)],
[zeros, ones, zeros],
[np.sin(ry), zeros, np.cos(ry)]]) # (3, 3, N)
R_list = np.transpose(rot_list, (2, 0, 1)) # (N, 3, 3)
temp_corners = np.concatenate((x_corners.reshape(-1, 8, 1), y_corners.reshape(-1, 8, 1),
z_corners.reshape(-1, 8, 1)), axis = 2) # (N, 8, 3)
rotated_corners = np.matmul(temp_corners, R_list) # (N, 8, 3)
x_corners, y_corners, z_corners = rotated_corners[:, :, 0], rotated_corners[:, :, 1], rotated_corners[:, :, 2]
x_loc, y_loc, z_loc = boxes3d[:, 0], boxes3d[:, 1], boxes3d[:, 2]
x = x_loc.reshape(-1, 1) + x_corners.reshape(-1, 8)
y = y_loc.reshape(-1, 1) + y_corners.reshape(-1, 8)
z = z_loc.reshape(-1, 1) + z_corners.reshape(-1, 8)
corners = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1), z.reshape(-1, 8, 1)), axis = 2)
return corners.astype(np.float32)
def boxes3d_to_corners3d_torch(boxes3d, flip = False):
"""
:param boxes3d: (N, 7) [x, y, z, h, w, l, ry]
:return: corners_rotated: (N, 8, 3)
"""
boxes_num = boxes3d.shape[0]
h, w, l, ry = boxes3d[:, 3:4], boxes3d[:, 4:5], boxes3d[:, 5:6], boxes3d[:, 6:7]
if flip:
ry = ry + np.pi
centers = boxes3d[:, 0:3]
zeros = torch.cuda.FloatTensor(boxes_num, 1).fill_(0)
ones = torch.cuda.FloatTensor(boxes_num, 1).fill_(1)
x_corners = torch.cat([l / 2., l / 2., -l / 2., -l / 2., l / 2., l / 2., -l / 2., -l / 2.], dim = 1) # (N, 8)
y_corners = torch.cat([zeros, zeros, zeros, zeros, -h, -h, -h, -h], dim = 1) # (N, 8)
z_corners = torch.cat([w / 2., -w / 2., -w / 2., w / 2., w / 2., -w / 2., -w / 2., w / 2.], dim = 1) # (N, 8)
corners = torch.cat((x_corners.unsqueeze(dim = 1), y_corners.unsqueeze(dim = 1), z_corners.unsqueeze(dim = 1)),
dim = 1) # (N, 3, 8)
cosa, sina = torch.cos(ry), torch.sin(ry)
raw_1 = torch.cat([cosa, zeros, sina], dim = 1)
raw_2 = torch.cat([zeros, ones, zeros], dim = 1)
raw_3 = torch.cat([-sina, zeros, cosa], dim = 1)
R = torch.cat((raw_1.unsqueeze(dim = 1), raw_2.unsqueeze(dim = 1), raw_3.unsqueeze(dim = 1)), dim = 1) # (N, 3, 3)
corners_rotated = torch.matmul(R, corners) # (N, 3, 8)
corners_rotated = corners_rotated + centers.unsqueeze(dim = 2).expand(-1, -1, 8)
corners_rotated = corners_rotated.permute(0, 2, 1)
return corners_rotated
def boxes3d_to_bev_torch(boxes3d):
"""
:param boxes3d: (N, 7) [x, y, z, h, w, l, ry]
:return:
boxes_bev: (N, 5) [x1, y1, x2, y2, ry]
"""
boxes_bev = boxes3d.new(torch.Size((boxes3d.shape[0], 5)))
cu, cv = boxes3d[:, 0], boxes3d[:, 2]
half_l, half_w = boxes3d[:, 5] / 2, boxes3d[:, 4] / 2
boxes_bev[:, 0], boxes_bev[:, 1] = cu - half_l, cv - half_w
boxes_bev[:, 2], boxes_bev[:, 3] = cu + half_l, cv + half_w
boxes_bev[:, 4] = boxes3d[:, 6]
return boxes_bev
def enlarge_box3d(boxes3d, extra_width):
"""
:param boxes3d: (N, 7) [x, y, z, h, w, l, ry]
"""
if isinstance(boxes3d, np.ndarray):
large_boxes3d = boxes3d.copy()
else:
large_boxes3d = boxes3d.clone()
large_boxes3d[:, 3:6] += extra_width * 2
large_boxes3d[:, 1] += extra_width
return large_boxes3d
def in_hull(p, hull):
"""
:param p: (N, K) test points
:param hull: (M, K) M corners of a box
:return (N) bool
"""
try:
if not isinstance(hull, Delaunay):
hull = Delaunay(hull)
flag = hull.find_simplex(p) >= 0
except scipy.spatial.qhull.QhullError:
print('Warning: not a hull %s' % str(hull))
flag = np.zeros(p.shape[0], dtype = np.bool)
return flag
def objs_to_boxes3d(obj_list):
boxes3d = np.zeros((obj_list.__len__(), 7), dtype = np.float32)
for k, obj in enumerate(obj_list):
boxes3d[k, 0:3], boxes3d[k, 3], boxes3d[k, 4], boxes3d[k, 5], boxes3d[k, 6] \
= obj.pos, obj.h, obj.w, obj.l, obj.ry
return boxes3d
def objs_to_scores(obj_list):
scores = np.zeros((obj_list.__len__()), dtype = np.float32)
for k, obj in enumerate(obj_list):
scores[k] = obj.score
return scores
def get_iou3d(corners3d, query_corners3d, need_bev = False):
"""
:param corners3d: (N, 8, 3) in rect coords
:param query_corners3d: (M, 8, 3)
:return:
"""
from shapely.geometry import Polygon
A, B = corners3d, query_corners3d
N, M = A.shape[0], B.shape[0]
iou3d = np.zeros((N, M), dtype = np.float32)
iou_bev = np.zeros((N, M), dtype = np.float32)
# for height overlap, since y face down, use the negative y
min_h_a = -A[:, 0:4, 1].sum(axis = 1) / 4.0
max_h_a = -A[:, 4:8, 1].sum(axis = 1) / 4.0
min_h_b = -B[:, 0:4, 1].sum(axis = 1) / 4.0
max_h_b = -B[:, 4:8, 1].sum(axis = 1) / 4.0
for i in range(N):
for j in range(M):
max_of_min = np.max([min_h_a[i], min_h_b[j]])
min_of_max = np.min([max_h_a[i], max_h_b[j]])
h_overlap = np.max([0, min_of_max - max_of_min])
if h_overlap == 0:
continue
bottom_a, bottom_b = Polygon(A[i, 0:4, [0, 2]].T), Polygon(B[j, 0:4, [0, 2]].T)
if bottom_a.is_valid and bottom_b.is_valid:
                # check validity: a valid Polygon must not have overlapping exterior or interior rings.
bottom_overlap = bottom_a.intersection(bottom_b).area
else:
bottom_overlap = 0.
overlap3d = bottom_overlap * h_overlap
union3d = bottom_a.area * (max_h_a[i] - min_h_a[i]) + bottom_b.area * (max_h_b[j] - min_h_b[j]) - overlap3d
iou3d[i][j] = overlap3d / union3d
iou_bev[i][j] = bottom_overlap / (bottom_a.area + bottom_b.area - bottom_overlap)
if need_bev:
return iou3d, iou_bev
return iou3d
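# Illustrative sketch (added): sample values only, following the
# [x, y, z, h, w, l, ry] box convention used by the helpers above.
# get_iou3d additionally requires shapely to be installed.
if __name__ == '__main__':
    sample_boxes = np.array([[0.0, 1.5, 10.0, 1.5, 1.6, 4.0, 0.0]], dtype=np.float32)
    sample_corners = boxes3d_to_corners3d(sample_boxes)   # (1, 8, 3)
    self_iou = get_iou3d(sample_corners, sample_corners)  # IoU of a box with itself, ~1.0
    print(sample_corners.shape, self_iou)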
|
850b04bf31a68c3ab96a65813e932d1d81764b1f
|
3ca67d69abd4e74b7145b340cdda65532f90053b
|
/programmers/난이도별/level02.가장_큰_수/rockmiin_가장큰수.py
|
89944fc887db9cd966871579774f9bcf82486be7
|
[] |
no_license
|
DKU-STUDY/Algorithm
|
19549516984b52a1c5cd73e1ed1e58f774d6d30e
|
6f78efdbefd8eedab24e43d74c7dae7f95c2893b
|
refs/heads/master
| 2023-02-18T06:48:39.309641
| 2023-02-09T07:16:14
| 2023-02-09T07:16:14
| 258,455,710
| 175
| 49
| null | 2023-02-09T07:16:16
| 2020-04-24T08:42:27
|
Python
|
UTF-8
|
Python
| false
| false
| 313
|
py
|
rockmiin_가장큰수.py
|
def solution(num):
num= list(map(str, num))
slen= len(num)
for i in range(slen):
num[i]*=3
# print(num)
num.sort(reverse=True)
for i in range(slen):
num[i]=num[i][:len(num[i])//3]
if num[0]=='0': return '0'
return ''.join(num)
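# Note (added): repeating each number string three times before the descending
# sort is a common trick for this problem's constraint (each element <= 1000):
# e.g. '3' -> '333' sorts ahead of '30' -> '303030', which matches preferring
# the concatenation '330' over '303'.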
print(
solution([6, 2, 10])
)
|
d7d635572e3004d7b7b925f8210a82fd016d9d15
|
6d1e4c077808ee934ada7de44fe10ba99712e090
|
/brainrender/gui/widgets/screenshot_modal.py
|
c34e926487469b36ba8587ab6af5c476af0120ab
|
[
"BSD-3-Clause",
"GPL-3.0-only"
] |
permissive
|
brainglobe/brainrender
|
0e87c0134dddaa28af6084aa40978cd3549e9c44
|
a14ead80c1dbc75f20a145a49394dc467c4f7bf1
|
refs/heads/master
| 2023-08-09T22:13:31.266666
| 2023-08-01T08:50:57
| 2023-08-01T08:50:57
| 209,513,981
| 345
| 56
|
BSD-3-Clause
| 2023-09-12T14:56:25
| 2019-09-19T09:27:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
screenshot_modal.py
|
from qtpy.QtWidgets import QDialog, QLabel, QVBoxLayout
from qtpy import QtCore
from brainrender.gui.style import style, update_css
class ScreenshotModal(QDialog):
left = 250
top = 250
width = 400
height = 120
def __init__(self, main_window, palette):
"""
        Creates a modal window confirming that a screenshot
        has been saved; it closes itself automatically.
Arguments:
----------
main_window: reference to the App's main window
palette: main_window's palette, used to style widgets
"""
super().__init__()
self.setWindowTitle("Add brain regions")
self.ui()
self.main_window = main_window
self.setStyleSheet(update_css(style, palette))
# Start timer to autoclose
self.timer = QtCore.QTimer(self)
self.timer.setInterval(1500)
self.timer.timeout.connect(self.close)
self.timer.start()
def ui(self):
"""
Define UI's elements
"""
self.setGeometry(self.left, self.top, self.width, self.height)
layout = QVBoxLayout()
label = QLabel(self)
label.setStyleSheet("font-size: 18pt; font-weight: 700;")
label.setObjectName("PopupLabel")
label.setText("Screenshot saved")
layout.addWidget(label)
self.setLayout(layout)
self.setModal(True)
self.show()
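# Illustrative usage sketch (added): typically constructed from the app's main
# window right after a screenshot is written, e.g. (names are assumptions):
#
#     ScreenshotModal(main_window, main_window.palette())
#
# The dialog closes itself after 1.5 s via the QTimer set up in __init__.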
|
db304338b561b67fbce7c9fad32982972361965e
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/tests/st/tensor/test_matrix_power.py
|
2dba5448fd88a7bc21eb22e25845cd7cf34d0e7a
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 1,666
|
py
|
test_matrix_power.py
|
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
import mindspore.nn as nn
class NetMatrixPower(nn.Cell):
def construct(self, x, n):
return x.matrix_power(n)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
@pytest.mark.skip(reason="I6BZ6M")
def test_matrix_power(mode):
"""
Feature: Tensor.matrix_power
Description: Verify the result of matrix_power
Expectation: success.
"""
ms.set_context(mode=mode)
arrs = [
np.random.rand(1, 2, 2).astype('float32'),
np.random.rand(2, 3, 3).astype('float32'),
np.random.rand(3, 4, 4).astype('float32'),
]
net_matrix_power = NetMatrixPower()
for arr in arrs:
for n in range(0, 4):
expect_out = np.linalg.matrix_power(arr, n)
out = net_matrix_power(ms.Tensor(arr), n)
assert np.allclose(out.asnumpy(), expect_out, rtol=1e-4, atol=1e-4)
|
4fe762c2d08121c67f5b7857f0984c30bf6a60b7
|
88dd4380e0d33d4a118ca4e69e4ca9b1c8f45e1f
|
/pyspedas/themis/ground/gmag.py
|
0cc27a3fbc0c6ed7f3c469d349d198c0112bb8a3
|
[
"MIT"
] |
permissive
|
spedas/pyspedas
|
16d34015961e3a4d3eaf8637d3cb6abca95df1b1
|
1d07b148753afa96e148c5835ed9545c507577da
|
refs/heads/master
| 2023-09-01T16:07:47.131334
| 2023-08-25T17:15:35
| 2023-08-25T17:15:35
| 167,614,292
| 125
| 61
|
MIT
| 2023-09-08T18:41:27
| 2019-01-25T21:11:14
|
Python
|
UTF-8
|
Python
| false
| false
| 8,292
|
py
|
gmag.py
|
import logging
import requests
from pyspedas.themis.load import load
def gmag(trange=['2007-03-23', '2007-03-24'],
sites=None,
group=None,
level='l2',
suffix='',
get_support_data=False,
varformat=None,
varnames=[],
downloadonly=False,
notplot=False,
no_update=False,
time_clip=False):
"""
This function loads ground magnetometer data
Parameters:
trange: list of str
time range of interest [starttime, endtime] with the format
            ['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
level: str
Data type; Valid options: 'l1', 'l2'
suffix: str
The tplot variable names will be given this suffix.
By default, no suffix is added.
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load
(if not specified, all data variables are loaded)
downloadonly: bool
Set this flag to download the CDF files, but not load them into
tplot variables
notplot: bool
Return the data in hash tables instead of creating tplot variables
no_update: bool
If set, only load data from your local cache
time_clip: bool
Time clip the variables to exactly the range specified
in the trange keyword
sites: str/list of str
GMAG station names to load (e.g. 'bmls').
group: str
GMAG group of stations (eg. 'epo').
If specified, stations is ignored.
Returns:
List of tplot variables created.
"""
if sites is None:
thm_sites = 'atha chbg ekat fsim fsmi fykn gbay glyn gill inuv kapu '\
'kian kuuj mcgr nrsq pgeo pina rank snap snkq tpas whit '\
'yknf'.split(' ')
tgo_sites = ['nal', 'lyr', 'hop', 'bjn', 'nor', 'sor', 'tro', 'and',
'don', 'rvk', 'sol', 'kar', 'jan', 'jck', 'dob']
dtu_sites = ['atu', 'dmh', 'svs', 'tdc', 'bfe', 'roe', 'thl', 'kuv',
'upn', 'umq', 'gdh', 'stf', 'skt', 'ghb', 'fhb', 'naq',
'amk', 'sco', 'tab', 'sum', 'hov']
ua_sites = ['arct', 'bett', 'cigo', 'eagl', 'fykn', 'gako', 'hlms',
'homr', 'kako', 'pokr', 'trap']
maccs_sites = ['cdrt', 'chbr', 'crvr', 'gjoa', 'iglo', 'nain',
'pang', 'rbay']
usgs_sites = ['bou', 'brw', 'bsl', 'cmo', 'ded', 'frd', 'frn',
'gua', 'hon', 'new', 'shu', 'sit', 'sjg', 'tuc']
atha_sites = ['roth', 'leth', 'redr', 'larg', 'vldr', 'salu', 'akul',
'puvr', 'inuk', 'kjpk', 'radi', 'stfl', 'sept', 'schf']
epo_sites = ['bmls', 'ccnv', 'drby', 'fyts', 'hots', 'loys',
'pgeo', 'pine', 'ptrs', 'rmus', 'swno', 'ukia']
falcon_sites = ['hris', 'kodk', 'lrel', 'pblo', 'stfd', 'wlps']
mcmac_sites = ['amer', 'benn', 'glyn', 'lyfd', 'pcel', 'rich',
'satx', 'wrth']
nrcan_sites = ['blc', 'cbb', 'iqa', 'mea', 'ott', 'stj', 'vic']
step_sites = ['fsj', 'ftn', 'hrp', 'lcl', 'lrg', 'pks', 'whs']
fmi_sites = ['han', 'iva', 'kev', 'kil', 'mas', 'mek', 'muo', 'nur',
'ouj', 'pel', 'ran', 'tar']
aair_sites = ['amd', 'bbg', 'brn', 'dik', 'loz', 'pbk', 'tik', 'viz']
carisma_sites = ['anna', 'back', 'cont', 'daws', 'eski', 'fchp',
'fchu', 'gull', 'isll', 'lgrr', 'mcmu', 'mstk',
'norm', 'osak', 'oxfo', 'pols', 'rabb', 'sach',
'talo', 'thrf', 'vulc', 'weyb', 'wgry']
sites = (thm_sites + tgo_sites + dtu_sites + ua_sites + maccs_sites
+ usgs_sites + atha_sites + epo_sites + falcon_sites
+ mcmac_sites + nrcan_sites + step_sites + fmi_sites
+ aair_sites + carisma_sites)
if group is not None:
sites = eval(group+'_sites')
if not isinstance(sites, list):
sites = [sites]
# check for sites in Greenland
greenland = []
for site in sites:
if check_greenland(site):
greenland.append(True)
else:
greenland.append(False)
return load(instrument='gmag', trange=trange, level=level,
suffix=suffix, get_support_data=get_support_data,
varformat=varformat, varnames=varnames,
downloadonly=downloadonly, notplot=notplot, stations=sites,
greenland=greenland, time_clip=time_clip, no_update=no_update)
gmag_dict = {}
def query_gmags():
""" returns a dictionary of gmag stations and all their metadata
"""
url = 'http://themis.ssl.berkeley.edu/gmag/gmag_json.php'
global gmag_dict
params = dict(
station='',
group=''
)
resp = requests.get(url=url, params=params)
data = resp.json()
gmag_dict = data
return data
def get_group(station_name):
""" returns a list with the groups that station belongs to
"""
global gmag_dict
group_list = []
if gmag_dict == {}:
gmag_dict = query_gmags()
for station in gmag_dict:
if station['ccode'].lower() == station_name:
for key, value in station.items():
if value == 'Y':
if station['ccode'].lower() not in group_list:
group_list.append(key)
return group_list
def gmag_list(group='all'):
""" returns a list of stations
prints a list of "stations: start date - end date"
"""
global gmag_dict
station_list = []
if gmag_dict == {}:
gmag_dict = query_gmags()
for station in gmag_dict:
station_name = station['ccode'].lower()
station_group = get_group(station_name)
if group in ['all', '*', ''] or group in station_group:
station_list.append(station_name)
logging.info(station_name + ": from " + station['day_first'] + " to "
+ station['day_last'])
return station_list
def gmag_groups():
""" returns a dictionary of station groups with a list of stations
prints a list of "group:'stations'"
"""
global gmag_dict
group_dict = {}
if gmag_dict == {}:
gmag_dict = query_gmags()
for station in gmag_dict:
for key, value in station.items():
if value == 'Y':
if key in group_dict:
if station['ccode'].lower() not in group_dict[key]:
group_dict[key].append(station['ccode'].lower())
else:
group_dict[key] = []
group_dict[key].append(station['ccode'].lower())
# print them
for g, s in group_dict.items():
logging.info(g + ":" + ",'".join(s) + "'")
return group_dict
def check_gmag(station_name):
""" returns 1 if station_name is in the gmag list, 0 otherwise"
"""
global gmag_dict
if gmag_dict == {}:
gmag_dict = query_gmags()
for station in gmag_dict:
if station['ccode'].lower() == station_name.lower():
return 1
return 0
def check_greenland(station_name):
""" returns 1 if station_name is in the greenland gmag list, 0 otherwise"
"""
global gmag_dict
if gmag_dict == {}:
gmag_dict = query_gmags()
for station in gmag_dict:
if station['ccode'].lower() == station_name.lower():
if ((station['country'] is not None
and station['country'].lower() == 'greenland')
or (station['greenland'] is not None
and station['greenland'].lower() == 'y')):
return 1
return 0
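# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). It assumes a working pyspedas installation and network access to
# the THEMIS ground magnetometer archive; 'epo' and 'bmls' come from the
# group/site lists defined in gmag() above.
if __name__ == '__main__':
    print(gmag_list(group='epo'))  # stations in the EPO group, with date ranges
    loaded_vars = gmag(trange=['2007-03-23', '2007-03-24'], sites='bmls')
    print(loaded_vars)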
|
5aed0df2c8aff1dbbbdf9b6274a103456a1d53c3
|
5860c281b7d0b79e4af58edd589971f5317ea68f
|
/src/foremast/awslambda/sns_event/sns_event.py
|
8dc948ff5313a07f7386fd6e9383fd172264df66
|
[
"Apache-2.0"
] |
permissive
|
foremast/foremast
|
b1728043640e19f1d43ac6de98d57cc69da5c530
|
d88001ea0e33fcd09707b81b5c4ed40e5e21fb59
|
refs/heads/master
| 2023-08-13T02:31:04.693625
| 2022-05-25T02:13:46
| 2022-06-20T01:38:21
| 65,238,442
| 151
| 31
|
Apache-2.0
| 2022-06-20T01:38:22
| 2016-08-08T20:52:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,025
|
py
|
sns_event.py
|
# Foremast - Pipeline Tooling
#
# Copyright 2018 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create SNS event for lambda"""
import logging
import boto3
from ...utils import add_lambda_permissions, get_lambda_alias_arn, get_sns_topic_arn
LOG = logging.getLogger(__name__)
def create_sns_event(app_name, env, region, rules):
"""Create SNS lambda event from rules.
Args:
app_name (str): name of the lambda function
env (str): Environment/Account for lambda function
region (str): AWS region of the lambda function
rules (str): Trigger rules from the settings
"""
session = boto3.Session(profile_name=env, region_name=region)
sns_client = session.client('sns')
topic_name = rules.get('topic')
lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)
topic_arn = get_sns_topic_arn(topic_name=topic_name, account=env, region=region)
protocol = 'lambda'
statement_id = '{}_sns_{}'.format(app_name, topic_name)
principal = 'sns.amazonaws.com'
add_lambda_permissions(
function=lambda_alias_arn,
statement_id=statement_id,
action='lambda:InvokeFunction',
principal=principal,
source_arn=topic_arn,
env=env,
region=region)
sns_client.subscribe(TopicArn=topic_arn, Protocol=protocol, Endpoint=lambda_alias_arn)
LOG.debug("SNS Lambda event created")
LOG.info("Created SNS event subscription on topic %s", topic_name)
|
efa9cb630857f2a238bfeb30762c4732347e3b21
|
5624a2063891918855c7832d4f4bab2c3df27a28
|
/examples/03_miscellaneous_examples/plot_large_number_metafeatures.py
|
1b7c3bf1a9ae0c230a83dd7ff80d9106485e0293
|
[
"MIT"
] |
permissive
|
ealcobaca/pymfe
|
52908a9e54d83b431e8aed47b4ea7943e4875b31
|
50131572309dd92cfdf1eceb313be7408f3941b6
|
refs/heads/master
| 2023-05-12T12:47:08.060399
| 2023-01-03T20:04:24
| 2023-01-03T20:04:24
| 158,245,631
| 117
| 33
|
MIT
| 2023-05-02T20:00:34
| 2018-11-19T15:20:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,080
|
py
|
plot_large_number_metafeatures.py
|
"""
Extracting large number of metafeatures
=======================================
In this example, we will extract all possible metafeatures from the Iris
dataset.
"""
from sklearn.datasets import load_iris
from pymfe.mfe import MFE
# Load a dataset
data = load_iris()
y = data.target
X = data.data
###############################################################################
# Using the default parameters, we will get only a few metafeatures: the ones
# most commonly used in the community.
mfe = MFE()
mfe.fit(X, y)
ft = mfe.extract()
print(len(ft[0]))
###############################################################################
# Setting the value ``all`` extracts every available metafeature. To do this,
# set both the ``groups`` and ``summary`` arguments to ``all``.
mfe = MFE(groups="all", summary="all")
mfe.fit(X, y)
ft = mfe.extract()
print(len(ft[0]))
###############################################################################
# .. note::
#    Be careful when using all the metafeatures, because doing so brings the
#    curse of dimensionality up to the meta-level.
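###############################################################################
# Illustrative follow-up (not part of the original example): assuming
# ``extract`` returns the usual ``(names, values)`` pair, the two lists can be
# zipped together to inspect individual metafeatures by name.
names, values = ft
print(list(zip(names, values))[:5])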
|
0ecfcf5aa7f8bb52e0e3d5287699a80c76c9a7bf
|
8ac730a23480f812e1cb74d98b5919a65684cd8e
|
/tests/check_glutinit_simplest.py
|
660fcca45179ec322287010a5f05a4085ff12cc8
|
[
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-newlib-historical",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mcfletch/pyopengl
|
f0f0c9e54cdbe26f837e1fcbcb2729b0f2f7eb16
|
29b79e8966ba2930a5c44829b02dffc1ca600752
|
refs/heads/master
| 2023-08-17T07:40:52.302215
| 2023-08-08T00:16:08
| 2023-08-08T00:16:08
| 43,402,152
| 276
| 77
|
NOASSERTION
| 2023-08-08T14:19:37
| 2015-09-30T00:23:39
|
Python
|
UTF-8
|
Python
| false
| false
| 201
|
py
|
check_glutinit_simplest.py
|
from OpenGL.GLUT import *
glutInit()
glutInitDisplayMode(GLUT_RGB)
try:
if fgDeinitialize: fgDeinitialize(False)
except NameError as err:
pass # Older PyOpenGL, you may see a seg-fault here...
|
40f9ad949a9455cb7c7ef9a500d4c1e3bda89adb
|
b4afb834fc3a3e2c128b1bf825700031e3df519a
|
/examples/cobalt-preproc/nudging_coef/lib_nudgcoef.py
|
202126ed242cd2a3d61b1c38fc087c101f9e5197
|
[
"BSD-3-Clause"
] |
permissive
|
ESMG/pyroms
|
e4e5e9d70d66907a992846b06d61db31afcd24f3
|
5ea501ef904b01036dd2a0909b7bdc61a56e7eff
|
refs/heads/python3
| 2023-03-19T11:11:09.143443
| 2023-03-10T00:22:13
| 2023-03-10T00:22:13
| 1,012,779
| 102
| 63
|
NOASSERTION
| 2023-03-10T00:23:20
| 2010-10-21T17:22:48
|
Python
|
UTF-8
|
Python
| false
| false
| 9,145
|
py
|
lib_nudgcoef.py
|
import numpy as npy
import netCDF4 as nc
import pyroms
class nudgcoef():
    ''' A class to write the Nudging coefficient file for ROMS '''
def __init__(self,roms_grid):
''' init an object of the class with the pyroms grid ID '''
self.grd = pyroms.grid.get_ROMS_grid(roms_grid)
return None
def __call__(self,east_dict,west_dict,north_dict,south_dict,tracer_timescales,foutname='./nudging_coef.nc'):
''' call with following dictionaries :
4 boundaries dict + tracer timescales
for example :
east_dict = {'nudge':True,'factor': 1,'width':50,'transition':'linear'}
west_dict = {'nudge':True,'factor': 1,'width':50,'transition':'linear'}
north_dict = {'nudge':True,'factor': 1,'width':50,'transition':'linear'}
south_dict = {'nudge':True,'factor': 1,'width':50,'transition':'linear'}
tracer_timescales = {'M2':30,'M3':30,'temp':30,'salt':30,'tracer':30}
tips:
* nudge = True if open boundary, False otherwise
* factor allows to have different timescales at each boundary
* width is in grid points
* transition shapes how timescale varies spatially
* tracer timescales are in days
'''
self.east_dict = east_dict
self.west_dict = west_dict
self.north_dict = north_dict
self.south_dict = south_dict
self.tra_ts = tracer_timescales
self.foutname = foutname
# create 2d coef
self.nud2 = self._create_nudgcoef_2d()
# create 3d coef
self.nud3 = self._create_nudgcoef_3d()
# write to netcdf
self._write_nc_file()
return None
def _create_nudgcoef_3d(self):
''' expand 2d coef along the vertical '''
# RD: later we could imagine multiplying by
# a vertical profile if needed
ny, nx = self.grd.hgrid.mask_rho.shape
nz = self.grd.vgrid.N
nudgcoef = npy.zeros((nz,ny,nx))
for kz in npy.arange(nz):
nudgcoef[kz,:,:] = self.nud2[:,:]
return nudgcoef
def _create_nudgcoef_2d(self):
''' create the 2d nudging coef from dictionaries '''
ny, nx = self.grd.hgrid.mask_rho.shape
nudgcoef_west = npy.zeros((ny,nx))
nudgcoef_east = npy.zeros((ny,nx))
nudgcoef_north = npy.zeros((ny,nx))
nudgcoef_south = npy.zeros((ny,nx))
nudgcoef = npy.zeros((ny,nx))
mask = self.grd.hgrid.mask_rho
# west boundary
if self.west_dict['nudge'] is True:
fc = self.west_dict['factor']
wd = self.west_dict['width']
tr = self.west_dict['transition']
if tr == 'linear':
for ji in npy.arange(0,wd):
nudgcoef_west[:,ji] = fc * (wd-ji) / float(wd)
elif tr == 'linear_nocoast':
for ji in npy.arange(0,wd):
nudgcoef_west[:,ji] = mask[:,0] * fc * (wd-ji) / float(wd)
else:
print('transition not coded') ; pass
# east boundary
if self.east_dict['nudge'] is True:
fc = self.east_dict['factor']
wd = self.east_dict['width']
tr = self.east_dict['transition']
if tr == 'linear':
for ji in npy.arange(nx-wd,nx):
nudgcoef_east[:,ji] = fc * (wd-nx+ji) / float(wd)
elif tr == 'linear_nocoast':
for ji in npy.arange(nx-wd,nx):
nudgcoef_east[:,ji] = mask[:,-1] * fc * (wd-nx+ji) / float(wd)
else:
print('transition not coded') ; pass
# south boundary
if self.south_dict['nudge'] is True:
fc = self.south_dict['factor']
wd = self.south_dict['width']
tr = self.south_dict['transition']
if tr == 'linear':
for jj in npy.arange(0,wd):
nudgcoef_south[jj,:] = fc * (wd-jj) / float(wd)
            elif tr == 'linear_nocoast':
for jj in npy.arange(0,wd):
nudgcoef_south[jj,:] = mask[0,:] * fc * (wd-jj) / float(wd)
else:
print('transition not coded') ; pass
# north boundary
if self.north_dict['nudge'] is True:
fc = self.north_dict['factor']
wd = self.north_dict['width']
tr = self.north_dict['transition']
if tr == 'linear':
for jj in npy.arange(ny-wd,ny):
                    nudgcoef_north[jj,:] = fc * (wd-ny+jj) / float(wd)
            elif tr == 'linear_nocoast':
for jj in npy.arange(ny-wd,ny):
                    nudgcoef_north[jj,:] = mask[-1,:] * fc * (wd-ny+jj) / float(wd)
else:
print('transition not coded') ; pass
# create the total coefficient by combining all 4 fields
        # the max function is useful to make nice corners when the
        # individual fields overlap
# maybe not the most efficient but short and readable
for jj in npy.arange(ny):
for ji in npy.arange(nx):
nudgcoef[jj,ji] = max(nudgcoef_west[jj,ji], \
nudgcoef_east[jj,ji],nudgcoef_north[jj,ji],nudgcoef_south[jj,ji])
return nudgcoef
def _write_nc_file(self):
''' writing to netcdf and multiplying by inverse timescales '''
ncfile = self.foutname
fid = nc.Dataset(ncfile, 'w', format='NETCDF3_CLASSIC')
# dimensions
fid.createDimension('xi_rho', npy.size(self.grd.hgrid.mask_rho,1))
fid.createDimension('eta_rho', npy.size(self.grd.hgrid.mask_rho,0))
fid.createDimension('s_rho', self.grd.vgrid.N)
fid.createDimension('s_w', self.grd.vgrid.Np)
        fid.description = 'Nudging coefficients for grid ' + self.grd.name
# vertical coordinate
fid.createVariable('s_rho', 'f8', ('s_rho'))
fid.variables['s_rho'].long_name = 'S-coordinate at RHO-points'
fid.variables['s_rho'].valid_min = '-1'
fid.variables['s_rho'].valid_max = '0'
fid.variables['s_rho'].field = 's_rho,scalar'
fid.variables['s_rho'][:] = self.grd.vgrid.s_rho
# variables
O_M2_NudgeCoef = fid.createVariable('M2_NudgeCoef', 'f8', ('eta_rho','xi_rho',))
O_M3_NudgeCoef = fid.createVariable('M3_NudgeCoef', 'f8', ('s_rho','eta_rho','xi_rho',))
O_temp_NudgeCoef = fid.createVariable('temp_NudgeCoef', 'f8', ('s_rho','eta_rho','xi_rho',))
O_salt_NudgeCoef = fid.createVariable('salt_NudgeCoef', 'f8', ('s_rho','eta_rho','xi_rho',))
O_tracer_NudgeCoef = fid.createVariable('tracer_NudgeCoef', 'f8', ('s_rho','eta_rho','xi_rho',))
# data
O_M2_NudgeCoef[:,:] = (1./self.tra_ts['M2']) * self.nud2
O_M3_NudgeCoef[:,:,:] = (1./self.tra_ts['M3']) * self.nud3
O_temp_NudgeCoef[:,:,:] = (1./self.tra_ts['temp']) * self.nud3
O_salt_NudgeCoef[:,:,:] = (1./self.tra_ts['salt']) * self.nud3
O_tracer_NudgeCoef[:,:,:] = (1./self.tra_ts['tracer']) * self.nud3
# attributes
O_M2_NudgeCoef.long_name = '2D momentum inverse nudging coefficients'
O_M2_NudgeCoef.units = 'days-1'
O_M2_NudgeCoef.coordinates = 'xi_rho eta_rho'
O_M3_NudgeCoef.long_name = '3D momentum inverse nudging coefficients'
O_M3_NudgeCoef.units = 'days-1'
O_M3_NudgeCoef.coordinates = 'xi_rho eta_rho s_rho'
O_temp_NudgeCoef.long_name = 'temp inverse nudging coefficients'
O_temp_NudgeCoef.units = 'days-1'
O_temp_NudgeCoef.coordinates = 'xi_rho eta_rho s_rho'
O_salt_NudgeCoef.long_name = 'salt inverse nudging coefficients'
O_salt_NudgeCoef.units = 'days-1'
O_salt_NudgeCoef.coordinates = 'xi_rho eta_rho s_rho'
O_tracer_NudgeCoef.long_name = 'generic tracer inverse nudging coefficients'
O_tracer_NudgeCoef.units = 'days-1'
O_tracer_NudgeCoef.coordinates = 'xi_rho eta_rho s_rho'
# close
fid.close()
return None
#----------------------------------------------------------------------------
# example :
ccs1 = nudgcoef('CCS')
east = {'nudge':False,'factor': 1,'width':10,'transition':'linear_nocoast'}
west = {'nudge':True,'factor': 1,'width':10,'transition':'linear_nocoast'}
north = {'nudge':True,'factor': 1,'width':10,'transition':'linear_nocoast'}
south = {'nudge':True,'factor': 1,'width':10,'transition':'linear_nocoast'}
tracer_timescales = {'M2':30,'M3':30,'temp':30,'salt':30,'tracer':30}
#ccs1(east,west,north,south,tracer_timescales)
# strong restoring
east = {'nudge':False,'factor': 1,'width':70,'transition':'linear_nocoast'}
west = {'nudge':True,'factor': 1,'width':70,'transition':'linear_nocoast'}
north = {'nudge':True,'factor': 1,'width':70,'transition':'linear_nocoast'}
south = {'nudge':True,'factor': 1,'width':70,'transition':'linear_nocoast'}
tracer_timescales = {'M2':10,'M3':10,'temp':10,'salt':10,'tracer':10}
ccs1(east,west,north,south,tracer_timescales,foutname='./CCS_nudging_coef_large.nc')
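# ---------------------------------------------------------------------------
# Worked example of the resulting values (illustrative note, not original
# code): with factor=1, width=70 and a 10-day tracer timescale, the inverse
# nudging coefficient written to file is 1/10 day^-1 right at an open
# boundary and decays linearly to 0 day^-1 seventy grid points into the
# domain, following coef = factor * (width - i) / width * (1 / timescale).
# ---------------------------------------------------------------------------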
|
1484507347448e53fcdbfcb6d8845f4f4fe36cdb
|
36a094a44450d1353e9dfc8242a54e2bb70bb9b5
|
/tests/repository/metadata/meta_common.py
|
771b7bf039fde983ea0ce9c894ab258dd0822e2e
|
[
"Apache-2.0"
] |
permissive
|
zyfra/ebonite
|
52843ce847a3fd28e4ba8ab64d986dcfb23671c0
|
b01b662c43709d152940f488574d78ff25f89ecf
|
refs/heads/master
| 2022-11-29T21:20:02.358797
| 2020-10-19T12:22:49
| 2020-10-19T12:22:49
| 221,721,146
| 275
| 18
|
Apache-2.0
| 2022-11-21T22:44:02
| 2019-11-14T14:49:47
|
Python
|
UTF-8
|
Python
| false
| false
| 51,808
|
py
|
meta_common.py
|
import datetime
from typing import List
import pytest
from pyjackson.utils import get_class_fields
from ebonite.core.errors import (ExistingEnvironmentError, ExistingImageError, ExistingInstanceError,
ExistingModelError, ExistingPipelineError, ExistingProjectError, ExistingTaskError,
ImageNotInTaskError, ModelNotInTaskError, NonExistingEnvironmentError,
NonExistingImageError, NonExistingInstanceError, NonExistingModelError,
NonExistingPipelineError, NonExistingProjectError, NonExistingTaskError,
PipelineNotInTaskError, TaskNotInProjectError)
from ebonite.core.objects.core import Model, Pipeline, Project, Task
from ebonite.repository.metadata import MetadataRepository
# from tests.ext.sqlalchemy.conftest import sqlalchemy_meta as meta
# from tests.repository.metadata.test_local.conftest import local_meta as meta
# _ = [meta]
def assert_objects_equal_except_fields(o1, o2, *, excepted_fields: List[str] = None):
excepted_fields = excepted_fields or []
excepted_fields = set(excepted_fields)
assert type(o1) == type(o2)
fields1 = [f.name for f in get_class_fields(type(o1))]
fields2 = [f.name for f in get_class_fields(type(o2))]
assert fields1 == fields2
for field1, field2 in zip(fields1, fields2):
assert field1 == field2
v1 = getattr(o1, field1)
v2 = getattr(o2, field2)
if field1 in excepted_fields:
assert v1 != v2
else:
assert v1 == v2
def update_object_fields(o, *, excepted_fields: List[str] = None):
excepted_fields = excepted_fields or []
excepted_fields = set(excepted_fields)
for field in [f.name for f in get_class_fields(type(o))]:
additional_value = 2
if field not in excepted_fields:
v = getattr(o, field)
if isinstance(v, str):
additional_value = str(additional_value)
if isinstance(v, datetime.datetime):
additional_value = datetime.timedelta(additional_value)
if isinstance(v, list):
additional_value = [v[0]]
try:
setattr(o, field, v + additional_value)
except TypeError as e:
raise TypeError(f'Field {field}:', e)
return o
# ################# PROJECT ##########################
def test_create_project(meta: MetadataRepository, project: Project):
project = meta.create_project(project)
assert project is not None
assert project.has_meta_repo
def test_create_project_is_reference(meta: MetadataRepository, project: Project, author, creation_date):
expected_project = meta.create_project(project)
assert expected_project is not None
expected_project.name = "KEK"
actual_project = meta.get_project_by_id(expected_project.id)
assert_objects_equal_except_fields(expected_project, actual_project, excepted_fields=['name'])
def test_create_project_source_is_changed(meta: MetadataRepository, project: Project):
new_project = meta.create_project(project)
assert new_project is project
def test_create_existing_project(meta: MetadataRepository, project: Project):
meta.create_project(project)
with pytest.raises(ExistingProjectError):
meta.create_project(project)
def test_get_projects(meta: MetadataRepository, project: Project):
created_project = meta.create_project(project)
assert meta.get_projects() == [created_project]
def test_get_project_by_name(meta: MetadataRepository, project: Project):
project = meta.create_project(project)
assert project == meta.get_project_by_name(project.name)
assert project.has_meta_repo
def test_get_project_by_id(meta: MetadataRepository, project: Project):
project = meta.create_project(project)
assert project == meta.get_project_by_id(project.id)
assert project.has_meta_repo
def test_get_or_create_project_not_exists(meta: MetadataRepository):
project = meta.get_or_create_project("Test Project")
assert project is not None
assert project.id is not None
assert project.name == "Test Project"
def test_get_or_create_project_exists(meta: MetadataRepository, project: Project):
expected_project = meta.create_project(project)
actual_project = meta.get_or_create_project(expected_project.name)
assert actual_project is not None
assert expected_project.id == actual_project.id
assert expected_project.name == actual_project.name
assert id(expected_project) != id(actual_project)
def test_update_project_with_tasks(meta: MetadataRepository, project: Project, task: Task):
project = meta.create_project(project)
task.project = project
task = meta.create_task(task)
project.add_task(task)
project = update_object_fields(project, excepted_fields=['id', 'tasks'])
task = update_object_fields(task,
excepted_fields=['id', 'models', 'pipelines', 'datasets', 'evaluation_sets', 'metrics',
'project_id'])
updated_project = meta.update_project(project)
assert updated_project is project
assert project.has_meta_repo
assert "Test project2" == updated_project.name
assert project == meta.get_project_by_id(project.id)
assert len(updated_project.tasks) == 1
assert task.id in updated_project.tasks
assert task == updated_project.tasks.get(task.id)
def test_update_project_source_is_changed(meta: MetadataRepository, project: Project):
project = meta.create_project(project)
project.name = "Test project2"
new_project = meta.update_project(project)
assert new_project == project
def test_update_project_is_reference(meta: MetadataRepository, project: Project):
project = meta.create_project(project)
id = project._id
project.name = "Test project2"
expected_project = meta.update_project(project)
assert id == expected_project._id
assert "Test project2" == expected_project.name
expected_project.name = "KEK"
actual_project = meta.get_project_by_id(expected_project.id)
assert_objects_equal_except_fields(expected_project, actual_project, excepted_fields=['name'])
def test_update_not_existing_project(meta: MetadataRepository, project: Project):
with pytest.raises(NonExistingProjectError):
meta.update_project(project)
def test_save_not_existing_project(meta: MetadataRepository, project: Project):
saved_project = meta.save_project(project)
assert saved_project.name == project.name
assert project.name == meta.get_project_by_id(saved_project.id).name
assert project.has_meta_repo
def test_save_existing_project(meta: MetadataRepository, project: Project):
project = meta.create_project(project)
saved_project = meta.save_project(project)
assert saved_project.id == project.id
assert project == meta.get_project_by_id(saved_project.id)
def test_save_updated_existing_project(meta: MetadataRepository, project: Project):
project = meta.create_project(project)
project = update_object_fields(project, excepted_fields=['id', 'tasks'])
saved_project = meta.save_project(project)
assert saved_project == project
assert project == meta.get_project_by_id(saved_project.id)
def test_save_updated_existing_project_with_existing_name(meta: MetadataRepository,
project: Project,
project2: Project):
meta.create_project(project)
project2.name = project.name
with pytest.raises(ExistingProjectError):
meta.save_project(project2)
def test_save_project_is_reference(meta: MetadataRepository, project: Project):
saved_project = meta.save_project(project)
saved_project.name = "KEK"
actual_project = meta.get_project_by_id(saved_project.id)
assert_objects_equal_except_fields(saved_project, actual_project, excepted_fields=['name'])
def test_delete_project(meta: MetadataRepository, project: Project):
project = meta.create_project(project)
meta.delete_project(project)
assert meta.get_project_by_id(project.id) is None
assert not project.has_meta_repo
assert project.id is None
def test_delete_not_existing_project(meta: MetadataRepository, project: Project):
with pytest.raises(NonExistingProjectError):
meta.delete_project(project)
# ################## TASK ##########################
def test_create_task(meta: MetadataRepository, project: Project, task: Task):
task.project_id = meta.create_project(project).id
task = meta.create_task(task)
assert task is not None
assert task.has_meta_repo
project = meta.get_project_by_id(project.id)
assert len(project.tasks) == 1
assert task.id in project.tasks
assert project.tasks[task.id] == task
def test_create_task_without_project(meta: MetadataRepository, task: Task):
with pytest.raises(TaskNotInProjectError):
meta.create_task(task)
def test_create_task_with_unexisting_project(meta: MetadataRepository):
task_with_wrong_project = Task(name='failed_task', project_id=2)
with pytest.raises(NonExistingProjectError):
meta.create_task(task_with_wrong_project)
def test_create_task_source_is_not_changed(meta: MetadataRepository, project: Project, task: Task):
task.project = meta.create_project(project)
new_task = meta.create_task(task)
assert new_task is task
def test_create_task_is_reference(meta: MetadataRepository, project: Project, task: Task):
task.project_id = meta.create_project(project).id
expected_task = meta.create_task(task)
assert expected_task is not None
task.name = "KEK"
actual_task = meta.get_task_by_id(expected_task.id)
assert_objects_equal_except_fields(expected_task, actual_task, excepted_fields=['name'])
def test_create_existing_task(meta: MetadataRepository, project: Project, task: Task, task2: Task):
project_id = meta.create_project(project).id
task.project_id = project_id
task = meta.create_task(task)
assert task is not None
task2.name = task.name
task2.project_id = project_id
with pytest.raises(ExistingTaskError):
meta.create_task(task2)
def test_get_tasks(meta: MetadataRepository, project: Project, task: Task):
created_project = meta.create_project(project)
task.project = created_project
created_task = meta.create_task(task)
assert meta.get_tasks(created_project) == [created_task]
def test_get_task_by_name(meta: MetadataRepository, project: Project, task: Task):
project = meta.create_project(project)
task.project = project
task_new = meta.create_task(task)
assert task_new is not None
assert task_new == meta.get_task_by_name(project, task.name)
assert task.has_meta_repo
def test_get_task_by_id(meta: MetadataRepository, project: Project, task: Task):
project = meta.create_project(project)
task.project_id = project.id
task_new = meta.create_task(task) # Assume that we do not change the task var in the create_task.
assert task_new is not None
assert task_new == meta.get_task_by_id(task_new.id)
assert task.has_meta_repo
def test_get_or_create_task_not_exists(meta: MetadataRepository):
project_name = 'test project'
task = meta.get_or_create_task(project_name, "Test Task")
assert task is not None
assert task.id is not None
assert task.name == "Test Task"
assert task.project_id is not None
project = meta.get_project_by_name(project_name)
assert project.id is not None
assert project.name == project_name
assert len(project.tasks) != 0
assert task.id in project.tasks
assert task.has_meta_repo
def test_get_or_create_task_exists(meta: MetadataRepository, project: Project, task: Task):
task.project = meta.create_project(project)
expected_task = meta.create_task(task)
actual_task = meta.get_or_create_task(project.name, expected_task.name)
assert actual_task is not None
assert expected_task == actual_task
assert id(expected_task) != id(actual_task)
project = meta.get_project_by_name(project.name)
assert expected_task in project.tasks.values()
assert expected_task.project_id == actual_task.project_id
assert actual_task.has_meta_repo
def test_get_or_create_task_project_exists(meta: MetadataRepository, project: Project):
project = meta.create_project(project)
assert project is not None
task = meta.get_or_create_task(project.name, "Test Task")
assert task is not None
assert task.id is not None
assert task.name == "Test Task"
task_project = meta.get_project_by_id(task.project_id)
assert task_project.id == task.project_id
def test_update_task_with_models(meta: MetadataRepository, project: Project, task: Task, model: Model):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
id = task.id
model.task = task
model = meta.create_model(model)
task.add_model(model)
task = update_object_fields(task,
excepted_fields=['id', 'models', 'pipelines', 'datasets', 'evaluation_sets', 'metrics',
'project_id'])
model = update_object_fields(model, excepted_fields=['id', 'wrapper', 'artifact', 'requirements',
'wrapper_meta', 'task_id', 'wrapper_obj', 'params',
'evaluations'])
updated_task = meta.update_task(task)
assert id == task.id
assert updated_task is task
assert task == meta.get_task_by_id(task.id)
assert len(task.models) == 1
assert model.id in task.models
assert model == meta.get_model_by_id(model.id)
assert meta.get_model_by_id(model.id).name == 'Test Model2'
assert task.has_meta_repo
def test_update_task_with_pipelines(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
id = task.id
pipeline.task = task
pipeline = meta.create_pipeline(pipeline)
task.add_pipeline(pipeline)
task = update_object_fields(task, excepted_fields=['id', 'pipelines', 'models', 'images', 'project_id', 'datasets',
'metrics', 'evaluation_sets', 'evaluations'])
pipeline = update_object_fields(pipeline, excepted_fields=['id', 'steps', 'input_data', 'output_data',
'models', 'task_id', 'evaluations'])
updated_task = meta.update_task(task)
assert id == task.id
assert updated_task is task
assert task == meta.get_task_by_id(task.id)
assert len(task.pipelines) == 1
assert pipeline.id in task.pipelines
assert pipeline == meta.get_pipeline_by_id(pipeline.id)
assert meta.get_pipeline_by_id(pipeline.id).name == 'Test Pipeline2'
assert task.has_meta_repo
def test_update_task_with_images(meta: MetadataRepository, project: Project, task: Task, image, environment):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
id = task.id
env = meta.create_environment(environment)
image.task = task
image.environment = env
image = meta.create_image(image)
task.add_image(image)
task = update_object_fields(task, excepted_fields=['id', 'pipelines', 'models', 'images', 'project_id', 'datasets',
'evaluation_sets', 'metrics'])
image = update_object_fields(image, excepted_fields=['id', 'params', 'source', 'environment_id', 'task_id'])
updated_task = meta.update_task(task)
assert id == task.id
assert updated_task is task
assert task == meta.get_task_by_id(task.id)
assert len(task.images) == 1
assert image.id in task.images
assert image == meta.get_image_by_id(image.id)
assert meta.get_image_by_id(image.id).name == 'Meta Test Image2'
assert task.has_meta_repo
def test_update_task_source_is_changed(meta: MetadataRepository, project: Project, task: Task, model: Model):
task.project = meta.create_project(project)
saved_task = meta.create_task(task)
assert saved_task is task
id = saved_task.id
model.task = saved_task
model = meta.create_model(model)
saved_task = update_object_fields(saved_task,
excepted_fields=['id', 'models', 'pipelines', 'datasets', 'evaluation_sets',
'metrics', 'project_id'])
saved_task.add_model(model)
saved_task = meta.update_task(saved_task)
assert id == saved_task.id
assert saved_task == meta.get_task_by_id(saved_task.id)
assert model == saved_task.models.get(model.id)
assert task is saved_task
def test_update_task_is_reference(meta: MetadataRepository, project: Project, model: Model):
task_entity = Task("Test Task")
task_entity.project = meta.create_project(project)
task = meta.create_task(task_entity)
assert task is not None
id = task.id
model.task_id = task.id
model = meta.create_model(model)
task.name = "Test Task 2"
task.add_model(model)
task = meta.update_task(task)
assert id == task.id
assert "Test Task 2" == task.name
assert model == task.models.get(model.id)
task.name = "KEK"
actual_task = meta.get_task_by_id(task.id)
assert_objects_equal_except_fields(task, actual_task, excepted_fields=['name'])
def test_update_not_existing_task(meta: MetadataRepository, project: Project, task: Task):
project = meta.create_project(project)
assert project is not None
task.project = project
with pytest.raises(NonExistingTaskError):
meta.update_task(task)
def test_save_not_existing_task(meta: MetadataRepository, project: Project):
project = meta.create_project(project)
task = Task("Task")
task.project = project
saved_task = meta.save_task(task)
assert saved_task.name == task.name
assert saved_task.project_id == task.project_id
assert task.name == meta.get_task_by_id(saved_task.id).name
assert task.has_meta_repo
def test_save_existing_task(meta: MetadataRepository, project: Project):
project = meta.create_project(project)
task = Task("Task")
task.project = project
task = meta.create_task(task)
saved_task = meta.save_task(task)
assert saved_task.id == task.id
assert saved_task.project_id == task.project_id
assert task == meta.get_task_by_id(saved_task.id)
assert task.has_meta_repo
def test_save_updated_existing_task(meta: MetadataRepository, project: Project):
project = meta.create_project(project)
task = Task("Task")
task.project = project
task = meta.create_task(task)
task = update_object_fields(task,
excepted_fields=['id', 'models', 'pipelines', 'datasets', 'evaluation_sets', 'metrics',
'project_id'])
saved_task = meta.save_task(task)
assert saved_task == task
assert task == meta.get_task_by_id(saved_task.id)
def test_save_updated_existing_task_with_existing_name(meta: MetadataRepository, project: Project):
project = meta.create_project(project)
task = Task("Task")
task.project = project
task = meta.create_task(task)
task2 = Task("Task2")
task2.project = project
task2 = meta.create_task(task2)
task.name = "Task2"
with pytest.raises(ExistingTaskError):
meta.save_task(task)
def test_save_task_is_reference(meta: MetadataRepository, project: Project):
project = meta.create_project(project)
task = Task("Task")
task.project = project
saved_task = meta.save_task(task)
saved_task.name = "KEK"
actual_task = meta.get_task_by_id(saved_task.id)
assert_objects_equal_except_fields(saved_task, actual_task, excepted_fields=['name'])
def test_delete_task(meta: MetadataRepository, project: Project, task: Task):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
meta.delete_task(task)
assert meta.get_task_by_id(task.id) is None
assert not task.has_meta_repo
assert task.id is None
project = meta.get_project_by_id(project.id)
assert len(project.tasks) == 0
def test_delete_not_existing_task(meta: MetadataRepository, task: Task):
with pytest.raises(NonExistingTaskError):
meta.delete_task(task)
def test_create_model(meta: MetadataRepository, project: Project, task: Task, model: Model):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
model.task_id = task.id
model = meta.create_model(model)
assert model is not None
assert model.has_meta_repo
task = meta.get_task_by_id(task.id)
assert len(task.models) == 1
assert model.id in task.models
assert task.models[model.id] == model
def test_create_model_without_task(meta: MetadataRepository, model: Model):
with pytest.raises(ModelNotInTaskError):
meta.create_model(model)
def test_create_model_source_is_changed(meta: MetadataRepository, project: Project, task: Task, model: Model):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
model.task_id = task.id
saved_model = meta.create_model(model)
assert saved_model is model
def test_create_model_is_reference(meta: MetadataRepository, project: Project, task: Task, model: Model):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
model.task_id = task.id
model = meta.create_model(model)
assert model is not None
model.name = "KEK"
actual_model = meta.get_model_by_id(model.id)
assert_objects_equal_except_fields(model, actual_model, excepted_fields=['name'])
model.task_id = None
actual_model = meta.get_model_by_id(model.id)
assert_objects_equal_except_fields(model, actual_model, excepted_fields=['name', 'task_id'])
def test_create_existing_model(meta: MetadataRepository, project: Project, task: Task, model: Model, model2: Model):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
model.task_id = task.id
model = meta.create_model(model)
assert model is not None
model2.task_id = task.id
model2.name = model.name
with pytest.raises(ExistingModelError):
meta.create_model(model2)
def test_create_model_with_unexisting_task(meta: MetadataRepository, model: Model):
model.task_id = 3
with pytest.raises(NonExistingTaskError):
meta.create_model(model)
def test_get_models(meta: MetadataRepository, project: Project, task: Task, model: Model):
task.project = meta.create_project(project)
created_task = meta.create_task(task)
model.task = created_task
created_model = meta.create_model(model)
actual_models = meta.get_models(created_task)
assert actual_models == [created_model]
def test_get_model(meta: MetadataRepository, project: Project, task: Task, model: Model):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
model.task_id = task.id
model = meta.create_model(model)
assert model is not None
assert model == meta.get_model_by_name(model.name, model.task_id)
assert model.has_meta_repo
def test_get_model_by_id(meta: MetadataRepository, project: Project, task: Task, model: Model):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
model.task_id = task.id
model = meta.create_model(model)
assert model is not None
assert model == meta.get_model_by_id(model.id)
assert model.has_meta_repo
def test_update_model(meta: MetadataRepository, project: Project, task: Task, model: Model):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
model.task_id = task.id
model = meta.create_model(model)
assert model is not None
id = model.id
model = update_object_fields(model, excepted_fields=['id', 'wrapper', 'artifact', 'requirements',
'wrapper_meta', 'task_id', 'wrapper_obj', 'params',
'evaluations'])
model = meta.update_model(model)
assert id == model.id
assert model == meta.get_model_by_id(model.id)
assert model.has_meta_repo
def test_update_model_source_is_changed(meta: MetadataRepository, project: Project, task: Task, model: Model):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
model.task_id = task.id
saved_model = meta.create_model(model)
assert saved_model is not None
id = saved_model.id
saved_model = update_object_fields(model, excepted_fields=['id', 'wrapper', 'artifact', 'requirements',
'wrapper_meta', 'task_id', 'wrapper_obj', 'params',
'evaluations'])
saved_model = meta.update_model(saved_model)
assert id == saved_model.id
assert model == meta.get_model_by_id(saved_model.id)
assert model is saved_model
def test_update_model_is_reference(meta: MetadataRepository, project: Project, task: Task, model: Model):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
model.task_id = task.id
model = meta.create_model(model)
assert model is not None
id = model.id
model.name = "Test Model 2"
model = meta.update_model(model)
assert id == model.id
assert "Test Model 2" == model.name
model.name = "KEK"
actual_model = meta.get_model_by_id(model.id)
assert_objects_equal_except_fields(model, actual_model, excepted_fields=['name'])
def test_update_not_existing_model(meta: MetadataRepository, project: Project, task: Task, model: Model):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
model.task_id = task.id
with pytest.raises(NonExistingModelError):
meta.update_model(model)
def test_save_not_existing_model(meta: MetadataRepository, project: Project, task: Task, model: Model):
project = meta.create_project(project)
task.project = project
task = meta.create_task(task)
model.task_id = task.id
saved_model = meta.save_model(model)
assert saved_model.name == model.name
assert saved_model.task_id == model.task_id
assert model.name == meta.get_model_by_id(saved_model.id).name
assert model.has_meta_repo
def test_save_existing_model(meta: MetadataRepository, project: Project, task: Task, model: Model):
project = meta.create_project(project)
task.project = project
task = meta.create_task(task)
model.task_id = task.id
model = meta.create_model(model)
saved_model = meta.save_model(model)
assert saved_model.id == model.id
assert saved_model.task_id == model.task_id
assert model == meta.get_model_by_id(saved_model.id)
def test_save_updated_existing_model(meta: MetadataRepository, project: Project, task: Task, model: Model):
project = meta.create_project(project)
task.project = project
task = meta.create_task(task)
model.task_id = task.id
model = meta.create_model(model)
model = update_object_fields(model, excepted_fields=['id', 'wrapper', 'artifact', 'requirements',
'wrapper_meta', 'task_id', 'wrapper_obj', 'params',
'evaluations'])
saved_model = meta.save_model(model)
assert saved_model == model
assert model == meta.get_model_by_id(saved_model.id)
def test_save_updated_existing_model_with_existing_name(meta: MetadataRepository, project: Project, task: Task,
model: Model, model2: Model):
project = meta.create_project(project)
task.project = project
task = meta.create_task(task)
model.task_id = task.id
model = meta.create_model(model)
model2.task_id = task.id
model2 = meta.create_model(model2)
model.name = model2.name
with pytest.raises(ExistingModelError):
meta.save_model(model)
def test_save_model_is_reference(meta: MetadataRepository, project: Project, task: Task, model: Model):
project = meta.create_project(project)
task.project = project
task = meta.create_task(task)
model.task_id = task.id
saved_model = meta.save_model(model)
saved_model.name = "KEK"
actual_model = meta.get_model_by_id(saved_model.id)
assert_objects_equal_except_fields(saved_model, actual_model, excepted_fields=['name'])
def test_delete_model(meta: MetadataRepository, project: Project, task: Task, model: Model):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
model.task_id = task.id
model = meta.create_model(model)
assert model is not None
meta.delete_model(model)
assert meta.get_model_by_id(model.id) is None
assert not model.has_meta_repo
assert model.id is None
task = meta.get_task_by_id(task.id)
assert len(task.models) == 0
def test_delete_not_existing_model(meta: MetadataRepository, model: Model):
with pytest.raises(NonExistingModelError):
meta.delete_model(model)
# __________
def test_create_pipeline(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
pipeline.task_id = task.id
pipeline = meta.create_pipeline(pipeline)
assert pipeline is not None
assert pipeline.has_meta_repo
task = meta.get_task_by_id(task.id)
assert len(task.pipelines) == 1
assert pipeline.id in task.pipelines
assert task.pipelines[pipeline.id] == pipeline
def test_create_pipeline_without_task(meta: MetadataRepository, pipeline: Pipeline):
with pytest.raises(PipelineNotInTaskError):
meta.create_pipeline(pipeline)
def test_create_pipeline_with_unexisting_task(meta: MetadataRepository, pipeline: Pipeline):
pipeline.task_id = 3
with pytest.raises(NonExistingTaskError):
meta.create_pipeline(pipeline)
def test_create_pipeline_source_is_changed(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
pipeline.task_id = task.id
saved_pipeline = meta.create_pipeline(pipeline)
assert saved_pipeline is pipeline
def test_create_pipeline_is_reference(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
pipeline.task_id = task.id
pipeline = meta.create_pipeline(pipeline)
assert pipeline is not None
pipeline.name = "KEK"
actual_pipeline = meta.get_pipeline_by_id(pipeline.id)
assert_objects_equal_except_fields(pipeline, actual_pipeline, excepted_fields=['name'])
pipeline.task_id = None
actual_pipeline = meta.get_pipeline_by_id(pipeline.id)
assert_objects_equal_except_fields(pipeline, actual_pipeline, excepted_fields=['name', 'task_id'])
def test_create_existing_pipeline(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline,
pipeline2: Pipeline):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
pipeline.task_id = task.id
pipeline = meta.create_pipeline(pipeline)
assert pipeline is not None
pipeline2.task_id = task.id
pipeline2.name = pipeline.name
with pytest.raises(ExistingPipelineError):
meta.create_pipeline(pipeline2)
def test_get_pipelines(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
task.project = meta.create_project(project)
created_task = meta.create_task(task)
pipeline.task = created_task
created_pipeline = meta.create_pipeline(pipeline)
actual_pipelines = meta.get_pipelines(created_task)
assert actual_pipelines == [created_pipeline]
def test_get_pipeline(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
pipeline.task_id = task.id
pipeline = meta.create_pipeline(pipeline)
assert pipeline is not None
assert pipeline == meta.get_pipeline_by_name("Test Pipeline", pipeline.task_id)
assert pipeline.has_meta_repo
def test_get_pipeline_by_id(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
pipeline.task_id = task.id
pipeline = meta.create_pipeline(pipeline)
assert pipeline is not None
assert pipeline == meta.get_pipeline_by_id(pipeline.id)
assert pipeline.has_meta_repo
def test_update_pipeline(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
pipeline.task_id = task.id
pipeline = meta.create_pipeline(pipeline)
assert pipeline is not None
id = pipeline.id
pipeline = update_object_fields(pipeline,
excepted_fields=['id', 'input_data', 'output_data', 'task_id', 'evaluations'])
pipeline = meta.update_pipeline(pipeline)
assert id == pipeline.id
assert pipeline == meta.get_pipeline_by_id(pipeline.id)
assert pipeline.has_meta_repo
def test_update_pipeline_source_is_changed(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
pipeline.task_id = task.id
saved_pipeline = meta.create_pipeline(pipeline)
assert saved_pipeline is not None
id = saved_pipeline.id
saved_pipeline = update_object_fields(pipeline,
excepted_fields=['id', 'input_data', 'output_data', 'task_id', 'evaluations'])
saved_pipeline = meta.update_pipeline(saved_pipeline)
assert id == saved_pipeline.id
assert pipeline == meta.get_pipeline_by_id(saved_pipeline.id)
assert pipeline is saved_pipeline
def test_update_pipeline_is_reference(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
pipeline.task_id = task.id
pipeline = meta.create_pipeline(pipeline)
assert pipeline is not None
id = pipeline.id
pipeline.name = "Test Pipeline 2"
pipeline = meta.update_pipeline(pipeline)
assert id == pipeline.id
assert "Test Pipeline 2" == pipeline.name
pipeline.name = "KEK"
actual_pipeline = meta.get_pipeline_by_id(pipeline.id)
assert_objects_equal_except_fields(pipeline, actual_pipeline, excepted_fields=['name'])
def test_update_not_existing_pipeline(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
pipeline.task_id = task.id
with pytest.raises(NonExistingPipelineError):
meta.update_pipeline(pipeline)
def test_save_not_existing_pipeline(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
project = meta.create_project(project)
task.project = project
task = meta.create_task(task)
pipeline.task_id = task.id
saved_pipeline = meta.save_pipeline(pipeline)
assert saved_pipeline.name == pipeline.name
assert saved_pipeline.task_id == pipeline.task_id
assert pipeline.name == meta.get_pipeline_by_id(saved_pipeline.id).name
assert pipeline.has_meta_repo
def test_save_existing_pipeline(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
project = meta.create_project(project)
task.project = project
task = meta.create_task(task)
pipeline.task_id = task.id
pipeline = meta.create_pipeline(pipeline)
saved_pipeline = meta.save_pipeline(pipeline)
assert saved_pipeline.id == pipeline.id
assert saved_pipeline.task_id == pipeline.task_id
assert pipeline == meta.get_pipeline_by_id(saved_pipeline.id)
def test_save_updated_existing_pipeline(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
project = meta.create_project(project)
task.project = project
task = meta.create_task(task)
pipeline.task_id = task.id
pipeline = meta.create_pipeline(pipeline)
pipeline = update_object_fields(pipeline,
excepted_fields=['id', 'input_data', 'output_data', 'task_id', 'evaluations'])
saved_pipeline = meta.save_pipeline(pipeline)
assert saved_pipeline == pipeline
assert pipeline == meta.get_pipeline_by_id(saved_pipeline.id)
def test_save_updated_existing_pipeline_with_existing_name(meta: MetadataRepository, project: Project, task: Task,
pipeline: Pipeline, pipeline2: Pipeline):
project = meta.create_project(project)
task.project = project
task = meta.create_task(task)
pipeline.task_id = task.id
pipeline = meta.create_pipeline(pipeline)
pipeline2.task_id = task.id
pipeline2 = meta.create_pipeline(pipeline2)
pipeline.name = pipeline2.name
with pytest.raises(ExistingPipelineError):
meta.save_pipeline(pipeline)
def test_save_pipeline_is_reference(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
project = meta.create_project(project)
task.project = project
task = meta.create_task(task)
pipeline.task_id = task.id
saved_pipeline = meta.save_pipeline(pipeline)
saved_pipeline.name = "KEK"
actual_pipeline = meta.get_pipeline_by_id(saved_pipeline.id)
assert_objects_equal_except_fields(saved_pipeline, actual_pipeline, excepted_fields=['name'])
def test_delete_pipeline(meta: MetadataRepository, project: Project, task: Task, pipeline: Pipeline):
task.project = meta.create_project(project)
task = meta.create_task(task)
assert task is not None
pipeline.task_id = task.id
pipeline = meta.create_pipeline(pipeline)
assert pipeline is not None
meta.delete_pipeline(pipeline)
assert meta.get_pipeline_by_id(pipeline.id) is None
assert not pipeline.has_meta_repo
assert pipeline.id is None
task = meta.get_task_by_id(task.id)
assert len(task.pipelines) == 0
def test_delete_not_existing_pipeline(meta: MetadataRepository, pipeline: Pipeline):
with pytest.raises(NonExistingPipelineError):
meta.delete_pipeline(pipeline)
# ___________
def test_get_images__empty(meta: MetadataRepository, created_task):
assert meta.get_images(created_task) == []
def test_get_images__full(meta: MetadataRepository, created_task, created_image):
assert meta.get_images(created_task) == [created_image]
def test_get_image_by_name(meta: MetadataRepository, created_task, created_image):
assert meta.get_image_by_name(created_image.name, created_task) == created_image
def test_get_image_by_id(meta: MetadataRepository, created_image):
assert meta.get_image_by_id(created_image.id) == created_image
def test_create_image__ok(meta: MetadataRepository, image, created_image, created_task):
assert image.id is None
assert created_image.id is not None
assert created_image.task_id == created_task.id
assert created_image.name == image.name
assert created_image.params == image.params
task = meta.get_task_by_id(created_image.task_id)
assert len(task.images) == 1
assert created_image.id in task.images
assert task.images[created_image.id] == created_image
def test_create_image__no_task(meta: MetadataRepository, image):
with pytest.raises(ImageNotInTaskError):
meta.create_image(image)
def test_create_image__saved_image(meta: MetadataRepository, created_image):
with pytest.raises(ExistingImageError):
meta.create_image(created_image)
def test_create_image_with_unexisting_task(meta: MetadataRepository, image):
image.task_id = 3
with pytest.raises(NonExistingTaskError):
meta.create_image(image)
def test_update_image__ok(meta: MetadataRepository, created_image):
author = 'hey'
key = 2
assert created_image.author != author
assert created_image.params.key != key
created_image.author = author
created_image.params.key = key
i = meta.update_image(created_image)
assert i.author == author
assert i.params.key == key
def test_update_image__no_task(meta: MetadataRepository, created_image):
created_image.task_id = None
with pytest.raises(ImageNotInTaskError):
meta.create_image(created_image)
def test_update_image__unsaved_image(meta: MetadataRepository, created_task, image):
image.task = created_task
with pytest.raises(NonExistingImageError):
meta.update_image(image)
def test_delete_image__ok(meta: MetadataRepository, created_image):
task = meta.get_task_by_id(created_image.task_id)
meta.delete_image(created_image)
assert created_image.id is None
assert not created_image.has_meta_repo
task = meta.get_task_by_id(task.id)
assert len(task.images) == 0
def test_delete_image__unsaved_image(meta: MetadataRepository, image):
with pytest.raises(NonExistingImageError):
meta.delete_image(image)
def test_save_image_ok_unsaved(meta: MetadataRepository, created_task, created_environment, image):
image.task = created_task
image.environment = created_environment
image = meta.save_image(image)
assert image.id is not None
assert image.task_id is not None
assert image.task == created_task
assert image.has_meta_repo
def test_save_image_ok_saved(meta: MetadataRepository, created_image):
image = meta.save_image(created_image)
assert image.id is not None
assert image.has_meta_repo
def test_save_image__no_model(meta: MetadataRepository, image):
with pytest.raises(ImageNotInTaskError):
meta.save_image(image)
def test_save_image__other_id(meta: MetadataRepository, created_image):
created_image._id = 12345
with pytest.raises(ExistingImageError):
meta.save_image(created_image)
def test_get_environments__empty(meta: MetadataRepository):
assert meta.get_environments() == []
def test_get_environments__full(meta: MetadataRepository, created_environment):
assert meta.get_environments() == [created_environment]
def test_get_environment_by_id__empty(meta: MetadataRepository):
assert meta.get_environment_by_id(12345) is None
def test_get_environment_by_id__full(meta: MetadataRepository, created_environment):
assert meta.get_environment_by_id(created_environment.id) == created_environment
def test_get_environment_by_name__empty(meta: MetadataRepository):
assert meta.get_environment_by_name('qwerty') is None
def test_get_environment_by_name__full(meta: MetadataRepository, created_environment):
assert meta.get_environment_by_name(created_environment.name) == created_environment
def test_create_environment__ok(meta: MetadataRepository, environment, created_environment):
assert environment.id is None
assert not environment.has_meta_repo
assert created_environment.id is not None
assert created_environment.has_meta_repo
assert created_environment.name == environment.name
assert created_environment.params == environment.params
def test_create_environment__saved(meta: MetadataRepository, created_environment):
with pytest.raises(ExistingEnvironmentError):
meta.create_environment(created_environment)
def test_update_environment__ok(meta: MetadataRepository, created_environment):
key = 2
assert created_environment.params.key != key
created_environment.params.key = key
environment = meta.update_environment(created_environment)
assert environment.params.key == key
def test_update_environment__not_existing(meta: MetadataRepository, environment):
with pytest.raises(NonExistingEnvironmentError):
meta.update_environment(environment)
def test_delete_environment__ok(meta: MetadataRepository, created_environment):
assert meta.get_environments() == [created_environment]
meta.delete_environment(created_environment)
assert meta.get_environments() == []
def test_delete_environment__not_existing(meta: MetadataRepository, environment):
with pytest.raises(NonExistingEnvironmentError):
meta.delete_environment(environment)
def test_save_environment__ok_existing(meta: MetadataRepository, created_environment):
key = 2
assert created_environment.params.key != key
created_environment.params.key = key
environment = meta.save_environment(created_environment)
assert environment.params.key == key
def test_save_environment__ok_not_existing(meta: MetadataRepository, environment):
assert environment.id is None
assert not environment.has_meta_repo
created_environment = meta.save_environment(environment)
assert created_environment.id is not None
assert created_environment.has_meta_repo
assert created_environment.name == environment.name
assert created_environment.params == environment.params
def test_save_environment__other_id(meta: MetadataRepository, created_environment):
created_environment._id = 12345
with pytest.raises(ExistingEnvironmentError):
meta.save_environment(created_environment)
def test_get_instances__empty(meta: MetadataRepository, created_image, created_environment):
assert meta.get_instances(created_image, created_environment) == []
def test_get_instances__full(meta: MetadataRepository, created_image, created_environment, created_instance):
assert meta.get_instances(created_image, created_environment) == [created_instance]
def test_get_instances__empty_only_image(meta: MetadataRepository, created_image):
assert meta.get_instances(created_image, None) == []
def test_get_instance__only_image(meta: MetadataRepository, created_image, created_instance):
assert meta.get_instances(created_image, None) == [created_instance]
def test_get_instances__full_only_image(meta: MetadataRepository, created_image, created_instance):
assert meta.get_instances(created_image, None) == [created_instance]
def test_get_instances__empty_only_environment(meta: MetadataRepository, created_environment):
assert meta.get_instances(None, created_environment) == []
def test_get_instance__only_environment(meta: MetadataRepository, created_environment, created_instance):
assert meta.get_instances(None, created_environment) == [created_instance]
def test_get_instances__full_only_environment(meta: MetadataRepository, created_environment, created_instance):
assert meta.get_instances(None, created_environment) == [created_instance]
def test_get_instance_by_name__empty(meta: MetadataRepository, created_image, created_environment):
assert meta.get_instance_by_name('qwerty', created_image, created_environment) is None
def test_get_instance_by_name__full(meta: MetadataRepository, created_image, created_environment, created_instance):
assert meta.get_instance_by_name(created_instance.name, created_image, created_environment) == created_instance
def test_get_instance_by_id__empty(meta: MetadataRepository):
assert meta.get_instance_by_id(12345) is None
def test_get_instance_by_id__full(meta: MetadataRepository, created_instance):
assert meta.get_instance_by_id(created_instance.id) == created_instance
def test_create_instance__ok(meta: MetadataRepository, instance, created_instance):
assert instance.id is None
assert not instance.has_meta_repo
assert created_instance.id is not None
assert created_instance.has_meta_repo
assert created_instance.name == instance.name
assert created_instance.params == instance.params
def test_created_instance__existing(meta: MetadataRepository, created_instance):
with pytest.raises(ExistingInstanceError):
meta.create_instance(created_instance)
def test_update_instance__ok(meta: MetadataRepository, created_instance):
key = 2
assert created_instance.params.key != key
created_instance.params.key = key
instance = meta.update_instance(created_instance)
assert instance.params.key == key
def test_update_instance__not_existing(meta: MetadataRepository, created_instance):
created_instance._id = 12345
with pytest.raises(NonExistingInstanceError):
meta.update_instance(created_instance)
def test_delete_instance__ok(meta: MetadataRepository, created_instance):
image, environment = created_instance.image_id, created_instance.environment_id
assert meta.get_instances(image, environment) == [created_instance]
meta.delete_instance(created_instance)
assert meta.get_instances(image, environment) == []
def test_delete_instance__not_existing(meta: MetadataRepository, instance):
with pytest.raises(NonExistingInstanceError):
meta.delete_instance(instance)
def test_save_instance__ok_existing(meta: MetadataRepository, created_instance):
key = 2
assert created_instance.params.key != key
created_instance.params.key = key
instance = meta.save_instance(created_instance)
assert instance.params.key == key
def test_save_instance__ok_not_existing(meta: MetadataRepository, created_image, created_environment, instance):
assert instance.id is None
assert not instance.has_meta_repo
instance.image = created_image
instance.environment = created_environment
created_instance = meta.save_instance(instance)
assert created_instance.id is not None
assert created_instance.has_meta_repo
assert created_instance.image == created_image
assert created_instance.environment == created_environment
assert created_instance.name == instance.name
assert created_instance.params == instance.params
def test_inner_objects_binded(meta: MetadataRepository, project: Project, task: Task, model: Model):
task.project = meta.create_project(project)
meta.create_task(task)
model.task_id = task.id
model = meta.create_model(model)
new_project = meta.get_project_by_name(project.name)
assert new_project.id is not None
assert new_project.has_meta_repo
new_task = new_project.tasks(task.name)
assert new_task.id is not None
assert new_task.has_meta_repo
new_model = new_task.models(model.name)
assert new_model.id is not None
assert new_model.has_meta_repo
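# --- Illustrative sketch (not part of the original test module) ---
# The tests above rely on pytest fixtures (meta, project, task, image,
# environment, instance and their created_* counterparts) that are defined
# elsewhere in the suite. Two of them could plausibly be wired as below; the
# fixture names come from the tests, but the bodies here are assumptions, not
# the project's actual conftest.
import pytest

@pytest.fixture
def created_task(meta, created_project, task):
    # a task can only be persisted once it points at an already saved project
    task.project = created_project
    return meta.create_task(task)

@pytest.fixture
def created_image(meta, created_task, image):
    # likewise, an image must belong to a saved task before create_image succeeds
    image.task = created_task
    return meta.create_image(image)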
|
8340e7e2e0331539b0e940c390e8781199199fcc
|
c530897cb72b6943c7226b25824444cad5f3503b
|
/usaspending_api/awards/migrations/0100_ctodlinkageupdates.py
|
938aff12cccca1e725fd1385ac25992310545e36
|
[
"CC0-1.0"
] |
permissive
|
fedspendingtransparency/usaspending-api
|
fc63a22d32ea0207b7273d3e1ef26ba9dbabc42a
|
38f920438697930ae3ac57bbcaae9034877d8fb7
|
refs/heads/master
| 2023-09-01T22:00:36.633612
| 2023-08-29T18:39:18
| 2023-08-29T18:39:18
| 65,394,827
| 276
| 118
|
CC0-1.0
| 2023-09-14T20:33:15
| 2016-08-10T15:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 637
|
py
|
0100_ctodlinkageupdates.py
|
# Generated by Django 3.2.13 on 2023-02-03 19:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('awards', '0099_auto_20230113_1858'),
]
operations = [
migrations.CreateModel(
name='CToDLinkageUpdates',
fields=[
('financial_accounts_by_awards_id', models.IntegerField(primary_key=True, serialize=False)),
('award_id', models.IntegerField()),
],
options={
'db_table': 'c_to_d_linkage_updates',
'managed': True,
},
),
]
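# --- Illustrative sketch (not part of the original migration) ---
# Once this migration is applied, rows for the c_to_d_linkage_updates staging
# table can be written through the ORM. The import path below is an assumption
# (the model may only be defined in migration state), and the id pairs are
# example data only.
from usaspending_api.awards.models import CToDLinkageUpdates  # hypothetical import path

CToDLinkageUpdates.objects.bulk_create(
    [
        CToDLinkageUpdates(financial_accounts_by_awards_id=faba_id, award_id=award_id)
        for faba_id, award_id in [(1, 10), (2, 20)]
    ],
    ignore_conflicts=True,
)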
|
ef5e3c3e1dd21c1b1f66a6b4adac7fee6b7a47f5
|
1d2466bcd14b2837076d509d26404f96dca1fefe
|
/setup.py
|
2e64b79cc96a2f7f39d45a7e328e8015ccde46cd
|
[
"MIT"
] |
permissive
|
foutaise/texttable
|
637952d8a637f665ebc9b8b34229485253f054fb
|
834a9993a625cc4c6ac04b79cae1299e23474f16
|
refs/heads/master
| 2022-12-02T08:20:46.206256
| 2022-11-23T07:16:22
| 2022-11-23T07:16:22
| 46,265,696
| 350
| 77
|
MIT
| 2022-12-02T11:39:45
| 2015-11-16T09:45:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,723
|
py
|
setup.py
|
#!/usr/bin/env python
#
# texttable - module to create simple ASCII tables
# Copyright (C) 2003-2022 Gerome Fournier <jef(at)foutaise.org>
from setuptools import setup
DESCRIPTION = "module to create simple ASCII tables"
with open("README.md") as f:
LONG_DESCRIPTION = f.read()
setup(
name="texttable",
version="1.6.7",
author="Gerome Fournier",
author_email="jef@foutaise.org",
url="https://github.com/foutaise/texttable/",
download_url="https://github.com/foutaise/texttable/archive/v1.6.7.tar.gz",
license="MIT",
py_modules=["texttable"],
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
platforms="any",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing',
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
options={"bdist_wheel": {"universal": "1"}}
)
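# --- Illustrative sketch (not part of setup.py) ---
# A quick look at the module this package installs. Texttable, add_rows and
# draw are the library's documented entry points; the sample rows are
# arbitrary.
from texttable import Texttable

table = Texttable()
table.add_rows([
    ["Name", "Age"],   # the first row is rendered as the header by default
    ["Alice", 30],
    ["Bob", 25],
])
print(table.draw())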
|
1c84fbc7b80ae3434d3776610b082b2ac4c3bf70
|
197a2fbaefb1fa438ca5c90628472f4a1ae0e8a6
|
/csbdeep/data/prepare.py
|
971740e75fba9040748dd046c4c30807da53f03e
|
[
"BSD-3-Clause"
] |
permissive
|
CSBDeep/CSBDeep
|
c66537da84f5a5f8609406c8c46837d170b2b5b0
|
f95b1c5e7da326d21ad7ee50ccd306a98d23f718
|
refs/heads/master
| 2023-07-19T20:30:58.392692
| 2023-07-18T12:45:16
| 2023-07-18T12:45:16
| 136,194,704
| 250
| 94
|
BSD-3-Clause
| 2023-07-18T12:42:15
| 2018-06-05T15:03:32
|
Python
|
UTF-8
|
Python
| false
| false
| 8,066
|
py
|
prepare.py
|
from __future__ import print_function, unicode_literals, absolute_import, division
from six.moves import range, zip, map, reduce, filter
from ..utils import _raise, consume, normalize_mi_ma, axes_dict, axes_check_and_normalize, move_image_axes
import warnings
import numpy as np
from six import add_metaclass
from abc import ABCMeta, abstractmethod, abstractproperty
@add_metaclass(ABCMeta)
class Normalizer():
"""Abstract base class for normalization methods."""
@abstractmethod
def before(self, x, axes):
"""Normalization of the raw input image (method stub).
Parameters
----------
x : :class:`numpy.ndarray`
Raw input image.
axes : str
Axes of input image x
Returns
-------
:class:`numpy.ndarray`
Normalized input image with suitable values for neural network input.
"""
@abstractmethod
def after(self, mean, scale, axes):
"""Possible adjustment of predicted restored image (method stub).
Parameters
----------
mean : :class:`numpy.ndarray`
Predicted restored image or per-pixel ``mean`` of Laplace distributions
for probabilistic model.
scale: :class:`numpy.ndarray` or None
Per-pixel ``scale`` of Laplace distributions for probabilistic model (``None`` otherwise.)
axes : str
Axes of ``mean`` and ``scale``
Returns
-------
:class:`numpy.ndarray`
Adjusted restored image(s).
"""
def __call__(self, x, axes):
"""Alias for :func:`before` to make this callable."""
return self.before(x, axes)
@abstractproperty
def do_after(self):
"""bool : Flag to indicate whether :func:`after` should be called."""
class NoNormalizer(Normalizer):
"""No normalization.
Parameters
----------
do_after : bool
Flag to indicate whether to undo normalization.
Raises
------
ValueError
If :func:`after` is called, but parameter `do_after` was set to ``False`` in the constructor.
"""
def __init__(self, do_after=False):
self._do_after = do_after
def before(self, x, axes):
return x
def after(self, mean, scale, axes):
self.do_after or _raise(ValueError())
return mean, scale
@property
def do_after(self):
return self._do_after
class PercentileNormalizer(Normalizer):
"""Percentile-based image normalization.
Parameters
----------
pmin : float
Low percentile.
pmax : float
High percentile.
do_after : bool
Flag to indicate whether to undo normalization (original data type will not be restored).
dtype : type
Data type after normalization.
kwargs : dict
Keyword arguments for :func:`csbdeep.utils.normalize_mi_ma`.
"""
def __init__(self, pmin=2, pmax=99.8, do_after=True, dtype=np.float32, **kwargs):
"""TODO."""
(np.isscalar(pmin) and np.isscalar(pmax) and 0 <= pmin < pmax <= 100) or _raise(ValueError())
self.pmin = pmin
self.pmax = pmax
self._do_after = do_after
self.dtype = dtype
self.kwargs = kwargs
def before(self, x, axes):
"""Percentile-based normalization of raw input image.
See :func:`csbdeep.predict.Normalizer.before` for parameter descriptions.
Note that percentiles are computed individually for each channel (if present in `axes`).
"""
self.axes_before = axes_check_and_normalize(axes,x.ndim)
axis = tuple(d for d,a in enumerate(self.axes_before) if a != 'C')
self.mi = np.percentile(x,self.pmin,axis=axis,keepdims=True).astype(self.dtype,copy=False)
self.ma = np.percentile(x,self.pmax,axis=axis,keepdims=True).astype(self.dtype,copy=False)
return normalize_mi_ma(x, self.mi, self.ma, dtype=self.dtype, **self.kwargs)
def after(self, mean, scale, axes):
"""Undo percentile-based normalization to map restored image to similar range as input image.
See :func:`csbdeep.predict.Normalizer.after` for parameter descriptions.
Raises
------
ValueError
If parameter `do_after` was set to ``False`` in the constructor.
"""
self.do_after or _raise(ValueError())
self.axes_after = axes_check_and_normalize(axes,mean.ndim)
mi = move_image_axes(self.mi, self.axes_before, self.axes_after, True)
ma = move_image_axes(self.ma, self.axes_before, self.axes_after, True)
alpha = ma - mi
beta = mi
return (
( alpha*mean+beta ).astype(self.dtype,copy=False),
( alpha*scale ).astype(self.dtype,copy=False) if scale is not None else None
)
@property
def do_after(self):
"""``do_after`` parameter from constructor."""
return self._do_after
@add_metaclass(ABCMeta)
class Resizer():
"""Abstract base class for resizing methods."""
@abstractmethod
def before(self, x, axes, axes_div_by):
"""Resizing of the raw input image (method stub).
Parameters
----------
x : :class:`numpy.ndarray`
Raw input image.
axes : str
Axes of input image x
axes_div_by : iterable of int
Resized image must be evenly divisible by the provided values for each axis.
Returns
-------
:class:`numpy.ndarray`
Resized input image.
"""
@abstractmethod
def after(self, x, axes):
"""Resizing of the restored image (method stub).
Parameters
----------
x : :class:`numpy.ndarray`
Restored image.
axes : str
Axes of restored image x
Returns
-------
:class:`numpy.ndarray`
Resized restored image.
"""
class NoResizer(Resizer):
"""No resizing.
Raises
------
ValueError
In :func:`before`, if image resizing is necessary.
"""
def before(self, x, axes, axes_div_by):
axes = axes_check_and_normalize(axes,x.ndim)
consume (
(s%div_n==0) or _raise(ValueError('%d (axis %s) is not divisible by %d.' % (s,a,div_n)))
for a, div_n, s in zip(axes, axes_div_by, x.shape)
)
return x
def after(self, x, axes):
return x
class PadAndCropResizer(Resizer):
"""Resize image by padding and cropping.
If necessary, input image is padded before prediction
and restored image is cropped back to size of input image
after prediction.
Parameters
----------
mode : str
Parameter ``mode`` of :func:`numpy.pad` that
controls how the image is padded.
kwargs : dict
Keyword arguments for :func:`numpy.pad`.
"""
def __init__(self, mode='reflect', **kwargs):
"""TODO."""
self.mode = mode
self.kwargs = kwargs
def before(self, x, axes, axes_div_by):
"""Pad input image.
See :func:`csbdeep.predict.Resizer.before` for parameter descriptions.
"""
axes = axes_check_and_normalize(axes,x.ndim)
def _split(v):
a = v // 2
return a, v-a
self.pad = {
a : _split((div_n-s%div_n)%div_n)
for a, div_n, s in zip(axes, axes_div_by, x.shape)
}
# print(self.pad)
x_pad = np.pad(x, tuple(self.pad[a] for a in axes), mode=self.mode, **self.kwargs)
return x_pad
def after(self, x, axes):
"""Crop restored image to retain size of input image.
See :func:`csbdeep.predict.Resizer.after` for parameter descriptions.
"""
axes = axes_check_and_normalize(axes,x.ndim)
all(a in self.pad for a in axes) or _raise(ValueError())
crop = tuple (
slice(p[0], -p[1] if p[1]>0 else None)
for p in (self.pad[a] for a in axes)
)
# print(crop)
return x[crop]
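# --- Illustrative sketch (not part of the original module) ---
# How the Normalizer/Resizer pair defined above is typically wrapped around a
# prediction step. The prediction itself is a stand-in (no model is involved
# here); everything else follows the before()/after() contract documented in
# the class docstrings, and the toy array shape is arbitrary.
if __name__ == "__main__":
    x = np.random.rand(64, 100, 100).astype(np.float32)  # toy ZYX stack
    axes = "ZYX"

    normalizer = PercentileNormalizer(pmin=2, pmax=99.8, do_after=True)
    resizer = PadAndCropResizer()

    x_norm = normalizer.before(x, axes)
    x_pad = resizer.before(x_norm, axes, axes_div_by=(4, 4, 4))  # e.g. a network's downsampling factors

    mean, scale = x_pad, None  # stand-in for: mean, scale = model.predict(x_pad)

    mean = resizer.after(mean, axes)
    restored, _ = normalizer.after(mean, scale, axes)
    print(restored.shape, restored.dtype)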
|
a124a6c5a8d0d34fbb23340f5f18577481e4d5a8
|
635cddc2adfd5195d1b05b120160bbf957c6adb2
|
/archived/earthengine-to-bigquery/download_ee_image_to_gcs.py
|
c6796da17c81fd3c02e19bdcbe3e81fe12a22438
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
GoogleCloudPlatform/community
|
984b9d53c59a82d6cd470c0407b3fa45109e6450
|
6f68203dec458268f177e97e2b57f3e085ca3668
|
refs/heads/master
| 2023-08-09T17:29:26.393300
| 2023-07-31T01:30:38
| 2023-07-31T01:30:38
| 50,372,538
| 1,999
| 2,069
|
NOASSERTION
| 2023-07-31T01:30:39
| 2016-01-25T18:46:16
|
Java
|
UTF-8
|
Python
| false
| false
| 1,276
|
py
|
download_ee_image_to_gcs.py
|
import ee
import os
# Initialize the Earth Engine API
ee.Initialize()
# Retrieve the environment variable of the Cloud Storage bucket for storing images
bucket_name = os.environ['IMAGES_BUCKET']
# Specify a region in the US (roughly the state of Colorado) to reduce the export time for the sake of example
colorado = ee.Geometry.Rectangle([-104, 37, -102, 38])
# Select the first (and only) image from the Cropland image collection for the year 2019, and the cropland band,
# which gives us the crop type. Currently, Geobeam will only ingest a single band from a GeoTIFF at a time.
image = ee.ImageCollection('USDA/NASS/CDL').filter(ee.Filter.date('2019-01-01', '2019-01-02')).first()
cropland = image.select('cropland')
task_config = {
'description': 'cropland',
'crs': 'EPSG:4326', # specify this projection to ensure BigQuery can ingest it properly
'scale': 30, # also necessary to specify scale when reprojecting (30m is the original dataset scale)
'bucket': bucket_name,
'fileNamePrefix': 'croplandExport',
'region': colorado,
'maxPixels': 1e12 # increase the max pixels limit for exports
}
task = ee.batch.Export.image.toCloudStorage(cropland, **task_config)
task.start()
print('Please wait for 5 minutes for the export to GCS to complete')
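# --- Illustrative sketch (not part of the original script) ---
# Rather than waiting a fixed amount of time, the export task can be polled
# until it finishes. task.active() and task.status() are part of the Earth
# Engine Python client's batch task interface; the sleep interval is arbitrary.
import time

while task.active():
    print('Export still running...')
    time.sleep(30)
print('Final task state:', task.status().get('state'))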
|
ce1f7450a1b09ab932022e082716d20cf1003e35
|
7edc26a54f4b71085db5758ee15e87dfc822c372
|
/openelex/api/exceptions.py
|
4c8fb77d2e2891d1d1e8f5cabdd7c1a5e462214e
|
[
"MIT"
] |
permissive
|
openelections/openelections-core
|
55f1b440644588502a6a1a67f8924024a2f1dffb
|
3c516d8c4cf1166b1868b738a248d48f3378c525
|
refs/heads/master
| 2022-02-06T01:24:38.557078
| 2021-04-22T17:53:34
| 2021-04-22T17:53:34
| 11,376,829
| 161
| 99
|
MIT
| 2022-01-21T18:56:12
| 2013-07-12T19:52:57
|
Python
|
UTF-8
|
Python
| false
| false
| 38
|
py
|
exceptions.py
|
class InvalidUrl(Exception):
pass
|
4dba48a3c32a86372e95b503fa0aff933848dde8
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-core/PyObjCTest/test_lazy_import.py
|
edb3e982c1da89dbc1abffd564006cd10885ddd4
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 33,095
|
py
|
test_lazy_import.py
|
import os
import struct
import sys
import copy
import warnings
import objc
import objc._lazyimport as lazyimport
from PyObjCTest import metadatafunction
from PyObjCTools.TestSupport import TestCase
from PyObjCTest.test_deprecations import deprecation_warnings
def lookupClasses(*names):
result = []
for nm in names:
try:
result.append(objc.lookUpClass(nm))
except objc.nosuchclass_error:
pass
return tuple(result)
class TestLazyImport(TestCase):
def test_exports(self):
self.assertIs(objc.ObjCLazyModule, lazyimport.ObjCLazyModule)
self.assertTrue(issubclass(objc.ObjCLazyModule, type(objc)))
def test_getattr_map(self):
o = lazyimport.GetAttrMap(sys)
self.assertEqual(o["path"], sys.path)
self.assertEqual(o["version"], sys.version)
with self.assertRaisesRegex(KeyError, "nosuchkey"):
o["nosuchkey"]
v = o["CFSTR"]
self.assertEqual(v(b"hello"), "hello")
def test_load_bundle(self):
NSBundle = objc.lookUpClass("NSBundle")
# _objc is linked with Foundation, hence should be able to load without
# providing a valid path
o = lazyimport._loadBundle("Foundation", "com.apple.Foundation", "/dev/null")
self.assertIsInstance(o, NSBundle)
self.assertEqual(o.bundleIdentifier(), "com.apple.Foundation")
self.assertTrue(o.isLoaded())
# Load using path
o = lazyimport._loadBundle(
"AppKit", None, "/System/Library/Frameworks/AppKit.framework"
)
o.load()
self.assertEqual(o.bundleIdentifier(), "com.apple.AppKit")
self.assertTrue(o.isLoaded())
# Should not be loaded yet, hence fallback from identifier to path
o = lazyimport._loadBundle(
"PreferencePanes",
"com.apple.frameworks.preferencepanes",
"/System/Library/Frameworks/PreferencePanes.framework",
)
o.load()
self.assertEqual(o.bundleIdentifier(), "com.apple.frameworks.preferencepanes")
self.assertTrue(o.isLoaded())
def test_all_types_without_all(self):
self.do_test_all_types(dunder_all=False)
def test_all_types_with_all(self):
self.do_test_all_types(dunder_all=True)
def do_test_all_types(self, dunder_all):
metadict = {
"nometadata": 42, # Ignored...
"protocols": {
"NSMachPortDelegateMethods": objc.informal_protocol(
"NSMachPortDelegateMethods",
[
objc.selector(
None, b"handleMachMessage:", b"v@:^v", isRequired=False
)
],
)
},
"constants": "$NSWorkspaceMoveOperation$NSWorkspaceCopyOperation@@$",
"constants_dict": {
"NSWorkspaceLinkOperation": "@",
"NSWindowWillCloseNotification": "@",
"NSUnderlineByWordMask": objc._C_NSUInteger.decode("ascii"),
},
"enums": "$NSAWTEventType@16$NSAboveBottom@4$NSAboveTop@1$",
"functions": {
"NSRectClipList": (
b"v^{CGRect={CGPoint=dd}{CGSize=dd}}q",
"",
{
"arguments": {
0: {"c_array_length_in_arg": 1, "type_modifier": b"n"}
}
},
),
"FunctionThatDoesNotExist": (
b"v^{CGRect={CGPoint=dd}{CGSize=dd}}q",
"",
{},
),
"NSAccessibilityActionDescription": (b"@@", "", {}),
},
"aliases": {"doc_string": "__doc__", "invalid_alias": "does_not_exist"},
"expressions": {
"mysum": "NSAWTEventType + NSAboveBottom + 3",
"invalid_expression1": "no_such_name + 1",
"invalid_expression2": 'NSAboveBottom + "b"',
},
}
initial_dict = {"__doc__": "AppKit test module"}
mod = objc.ObjCLazyModule(
"AppKit",
None,
"/System/Library/Frameworks/AppKit.framework",
metadict,
None,
initial_dict,
(),
)
self.assertIsInstance(mod, objc.ObjCLazyModule)
with self.assertRaisesRegex(AttributeError, "Foo"):
mod.Foo
with self.assertRaisesRegex(AttributeError, "Foo"):
mod.Foo
with self.assertRaisesRegex(AttributeError, "42"):
getattr(mod, "42")
if dunder_all:
# Force precalculation of all attributes by accessing the __all__
# attribute
self.assertEqual(set(dir(mod)), set(mod.__all__))
self.assertEqual(mod.__doc__, initial_dict["__doc__"])
self.assertEqual(mod.doc_string, initial_dict["__doc__"])
with self.assertRaisesRegex(AttributeError, "invalid_alias"):
mod.invalid_alias
self.assertIsInstance(mod.NSWorkspaceMoveOperation, objc.pyobjc_unicode)
self.assertTrue(
(mod.NSWorkspaceMoveOperation.nsstring().__flags__ & 0x10) == 0x00
)
self.assertIsInstance(mod.NSWorkspaceCopyOperation, objc.pyobjc_unicode)
self.assertIsInstance(mod.NSWorkspaceLinkOperation, objc.pyobjc_unicode)
self.assertIsInstance(mod.NSUnderlineByWordMask, int)
self.assertEqual(mod.NSAWTEventType, 16)
self.assertEqual(mod.NSAboveBottom, 4)
self.assertEqual(mod.NSAboveTop, 1)
self.assertIsInstance(mod.NSRectClipList, objc.function)
self.assertEqual(mod.NSRectClipList.__name__, "NSRectClipList")
self.assertArgSizeInArg(mod.NSRectClipList, 0, 1)
with self.assertRaisesRegex(AttributeError, "FunctionThatDoesNotExist"):
mod.FunctionThatDoesNotExist
self.assertEqual(mod.mysum, mod.NSAWTEventType + mod.NSAboveBottom + 3)
with self.assertRaisesRegex(AttributeError, "invalid_expression1"):
mod.invalid_expression1
with self.assertRaisesRegex(AttributeError, "invalid_expression2"):
mod.invalid_expression2
self.assertIs(mod.NSURL, objc.lookUpClass("NSURL"))
with self.assertRaisesRegex(AttributeError, "NSNonExistingClass"):
mod.NSNonExistingClass
mod.NSAccessibilityActionDescription = 99
mod.NSWindowWillCloseNotification = 100
self.assertEqual(set(dir(mod)), set(mod.__all__))
self.assertIn("NSRectClipList", mod.__dict__)
self.assertIn("NSRectClipList", mod.__all__)
self.assertIn("NSAccessibilityActionDescription", mod.__all__)
self.assertEqual(mod.NSAccessibilityActionDescription, 99)
self.assertIn("mysum", mod.__all__)
self.assertIn("NSWorkspaceMoveOperation", mod.__all__)
self.assertIn("NSWindowWillCloseNotification", mod.__all__)
self.assertEqual(mod.NSWindowWillCloseNotification, 100)
self.assertNotIn("__doc__", mod.__all__)
self.assertIn(
"NSMachPortDelegateMethods", mod._ObjCLazyModule__informal_protocols
)
def test_nameless_enum_label(self):
# XXX: This tests a workaround for a bug in libdispatch, to
# be removed later.
initial_dict = {
"__doc__": "rootless test module",
"__spec__": object(),
"__loader__": object(),
}
metadict = {
"enums": "$$NSAWTEventType@16$$@4@",
"constants": "$$",
}
mod = objc.ObjCLazyModule(
"RootLess", None, None, metadict, None, initial_dict, ()
)
self.assertIn("NSAWTEventType", mod.__all__)
self.assertNotIn("", mod.__all__)
def test_without_framework(self):
initial_dict = {
"__doc__": "rootless test module",
"__spec__": object(),
"__loader__": object(),
}
metadict = {
"constants": "$AEAssessmentErrorDomain$",
"constants_dict": {"ITLibMediaEntityPropertyPersistentID": "@"},
"enums": "$NSAWTEventType@16$NSAboveBottom@4$NSAboveTop@1$",
"functions": {
"ABPersonSetImageData": (
objc._C_BOOL + objc._C_ID + objc._C_ID,
"",
{},
),
"MTLTextureSwizzleChannelsMake": (
b"{_MTLTextureSwizzleChannels=CCCC}CCCC",
),
},
"aliases": {"doc_string": "__doc__"},
"expressions": {"mysum": "NSAWTEventType + NSAboveBottom + 3"},
}
mod = objc.ObjCLazyModule(
"RootLess", None, None, metadict, None, initial_dict, ()
)
self.assertEqual(mod.__doc__, "rootless test module")
self.assertEqual(mod.__doc__, mod.doc_string)
self.assertIs(mod.__spec__, initial_dict["__spec__"])
self.assertIs(mod.__loader__, initial_dict["__loader__"])
self.assertEqual(mod.NSAboveBottom, 4)
self.assertEqual(mod.mysum, mod.NSAWTEventType + mod.NSAboveBottom + 3)
with self.assertRaisesRegex(AttributeError, "MTLTextureSwizzleChannelsMake"):
mod.MTLTextureSwizzleChannelsMake
with self.assertRaisesRegex(AttributeError, "AEAssessmentErrorDomain"):
mod.AEAssessmentErrorDomain
with self.assertRaisesRegex(
AttributeError, "ITLibMediaEntityPropertyPersistentID"
):
mod.ITLibMediaEntityPropertyPersistentID
def test_function_wont_override_existing(self):
metadict = {
"functions": {
"CFAllocatorGetTypeID": (objc._C_NSUInteger, ""),
"CFArrayGetTypeID": (objc._C_NSUInteger, ""),
},
}
mod = objc.ObjCLazyModule(
"AppKit",
None,
"/System/Library/Frameworks/CoreFoundation.framework",
metadict,
None,
{"CFAllocatorGetTypeID": 42},
(),
)
self.assertEqual(mod.CFAllocatorGetTypeID, 42)
self.assertIn("CFAllocatorGetTypeID", mod.__all__)
self.assertIn("CFArrayGetTypeID", mod.__all__)
self.assertEqual(mod.CFAllocatorGetTypeID, 42)
def test_inline_function_wont_override_existing(self):
metadict = {
"functions": {
"makeArrayWithFormat_": (
b"@@",
"",
{"variadic": True, "arguments": {0: {"printf_format": True}}},
),
"makeArrayWithCFormat_": (
b"@*",
"",
{"variadic": True, "arguments": {0: {"printf_format": True}}},
),
}
}
inline_list = metadatafunction.function_list
mod = objc.ObjCLazyModule(
"MyFramework",
None,
None,
metadict,
inline_list,
{
"makeArrayWithFormat_": 42,
},
(),
)
self.assertEqual(mod.makeArrayWithFormat_, 42)
self.assertIn("makeArrayWithFormat_", mod.__all__)
self.assertIn("makeArrayWithCFormat_", mod.__all__)
self.assertEqual(mod.makeArrayWithFormat_, 42)
def test_with_parents(self):
mod = objc.ObjCLazyModule("RootLess", None, None, None, None, None, (sys, os))
self.assertEqual(mod.path, sys.path)
self.assertIn("path", mod.__dict__)
self.assertEqual(mod.unlink, os.unlink)
self.assertIn("unlink", mod.__dict__)
mod.__dict__["version_info"] = 42
self.assertEqual(mod.version_info, 42)
self.assertIn("walk", mod.__all__)
self.assertIn("version", mod.__all__)
self.assertNotIn("__doc__", mod.__all__)
def test_all_clearing(self):
metadict = {"enums": "$NSAWTEventType@16$NSAboveBottom@4$NSAboveTop@1$"}
initial_dict = {"__doc__": "AppKit test module"}
mod = objc.ObjCLazyModule(
"AppKit",
None,
"/System/Library/Frameworks/AppKit.framework",
metadict,
None,
initial_dict,
(sys,),
)
self.assertIsInstance(mod, objc.ObjCLazyModule)
mod.__all__ = 42
self.assertIs(mod.path, sys.path)
self.assertNotIn("__all__", mod.__dict__)
mod.__all__ = 42
self.assertEqual(mod.NSAWTEventType, 16)
self.assertNotIn("__all__", mod.__dict__)
mod.__all__ = 42
self.assertIs(mod.NSObject, objc.lookUpClass("NSObject"))
self.assertNotIn("__all__", mod.__dict__)
self.assertTrue("NSAWTEventType" in mod.__all__)
self.assertTrue("NSAboveBottom" in mod.__all__)
def test_enum_formats(self):
metadict = {"enums": "$intval@16$floatval@4.5$charval@'1234'$floatval2@1e3$"}
initial_dict = {"__doc__": "AppKit test module"}
mod = objc.ObjCLazyModule(
"AppKit",
None,
"/System/Library/Frameworks/AppKit.framework",
metadict,
None,
initial_dict,
(),
)
self.assertIsInstance(mod, objc.ObjCLazyModule)
self.assertEqual(mod.intval, 16)
self.assertEqual(mod.floatval, 4.5)
self.assertEqual(mod.charval, struct.unpack(">l", b"1234")[0])
self.assertEqual(mod.floatval2, 1.0e3)
def test_magic_aliases(self):
metadict = {
"aliases": {
"umax": "ULONG_MAX",
"max": "LONG_MAX",
"min": "LONG_MIN",
"dblmx": "DBL_MAX",
"dblmn": "DBL_MIN",
"dbleps": "DBL_EPSILON",
"fltmx": "FLT_MAX",
"fltmn": "FLT_MIN",
"null": "objc.NULL",
"umx": "UINT32_MAX",
}
}
initial_dict = {"__doc__": "AppKit test module"}
mod = objc.ObjCLazyModule(
"AppKit",
None,
"/System/Library/Frameworks/AppKit.framework",
metadict,
None,
initial_dict,
(),
)
self.assertIsInstance(mod, objc.ObjCLazyModule)
self.assertEqual(mod.umax, 2**64 - 1)
self.assertEqual(mod.max, sys.maxsize)
self.assertEqual(mod.min, -sys.maxsize - 1)
self.assertEqual(mod.dblmx, sys.float_info.max)
self.assertEqual(mod.dblmn, sys.float_info.min)
self.assertEqual(mod.dbleps, sys.float_info.epsilon)
self.assertEqual(mod.fltmx, objc._FLT_MAX)
self.assertEqual(mod.fltmn, objc._FLT_MIN)
self.assertEqual(mod.null, objc.NULL)
self.assertEqual(mod.umx, 2**32 - 1)
def test_existing_submodules(self):
try:
sys.modules["MyFramework.submodule"] = 42
sys.modules["MyFramework.submodule.x"] = 99
sys.modules["MyFramework.submodule2"] = 1
sys.modules["MyFramework.submodule3"] = None
mod = objc.ObjCLazyModule("MyFramework", None, None, {}, None, {}, ())
self.assertIsInstance(mod, objc.ObjCLazyModule)
self.assertEqual(mod.submodule, 42)
self.assertEqual(mod.submodule2, 1)
with self.assertRaisesRegex(KeyError, "submodule3"):
mod.__dict__["submodule3"]
with self.assertRaisesRegex(KeyError, "x"):
mod.__dict__["submodule.x"]
finally:
for nm in (
"MyFramework.submodule",
"MyFramework.submodule.x",
"MyFramework.submodule2",
):
if nm in sys.modules:
del sys.modules[nm]
def test_inline_list(self):
# Use inlinetab from PyObjCTest.metadatafunction extension
# -> Also check that '__all__' processing loads inline functions!
metadict = {
"functions": {
"makeArrayWithFormat_": (
b"@@",
"",
{"variadic": True, "arguments": {0: {"printf_format": True}}},
),
"makeArrayWithCFormat_": (
b"@*",
"",
{"variadic": True, "arguments": {0: {"printf_format": True}}},
),
"make4Tuple_": (
b"@^d",
"",
{
"arguments": {
0: {
"type_modifier": objc._C_IN,
"c_array_of_fixed_length": 4,
"null_accepted": False,
}
}
},
),
"NoSuchFunction": (b"@d", "", {}),
}
}
inline_list = metadatafunction.function_list
mod = objc.ObjCLazyModule(
"MyFramework", None, None, metadict, inline_list, {}, ()
)
self.assertIsInstance(mod, objc.ObjCLazyModule)
self.assertIsInstance(mod.makeArrayWithFormat_, objc.function)
v = mod.makeArrayWithFormat_("%3d", 10)
self.assertEqual(list(v), ["%3d", " 10"])
with self.assertRaisesRegex(AttributeError, "NoSuchFunction"):
mod.NoSuchFunction
mod.make4Tuple_ = 42
self.assertIn("makeArrayWithFormat_", mod.__all__)
self.assertIn("makeArrayWithCFormat_", mod.__all__)
self.assertEqual(mod.make4Tuple_, 42)
def test_inline_list__all__(self):
# Check __all__ handling for inline functions
metadict = {
"functions": {
"makeArrayWithFormat_": (
b"@@",
"",
{"variadic": True, "arguments": {0: {"printf_format": True}}},
),
"makeArrayWithCFormat_": (
b"@*",
"",
{"variadic": True, "arguments": {0: {"printf_format": True}}},
),
"make4Tuple_": (
b"@^d",
"",
{
"arguments": {
0: {
"type_modifier": objc._C_IN,
"c_array_of_fixed_length": 4,
"null_accepted": False,
}
}
},
),
"NoSuchFunction": (b"@d", "", {}),
}
}
inline_list = metadatafunction.function_list
mod = objc.ObjCLazyModule(
"MyFramework", None, None, metadict, inline_list, {}, ()
)
self.assertIsInstance(mod, objc.ObjCLazyModule)
mod.__dict__["makeArrayWithFormat_"] = 42
mod.__dict__["make4Tuple_"] = 43
self.assertIn("makeArrayWithFormat_", mod.__all__)
self.assertIn("makeArrayWithCFormat_", mod.__all__)
self.assertIn("make4Tuple_", mod.__all__)
self.assertEqual(mod.makeArrayWithFormat_, 42)
self.assertEqual(mod.make4Tuple_, 43)
self.assertIsInstance(mod.makeArrayWithCFormat_, objc.function)
def test_cftype(self):
# XXX: Need test for a magic cookie constant for a type that is unknown to the bridge
metadict = {
"cftypes": [
("CFAllocatorRef", b"^{__CFAllocator=}", "CFAllocatorGetTypeID", None),
(
"CFArrayRef",
b"^{__CFArray=}",
"CFArrayGetTypeID",
"DoesNotExist,NSArray",
),
(
"CFAttributedStringRef",
b"^{__CFAttributedString=}",
"CFAttributedStringGetTypeID",
"__NSCFAttributedString,NSCFAttributedString",
),
("CFBagRef", b"^{__CFBag=}", "xCFBagGetTypeID", None),
("CFNoType", b"^{__CFNoType", "CFNoTypeGetTypeID", "DoesNotExist"),
],
"functions": {
"CFAllocatorGetTypeID": (objc._C_NSUInteger, ""),
"CFArrayGetTypeID": (objc._C_NSUInteger, ""),
},
"constants": "$kCFAllocatorDefault@=^{__CFAllocator=}$"
"kCFAllocatorMalloc@=^{__CFAllocator=}$kCFAllocatorMissing@=^{__CFAllocator=}$", # noqa: B950
"constants_dict": {
"kCFAllocatorSystemDefault": "=^{__CFAllocator=}",
"kCFAllocatorMallocZone": "=^{__CFAllocator=}",
"kCFAllocatorMissingZone": "=^{__CFAllocator=}",
"kCFAllocatorMissingOtherZone": "=^{__CFAllocator=}",
},
}
mod = objc.ObjCLazyModule(
"AppKit",
None,
"/System/Library/Frameworks/CoreFoundation.framework",
metadict,
None,
{},
(),
)
# Ensure that all types are loaded:
self.assertIn("CFAllocatorRef", mod.__dict__)
self.assertIn("CFArrayRef", mod.__dict__)
self.assertIn("CFAttributedStringRef", mod.__dict__)
self.assertIn("CFBagRef", mod.__dict__)
self.assertNotIn("CFNoType", mod.__dict__)
# Type validation:
self.assertIn("NSCFType", mod.CFAllocatorRef.__bases__[0].__name__)
self.assertIs(mod.CFArrayRef, objc.lookUpClass("NSArray"))
self.assertIn(
mod.CFAttributedStringRef,
lookupClasses("NSCFAttributedString", "__NSCFAttributedString"),
)
self.assertIn("NSCFType", mod.CFBagRef.__name__)
self.assertIsNot(mod.CFBagRef, mod.CFAllocatorRef)
# Tests for 'magic cookie' constants:
self.assertIsInstance(mod.kCFAllocatorDefault, objc.objc_object)
self.assertTrue((mod.kCFAllocatorDefault.__flags__ & 0x10) == 0x10)
self.assertIsInstance(mod.kCFAllocatorDefault, mod.CFAllocatorRef)
self.assertIsInstance(repr(mod.kCFAllocatorDefault), str)
self.assertIn("magic instance", repr(mod.kCFAllocatorDefault))
# XXX: These need to be in a different test file
self.assertTrue(getattr(mod.kCFAllocatorDefault, "__is_magic")()) # noqa: B009
self.assertTrue(mod.kCFAllocatorDefault == mod.kCFAllocatorDefault)
self.assertFalse(mod.kCFAllocatorDefault != mod.kCFAllocatorDefault)
self.assertTrue(mod.kCFAllocatorDefault != mod.CFBagRef)
self.assertFalse(mod.kCFAllocatorDefault == mod.CFBagRef)
with self.assertRaisesRegex(
TypeError,
"'<' not supported between instances of 'CFAllocatorRef' and 'CFAllocatorRef'",
):
mod.kCFAllocatorDefault < mod.kCFAllocatorDefault # noqa: B015
with self.assertRaisesRegex(
TypeError,
"'<=' not supported between instances of 'CFAllocatorRef' and 'CFAllocatorRef'",
):
mod.kCFAllocatorDefault <= mod.kCFAllocatorDefault # noqa: B015
with self.assertRaisesRegex(
TypeError,
"'>' not supported between instances of 'CFAllocatorRef' and 'CFAllocatorRef'",
):
mod.kCFAllocatorDefault > mod.kCFAllocatorDefault # noqa: B015
with self.assertRaisesRegex(
TypeError,
"'>=' not supported between instances of 'CFAllocatorRef' and 'CFAllocatorRef'",
):
mod.kCFAllocatorDefault >= mod.kCFAllocatorDefault # noqa: B015
self.assertIsInstance(mod.kCFAllocatorSystemDefault, objc.objc_object)
self.assertTrue((mod.kCFAllocatorSystemDefault.__flags__ & 0x10) == 0x10)
self.assertIsInstance(mod.kCFAllocatorSystemDefault, mod.CFAllocatorRef)
with self.assertRaisesRegex(AttributeError, "kCFAllocatorMissing"):
mod.kCFAllocatorMissing
with self.assertRaisesRegex(AttributeError, "kCFAllocatorMissingZone"):
mod.kCFAllocatorMissingZone
self.assertIn("kCFAllocatorDefault", mod.__all__)
self.assertIn("kCFAllocatorSystemDefault", mod.__all__)
self.assertIn("kCFAllocatorMallocZone", mod.__all__)
self.assertIn("kCFAllocatorMalloc", mod.__all__)
with self.assertRaisesRegex(AttributeError, "kCFAllocatorOtherMissingZone"):
mod.kCFAllocatorOtherMissingZone
self.assertIsInstance(mod.kCFAllocatorMalloc, objc.objc_object)
self.assertTrue((mod.kCFAllocatorMalloc.__flags__ & 0x10) == 0x10)
self.assertIsInstance(mod.kCFAllocatorMalloc, mod.CFAllocatorRef)
self.assertIsInstance(mod.kCFAllocatorMallocZone, objc.objc_object)
self.assertTrue((mod.kCFAllocatorMallocZone.__flags__ & 0x10) == 0x10)
self.assertIsInstance(mod.kCFAllocatorMallocZone, mod.CFAllocatorRef)
def test_magic_objc_does_not_work(self):
metadict = {
"constants_dict": {
"kCFURLUbiquitousItemDownloadingStatusDownloaded": "=@",
},
}
mod = objc.ObjCLazyModule(
"AppKit",
None,
"/System/Library/Frameworks/CoreFoundation.framework",
metadict,
None,
{},
(),
)
with self.assertRaisesRegex(
ValueError,
"Don't know CF type for typestr '@', cannot create special wrapper",
):
mod.kCFURLUbiquitousItemDownloadingStatusDownloaded
def assertDeprecationWarning(self, func):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
func()
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, objc.ApiDeprecationWarning))
def assertNoDeprecationWarning(self, func):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
func()
self.assertEqual(len(w), 0)
def test_deprecations(self):
metadict = {
"constants": "$NSWorkspaceMoveOperation$NSWorkspaceCopyOperation@@$",
"constants_dict": {
"NSWorkspaceLinkOperation": "@",
"NSWindowWillCloseNotification": "@",
"NSUnderlineByWordMask": objc._C_NSUInteger.decode(),
},
"enums": "$NSAWTEventType@16$NSAboveBottom@4$NSAboveTop@1$",
"aliases": {"min": "LONG_MIN", "max": "LONG_MAX"},
"deprecated_aliases": {
"min": 1004,
"max": 1008,
},
"deprecated_constants": {
"NSWorkspaceLinkOperation": 1004,
"NSWorkspaceMoveOperation": 1008,
},
"deprecated_enums": {
"NSAWTEventType": 1004,
"NSAboveBottom": 1008,
},
}
def make_mod():
initial_dict = {"__doc__": "AppKit test module"}
return objc.ObjCLazyModule(
"AppKit",
None,
"/System/Library/Frameworks/AppKit.framework",
copy.deepcopy(metadict),
None,
initial_dict,
(),
)
with deprecation_warnings(1003):
mod = make_mod()
self.assertIsInstance(mod, objc.ObjCLazyModule)
self.assertNoDeprecationWarning(lambda: mod.NSWorkspaceLinkOperation)
self.assertNoDeprecationWarning(lambda: mod.NSWorkspaceMoveOperation)
self.assertNoDeprecationWarning(lambda: mod.NSAWTEventType)
self.assertNoDeprecationWarning(lambda: mod.NSAboveBottom)
self.assertNoDeprecationWarning(lambda: mod.min)
self.assertNoDeprecationWarning(lambda: mod.max)
with deprecation_warnings(1005):
mod = make_mod()
self.assertIsInstance(mod, objc.ObjCLazyModule)
self.assertDeprecationWarning(lambda: mod.NSWorkspaceLinkOperation)
self.assertNoDeprecationWarning(lambda: mod.NSWorkspaceMoveOperation)
self.assertDeprecationWarning(lambda: mod.NSAWTEventType)
self.assertNoDeprecationWarning(lambda: mod.NSAboveBottom)
self.assertDeprecationWarning(lambda: mod.min)
self.assertNoDeprecationWarning(lambda: mod.max)
with deprecation_warnings(1200):
mod = make_mod()
self.assertIsInstance(mod, objc.ObjCLazyModule)
self.assertDeprecationWarning(lambda: mod.NSWorkspaceLinkOperation)
self.assertDeprecationWarning(lambda: mod.NSWorkspaceMoveOperation)
self.assertDeprecationWarning(lambda: mod.NSAWTEventType)
self.assertDeprecationWarning(lambda: mod.NSAboveBottom)
self.assertDeprecationWarning(lambda: mod.min)
self.assertDeprecationWarning(lambda: mod.max)
def test_functions_all(self):
for override in (False, True):
metadict = {
"functions": {
"LSSharedFileListItemGetTypeID": (b"Q",),
},
}
initial_dict = {"__doc__": "AppKit test module"}
mod = objc.ObjCLazyModule(
"AppKit",
None,
"/System/Library/Frameworks/AppKit.framework",
copy.deepcopy(metadict),
None,
initial_dict,
(),
)
if override:
mod.LSSharedFileListItemGetTypeID = 42
mod.__all__
self.assertEqual(mod.LSSharedFileListItemGetTypeID, 42)
else:
self.assertIsInstance(mod.LSSharedFileListItemGetTypeID, objc.function)
def do_indirect_magic(self, fetchall):
metadict = {
"cftypes": [
(
"LSSharedFileListItemRef",
b"^{OpaqueLSSharedFileListItemRef=}",
"LSSharedFileListItemGetTypeID",
None,
),
],
"functions": {
"LSSharedFileListItemGetTypeID": (b"Q",),
},
"constants": "$kLSSharedFileListItemBeforeFirst@==^{OpaqueLSSharedFileListItemRef=}$",
}
initial_dict = {"__doc__": "AppKit test module"}
mod = objc.ObjCLazyModule(
"AppKit",
None,
"/System/Library/Frameworks/AppKit.framework",
copy.deepcopy(metadict),
None,
initial_dict,
(),
)
if fetchall:
mod.__all__
self.assertIsInstance(
mod.kLSSharedFileListItemBeforeFirst, mod.LSSharedFileListItemRef
)
def test_indirect_magic(self):
self.do_indirect_magic(False)
self.do_indirect_magic(True)
def do_indirect_magic_dict(self, fetchall):
metadict = {
"cftypes": [
(
"LSSharedFileListItemRef",
b"^{OpaqueLSSharedFileListItemRef=}",
"LSSharedFileListItemGetTypeID",
None,
),
],
"functions": {
"LSSharedFileListItemGetTypeID": (b"Q",),
},
"constants_dict": {
"kLSSharedFileListItemBeforeFirst": "==^{OpaqueLSSharedFileListItemRef=}",
"kLSSharedFileListItemLaatste": "==^{OpaqueLSSharedFileListItemRef=}",
},
}
initial_dict = {"__doc__": "AppKit test module"}
mod = objc.ObjCLazyModule(
"AppKit",
None,
"/System/Library/Frameworks/AppKit.framework",
copy.deepcopy(metadict),
None,
initial_dict,
(),
)
if fetchall:
mod.__all__
self.assertIsInstance(
mod.kLSSharedFileListItemBeforeFirst, mod.LSSharedFileListItemRef
)
self.assertNotHasAttr(mod, "kLSSharedFileListItemLaatste")
def test_indirect_magic_dict(self):
self.do_indirect_magic_dict(False)
self.do_indirect_magic_dict(True)
def test_default_cftype(self):
metadict = {
"cftypes": [
(
"LSSharedFileListItemRef",
b"^{OpaqueLSSharedFileListItemRef=}",
None,
None,
),
],
}
initial_dict = {"__doc__": "AppKit test module"}
mod = objc.ObjCLazyModule(
"AppKit",
None,
"/System/Library/Frameworks/AppKit.framework",
copy.deepcopy(metadict),
None,
initial_dict,
(),
)
self.assertIn("CFType", mod.LSSharedFileListItemRef.__name__)
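# --- Illustrative sketch (not part of the original test file) ---
# The constructor exercised throughout these tests takes, positionally: the
# module name, a bundle identifier, a framework path, the metadata dict, an
# inline function table, an initial __dict__ and a tuple of parent modules.
# A minimal lazy module exposing two enum constants (mirroring
# test_enum_formats above) can be built like this; the names are arbitrary.
demo = objc.ObjCLazyModule(
    "DemoFramework",
    None,                                    # no bundle identifier
    None,                                    # no framework path, so nothing is loaded
    {"enums": "$DemoFlagA@1$DemoFlagB@2$"},  # packed enum metadata format used above
    None,                                    # no inline function table
    {"__doc__": "demo lazy module"},
    (),                                      # no parent modules
)
assert demo.DemoFlagA == 1 and demo.DemoFlagB == 2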
|
78ced80745dcce07fb3ff7335088c635c2f9e00c
|
1d989f79f0fbaf9682b8d66d1e65d1a4192f2ba9
|
/mkdocs/tests/utils/templates_tests.py
|
c6a8daaab6ecebd93553b34c6f92afc030bed828
|
[
"BSD-2-Clause"
] |
permissive
|
mkdocs/mkdocs
|
cf30863d3a684406ea19a4eda8557babf65a0f09
|
94e9f17cd7a69e70c18ae282d55ba2f34f93c542
|
refs/heads/master
| 2023-08-31T23:58:30.807317
| 2023-08-26T16:33:50
| 2023-08-26T16:33:50
| 15,830,664
| 17,026
| 3,070
|
BSD-2-Clause
| 2023-09-14T21:24:19
| 2014-01-11T21:05:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,801
|
py
|
templates_tests.py
|
import unittest
from textwrap import dedent
import yaml
from mkdocs.tests.base import load_config
from mkdocs.utils import templates
class UtilsTemplatesTests(unittest.TestCase):
def test_script_tag(self):
cfg_yaml = dedent(
'''
extra_javascript:
- some_plain_javascript.js
- implicitly_as_module.mjs
- path: explicitly_as_module.mjs
type: module
- path: deferred_plain.js
defer: true
- path: scripts/async_module.mjs
type: module
async: true
- path: 'aaaaaa/"my script".mjs'
type: module
async: true
defer: true
- path: plain.mjs
'''
)
config = load_config(**yaml.safe_load(cfg_yaml))
config.extra_javascript.append('plain_string.mjs')
self.assertEqual(
[
str(templates.script_tag_filter({'page': None, 'base_url': 'here'}, item))
for item in config.extra_javascript
],
[
'<script src="here/some_plain_javascript.js"></script>',
'<script src="here/implicitly_as_module.mjs" type="module"></script>',
'<script src="here/explicitly_as_module.mjs" type="module"></script>',
'<script src="here/deferred_plain.js" defer></script>',
'<script src="here/scripts/async_module.mjs" type="module" async></script>',
'<script src="here/aaaaaa/"my script".mjs" type="module" defer async></script>',
'<script src="here/plain.mjs"></script>',
'<script src="here/plain_string.mjs"></script>',
],
)
|
ce5c2c1fcc420d3320f5d7582c840e88e9a45bfe
|
857938ac2024b1e37f32a6631e85e179ae04b601
|
/src/python/gudhi/sklearn/cubical_persistence.py
|
7f86a95f9cea4e3fb07c9ce60dc19001ffb83348
|
[
"MIT"
] |
permissive
|
GUDHI/gudhi-devel
|
a2b08232a2ea66047b7a626d85dff0d50decc71c
|
2f76d9416e145282adcd8264438480008bd59f77
|
refs/heads/master
| 2023-08-31T13:44:17.776336
| 2023-08-29T20:20:16
| 2023-08-29T20:20:16
| 174,304,137
| 212
| 69
|
MIT
| 2023-09-14T15:34:48
| 2019-03-07T08:34:04
|
C++
|
UTF-8
|
Python
| false
| false
| 6,230
|
py
|
cubical_persistence.py
|
# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
# Author(s): Vincent Rouvreau
#
# Copyright (C) 2021 Inria
#
# Modification(s):
# - YYYY/MM Author: Description of the modification
from .. import CubicalComplex
from .._pers_cub_low_dim import _persistence_on_a_line, _persistence_on_rectangle_from_top_cells
from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np
# joblib is required by scikit-learn
from joblib import Parallel, delayed
# Mermaid sequence diagram - https://mermaid-js.github.io/mermaid-live-editor/
# sequenceDiagram
# USER->>CubicalPersistence: fit_transform(X)
# CubicalPersistence->>thread1: _transform(X[0])
# CubicalPersistence->>thread2: _transform(X[1])
# Note right of CubicalPersistence: ...
# thread1->>CubicalPersistence: [array( H0(X[0]) ), array( H1(X[0]) )]
# thread2->>CubicalPersistence: [array( H0(X[1]) ), array( H1(X[1]) )]
# Note right of CubicalPersistence: ...
# CubicalPersistence->>USER: [[array( H0(X[0]) ), array( H1(X[0]) )],<br/> [array( H0(X[1]) ), array( H1(X[1]) )],<br/> ...]
class CubicalPersistence(BaseEstimator, TransformerMixin):
"""
This is a class for computing the persistence diagrams from a cubical complex.
"""
def __init__(
self,
homology_dimensions,
input_type='top_dimensional_cells',
homology_coeff_field=11,
min_persistence=0.0,
n_jobs=None,
):
"""
Constructor for the CubicalPersistence class.
Parameters:
homology_dimensions (int or list of int): The returned persistence diagrams dimension(s).
Short circuit the use of :class:`~gudhi.representations.preprocessing.DimensionSelector` when only one
dimension matters (in other words, when `homology_dimensions` is an int).
input_type (str): 'top_dimensional_cells' if the filtration values passed to `transform()` are those of the
top-dimensional cells, 'vertices' if they correspond to the vertices.
homology_coeff_field (int): The homology coefficient field. Must be a prime number. Default value is 11.
min_persistence (float): The minimum persistence value to take into account (strictly greater than
`min_persistence`). Default value is `0.0`. Set `min_persistence` to `-1.0` to see all values.
n_jobs (int): cf. https://joblib.readthedocs.io/en/latest/generated/joblib.Parallel.html
"""
self.homology_dimensions = homology_dimensions
self.input_type = input_type
self.homology_coeff_field = homology_coeff_field
self.min_persistence = min_persistence
self.n_jobs = n_jobs
def fit(self, X, Y=None):
"""
Nothing to be done, but useful when included in a scikit-learn Pipeline.
"""
return self
def __transform(self, cells):
cells = np.asarray(cells)
if len(cells.shape) == 1 and self.min_persistence >= 0:
res = _persistence_on_a_line(cells)
if self.min_persistence > 0:
# It would be more efficient inside _persistence_on_a_line, but not worth it?
res = res[res[:, 1] - res[:, 0] > self.min_persistence]
# Wasteful if dim_list_ does not contain 0, but that seems unlikely.
return [res if i == 0 else np.empty((0,2)) for i in self.dim_list_]
if len(cells.shape) == 2 and self.input_type == 'top_dimensional_cells' and self.min_persistence >= 0:
if cells.size == 0:
diags = [np.empty((0,2)), np.empty((0,2))]
elif cells.shape[0] == 1 or cells.shape[1] == 1:
diags = [_persistence_on_a_line(cells.reshape(-1)), np.empty((0,2))]
elif cells.shape[0] == 2:
diags = [_persistence_on_a_line(cells.min(0)), np.empty((0,2))]
elif cells.shape[1] == 2:
diags = [_persistence_on_a_line(cells.min(1)), np.empty((0,2))]
else:
diags = _persistence_on_rectangle_from_top_cells(cells, self.min_persistence)
return [diags[i] if i in (0, 1) else np.empty((0,2)) for i in self.dim_list_]
if self.input_type == 'top_dimensional_cells':
cubical_complex = CubicalComplex(top_dimensional_cells=cells)
elif self.input_type == 'vertices':
cubical_complex = CubicalComplex(vertices=cells)
else:
raise ValueError("input_type can only be 'top_dimensional_cells' or 'vertices'")
cubical_complex.compute_persistence(
homology_coeff_field=self.homology_coeff_field, min_persistence=self.min_persistence
)
return [
cubical_complex.persistence_intervals_in_dimension(dim) for dim in self.dim_list_
]
def transform(self, X, Y=None):
"""Compute all the cubical complexes and their associated persistence diagrams.
:param X: Filtration values of the top-dimensional cells or vertices for each complex.
:type X: list of array-like
:return: Persistence diagrams in the format:
- If `homology_dimensions` was set to `n`: `[array( Hn(X[0]) ), array( Hn(X[1]) ), ...]`
- If `homology_dimensions` was set to `[i, j]`: `[[array( Hi(X[0]) ), array( Hj(X[0]) )], [array( Hi(X[1]) ), array( Hj(X[1]) )], ...]`
:rtype: list of (,2) array_like or list of list of (,2) array_like
"""
# Behaviour depends on whether homology_dimensions is a single int or a list of ints (else branch)
if isinstance(self.homology_dimensions, int):
unwrap = True
self.dim_list_ = [ self.homology_dimensions ]
else:
unwrap = False
self.dim_list_ = self.homology_dimensions
# the "threads" backend is preferred since cubical construction and persistence computation release the GIL
res = Parallel(n_jobs=self.n_jobs, prefer="threads")(delayed(self.__transform)(cells) for cells in X)
if unwrap:
res = [d[0] for d in res]
return res
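# --- Illustrative sketch (not part of the original module) ---
# Typical scikit-learn style usage of the transformer defined above: each
# element of X is a 2D array of top-dimensional cell filtration values, and
# the output contains one [H0, H1] pair of diagrams per input array. The
# random data is for demonstration only.
if __name__ == "__main__":
    X = [np.random.rand(32, 32) for _ in range(3)]
    cub = CubicalPersistence(homology_dimensions=[0, 1], n_jobs=1)
    diagrams = cub.fit_transform(X)
    # diagrams[0][0] -> H0 diagram of X[0], diagrams[0][1] -> H1 diagram of X[0]
    print(len(diagrams), diagrams[0][0].shape, diagrams[0][1].shape)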
|
579229bb581121ec713264b00ae34250845575d7
|
ad61cc119a42abfd3d64224a753817ae0f9ba058
|
/tests/functional/ecs/test_execute_command.py
|
b0ca876edef77bb627d9429118f77199fd82c74f
|
[
"Apache-2.0"
] |
permissive
|
aws/aws-cli
|
30b0e5b0fb6d736f1540990955f0a7351ee7a908
|
147d16dfdb72dc9cf362b676a57e46a49375afbd
|
refs/heads/develop
| 2023-09-03T19:52:07.955543
| 2023-09-01T20:37:50
| 2023-09-01T20:37:50
| 6,780,767
| 13,038
| 4,107
|
NOASSERTION
| 2023-09-13T19:48:11
| 2012-11-20T16:07:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,549
|
py
|
test_execute_command.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import errno
import json
from awscli.testutils import BaseAWSCommandParamsTest
from awscli.testutils import BaseAWSHelpOutputTest
from awscli.testutils import mock
class TestExecuteCommand(BaseAWSCommandParamsTest):
@mock.patch('awscli.customizations.ecs.executecommand.check_call')
def test_execute_command_success(self, mock_check_call):
cmdline = 'ecs execute-command --cluster someCluster ' \
'--task someTaskId ' \
'--interactive --command ls ' \
'--region us-west-2'
mock_check_call.return_value = 0
self.parsed_responses = [{
"containerName": "someContainerName",
"containerArn": "someContainerArn",
"taskArn": "someTaskArn",
"session": {"sessionId": "session-id",
"tokenValue": "token-value",
"streamUrl": "stream-url"},
"clusterArn": "someCluster",
"interactive": "true"
}, {
"failures": [],
"tasks": [
{
"clusterArn": "ecs/someCLuster",
"desiredStatus": "RUNNING",
"createdAt": "1611619514.46",
"taskArn": "someTaskArn",
"containers": [
{
"containerArn": "ecs/someContainerArn",
"taskArn": "ecs/someTaskArn",
"name": "someContainerName",
"managedAgents": [
{
"reason": "Execute Command Agent started",
"lastStatus": "RUNNING",
"lastStartedAt": "1611619528.272",
"name": "ExecuteCommandAgent"
}
],
"runtimeId": "someRuntimeId"
},
{
"containerArn": "ecs/dummyContainerArn",
"taskArn": "ecs/someTaskArn",
"name": "dummyContainerName",
"managedAgents": [
{
"reason": "Execute Command Agent started",
"lastStatus": "RUNNING",
"lastStartedAt": "1611619528.272",
"name": "ExecuteCommandAgent"
}
],
"runtimeId": "dummyRuntimeId"
}
],
"lastStatus": "RUNNING",
"enableExecuteCommand": "true"
}
]
}]
self.run_cmd(cmdline, expected_rc=0)
self.assertEqual(self.operations_called[0][0].name,
'ExecuteCommand'
)
actual_response = json.loads(mock_check_call.call_args[0][0][1])
self.assertEqual(
{"sessionId": "session-id",
"tokenValue": "token-value",
"streamUrl": "stream-url"},
actual_response
)
@mock.patch('awscli.customizations.ecs.executecommand.check_call')
def test_execute_command_fails(self, mock_check_call):
cmdline = 'ecs execute-command --cluster someCluster ' \
'--task someTaskId ' \
'--interactive --command ls ' \
'--region us-west-2'
mock_check_call.side_effect = OSError(errno.ENOENT, 'some error')
self.run_cmd(cmdline, expected_rc=255)
class TestHelpOutput(BaseAWSHelpOutputTest):
def test_execute_command_output(self):
self.driver.main(['ecs', 'execute-command', 'help'])
self.assert_contains('Output\n======\n\nNone')
|
c9391a689ea01735d127fde1df6d73778bfc7efb
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/StarGAN/preprocess.py
|
3d8f79f8f9a99e0a6e755030b52938f4e8cb5af9
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,271
|
py
|
preprocess.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""pre process for 310 inference"""
import os
from src.utils import create_labels
from src.config import get_config
from src.dataset import dataloader
if __name__ == "__main__":
config = get_config()
# Define Dataset
data_path = config.celeba_image_dir
attr_path = config.attr_path
dataset, length = dataloader(img_path=data_path,
attr_path=attr_path,
batch_size=1,
selected_attr=config.selected_attrs,
dataset=config.dataset,
mode='test',
shuffle=False)
img_path = os.path.join('../bin_data', "img_data")
label_path = os.path.join('../bin_data', "label")
if not os.path.exists(img_path):
os.makedirs(img_path)
os.makedirs(label_path)
ds = dataset.create_dict_iterator(num_epochs=1)
print('Start preprocessing!')
for idx, data in enumerate(ds):
x_real = data['image']
c_trg_list = create_labels(data['attr'].asnumpy(), selected_attrs=config.selected_attrs)
for i in range(5):
file_name = "sop_" + str(idx) + "_" + str(i) + ".bin"
img_file_path = os.path.join(img_path, file_name)
x_real.asnumpy().tofile(img_file_path)
label_file_path = os.path.join(label_path, file_name)
c_trg_list.asnumpy()[i].tofile(label_file_path)
print('Finish processing img', idx, "saving as", file_name)
print("=" * 20, "export bin files finished", "=" * 20)
|
c50bb4804f91c3143663a728243863f0c14584a3
|
eb7814e2f053e4ff2c24b3457b8633ac61a2ac99
|
/binding/python/setup.py
|
0ddd6e7a2021f79ca510c988413f1a2150dfdcfa
|
[
"Apache-2.0"
] |
permissive
|
Picovoice/rhino
|
28cdd1c0ca0818133ce636fec84d03d63f756d89
|
11eb8eb2c665de2d5212404da2257070b453fe3d
|
refs/heads/master
| 2023-09-03T22:38:37.985833
| 2023-08-30T22:33:13
| 2023-08-30T22:33:13
| 155,038,855
| 557
| 83
|
Apache-2.0
| 2023-08-30T22:33:14
| 2018-10-28T05:34:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,604
|
py
|
setup.py
|
import os
import shutil
import setuptools
os.system('git clean -dfx')
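# Start from a pristine tree so leftovers from a previous build (the generated
# pvrhino/ folder, MANIFEST.in) are removed before packaging.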
package_folder = os.path.join(os.path.dirname(__file__), 'pvrhino')
os.mkdir(package_folder)
shutil.copy(os.path.join(os.path.dirname(__file__), '../../LICENSE'), package_folder)
shutil.copy(os.path.join(os.path.dirname(__file__), '__init__.py'), os.path.join(package_folder, '__init__.py'))
shutil.copy(os.path.join(os.path.dirname(__file__), '_rhino.py'), os.path.join(package_folder, '_rhino.py'))
shutil.copy(os.path.join(os.path.dirname(__file__), '_factory.py'), os.path.join(package_folder, '_factory.py'))
shutil.copy(os.path.join(os.path.dirname(__file__), '_util.py'), os.path.join(package_folder, '_util.py'))
platforms = ('beaglebone', 'jetson', 'linux', 'mac', 'raspberry-pi', 'windows')
os.mkdir(os.path.join(package_folder, 'lib'))
for platform in ('common',) + platforms:
shutil.copytree(
os.path.join(os.path.dirname(__file__), '../../lib', platform),
os.path.join(package_folder, 'lib', platform))
MANIFEST_IN = """
include pvrhino/LICENSE
include pvrhino/__init__.py
include pvrhino/_rhino.py
include pvrhino/_factory.py
include pvrhino/_util.py
include pvrhino/lib/common/rhino_params.pv
include pvrhino/lib/beaglebone/libpv_rhino.so
recursive-include pvrhino/lib/jetson *
include pvrhino/lib/linux/x86_64/libpv_rhino.so
include pvrhino/lib/mac/x86_64/libpv_rhino.dylib
include pvrhino/lib/mac/arm64/libpv_rhino.dylib
recursive-include pvrhino/lib/raspberry-pi *
include pvrhino/lib/windows/amd64/libpv_rhino.dll
"""
with open(os.path.join(os.path.dirname(__file__), 'MANIFEST.in'), 'w') as f:
f.write(MANIFEST_IN.strip('\n '))
with open(os.path.join(os.path.dirname(__file__), 'README.md'), 'r') as f:
long_description = f.read()
setuptools.setup(
name="pvrhino",
version="2.2.1",
author="Picovoice",
author_email="hello@picovoice.ai",
description="Rhino Speech-to-Intent engine.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Picovoice/rhino",
packages=["pvrhino"],
include_package_data=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Multimedia :: Sound/Audio :: Speech"
],
python_requires='>=3.5',
keywords="Speech-to-Intent, voice commands, voice control, speech recognition, natural language understanding"
)
|
61f8213218e1de555b6805e24de10885fc3d7018
|
dcd772f567ef8a8a1173a9f437cd68f211fb9362
|
/tests/framework/unit_tests/CustomDrivers/DemoPythonRavenRunningRaven.py
|
ca5804bafabd6db303eb40c892dc7aeb67734cac
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
idaholab/raven
|
39cdce98ad916c638399232cdc01a9be00e200a2
|
2b16e7aa3325fe84cab2477947a951414c635381
|
refs/heads/devel
| 2023-08-31T08:40:16.653099
| 2023-08-29T16:21:51
| 2023-08-29T16:21:51
| 85,989,537
| 201
| 126
|
Apache-2.0
| 2023-09-13T21:55:43
| 2017-03-23T19:29:27
|
C++
|
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
DemoPythonRavenRunningRaven.py
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Demonstrate RAVEN running RAVEN in Python workflows.
"""
import os, sys
import matplotlib.pyplot as plt
# note: we use this complicated way to find RAVEN because we don't know how RAVEN
# is installed on specific machines; it can be simplified greatly for specific applications
frameworkDir = os.path.abspath(os.path.join(*([os.path.dirname(__file__)]+[os.pardir]*4)))
sys.path.append(frameworkDir)
# instantiate a RAVEN instance
from ravenframework import Raven
raven = Raven()
# load workflow XML file
raven.loadWorkflowFromFile('basic.xml')
# run the workflow
returnCode = raven.runWorkflow()
# check for successful run
if returnCode != 0:
raise RuntimeError('RAVEN did not run successfully!')
# create a simple plot
results = raven.getEntity('DataObjects', 'outer_samples')
data = results.asDataset()
data.plot.scatter(x='mean_y1', y='mean_y2', hue='mean_ans')
# uncomment to see the plot
# plt.show()
"""
<TestInfo>
<name>framework.demo_python_raven_running_raven</name>
<author>dgarrett622</author>
<created>2022-04-20</created>
<classesTested>PythonRaven</classesTested>
<description>
Demo of using PythonRaven to run RAVEN running RAVEN workflows.
      Different from the unit tests in that this is easier to read and understand
</description>
</TestInfo>
"""
|
d235f93eba38c8cb5f621a45476e43e8efa7ae76
|
e58aaa29a356d19f3b43b614db08e47f387dd0af
|
/sol7.py
|
9dfe18e747677e4a5a4a068e2783af7f3c6e2f27
|
[] |
no_license
|
posquit0/PythonChallenge
|
100ad89779de24cf3039c95bf63d4c00012c2025
|
fa709fc8170d02a6511d5f07f5a7d314b180ff82
|
refs/heads/master
| 2020-05-19T21:30:40.198767
| 2015-01-27T08:12:45
| 2015-01-27T08:12:45
| 27,856,891
| 742
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
sol7.py
|
#!/usr/bin/env python
# encoding: utf-8
from PIL import Image
import StringIO
import requests
import re
IMG_URL = "http://www.pythonchallenge.com/pc/def/oxygen.png"
MSG_LINE = 50
MESSAGE_REGEX = r"(\d+)"
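# The hidden text sits in a single row of 7-pixel-wide gray blocks: sampling every
# 7th pixel of row 50 and keeping only pixels with r == g == b yields an ASCII
# sentence, which itself embeds a list of integers that decodes to the final message.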
def main():
data = requests.get(IMG_URL).content
data = StringIO.StringIO(data)
img = Image.open(data)
w, h = img.size
filtered = [img.getpixel((x, MSG_LINE)) for x in range(0, w, 7)]
message = "".join(chr(r) for (r, g, b, _) in filtered if r == g == b)
print message
message = "".join(
chr(ch) for ch in map(int, re.findall(MESSAGE_REGEX, message))
)
print message
if __name__ == '__main__':
main()
|
09f8941234a9024b3a4919941fd534bae0e740a7
|
ea82312a43465be66835918f40c73a5d3be4b6eb
|
/src/Puzzle/Edge.py
|
52bc40aa5f180241e34b8752eef43506d4112adf
|
[
"MIT"
] |
permissive
|
Kawaboongawa/Zolver
|
297364c4a9bf175115959ff398c219f96f5a4298
|
c4ea07041e7fa617ebfe2c1cbf117eb345c7ce1e
|
refs/heads/master
| 2022-07-12T10:31:40.769807
| 2020-05-01T18:55:54
| 2020-05-01T18:55:54
| 152,142,675
| 110
| 31
|
MIT
| 2022-06-21T21:39:30
| 2018-10-08T20:30:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,556
|
py
|
Edge.py
|
import numpy as np
from Puzzle.Enums import TypeEdge, Directions
class Edge:
"""
Wrapper for edges.
    Contains the shape, color, type, and position information of an edge within the puzzle.
"""
def __init__(self, shape, color, type=TypeEdge.HOLE, connected=False, direction=Directions.N):
self.shape = shape
self.shape_backup = shape
self.color = color
self.type = type
self.connected = connected
self.direction = direction
def is_border(self, threshold):
"""
        Fast check to determine if the edge is a border.
:param threshold: distance threshold
:return: Boolean
"""
def dist_to_line(p1, p2, p3):
return np.linalg.norm(np.cross(p2 - p1, p1 - p3)) / np.linalg.norm(p2 - p1)
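        # Sum the perpendicular distance of every contour point to the chord joining
        # the edge's endpoints: a small total means the edge is nearly straight and
        # therefore a puzzle border.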
total_dist = 0
for p in self.shape:
total_dist += dist_to_line(self.shape[0], self.shape[-1], p)
return total_dist < threshold
def backup_shape(self):
""" Copy the shape for backup """
self.shape_backup = np.copy(self.shape)
def restore_backup_shape(self):
""" Restore the shape previously backedup """
self.shape = self.shape_backup
def is_compatible(self, e2):
""" Helper to determine if two edges are compatible """
return (self.type == TypeEdge.HOLE and e2.type == TypeEdge.HEAD) or (self.type == TypeEdge.HEAD and e2.type == TypeEdge.HOLE) \
or self.type == TypeEdge.UNDEFINED or e2.type == TypeEdge.UNDEFINED
|
fd10a9c6928c05ecff74b8ee52249e23586c797c
|
28bd76bd768656eee11c5e79e284b75e4af10559
|
/features/environment.py
|
0057a854b922b0044747744070c98956a051205b
|
[
"MIT"
] |
permissive
|
MicroPyramid/opensource-job-portal
|
debfa0b15f8cd0ab82153a143841c3af0801f46a
|
e21aa8fa62df96f41ddbea913f386ee7c6780ed0
|
refs/heads/master
| 2023-07-29T09:38:00.241309
| 2022-11-29T02:17:04
| 2022-11-29T02:17:04
| 227,341,330
| 360
| 208
|
MIT
| 2023-07-25T15:19:17
| 2019-12-11T10:42:38
|
HTML
|
UTF-8
|
Python
| false
| false
| 537
|
py
|
environment.py
|
import django
import os
from django.core.management import call_command
from splinter.browser import Browser
from features.helpers import initiate_test_data
from peeldb.models import User
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jobsp.settings_server")
django.setup()
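# behave runs before_all/after_all once around the whole test run, so a single
# Firefox session (via splinter) is shared by every scenario and closed at the end.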
def before_all(context):
User.objects.filter(email="test@mp.com").delete()
context.browser = Browser("firefox")
context.server_url = "http://test.peeljobs.com:8000"
def after_all(context):
context.browser.quit()
context.browser = None
|
a960f5b227bd531f13023a2141543606a0b7bae5
|
cf28b08c7c47fe3fa8890fcd2a0429d83d9dbb3e
|
/cx_Freeze/importshed/PyQt5/QtWidgets.pyi
|
56a5b828696edc72c6a71f7cd81af0edc51a2ebc
|
[
"Python-2.0"
] |
permissive
|
marcelotduarte/cx_Freeze
|
bc948693f5b650bf2459c0093c3a98ca419a1df9
|
38438b2418af9cda5f982c74ca55dc235d08aaa1
|
refs/heads/main
| 2023-08-18T11:37:29.282372
| 2023-08-16T01:32:06
| 2023-08-16T01:32:06
| 79,693,503
| 628
| 121
| null | 2023-09-14T19:15:58
| 2017-01-22T04:25:52
|
Python
|
UTF-8
|
Python
| false
| false
| 108
|
pyi
|
QtWidgets.pyi
|
# Generated by cx_Freeze
import PyQt5.sip
from PyQt5 import QtCore
from PyQt5 import QtGui
import datetime
|
1c72e88ca6b091ee0d1f6ad204e74e5011922db2
|
3d74f759ee48d383aa82eeff0a55864a93a001ba
|
/DEPS
|
7f7a074a5d698bbc53eb6bba7456a7544e8ab889
|
[
"BSD-3-Clause"
] |
permissive
|
flutter/engine
|
78be5418a9b2f7730dda9ca9fcb25b7055f3da85
|
902ece7f89d7730cc69f35e098b223cbbf4e25f1
|
refs/heads/main
| 2023-09-04T06:12:34.462953
| 2023-09-04T05:33:32
| 2023-09-04T05:33:32
| 39,211,337
| 7,090
| 6,862
|
BSD-3-Clause
| 2023-09-14T21:58:17
| 2015-07-16T17:39:56
|
C++
|
UTF-8
|
Python
| false
| false
| 43,719
|
DEPS
|
# The dependencies referenced by the Flutter Engine.
#
# This file is referenced by the .gclient file at the root of the checkout.
# To preview changes to the dependencies, update this file and run
# `gclient sync`.
#
# When adding a new dependency, please update the top-level .gitignore file
# to list the dependency's destination directory.
vars = {
'chromium_git': 'https://chromium.googlesource.com',
'swiftshader_git': 'https://swiftshader.googlesource.com',
'dart_git': 'https://dart.googlesource.com',
'flutter_git': 'https://flutter.googlesource.com',
'fuchsia_git': 'https://fuchsia.googlesource.com',
'github_git': 'https://github.com',
'skia_git': 'https://skia.googlesource.com',
'llvm_git': 'https://llvm.googlesource.com',
# OCMock is for testing only so there is no google clone
'ocmock_git': 'https://github.com/erikdoe/ocmock.git',
'skia_revision': '4d0501380011a8f7261a9ed3ba135f7056dceaa4',
# WARNING: DO NOT EDIT canvaskit_cipd_instance MANUALLY
# See `lib/web_ui/README.md` for how to roll CanvasKit to a new version.
'canvaskit_cipd_instance': '61aeJQ9laGfEFF_Vlc_u0MCkqB6xb2hAYHRBxKH-Uw4C',
# Do not download the Emscripten SDK by default.
# This prevents us from downloading the Emscripten toolchain for builds
# which do not build for the web. This toolchain is needed to build CanvasKit
# for the web engine.
'download_emsdk': False,
  # For experimental features some dependencies may only be available in the master/main
# channels. This variable is being set when CI is checking out the repository.
'release_candidate': False,
  # As Dart does, we use Fuchsia's GN and Clang toolchain. These revisions
# should be kept up to date with the revisions pulled by Dart.
# The list of revisions for these tools comes from Fuchsia, here:
# https://fuchsia.googlesource.com/integration/+/HEAD/toolchain
# If there are problems with the toolchain, contact fuchsia-toolchain@.
'clang_version': 'git_revision:020d2fb7711d70e296f19d83565f8d93d2cfda71',
# The goma version and the clang version can be tightly coupled. If goma
# stops working on a clang roll, this may need to be updated using the value
# from the 'integration' tag of
# https://chrome-infra-packages.appspot.com/p/fuchsia/third_party/goma/client
'goma_version': ' git_revision:41b3bcb64014144a844153fd5588c36411fffb56',
'reclient_version': 'git_revision:81e819b39d4743462857cc55430d898b9fcca1af',
'gcloud_version': 'version:2@444.0.0.chromium.3',
# When updating the Dart revision, ensure that all entries that are
# dependencies of Dart are also updated to match the entries in the
# Dart SDK's DEPS file for that revision of Dart. The DEPS file for
# Dart is: https://github.com/dart-lang/sdk/blob/main/DEPS
# You can use //tools/dart/create_updated_flutter_deps.py to produce
# updated revision list of existing dependencies.
'dart_revision': 'a5c7102af50981571d8ac94f0ec24f94727c2461',
# WARNING: DO NOT EDIT MANUALLY
# The lines between blank lines above and below are generated by a script. See create_updated_flutter_deps.py
'dart_binaryen_rev': 'cdb7aeab40b4c522de20b242019f7e88641445d5',
'dart_boringssl_gen_rev': 'a468ba9fec3f59edf46a7db98caaca893e1e4d96',
'dart_boringssl_rev': '74646566e93de7551bfdfc5f49de7462f13d1d05',
'dart_browser_launcher_rev': '27ec600af41b0d0ebe9a3db6ad36e9ed11976b84',
'dart_clock_rev': '263e508a36ed90e4d85b60dd70552d20e71a9ae9',
'dart_collection_rev': '1a9b7eb64be10a8ba4ced7eb36b4b265a49d5d41',
'dart_devtools_rev': 'acbc179425b4596b7c2ba7d9c4263077f2e18098',
'dart_libprotobuf_rev': '24487dd1045c7f3d64a21f38a3f0c06cc4cf2edb',
'dart_perfetto_rev': 'b8da07095979310818f0efde2ef3c69ea70d62c5',
'dart_protobuf_gn_rev': 'ca669f79945418f6229e4fef89b666b2a88cbb10',
'dart_protobuf_rev': '5e8f36b48f015532cd1165b47686b659fc8870da',
'dart_pub_rev': '42819a1e10f803eb7f6296692c5a976e1c647360',
'dart_root_certificates_rev': '692f6d6488af68e0121317a9c2c9eb393eb0ee50',
'dart_tools_rev': 'b72fae8673a5fa30b0eff4077005ac95f960dc9b',
'dart_watcher_rev': '7457413060ed7403b90b01533a61bd959932122e',
'dart_webdev_rev': 'fc876cb0de59526160ed17efaa920557a6e2ba32',
'dart_webkit_inspection_protocol_rev': '39a3c297ff573635e7936b015ce4f3466e4739d6',
'dart_yaml_edit_rev': '87dcf31fcaada207ae7c3527f9885982534badce',
'dart_zlib_rev': '14dd4c4455602c9b71a1a89b5cafd1f4030d2e3f',
'ocmock_rev': 'c4ec0e3a7a9f56cfdbd0aa01f4f97bb4b75c5ef8', # v3.7.1
# Download a prebuilt Dart SDK by default
'download_dart_sdk': True,
# Checkout Android dependencies only on platforms where we build for Android targets.
'download_android_deps': 'host_os == "mac" or (host_os == "linux" and host_cpu == "x64")',
# Checkout Windows dependencies only if we are building on Windows.
'download_windows_deps' : 'host_os == "win"',
# Checkout Linux dependencies only when building on Linux.
'download_linux_deps': 'host_os == "linux"',
# Downloads the fuchsia SDK as listed in fuchsia_sdk_path var. This variable
# is currently only used for the Fuchsia LSC process and is not intended for
# local development.
'download_fuchsia_sdk': False,
'fuchsia_sdk_path': '',
# An LLVM backend needs LLVM binaries and headers. To avoid build time
# increases we can use prebuilts. We don't want to download this on every
# CQ/CI bot nor do we want the average Dart developer to incur that cost.
# So by default we will not download prebuilts. This varible is needed in
# the flutter engine to ensure that Dart gn has access to it as well.
"checkout_llvm": False,
# Setup Git hooks by default.
'setup_githooks': True,
# When this is true, the goma client will be downloaded from cipd, and
# the engine build will prefer to use this client over a client that is
# specified by GOMA_DIR, or installed in the default goma install location.
'use_cipd_goma': False,
  # This is not downloaded by default because it increases the
# `gclient sync` time by between 1 and 3 minutes. This option is enabled
# in flutter/ci/builders/mac_impeller_cmake_example.json, and is likely to
# only be useful locally when reproducing issues found by the bot.
'download_impeller_cmake_example': False,
# Upstream URLs for third party dependencies, used in
# determining common ancestor commit for vulnerability scanning
# prefixed with 'upstream_' in order to be identified by parsing tool.
  # The vulnerability database being used in this scan can be browsed
# using this UI https://osv.dev/list
# If a new dependency needs to be added, the upstream (non-mirrored)
# git URL for that dependency should be added to this list
# with the key-value pair being:
# 'upstream_[dep name from last slash and before .git in URL]':'[git URL]'
# example:
"upstream_abseil-cpp": "https://github.com/abseil/abseil-cpp.git",
"upstream_angle": "https://github.com/google/angle.git",
"upstream_archive": "https://github.com/brendan-duncan/archive.git",
"upstream_args": "https://github.com/dart-lang/args.git",
"upstream_async": "https://github.com/dart-lang/async.git",
"upstream_bazel_worker": "https://github.com/dart-lang/bazel_worker.git",
"upstream_benchmark": "https://github.com/google/benchmark.git",
"upstream_boolean_selector": "https://github.com/dart-lang/boolean_selector.git",
"upstream_boringssl_gen": "https://github.com/dart-lang/boringssl_gen.git",
"upstream_boringssl": "https://github.com/openssl/openssl.git",
"upstream_browser_launcher": "https://github.com/dart-lang/browser_launcher.git",
"upstream_buildroot": "https://github.com/flutter/buildroot.git",
"upstream_cli_util": "https://github.com/dart-lang/cli_util.git",
"upstream_clock": "https://github.com/dart-lang/clock.git",
"upstream_collection": "https://github.com/dart-lang/collection.git",
"upstream_colorama": "https://github.com/tartley/colorama.git",
"upstream_convert": "https://github.com/dart-lang/convert.git",
"upstream_crypto": "https://github.com/dart-lang/crypto.git",
"upstream_csslib": "https://github.com/dart-lang/csslib.git",
"upstream_dart_style": "https://github.com/dart-lang/dart_style.git",
"upstream_dartdoc": "https://github.com/dart-lang/dartdoc.git",
"upstream_equatable": "https://github.com/felangel/equatable.git",
"upstream_ffi": "https://github.com/dart-lang/ffi.git",
"upstream_file": "https://github.com/google/file.dart.git",
"upstream_fixnum": "https://github.com/dart-lang/fixnum.git",
"upstream_flatbuffers": "https://github.com/google/flatbuffers.git",
"upstream_fontconfig": "https://gitlab.freedesktop.org/fontconfig/fontconfig.git",
"upstream_freetype2": "https://gitlab.freedesktop.org/freetype/freetype.git",
"upstream_gcloud": "https://github.com/dart-lang/gcloud.git",
"upstream_glfw": "https://github.com/glfw/glfw.git",
"upstream_glob": "https://github.com/dart-lang/glob.git",
"upstream_googleapis": "https://github.com/google/googleapis.dart.git",
"upstream_googletest": "https://github.com/google/googletest.git",
"upstream_gtest-parallel": "https://github.com/google/gtest-parallel.git",
"upstream_harfbuzz": "https://github.com/harfbuzz/harfbuzz.git",
"upstream_html": "https://github.com/dart-lang/html.git",
"upstream_http_multi_server": "https://github.com/dart-lang/http_multi_server.git",
"upstream_http_parser": "https://github.com/dart-lang/http_parser.git",
"upstream_http": "https://github.com/dart-lang/http.git",
"upstream_icu": "https://github.com/unicode-org/icu.git",
"upstream_intl": "https://github.com/dart-lang/intl.git",
"upstream_imgui": "https://github.com/ocornut/imgui.git",
"upstream_inja": "https://github.com/pantor/inja.git",
"upstream_json": "https://github.com/nlohmann/json.git",
"upstream_json_rpc_2": "https://github.com/dart-lang/json_rpc_2.git",
"upstream_libcxx": "https://github.com/llvm-mirror/libcxx.git",
"upstream_libcxxabi": "https://github.com/llvm-mirror/libcxxabi.git",
"upstream_libexpat": "https://github.com/libexpat/libexpat.git",
"upstream_libjpeg-turbo": "https://github.com/libjpeg-turbo/libjpeg-turbo.git",
"upstream_libpng": "https://github.com/glennrp/libpng.git",
"upstream_libtess2": "https://github.com/memononen/libtess2.git",
"upstream_libwebp": "https://chromium.googlesource.com/webm/libwebp.git",
"upstream_libxml": "https://gitlab.gnome.org/GNOME/libxml2.git",
"upstream_leak_tracker": "https://github.com/dart-lang/leak_tracker.git",
"upstream_logging": "https://github.com/dart-lang/logging.git",
"upstream_markdown": "https://github.com/dart-lang/markdown.git",
"upstream_matcher": "https://github.com/dart-lang/matcher.git",
"upstream_mime": "https://github.com/dart-lang/mime.git",
"upstream_mockito": "https://github.com/dart-lang/mockito.git",
"upstream_oauth2": "https://github.com/dart-lang/oauth2.git",
"upstream_ocmock": "https://github.com/erikdoe/ocmock.git",
"upstream_package_config": "https://github.com/dart-lang/package_config.git",
"upstream_packages": "https://github.com/flutter/packages.git",
"upstream_path": "https://github.com/dart-lang/path.git",
"upstream_platform": "https://github.com/google/platform.dart.git",
"upstream_pool": "https://github.com/dart-lang/pool.git",
"upstream_process_runner": "https://github.com/google/process_runner.git",
"upstream_process": "https://github.com/google/process.dart.git",
"upstream_protobuf": "https://github.com/google/protobuf.dart.git",
"upstream_pub_semver": "https://github.com/dart-lang/pub_semver.git",
"upstream_pub": "https://github.com/dart-lang/pub.git",
"upstream_pyyaml": "https://github.com/yaml/pyyaml.git",
"upstream_quiver-dart": "https://github.com/google/quiver-dart.git",
"upstream_rapidjson": "https://github.com/Tencent/rapidjson.git",
"upstream_root_certificates": "https://github.com/dart-lang/root_certificates.git",
"upstream_sdk": "https://github.com/dart-lang/sdk.git",
"upstream_shaderc": "https://github.com/google/shaderc.git",
"upstream_shelf": "https://github.com/dart-lang/shelf.git",
"upstream_skia": "https://skia.googlesource.com/skia.git",
"upstream_source_map_stack_trace": "https://github.com/dart-lang/source_map_stack_trace.git",
"upstream_source_maps": "https://github.com/dart-lang/source_maps.git",
"upstream_source_span": "https://github.com/dart-lang/source_span.git",
"upstream_sqlite": "https://github.com/sqlite/sqlite.git",
"upstream_sse": "https://github.com/dart-lang/sse.git",
"upstream_stack_trace": "https://github.com/dart-lang/stack_trace.git",
"upstream_stb": "https://github.com/nothings/stb.git",
"upstream_stream_channel": "https://github.com/dart-lang/stream_channel.git",
"upstream_string_scanner": "https://github.com/dart-lang/string_scanner.git",
"upstream_SwiftShader": "https://swiftshader.googlesource.com/SwiftShader.git",
"upstream_term_glyph": "https://github.com/dart-lang/term_glyph.git",
"upstream_test_reflective_loader": "https://github.com/dart-lang/test_reflective_loader.git",
"upstream_test": "https://github.com/dart-lang/test.git",
"upstream_tinygltf": "https://github.com/syoyo/tinygltf.git",
"upstream_typed_data": "https://github.com/dart-lang/typed_data.git",
"upstream_usage": "https://github.com/dart-lang/usage.git",
"upstream_vector_math": "https://github.com/google/vector_math.dart.git",
"upstream_Vulkan-Headers": "https://github.com/KhronosGroup/Vulkan-Headers.git",
"upstream_VulkanMemoryAllocator": "https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git",
"upstream_watcher": "https://github.com/dart-lang/watcher.git",
"upstream_web_socket_channel": "https://github.com/dart-lang/web_socket_channel.git",
"upstream_webdev": "https://github.com/dart-lang/webdev.git",
"upstream_webkit_inspection_protocol": "https://github.com/google/webkit_inspection_protocol.dart.git",
"upstream_wuffs-mirror-release-c": "https://github.com/google/wuffs-mirror-release-c.git",
"upstream_yaml_edit": "https://github.com/dart-lang/yaml_edit.git",
"upstream_yaml": "https://github.com/dart-lang/yaml.git",
"upstream_yapf": "https://github.com/google/yapf.git",
"upstream_zlib": "https://github.com/madler/zlib.git",
}
gclient_gn_args_file = 'src/third_party/dart/build/config/gclient_args.gni'
gclient_gn_args = [
'checkout_llvm'
]
# Only these hosts are allowed for dependencies in this DEPS file.
# If you need to add a new host, contact chrome infrastructure team.
allowed_hosts = [
'boringssl.googlesource.com',
'chrome-infra-packages.appspot.com',
'chromium.googlesource.com',
'dart.googlesource.com',
'flutter.googlesource.com',
'fuchsia.googlesource.com',
'llvm.googlesource.com',
'skia.googlesource.com',
'swiftshader.googlesource.com',
]
deps = {
'src': 'https://github.com/flutter/buildroot.git' + '@' + 'b615dd6af4c2e95a388060151c7fd1e429e34d61',
# Fuchsia compatibility
#
# The dependencies in this section should match the layout in the Fuchsia gn
# build. Eventually, we'll manage these dependencies together with Fuchsia
  # and not have to specify hashes.
'src/third_party/rapidjson':
Var('fuchsia_git') + '/third_party/rapidjson' + '@' + 'ef3564c5c8824989393b87df25355baf35ff544b',
'src/third_party/harfbuzz':
Var('flutter_git') + '/third_party/harfbuzz' + '@' + '5f0431572e143056410fbf6b8865be5a78befbdc',
'src/third_party/libcxx':
Var('llvm_git') + '/llvm-project/libcxx' + '@' + '44079a4cc04cdeffb9cfe8067bfb3c276fb2bab0',
'src/third_party/libcxxabi':
Var('llvm_git') + '/llvm-project/libcxxabi' + '@' + '2ce528fb5e0f92e57c97ec3ff53b75359d33af12',
'src/third_party/glfw':
Var('fuchsia_git') + '/third_party/glfw' + '@' + 'dd8a678a66f1967372e5a5e3deac41ebf65ee127',
'src/third_party/shaderc':
Var('github_git') + '/google/shaderc.git' + '@' + '7ea834ecc59258a5c13c3d3e6fa0582bdde7c543',
'src/third_party/vulkan-deps':
Var('chromium_git') + '/vulkan-deps' + '@' + '40b75117a60b11c42a1fb87bf14c0f49bcdb8b3d',
'src/third_party/flatbuffers':
Var('github_git') + '/google/flatbuffers.git' + '@' + '0a80646371179f8a7a5c1f42c31ee1d44dcf6709',
'src/third_party/icu':
Var('chromium_git') + '/chromium/deps/icu.git' + '@' + '985b9a6f70e13f3db741fed121e4dcc3046ad494',
'src/third_party/khronos':
Var('chromium_git') + '/chromium/src/third_party/khronos.git' + '@' + '676d544d2b8f48903b7da9fceffaa534a5613978',
'src/third_party/gtest-parallel':
Var('chromium_git') + '/external/github.com/google/gtest-parallel' + '@' + '38191e2733d7cbaeaef6a3f1a942ddeb38a2ad14',
'src/third_party/benchmark':
Var('github_git') + '/google/benchmark' + '@' + '431abd149fd76a072f821913c0340137cc755f36',
'src/third_party/googletest':
Var('github_git') + '/google/googletest' + '@' + '054a986a8513149e8374fc669a5fe40117ca6b41',
'src/third_party/boringssl':
Var('github_git') + '/dart-lang/boringssl_gen.git' + '@' + Var('dart_boringssl_gen_rev'),
'src/third_party/yapf':
Var('github_git') + '/google/yapf' + '@' + '212c5b5ad8e172d2d914ae454c121c89cccbcb35',
'src/third_party/boringssl/src':
'https://boringssl.googlesource.com/boringssl.git' + '@' + Var('dart_boringssl_rev'),
'src/third_party/perfetto':
Var('fuchsia_git') + "/third_party/android.googlesource.com/platform/external/perfetto"
+ '@' + Var('dart_perfetto_rev'),
'src/third_party/protobuf':
Var('fuchsia_git') + '/third_party/protobuf' + '@' + Var('dart_libprotobuf_rev'),
'src/build/secondary/third_party/protobuf':
Var('fuchsia_git') + '/protobuf-gn' + '@' + Var('dart_protobuf_gn_rev'),
'src/third_party/dart':
Var('dart_git') + '/sdk.git' + '@' + Var('dart_revision'),
# WARNING: Unused Dart dependencies in the list below till "WARNING:" marker are removed automatically - see create_updated_flutter_deps.py.
'src/third_party/dart/third_party/binaryen/src':
Var('chromium_git') + '/external/github.com/WebAssembly/binaryen.git@cdb7aeab40b4c522de20b242019f7e88641445d5',
'src/third_party/dart/third_party/devtools':
{'dep_type': 'cipd', 'packages': [{'package': 'dart/third_party/flutter/devtools', 'version': 'git_revision:acbc179425b4596b7c2ba7d9c4263077f2e18098'}]},
'src/third_party/dart/third_party/pkg/args':
Var('dart_git') + '/args.git@da56b18ebcb600e050bf57b9c1103b1d2a9fb2ff',
'src/third_party/dart/third_party/pkg/async':
Var('dart_git') + '/async.git@b65622afa33c5bfc574ae6b34d5a61f18a98f83c',
'src/third_party/dart/third_party/pkg/bazel_worker':
Var('dart_git') + '/bazel_worker.git@c29d1620b1a935dc88d13a4eec0d9950d3e9df27',
'src/third_party/dart/third_party/pkg/boolean_selector':
Var('dart_git') + '/boolean_selector.git@303635d0262e679fb6a81686724a5dc1dbc850a7',
'src/third_party/dart/third_party/pkg/browser_launcher':
Var('dart_git') + '/browser_launcher.git' + '@' + Var('dart_browser_launcher_rev'),
'src/third_party/dart/third_party/pkg/cli_util':
Var('dart_git') + '/cli_util.git@9b7ce784c2889d62be0d6f66022331cb1e53b5b6',
'src/third_party/dart/third_party/pkg/clock':
Var('dart_git') + '/clock.git' + '@' + Var('dart_clock_rev'),
'src/third_party/dart/third_party/pkg/collection':
Var('dart_git') + '/collection.git' + '@' + Var('dart_collection_rev'),
'src/third_party/dart/third_party/pkg/convert':
Var('dart_git') + '/convert.git@79ee174280149817f9925db0613983aadb46eeca',
'src/third_party/dart/third_party/pkg/crypto':
Var('dart_git') + '/crypto.git@8b704c601f4843050624cd334e3b74f6c17315a4',
'src/third_party/dart/third_party/pkg/csslib':
Var('dart_git') + '/csslib.git@7e91228c2c2428455e5bc63bbf89c7bf0f3401b0',
'src/third_party/dart/third_party/pkg/dart_style':
Var('dart_git') + '/dart_style.git@2956b1a705953f880a5dae9d3a0969df0fc45e99',
'src/third_party/dart/third_party/pkg/dartdoc':
Var('dart_git') + '/dartdoc.git@5fda5eb2e004b6cf7c73fbcffbc246a71119be98',
'src/third_party/dart/third_party/pkg/ffi':
Var('dart_git') + '/ffi.git@e2c01a960b84d1074b0a1849909ae2d269d004be',
'src/third_party/dart/third_party/pkg/file':
Var('dart_git') + '/external/github.com/google/file.dart@5d9a6027756b5846e8f5380f983390f61f564a75',
'src/third_party/dart/third_party/pkg/fixnum':
Var('dart_git') + '/fixnum.git@00fa1207768bd07d04c895cbe0f1fe99af14e727',
'src/third_party/dart/third_party/pkg/glob':
Var('dart_git') + '/glob.git@5b243935154daf53c54981b98f625bace90b2112',
'src/third_party/dart/third_party/pkg/html':
Var('dart_git') + '/html.git@4060496b0443451c38f8b789db2e44c0d7966171',
'src/third_party/dart/third_party/pkg/http':
Var('dart_git') + '/http.git@cad7d609b18512d74cc30ef8ad9faf02d2ea4451',
'src/third_party/dart/third_party/pkg/http_multi_server':
Var('dart_git') + '/http_multi_server.git@aa128cfaf6ef1c9c1ace962ca2dcf6e5dddad441',
'src/third_party/dart/third_party/pkg/http_parser':
Var('dart_git') + '/http_parser.git@c14fbf6aa7ada5e8912eab4581eb26ff4d101452',
'src/third_party/dart/third_party/pkg/intl':
Var('dart_git') + '/intl.git@5d65e3808ce40e6282e40881492607df4e35669f',
'src/third_party/dart/third_party/pkg/json_rpc_2':
Var('dart_git') + '/json_rpc_2.git@509f71eef90ec5afb5486b69dab7fed97b9f1eef',
'src/third_party/dart/third_party/pkg/leak_tracker':
Var('dart_git') + '/leak_tracker.git@098bafcf99a5220e3c352d895d991e163568ee03',
'src/third_party/dart/third_party/pkg/logging':
Var('dart_git') + '/logging.git@521498757ed3eeae151c2d4796404e8947baa04c',
'src/third_party/dart/third_party/pkg/markdown':
Var('dart_git') + '/markdown.git@56e75df897ac01a886358e79124844977aa8157c',
'src/third_party/dart/third_party/pkg/matcher':
Var('dart_git') + '/matcher.git@ce8f40934c90e12992071172795b3bca29fac295',
'src/third_party/dart/third_party/pkg/mime':
Var('dart_git') + '/mime.git@799b398140817fdb134f639d84e91c552e129136',
'src/third_party/dart/third_party/pkg/mockito':
Var('dart_git') + '/mockito.git@f5abf11f8e21e61eebc2081e322bdfcab057e988',
'src/third_party/dart/third_party/pkg/native':
Var('dart_git') + '/native.git@5a1361b6d98a84f8070c97872e3d3587fc0ba435',
'src/third_party/dart/third_party/pkg/package_config':
Var('dart_git') + '/package_config.git@981c49dfec1e3e3e90f336dcd7c225923d2fd321',
'src/third_party/dart/third_party/pkg/path':
Var('dart_git') + '/path.git@7c2324bdb4c75a17de8a3d1e6afe8cc0756ef5f9',
'src/third_party/dart/third_party/pkg/pool':
Var('dart_git') + '/pool.git@77001024a16126cc5718e654ea3e57bbf6e7fac3',
'src/third_party/dart/third_party/pkg/protobuf':
Var('dart_git') + '/protobuf.git' + '@' + Var('dart_protobuf_rev'),
'src/third_party/dart/third_party/pkg/pub':
Var('dart_git') + '/pub.git' + '@' + Var('dart_pub_rev'),
'src/third_party/dart/third_party/pkg/pub_semver':
Var('dart_git') + '/pub_semver.git@028b43506a3f7ec7f7b4673a78ba3da3d5fb138d',
'src/third_party/dart/third_party/pkg/shelf':
Var('dart_git') + '/shelf.git@73edd2b6e18ee50afac57e4e224b8c714b81e66d',
'src/third_party/dart/third_party/pkg/source_map_stack_trace':
Var('dart_git') + '/source_map_stack_trace.git@16e54fd9fc088961773340cb5c3688a089387135',
'src/third_party/dart/third_party/pkg/source_maps':
Var('dart_git') + '/source_maps.git@97c4833100b1bd8ea7e4a2fa1808383007e2d1e8',
'src/third_party/dart/third_party/pkg/source_span':
Var('dart_git') + '/source_span.git@37735aecc5d8c0fb75ed61691bae056510b357bb',
'src/third_party/dart/third_party/pkg/sse':
Var('dart_git') + '/sse.git@8cc5b11aa0c82cd0d89758d20782221cc6ac6dec',
'src/third_party/dart/third_party/pkg/stack_trace':
Var('dart_git') + '/stack_trace.git@4ddd86d5d22aad9a8e8e9a06fd0a6a6271736135',
'src/third_party/dart/third_party/pkg/stream_channel':
Var('dart_git') + '/stream_channel.git@e54234f94da929153b012de2bba75c5246a52538',
'src/third_party/dart/third_party/pkg/string_scanner':
Var('dart_git') + '/string_scanner.git@413b57a3b14fa273e8ed52578edfbe0446084795',
'src/third_party/dart/third_party/pkg/term_glyph':
Var('dart_git') + '/term_glyph.git@423700a3c019dc67f93d2bd6578016a1402506f7',
'src/third_party/dart/third_party/pkg/test':
Var('dart_git') + '/test.git@d0fc4bde2e05e62c75bc3ac7b3de3f510816ea44',
'src/third_party/dart/third_party/pkg/test_reflective_loader':
Var('dart_git') + '/test_reflective_loader.git@0bfaad91ed308ce9da11b48395c8210d7542c16b',
'src/third_party/dart/third_party/pkg/tools':
Var('dart_git') + '/tools.git' + '@' + Var('dart_tools_rev'),
'src/third_party/dart/third_party/pkg/typed_data':
Var('dart_git') + '/typed_data.git@a20be901e11eddcbd6e5735fb01b64d28c94c49d',
'src/third_party/dart/third_party/pkg/usage':
Var('dart_git') + '/usage.git@09bb8472fdafff2c48a19aabbcf57b3af0f43934',
'src/third_party/dart/third_party/pkg/watcher':
Var('dart_git') + '/watcher.git' + '@' + Var('dart_watcher_rev'),
'src/third_party/dart/third_party/pkg/web_socket_channel':
Var('dart_git') + '/web_socket_channel.git@4d1b5438d1bdfc6317bf99fd9d9c6e4edb7e9ec5',
'src/third_party/dart/third_party/pkg/webdev':
Var('dart_git') + '/webdev.git' + '@' + Var('dart_webdev_rev'),
'src/third_party/dart/third_party/pkg/webkit_inspection_protocol':
Var('dart_git') + '/external/github.com/google/webkit_inspection_protocol.dart.git' + '@' + Var('dart_webkit_inspection_protocol_rev'),
'src/third_party/dart/third_party/pkg/yaml':
Var('dart_git') + '/yaml.git@7930148a3d03d7985ce2b53bc5eb2be9c878dab8',
'src/third_party/dart/third_party/pkg/yaml_edit':
Var('dart_git') + '/yaml_edit.git' + '@' + Var('dart_yaml_edit_rev'),
'src/third_party/dart/tools/sdks/dart-sdk':
{'dep_type': 'cipd', 'packages': [{'package': 'dart/dart-sdk/${{platform}}', 'version': 'version:3.1.0-298.0.dev'}]},
# WARNING: end of dart dependencies list that is cleaned up automatically - see create_updated_flutter_deps.py.
# Prebuilt Dart SDK of the same revision as the Dart SDK source checkout
'src/flutter/prebuilts/linux-x64/dart-sdk': {
'packages': [
{
'package': 'flutter/dart-sdk/linux-amd64',
'version': 'git_revision:'+Var('dart_revision')
}
],
'dep_type': 'cipd',
'condition': 'host_os == "linux" and download_dart_sdk'
},
'src/flutter/prebuilts/linux-arm64/dart-sdk': {
'packages': [
{
'package': 'flutter/dart-sdk/linux-arm64',
'version': 'git_revision:'+Var('dart_revision')
}
],
'dep_type': 'cipd',
'condition': 'host_os == "linux" and download_dart_sdk'
},
'src/flutter/prebuilts/macos-x64/dart-sdk': {
'packages': [
{
'package': 'flutter/dart-sdk/mac-amd64',
'version': 'git_revision:'+Var('dart_revision')
}
],
'dep_type': 'cipd',
'condition': 'host_os == "mac" and download_dart_sdk'
},
'src/flutter/prebuilts/macos-arm64/dart-sdk': {
'packages': [
{
'package': 'flutter/dart-sdk/mac-arm64',
'version': 'git_revision:'+Var('dart_revision')
}
],
'dep_type': 'cipd',
'condition': 'host_os == "mac" and download_dart_sdk'
},
'src/flutter/prebuilts/windows-x64/dart-sdk': {
'packages': [
{
'package': 'flutter/dart-sdk/windows-amd64',
'version': 'git_revision:'+Var('dart_revision')
}
],
'dep_type': 'cipd',
'condition': 'host_os == "win" and download_dart_sdk'
},
'src/flutter/prebuilts/windows-arm64/dart-sdk': {
'packages': [
{
'package': 'flutter/dart-sdk/windows-arm64',
'version': 'git_revision:'+Var('dart_revision')
}
],
'dep_type': 'cipd',
'condition': 'host_os == "win" and download_dart_sdk and not release_candidate'
},
'src/third_party/colorama/src':
Var('chromium_git') + '/external/colorama.git' + '@' + '799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8',
'src/third_party/expat':
Var('chromium_git') + '/external/github.com/libexpat/libexpat.git' + '@' + '654d2de0da85662fcc7644a7acd7c2dd2cfb21f0',
'src/third_party/freetype2':
Var('flutter_git') + '/third_party/freetype2' + '@' + '3bea2761290a1cbe7d8f75c1c5a7ad727f826a66',
'src/third_party/root_certificates':
Var('dart_git') + '/root_certificates.git' + '@' + Var('dart_root_certificates_rev'),
'src/third_party/skia':
Var('skia_git') + '/skia.git' + '@' + Var('skia_revision'),
'src/third_party/ocmock':
Var('ocmock_git') + '@' + Var('ocmock_rev'),
'src/third_party/libjpeg-turbo':
Var('fuchsia_git') + '/third_party/libjpeg-turbo' + '@' + '0fb821f3b2e570b2783a94ccd9a2fb1f4916ae9f',
'src/third_party/libpng':
Var('flutter_git') + '/third_party/libpng' + '@' + '9187b6e12756317f6d44fc669ac11dfc262bd192',
'src/third_party/libwebp':
Var('chromium_git') + '/webm/libwebp.git' + '@' + '7dfde712a477e420968732161539011e0fd446cf', # 1.2.0
'src/third_party/wuffs':
Var('skia_git') + '/external/github.com/google/wuffs-mirror-release-c.git' + '@' + '600cd96cf47788ee3a74b40a6028b035c9fd6a61',
'src/third_party/fontconfig/src':
Var('chromium_git') + '/external/fontconfig.git' + '@' + 'c336b8471877371f0190ba06f7547c54e2b890ba',
'src/third_party/fontconfig':
Var('flutter_git') + '/third_party/fontconfig' + '@' + '81c83d510ae3aa75589435ce32a5de05139aacb0',
'src/third_party/libxml':
Var('flutter_git') + '/third_party/libxml' + '@' + 'a143e452b5fc7d872813eeadc8db421694058098',
'src/third_party/zlib':
Var('chromium_git') + '/chromium/src/third_party/zlib.git' + '@' + Var('dart_zlib_rev'),
'src/third_party/inja':
Var('flutter_git') + '/third_party/inja' + '@' + '88bd6112575a80d004e551c98cf956f88ff4d445',
'src/third_party/libtess2':
Var('flutter_git') + '/third_party/libtess2' + '@' + '725e5e08ec8751477565f1d603fd7eb9058c277c',
'src/third_party/sqlite':
Var('flutter_git') + '/third_party/sqlite' + '@' + '0f61bd2023ba94423b4e4c8cfb1a23de1fe6a21c',
'src/third_party/pyyaml':
Var('fuchsia_git') + '/third_party/pyyaml.git' + '@' + '25e97546488eee166b1abb229a27856cecd8b7ac',
'src/third_party/swiftshader':
Var('swiftshader_git') + '/SwiftShader.git' + '@' + '5f9ed9b16931c7155171d31f75004f73f0a3abc8',
'src/third_party/angle':
Var('chromium_git') + '/angle/angle.git' + '@' + 'ebf1e7163216932b0eeb6653da5dac13c3b8ba6a',
'src/third_party/vulkan_memory_allocator':
Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator' + '@' + '7de5cc00de50e71a3aab22dea52fbb7ff4efceb6',
'src/third_party/abseil-cpp':
Var('flutter_git') + '/third_party/abseil-cpp.git' + '@' + '61833f2c057a2b1993d871e8c51156aed1dd4354',
# Dart packages
'src/third_party/pkg/archive':
Var('github_git') + '/brendan-duncan/archive.git' + '@' + '9de7a0544457c6aba755ccb65abb41b0dc1db70d', # 3.1.2
'src/third_party/pkg/equatable':
Var('github_git') + '/felangel/equatable.git' + '@' + '0ba67c72db8bed75877fc1caafa74112ee0bd921', # 2.0.2
'src/third_party/pkg/file':
Var('dart_git') + '/external/github.com/google/file.dart.git' + '@' + 'b2e31cb6ef40b223701dbfa0b907fe58468484d7', # 6.1.4
'src/third_party/pkg/flutter_packages':
Var('github_git') + '/flutter/packages.git' + '@' + '25454e63851fe7933f04d025606e68c1eac4fe0f', # various
'src/third_party/pkg/gcloud':
Var('github_git') + '/dart-lang/gcloud.git' + '@' + 'a5276b85c4714378e84b1fb478b8feeeb686ac26', # 0.8.6-dev
'src/third_party/pkg/googleapis':
Var('github_git') + '/google/googleapis.dart.git' + '@' + '526011f56d98eab183cc6075ee1392e8303e43e2', # various
'src/third_party/pkg/platform':
Var('github_git') + '/google/platform.dart.git' + '@' + '1ffad63428bbd1b3ecaa15926bacfb724023648c', # 3.1.0
'src/third_party/pkg/process':
Var('github_git') + '/google/process.dart.git' + '@' + '0c9aeac86dcc4e3a6cf760b76fed507107e244d5', # 4.2.1
'src/third_party/pkg/process_runner':
Var('github_git') + '/google/process_runner.git' + '@' + 'f24c69efdcaf109168f23d381fa281453d2bc9b1', # 4.1.2
'src/third_party/pkg/quiver':
Var('github_git') + '/google/quiver-dart.git' + '@' + '90b92bee895e507d435012356a8b5c5f17eafa52', # 3.2.1
'src/third_party/pkg/vector_math':
Var('dart_git') + '/external/github.com/google/vector_math.dart.git' + '@' + '0a5fd95449083d404df9768bc1b321b88a7d2eef', # 2.1.0
'src/third_party/imgui':
Var('github_git') + '/ocornut/imgui.git' + '@' + '3ea0fad204e994d669f79ed29dcaf61cd5cb571d',
'src/third_party/tinygltf':
Var('github_git') + '/syoyo/tinygltf.git' + '@' + '9bb5806df4055ac973b970ba5b3e27ce27d98148',
'src/third_party/json':
Var('github_git') + '/nlohmann/json.git' + '@' + '17d9eacd248f58b73f4d1be518ef649fe2295642',
'src/third_party/stb':
Var('github_git') + '/nothings/stb.git' + '@' + '5736b15f7ea0ffb08dd38af21067c314d6a3aae9',
'src/third_party/gradle': {
'packages': [
{
# See tools/gradle/README.md for update instructions.
# Version here means the CIPD tag.
'version': 'version:7.5.1',
'package': 'flutter/gradle'
}
],
'condition': 'download_android_deps',
'dep_type': 'cipd'
},
'src/third_party/android_tools/trace_to_text': {
'packages': [
{
# 25.0 downloads for both mac-amd64 and mac-arm64
# 26.1 is not found with either platform
# 27.1 is the latest release of perfetto
'version': 'git_tag:v25.0',
'package': 'perfetto/trace_to_text/${{platform}}'
}
],
'condition': 'download_android_deps',
'dep_type': 'cipd'
},
'src/third_party/android_tools/google-java-format': {
'packages': [
{
'package': 'flutter/android/google-java-format',
'version': 'version:1.7-1'
}
],
# We want to be able to format these as part of CI, and the CI step that
# checks formatting runs without downloading the rest of the Android build
# tooling. Therefore unlike all the other Android-related tools, we want to
# download this every time.
'dep_type': 'cipd',
},
'src/third_party/android_tools': {
'packages': [
{
'package': 'flutter/android/sdk/all/${{platform}}',
'version': 'version:33v6'
}
],
'condition': 'download_android_deps',
'dep_type': 'cipd',
},
'src/third_party/android_embedding_dependencies': {
'packages': [
{
'package': 'flutter/android/embedding_bundle',
'version': 'last_updated:2023-08-11T11:35:44-0700'
}
],
'condition': 'download_android_deps',
'dep_type': 'cipd',
},
'src/third_party/web_dependencies': {
'packages': [
{
'package': 'flutter/web/canvaskit_bundle',
'version': Var('canvaskit_cipd_instance')
}
],
'dep_type': 'cipd',
},
'src/third_party/java/openjdk': {
'packages': [
{
'package': 'flutter/java/openjdk/${{platform}}',
'version': 'version:11'
}
],
'condition': 'download_android_deps',
'dep_type': 'cipd',
},
'src/flutter/third_party/gn': {
'packages': [
{
'package': 'gn/gn/${{platform}}',
'version': 'git_revision:b79031308cc878488202beb99883ec1f2efd9a6d'
},
],
'dep_type': 'cipd',
},
'src/flutter/third_party/ninja': {
'packages': [
{
'package': 'infra/3pp/tools/ninja/${{platform}}',
'version': 'version:2@1.11.1.chromium.4',
}
],
'dep_type': 'cipd',
},
'src/buildtools/emsdk': {
'url': Var('skia_git') + '/external/github.com/emscripten-core/emsdk.git' + '@' + 'a896e3d066448b3530dbcaa48869fafefd738f57',
'condition': 'download_emsdk',
},
# Clang on mac and linux are expected to typically be the same revision.
# They are separated out so that the autoroller can more easily manage them.
'src/buildtools/mac-x64/clang': {
'packages': [
{
'package': 'fuchsia/third_party/clang/mac-amd64',
'version': Var('clang_version'),
}
],
'condition': 'host_os == "mac"', # On ARM64 Macs too because Goma doesn't support the host-arm64 toolchain.
'dep_type': 'cipd',
},
'src/buildtools/mac-arm64/clang': {
'packages': [
{
'package': 'fuchsia/third_party/clang/mac-arm64',
'version': Var('clang_version'),
}
],
'condition': 'host_os == "mac" and host_cpu == "arm64"',
'dep_type': 'cipd',
},
'src/buildtools/linux-x64/clang': {
'packages': [
{
'package': 'fuchsia/third_party/clang/linux-amd64',
'version': Var('clang_version'),
}
],
'condition': 'host_os == "linux" and host_cpu == "x64"',
'dep_type': 'cipd',
},
'src/buildtools/linux-arm64/clang': {
'packages': [
{
'package': 'fuchsia/third_party/clang/linux-arm64',
'version': Var('clang_version'),
}
],
'condition': 'host_os == "linux" and host_cpu == "arm64"',
'dep_type': 'cipd',
},
'src/buildtools/windows-x64/clang': {
'packages': [
{
'package': 'fuchsia/third_party/clang/windows-amd64',
'version': Var('clang_version'),
}
],
'condition': 'download_windows_deps',
'dep_type': 'cipd',
},
# GOMA
'src/buildtools/mac-x64/goma': {
'packages': [
{
'package': 'fuchsia/third_party/goma/client/mac-amd64',
'version': Var('goma_version'),
}
],
'condition': 'use_cipd_goma and host_os == "mac"',
'dep_type': 'cipd',
},
'src/buildtools/linux-x64/goma': {
'packages': [
{
'package': 'fuchsia/third_party/goma/client/linux-amd64',
'version': Var('goma_version'),
}
],
'condition': 'use_cipd_goma and host_os == "linux"',
'dep_type': 'cipd',
},
'src/buildtools/windows-x64/goma': {
'packages': [
{
'package': 'fuchsia/third_party/goma/client/windows-amd64',
'version': Var('goma_version'),
}
],
'condition': 'use_cipd_goma and download_windows_deps',
'dep_type': 'cipd',
},
# reclient.
'src/buildtools/linux-x64/reclient': {
'packages': [
{
'package': 'infra/rbe/client/${{platform}}',
'version': Var('reclient_version'),
}
],
'condition': 'host_os == "linux" and host_cpu == "x64"',
'dep_type': 'cipd',
},
# gcloud
'src/buildtools/linux-x64/gcloud': {
'packages': [
{
'package': 'infra/3pp/tools/gcloud/${{platform}}',
'version': Var('gcloud_version'),
}
],
'condition': 'host_os == "linux" and host_cpu == "x64"',
'dep_type': 'cipd',
},
# Get the SDK from https://chrome-infra-packages.appspot.com/p/fuchsia/sdk/core at the 'latest' tag
# Get the toolchain from https://chrome-infra-packages.appspot.com/p/fuchsia/clang at the 'goma' tag
'src/fuchsia/sdk/mac': {
'packages': [
{
'package': 'fuchsia/sdk/core/mac-amd64',
'version': 'ynBQWN3XpE2JvSlfdty6QkcHUl5RrpNixZxfi1O09ZUC'
}
],
'condition': 'host_os == "mac" and not download_fuchsia_sdk',
'dep_type': 'cipd',
},
'src/fuchsia/sdk/linux': {
'packages': [
{
'package': 'fuchsia/sdk/core/linux-amd64',
'version': 'z9uQ0mXwjKFQF05XlBv-X8Em1QvkefyGcIJWrSZEm-sC'
}
],
'condition': 'host_os == "linux" and not download_fuchsia_sdk',
'dep_type': 'cipd',
},
'src/third_party/impeller-cmake-example': {
'url': Var('github_git') + '/bdero/impeller-cmake-example.git' + '@' + '142507046e11f593b54d94af328998bbe45a88ef',
'condition': 'download_impeller_cmake_example',
},
# cmake is only used by impeller-cmake-example.
'src/buildtools/mac-x64/cmake': {
'packages': [
{
'package': 'infra/3pp/tools/cmake/mac-amd64',
'version': 'CGpMvZoP962wdEINR9d4OEvEW7ZOv0MPrHNKbBUBS0sC',
}
],
'condition': 'download_impeller_cmake_example and host_os == "mac"',
'dep_type': 'cipd',
},
'src/third_party/google_fonts_for_unit_tests': {
'packages': [
{
'package': 'flutter/flutter_font_fallbacks',
'version': 'ba9a3d16939f9576afa67273198d779270cd768ae2867209ff3d72bab9acd3f6'
}
],
'dep_type': 'cipd',
}
}
recursedeps = [
'src/third_party/vulkan-deps',
]
hooks = [
{
    # Generate the Dart SDK's .dart_tool/package_config.json file.
    'name': 'Generate .dart_tool/package_config.json',
'pattern': '.',
'action': ['python3', 'src/third_party/dart/tools/generate_package_config.py'],
},
{
# Generate the sdk/version file.
'name': 'Generate sdk/version',
'pattern': '.',
'action': ['python3', 'src/third_party/dart/tools/generate_sdk_version_file.py'],
},
{
# Update the Windows toolchain if necessary.
'name': 'win_toolchain',
'condition': 'download_windows_deps',
'pattern': '.',
'action': ['python3', 'src/build/vs_toolchain.py', 'update'],
},
{
# Ensure that we don't accidentally reference any .pyc files whose
# corresponding .py files have already been deleted.
'name': 'remove_stale_pyc_files',
'pattern': 'src/tools/.*\\.py',
'action': [
'python3',
'src/tools/remove_stale_pyc_files.py',
'src/tools',
],
},
{
'name': 'dia_dll',
'pattern': '.',
'condition': 'download_windows_deps',
'action': [
'python3',
'src/flutter/tools/dia_dll.py',
],
},
{
'name': 'linux_sysroot_x64',
'pattern': '.',
'condition': 'download_linux_deps',
'action': [
'python3',
'src/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=x64'],
},
{
'name': 'linux_sysroot_arm64',
'pattern': '.',
'condition': 'download_linux_deps',
'action': [
'python3',
'src/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=arm64'],
},
{
'name': 'pub get --offline',
'pattern': '.',
'action': [
'python3',
'src/flutter/tools/pub_get_offline.py',
]
},
{
'name': 'Download Fuchsia SDK',
'pattern': '.',
'condition': 'download_fuchsia_sdk',
'action': [
'python3',
'src/flutter/tools/download_fuchsia_sdk.py',
'--fail-loudly',
'--verbose',
'--host-os',
Var('host_os'),
'--fuchsia-sdk-path',
Var('fuchsia_sdk_path'),
]
},
{
'name': 'Activate Emscripten SDK',
'pattern': '.',
'condition': 'download_emsdk',
'action': [
'python3',
'src/flutter/tools/activate_emsdk.py',
]
},
{
'name': 'Start compiler proxy',
'pattern': '.',
'condition': 'use_cipd_goma and host_os == "mac"',
'action': [
'python3',
'src/buildtools/mac-x64/goma/goma_ctl.py',
'ensure_start'
]
},
{
'name': 'Start compiler proxy',
'pattern': '.',
'condition': 'use_cipd_goma and host_os == "linux"',
'action': [
'python3',
'src/buildtools/linux-x64/goma/goma_ctl.py',
'ensure_start'
]
},
{
'name': 'Start compiler proxy',
'pattern': '.',
'condition': 'use_cipd_goma and download_windows_deps',
'action': [
'python3',
'src/buildtools/windows-x64/goma/goma_ctl.py',
'ensure_start'
]
},
{
'name': 'Setup githooks',
'pattern': '.',
'condition': 'setup_githooks',
'action': [
'python3',
'src/flutter/tools/githooks/setup.py',
]
},
{
'name': 'impeller-cmake-example submodules',
'pattern': '.',
'condition': 'download_impeller_cmake_example',
'action': [
'python3',
'src/flutter/ci/impeller_cmake_build_test.py',
'--path',
'third_party/impeller-cmake-example',
'--setup',
]
}
]
|
|
fa63fe2eaee1c144c5dbba12328da0cf9a72b735
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/tests/terraform/checks/resource/aws/test_ELBCrossZoneEnable.py
|
2ccf26d367a584606f5d2b9da59cd892b206f4eb
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 5,294
|
py
|
test_ELBCrossZoneEnable.py
|
import unittest
import hcl2
from checkov.terraform.checks.resource.aws.ELBCrossZoneEnable import check
from checkov.common.models.enums import CheckResult
class TestELBCrossZoneEnable(unittest.TestCase):
def test_failure(self):
hcl_res = hcl2.loads("""
resource "aws_elb" "test_failed" {
name = "foobar-terraform-elb"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
access_logs {
bucket = "foo"
bucket_prefix = "bar"
interval = 60
}
listener {
instance_port = 8000
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
listener {
instance_port = 8000
instance_protocol = "http"
lb_port = 443
lb_protocol = "https"
ssl_certificate_id = "arn:aws:iam::123456789012:server-certificate/certName"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "HTTP:8000/"
interval = 30
}
instances = [aws_instance.foo.id]
idle_timeout = 400
connection_draining = true
connection_draining_timeout = 400
cross_zone_load_balancing = false
}
""")
resource_conf = hcl_res['resource'][0]['aws_elb']['test_failed']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success_missing_attribute(self):
hcl_res = hcl2.loads("""
resource "aws_elb" "test_success" {
name = "foobar-terraform-elb"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
access_logs {
bucket = "foo"
bucket_prefix = "bar"
interval = 60
}
listener {
instance_port = 8000
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
listener {
instance_port = 8000
instance_protocol = "http"
lb_port = 443
lb_protocol = "https"
ssl_certificate_id = "arn:aws:iam::123456789012:server-certificate/certName"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "HTTP:8000/"
interval = 30
}
instances = [aws_instance.foo.id]
idle_timeout = 400
connection_draining = true
connection_draining_timeout = 400
}
""")
resource_conf = hcl_res['resource'][0]['aws_elb']['test_success']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_success(self):
hcl_res = hcl2.loads("""
resource "aws_elb" "test_success" {
name = "foobar-terraform-elb"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
access_logs {
bucket = "foo"
bucket_prefix = "bar"
interval = 60
}
listener {
instance_port = 8000
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
listener {
instance_port = 8000
instance_protocol = "http"
lb_port = 443
lb_protocol = "https"
ssl_certificate_id = "arn:aws:iam::123456789012:server-certificate/certName"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "HTTP:8000/"
interval = 30
}
instances = [aws_instance.foo.id]
cross_zone_load_balancing = true
idle_timeout = 400
connection_draining = true
connection_draining_timeout = 400
}
""")
resource_conf = hcl_res['resource'][0]['aws_elb']['test_success']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
|
c1a76c74707f78af7a836dcd02d1f04dd8c2cc0d
|
0032d988541e85c47b5034c20ecf88220dde5a95
|
/openbook_circles/tests/test_views.py
|
4887d8aa38d6706e130a0f710a207d8ece05f881
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
OkunaOrg/okuna-api
|
eabd37fef9d2be59b590ed8d72bee084ac377997
|
f87d8e80d2f182c01dbce68155ded0078ee707e4
|
refs/heads/master
| 2022-02-04T21:31:10.577601
| 2021-12-28T18:20:39
| 2021-12-28T18:20:39
| 151,052,951
| 185
| 92
|
MIT
| 2022-01-13T01:00:40
| 2018-10-01T07:44:46
|
Python
|
UTF-8
|
Python
| false
| false
| 11,985
|
py
|
test_views.py
|
# Create your tests here.
from django.urls import reverse
from rest_framework import status
from openbook_common.tests.models import OpenbookAPITestCase
from mixer.backend.django import mixer
from openbook_auth.models import User
from faker import Faker
import logging
import json
from openbook_circles.models import Circle
from openbook_common.tests.helpers import make_user, make_authentication_headers_for_user, make_fake_circle_name
logger = logging.getLogger(__name__)
fake = Faker()
class CirclesAPITests(OpenbookAPITestCase):
"""
CirclesAPI
"""
def test_create_circle(self):
"""
should be able to create a circle and return 201
"""
user = make_user()
auth_token = user.auth_token.key
circle_name = fake.name()
circle_color = fake.hex_color()
headers = {'HTTP_AUTHORIZATION': 'Token %s' % auth_token}
data = {
'name': circle_name,
'color': circle_color
}
url = self._get_url()
response = self.client.put(url, data, **headers, format='multipart')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(Circle.objects.filter(name=circle_name, color=circle_color, creator_id=user.pk).count() == 1)
def test_retrieve_own_circles(self):
"""
        should retrieve all own circles and return 200
"""
user = make_user()
auth_token = user.auth_token.key
headers = {'HTTP_AUTHORIZATION': 'Token %s' % auth_token}
circles = mixer.cycle(5).blend(Circle, creator=user)
circles_ids = [circle.pk for circle in circles]
# We also expect to get back the default circles
circles_ids.append(user.connections_circle_id)
url = self._get_url()
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_circles = json.loads(response.content)
self.assertEqual(len(response_circles), len(circles_ids))
for response_circle in response_circles:
response_circle_id = response_circle.get('id')
self.assertIn(response_circle_id, circles_ids)
def _get_url(self):
return reverse('circles')
class CircleItemAPITests(OpenbookAPITestCase):
"""
CircleItemAPI
"""
fixtures = [
'openbook_circles/fixtures/circles.json'
]
def test_delete_own_circle(self):
"""
should be able to delete an own circle and return 200
"""
user = make_user()
auth_token = user.auth_token.key
headers = {'HTTP_AUTHORIZATION': 'Token %s' % auth_token}
circle = mixer.blend(Circle, creator=user)
circle_id = circle.pk
url = self._get_url(circle_id)
response = self.client.delete(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(Circle.objects.filter(id=circle_id).count() == 0)
def test_cannot_delete_world_circle(self):
"""
        should not be able to delete the world circle and return 400
"""
user = make_user()
auth_token = user.auth_token.key
headers = {'HTTP_AUTHORIZATION': 'Token %s' % auth_token}
circle_id = Circle.get_world_circle_id()
url = self._get_url(circle_id)
response = self.client.delete(url, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(Circle.objects.filter(id=circle_id).count() == 1)
def test_cannot_delete_connections_circle(self):
"""
should not be able to delete own connections circle and return 400
"""
user = make_user()
auth_token = user.auth_token.key
headers = {'HTTP_AUTHORIZATION': 'Token %s' % auth_token}
circle_id = user.connections_circle_id
url = self._get_url(circle_id)
response = self.client.delete(url, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(Circle.objects.filter(id=circle_id).count() == 1)
def test_cannot_delete_other_user_circle(self):
"""
should not be able to delete another user's circle and return 400
"""
user = make_user()
auth_token = user.auth_token.key
other_user = make_user()
headers = {'HTTP_AUTHORIZATION': 'Token %s' % auth_token}
circle = mixer.blend(Circle, creator=other_user)
circle_id = circle.pk
url = self._get_url(circle_id)
response = self.client.delete(url, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(Circle.objects.filter(id=circle_id).count() == 1)
def test_can_update_own_circle(self):
"""
should be able to update own circle and return 200
"""
user = make_user()
auth_token = user.auth_token.key
headers = {'HTTP_AUTHORIZATION': 'Token %s' % auth_token}
circle_color = fake.hex_color()
circle = mixer.blend(Circle, creator=user, color=circle_color)
circle_id = circle.pk
new_circle_name = fake.name()
new_circle_color = fake.hex_color()
data = {
'name': new_circle_name,
'color': new_circle_color
}
url = self._get_url(circle_id)
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(Circle.objects.filter(name=new_circle_name, id=circle_id, color=new_circle_color).count() == 1)
def test_can_update_own_circle_users(self):
"""
        should be able to update the users of an own circle and return 200
"""
user = make_user()
circle = mixer.blend(Circle, creator=user)
circle_id = circle.pk
users_to_connect_with_in_circle = 4
for i in range(users_to_connect_with_in_circle):
user_to_connect_with = make_user()
user.connect_with_user_with_id(user_to_connect_with.pk, circles_ids=[circle_id])
new_users_to_connect_with_in_circle_amount = 2
new_users_to_connect_with_in_circle = []
new_users_to_connect_with_in_circle_usernames = []
for i in range(new_users_to_connect_with_in_circle_amount):
user_to_connect_with = make_user()
new_users_to_connect_with_in_circle.append(user_to_connect_with)
new_users_to_connect_with_in_circle_usernames.append(user_to_connect_with.username)
data = {
'usernames': ','.join(map(str, new_users_to_connect_with_in_circle_usernames))
}
url = self._get_url(circle_id)
headers = make_authentication_headers_for_user(user)
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for new_user_to_connect_with in new_users_to_connect_with_in_circle:
self.assertTrue(
user.is_connected_with_user_with_id_in_circle_with_id(new_user_to_connect_with.pk, circle_id))
def test_can_update_own_circle_users_to_none(self):
"""
        should be able to remove all users from an own circle and return 200
"""
user = make_user()
circle = mixer.blend(Circle, creator=user)
circle_id = circle.pk
users_to_connect_with_in_circle = 4
for i in range(users_to_connect_with_in_circle):
user_to_connect_with = make_user()
user.connect_with_user_with_id(user_to_connect_with.pk, circles_ids=[circle_id])
data = {
'usernames': ''
}
url = self._get_url(circle_id)
headers = make_authentication_headers_for_user(user)
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
circle.refresh_from_db()
self.assertEqual(len(circle.users), 0)
def test_cannot_update_other_user_circle(self):
"""
should not be able to update the circle of another user and return 400
"""
user = make_user()
auth_token = user.auth_token.key
headers = {'HTTP_AUTHORIZATION': 'Token %s' % auth_token}
another_user = make_user()
circle_color = fake.hex_color()
circle = mixer.blend(Circle, creator=another_user, color=circle_color)
circle_id = circle.pk
new_circle_name = fake.name()
new_circle_color = fake.hex_color()
data = {
'name': new_circle_name,
'color': new_circle_color
}
url = self._get_url(circle_id)
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(Circle.objects.filter(name=new_circle_name, id=circle_id, color=new_circle_color).count() == 0)
def test_cannot_update_world_circle(self):
"""
should not be able to update own world circle and return 400
"""
user = make_user()
auth_token = user.auth_token.key
headers = {'HTTP_AUTHORIZATION': 'Token %s' % auth_token}
circle_id = Circle.get_world_circle_id()
new_circle_name = fake.name()
new_circle_color = fake.hex_color()
data = {
'name': new_circle_name,
'color': new_circle_color
}
url = self._get_url(circle_id)
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(Circle.objects.filter(name=new_circle_name, id=circle_id, color=new_circle_color).count() == 0)
def test_cannot_update_connections_circle(self):
"""
should not be able to update own connections circle and return 400
"""
user = make_user()
auth_token = user.auth_token.key
headers = {'HTTP_AUTHORIZATION': 'Token %s' % auth_token}
circle_id = user.connections_circle_id
new_circle_name = fake.name()
new_circle_color = fake.hex_color()
data = {
'name': new_circle_name,
'color': new_circle_color
}
url = self._get_url(circle_id)
response = self.client.patch(url, data, **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(Circle.objects.filter(name=new_circle_name, id=circle_id, color=new_circle_color).count() == 0)
def _get_url(self, circle_id):
return reverse('circle', kwargs={
'circle_id': circle_id
})
class CircleNameCheckAPITests(OpenbookAPITestCase):
"""
CircleNameCheckAPI
"""
def test_circle_name_not_taken(self):
"""
should return status 202 if circle name is not taken.
"""
user = make_user()
circle_name = make_fake_circle_name()
request_data = {'name': circle_name}
url = self._get_url()
headers = make_authentication_headers_for_user(user)
response = self.client.post(url, request_data, format='json', **headers)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_circle_name_taken(self):
"""
        should return status 400 if the circle name is taken
"""
user = make_user()
color = fake.hex_color()
circle = user.create_circle(name=make_fake_circle_name(), color=color)
request_data = {'name': circle.name}
url = self._get_url()
headers = make_authentication_headers_for_user(user)
response = self.client.post(url, request_data, format='json', **headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def _get_url(self):
return reverse('circle-name-check')
|
43317a326ab04b50243d85ee35e71193a45b364a
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-OSAKit/Lib/OSAKit/_metadata.py
|
55fca43b4c2fb9d754f73d9871bac927ff525b4e
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,631
|
py
|
_metadata.py
|
# This file is generated by objective.metadata
#
# Last update: Sun Feb 20 19:11:25 2022
#
# flake8: noqa
import objc, sys
from typing import NewType
if sys.maxsize > 2**32:
def sel32or64(a, b):
return b
else:
def sel32or64(a, b):
return a
if objc.arch == "arm64":
def selAorI(a, b):
return a
else:
def selAorI(a, b):
return b
misc = {}
constants = """$OSAScriptErrorAppAddressKey$OSAScriptErrorAppName$OSAScriptErrorAppNameKey$OSAScriptErrorBriefMessage$OSAScriptErrorBriefMessageKey$OSAScriptErrorExpectedTypeKey$OSAScriptErrorMessage$OSAScriptErrorMessageKey$OSAScriptErrorNumber$OSAScriptErrorNumberKey$OSAScriptErrorOffendingObjectKey$OSAScriptErrorPartialResultKey$OSAScriptErrorRange$OSAScriptErrorRangeKey$OSAStorageApplicationBundleType$OSAStorageApplicationType$OSAStorageScriptBundleType$OSAStorageScriptType$OSAStorageTextType$"""
enums = """$OSACompileIntoContext@2$OSADontSetScriptLocation@16777216$OSANull@0$OSAPreventGetSource@1$OSAScriptRecording@2$OSAScriptRunning@1$OSAScriptStopped@0$OSAShowStartupScreen@536870912$OSAStayOpenApplet@268435456$OSASupportsAECoercion@8$OSASupportsAESending@16$OSASupportsCompiling@2$OSASupportsConvenience@64$OSASupportsDialects@128$OSASupportsEventHandling@256$OSASupportsGetSource@4$OSASupportsRecording@32$"""
misc.update(
{
"OSALanguageFeatures": NewType("OSALanguageFeatures", int),
"OSAStorageOptions": NewType("OSAStorageOptions", int),
"OSAScriptState": NewType("OSAScriptState", int),
}
)
misc.update({})
r = objc.registerMetaDataForSelector
objc._updatingMetadata(True)
try:
r(b"OSALanguage", b"isThreadSafe", {"retval": {"type": b"Z"}})
r(
b"OSAScript",
b"compileAndReturnError:",
{"retval": {"type": b"Z"}, "arguments": {2: {"type_modifier": b"o"}}},
)
r(
b"OSAScript",
b"compiledDataForType:usingStorageOptions:error:",
{"arguments": {4: {"type_modifier": b"o"}}},
)
r(
b"OSAScript",
b"executeAndReturnDisplayValue:error:",
{"arguments": {2: {"type_modifier": b"o"}, 3: {"type_modifier": b"o"}}},
)
r(
b"OSAScript",
b"executeAndReturnError:",
{"arguments": {2: {"type_modifier": b"o"}}},
)
r(
b"OSAScript",
b"executeAppleEvent:error:",
{"arguments": {3: {"type_modifier": b"o"}}},
)
r(
b"OSAScript",
b"executeHandlerWithName:arguments:error:",
{"arguments": {4: {"type_modifier": b"o"}}},
)
r(
b"OSAScript",
b"initWithCompiledData:error:",
{"deprecated": 1006, "arguments": {3: {"type_modifier": b"o"}}},
)
r(
b"OSAScript",
b"initWithCompiledData:fromURL:usingStorageOptions:error:",
{"arguments": {5: {"type_modifier": b"o"}}},
)
r(
b"OSAScript",
b"initWithContentsOfURL:error:",
{"arguments": {3: {"type_modifier": b"o"}}},
)
r(
b"OSAScript",
b"initWithContentsOfURL:language:error:",
{"deprecated": 1006, "arguments": {4: {"type_modifier": b"o"}}},
)
r(
b"OSAScript",
b"initWithContentsOfURL:languageInstance:usingStorageOptions:error:",
{"arguments": {5: {"type_modifier": b"o"}}},
)
r(
b"OSAScript",
b"initWithScriptDataDescriptor:fromURL:languageInstance:usingStorageOptions:error:",
{"arguments": {6: {"type_modifier": b"o"}}},
)
r(b"OSAScript", b"isCompiled", {"retval": {"type": b"Z"}})
r(
b"OSAScript",
b"writeToURL:ofType:error:",
{"retval": {"type": b"Z"}, "arguments": {4: {"type_modifier": b"o"}}},
)
r(
b"OSAScript",
b"writeToURL:ofType:usingStorageOptions:error:",
{"retval": {"type": b"Z"}, "arguments": {5: {"type_modifier": b"o"}}},
)
r(b"OSAScriptController", b"isCompiling", {"retval": {"type": b"Z"}})
r(b"OSAScriptView", b"indentsWrappedLines", {"retval": {"type": b"Z"}})
r(b"OSAScriptView", b"setIndentsWrappedLines:", {"arguments": {2: {"type": b"Z"}}})
r(b"OSAScriptView", b"setUsesScriptAssistant:", {"arguments": {2: {"type": b"Z"}}})
r(b"OSAScriptView", b"setUsesTabs:", {"arguments": {2: {"type": b"Z"}}})
r(b"OSAScriptView", b"setWrapsLines:", {"arguments": {2: {"type": b"Z"}}})
r(b"OSAScriptView", b"usesScriptAssistant", {"retval": {"type": b"Z"}})
r(b"OSAScriptView", b"usesTabs", {"retval": {"type": b"Z"}})
r(b"OSAScriptView", b"wrapsLines", {"retval": {"type": b"Z"}})
finally:
objc._updatingMetadata(False)
expressions = {}
# END OF FILE
|
2df2a176fd1f3f0f95b50119c9dc0072fd1c8be1
|
83963c19fd120dcc7498b726cc56de7fbb900a47
|
/osxphotos/photosdb/_photosdb_process_scoreinfo.py
|
4b1798815816cb69119910daa2c1f84e86d9cbb7
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
RhetTbull/osxphotos
|
55ad4f1257bcd26bb3fbadde6ce5dd59c0917354
|
2cb5a4d18a27be6ccf68f5f35abd39418d238016
|
refs/heads/main
| 2023-09-02T18:11:06.227191
| 2023-09-02T16:06:51
| 2023-09-02T16:06:51
| 192,160,985
| 1,287
| 93
|
MIT
| 2023-09-14T14:10:58
| 2019-06-16T07:07:49
|
Python
|
UTF-8
|
Python
| false
| false
| 6,082
|
py
|
_photosdb_process_scoreinfo.py
|
""" Methods for PhotosDB to add Photos 5 photo score info
ref: https://simonwillison.net/2020/May/21/dogsheep-photos/
"""
import logging
from .._constants import _DB_TABLE_NAMES, _PHOTOS_4_VERSION
from ..sqlite_utils import sqlite_open_ro
from .photosdb_utils import get_db_version
"""
This module should be imported in the class definition of PhotosDB in photosdb.py
Do not import this module directly
This module adds the following method to PhotosDB:
_process_scoreinfo: process photo score info
The following data structures are added to PhotosDB
self._db_scoreinfo_uuid
These methods only work on Photos 5 databases. They raise NotImplementedError on earlier library versions.
"""
def _process_scoreinfo(self):
"""Process computed photo scores
Note: Only works on Photos version == 5.0
"""
# _db_scoreinfo_uuid is dict in form {uuid: {score values}}
self._db_scoreinfo_uuid = {}
if self._db_version <= _PHOTOS_4_VERSION:
        raise NotImplementedError(
            "score info not implemented for this database version"
        )
else:
_process_scoreinfo_5(self)
def _process_scoreinfo_5(photosdb):
"""Process computed photo scores for Photos 5 databases
Args:
photosdb: an OSXPhotosDB instance
"""
db = photosdb._tmp_db
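    # The asset table name differs between Photos library versions (e.g. ZGENERICASSET vs ZASSET),
    # so it is looked up by version here.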
asset_table = _DB_TABLE_NAMES[photosdb._photos_ver]["ASSET"]
(conn, cursor) = sqlite_open_ro(db)
result = cursor.execute(
f"""
SELECT
{asset_table}.ZUUID,
{asset_table}.ZOVERALLAESTHETICSCORE,
{asset_table}.ZCURATIONSCORE,
{asset_table}.ZPROMOTIONSCORE,
{asset_table}.ZHIGHLIGHTVISIBILITYSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZBEHAVIORALSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZFAILURESCORE,
ZCOMPUTEDASSETATTRIBUTES.ZHARMONIOUSCOLORSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZIMMERSIVENESSSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZINTERACTIONSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZINTERESTINGSUBJECTSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZINTRUSIVEOBJECTPRESENCESCORE,
ZCOMPUTEDASSETATTRIBUTES.ZLIVELYCOLORSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZLOWLIGHT,
ZCOMPUTEDASSETATTRIBUTES.ZNOISESCORE,
ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTCAMERATILTSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTCOMPOSITIONSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTLIGHTINGSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTPATTERNSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTPERSPECTIVESCORE,
ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTPOSTPROCESSINGSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTREFLECTIONSSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTSYMMETRYSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZSHARPLYFOCUSEDSUBJECTSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZTASTEFULLYBLURREDSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZWELLCHOSENSUBJECTSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZWELLFRAMEDSUBJECTSCORE,
ZCOMPUTEDASSETATTRIBUTES.ZWELLTIMEDSHOTSCORE
FROM {asset_table}
JOIN ZCOMPUTEDASSETATTRIBUTES ON ZCOMPUTEDASSETATTRIBUTES.ZASSET = {asset_table}.Z_PK
"""
)
# 0 ZGENERICASSET.ZUUID,
# 1 ZGENERICASSET.ZOVERALLAESTHETICSCORE,
# 2 ZGENERICASSET.ZCURATIONSCORE,
# 3 ZGENERICASSET.ZPROMOTIONSCORE,
# 4 ZGENERICASSET.ZHIGHLIGHTVISIBILITYSCORE,
# 5 ZCOMPUTEDASSETATTRIBUTES.ZBEHAVIORALSCORE,
# 6 ZCOMPUTEDASSETATTRIBUTES.ZFAILURESCORE,
# 7 ZCOMPUTEDASSETATTRIBUTES.ZHARMONIOUSCOLORSCORE,
# 8 ZCOMPUTEDASSETATTRIBUTES.ZIMMERSIVENESSSCORE,
# 9 ZCOMPUTEDASSETATTRIBUTES.ZINTERACTIONSCORE,
# 10 ZCOMPUTEDASSETATTRIBUTES.ZINTERESTINGSUBJECTSCORE,
# 11 ZCOMPUTEDASSETATTRIBUTES.ZINTRUSIVEOBJECTPRESENCESCORE,
# 12 ZCOMPUTEDASSETATTRIBUTES.ZLIVELYCOLORSCORE,
# 13 ZCOMPUTEDASSETATTRIBUTES.ZLOWLIGHT,
# 14 ZCOMPUTEDASSETATTRIBUTES.ZNOISESCORE,
# 15 ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTCAMERATILTSCORE,
# 16 ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTCOMPOSITIONSCORE,
# 17 ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTLIGHTINGSCORE,
# 18 ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTPATTERNSCORE,
# 19 ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTPERSPECTIVESCORE,
# 20 ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTPOSTPROCESSINGSCORE,
# 21 ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTREFLECTIONSSCORE,
# 22 ZCOMPUTEDASSETATTRIBUTES.ZPLEASANTSYMMETRYSCORE,
# 23 ZCOMPUTEDASSETATTRIBUTES.ZSHARPLYFOCUSEDSUBJECTSCORE,
# 24 ZCOMPUTEDASSETATTRIBUTES.ZTASTEFULLYBLURREDSCORE,
# 25 ZCOMPUTEDASSETATTRIBUTES.ZWELLCHOSENSUBJECTSCORE,
# 26 ZCOMPUTEDASSETATTRIBUTES.ZWELLFRAMEDSUBJECTSCORE,
# 27 ZCOMPUTEDASSETATTRIBUTES.ZWELLTIMEDSHOTSCORE
for row in result:
uuid = row[0]
scores = {"uuid": uuid}
scores["overall_aesthetic"] = row[1]
scores["curation"] = row[2]
scores["promotion"] = row[3]
scores["highlight_visibility"] = row[4]
scores["behavioral"] = row[5]
scores["failure"] = row[6]
scores["harmonious_color"] = row[7]
scores["immersiveness"] = row[8]
scores["interaction"] = row[9]
scores["interesting_subject"] = row[10]
scores["intrusive_object_presence"] = row[11]
scores["lively_color"] = row[12]
scores["low_light"] = row[13]
scores["noise"] = row[14]
scores["pleasant_camera_tilt"] = row[15]
scores["pleasant_composition"] = row[16]
scores["pleasant_lighting"] = row[17]
scores["pleasant_pattern"] = row[18]
scores["pleasant_perspective"] = row[19]
scores["pleasant_post_processing"] = row[20]
scores["pleasant_reflection"] = row[21]
scores["pleasant_symmetry"] = row[22]
scores["sharply_focused_subject"] = row[23]
scores["tastefully_blurred"] = row[24]
scores["well_chosen_subject"] = row[25]
scores["well_framed_subject"] = row[26]
scores["well_timed_shot"] = row[27]
photosdb._db_scoreinfo_uuid[uuid] = scores
conn.close()
|
28fbb164dfd68e25d46fea7268239eca1651789e
|
49c0a28c44d4bbe917325d955f399a3c317b8e5e
|
/backend/light_head_rcnn/lib/lib_kernel/lib_roi_align/roi_align_op_test.py
|
005f8ea9c77e23ea624d4c8a33551a862575f424
|
[
"Apache-2.0"
] |
permissive
|
zju3dv/mvpose
|
4eac14998dd1ba6959c1a4bda13c2d77c73694c7
|
38b958f423f2de2bf7562f5a386c27440eab8c53
|
refs/heads/master
| 2022-07-27T02:11:12.777830
| 2021-07-18T07:57:33
| 2021-07-18T07:57:33
| 165,477,360
| 470
| 95
|
Apache-2.0
| 2021-09-01T14:27:57
| 2019-01-13T07:19:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
roi_align_op_test.py
|
import tensorflow as tf
import numpy as np
import roi_align_op
import roi_align_op_grad
import pdb
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "7"
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
array = np.random.rand(32, 100, 100, 3)
#array = np.ones((32, 100, 100, 3))
data = tf.convert_to_tensor(array, dtype=tf.float32)
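# Each ROI row is assumed to be [batch_index, x1, y1, x2, y2] in feature-map coordinates.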
rois = tf.convert_to_tensor([[0, 10, 20, 30, 40], [31, 10, 20, 30, 40]], dtype=tf.float32)
W = weight_variable([3, 3, 3, 1])
h = conv2d(data, W)
[y, argmax] = roi_align_op.roi_align(data, rois, 6, 6, 2, 2, 1.0)
#pdb.set_trace()
y_data = tf.convert_to_tensor(np.ones((2, 6, 6, 1)), dtype=tf.float32)
print(y_data, y, argmax)
# Minimize the mean squared errors.
#loss = tf.reduce_mean(tf.square(y - y_data))
#optimizer = tf.train.GradientDescentOptimizer(0.5)
#train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
# Launch the graph.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
sess.run(init)
a, b = sess.run([y, argmax])
from IPython import embed; embed()
print(sess.run(y))
#pdb.set_trace()
#for step in range(10):
# sess.run(train)
# print(step, sess.run(W))
# print(sess.run(y))
#with tf.device('/gpu:0'):
# result = module.roi_pool(data, rois, 1, 1, 1.0/1)
# print result.eval()
#with tf.device('/cpu:0'):
# run(init)
|
4dc64b481e5d73a8751ba3e7a140dcbfffe92243
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/ZhimaCreditPayafteruseCreditagreementTransferResponse.py
|
472c37efff47062038d3e3225fcce6287f0c1b93
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
ZhimaCreditPayafteruseCreditagreementTransferResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class ZhimaCreditPayafteruseCreditagreementTransferResponse(AlipayResponse):
def __init__(self):
super(ZhimaCreditPayafteruseCreditagreementTransferResponse, self).__init__()
self._credit_agreement_id = None
self._out_agreement_no = None
@property
def credit_agreement_id(self):
return self._credit_agreement_id
@credit_agreement_id.setter
def credit_agreement_id(self, value):
self._credit_agreement_id = value
@property
def out_agreement_no(self):
return self._out_agreement_no
@out_agreement_no.setter
def out_agreement_no(self, value):
self._out_agreement_no = value
def parse_response_content(self, response_content):
response = super(ZhimaCreditPayafteruseCreditagreementTransferResponse, self).parse_response_content(response_content)
if 'credit_agreement_id' in response:
self.credit_agreement_id = response['credit_agreement_id']
if 'out_agreement_no' in response:
self.out_agreement_no = response['out_agreement_no']
|
7f827d127488b0973bf819a808fc8a78925cc33e
|
219a938ab3b084f8a9a0c4e0fe552ae40a42b991
|
/examples/spark-app-demo/k8s/data_cleanup.py
|
9d0c5d09047ef44d0db9382f409044d42d36cc24
|
[
"LGPL-2.0-or-later",
"Apache-2.0"
] |
permissive
|
apache/incubator-liminal
|
0b9c510ed30826fcd416a5b3aaf991d675b0347a
|
57246ec472dc79529a68b2c6edd76e5fef677f1b
|
refs/heads/master
| 2023-08-31T09:25:43.704740
| 2023-01-22T08:13:30
| 2023-01-22T08:13:30
| 271,182,596
| 141
| 44
|
Apache-2.0
| 2023-09-12T19:01:36
| 2020-06-10T04:57:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,408
|
py
|
data_cleanup.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import pyspark.sql.functions as F
from pyspark.ml import Pipeline
from pyspark.ml.feature import StandardScaler, StringIndexer, VectorAssembler
from pyspark.ml.functions import vector_to_array
from pyspark.sql import SparkSession
def transform(data):
columns_to_scale = data.columns[:-1]
vectorizer = VectorAssembler(inputCols=columns_to_scale, outputCol="features")
scaler = StandardScaler(inputCol="features", outputCol="scaled_features", withStd=True, withMean=True)
labeler = StringIndexer(inputCol=data.columns[-1], outputCol='label')
pipeline = Pipeline(stages=[vectorizer, scaler, labeler])
fitted = pipeline.fit(data)
transformed = fitted.transform(data)
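    # Expand the scaled feature vector back into one named column per input feature, keeping the label column.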
result = transformed.withColumn("feature_arr", vector_to_array("scaled_features")).select(
[F.col("feature_arr")[i].alias(columns_to_scale[i]) for i in range(len(columns_to_scale))] + ['label']
)
return result
def extract(spark, input_uri):
return spark.read.csv(input_uri, header=True, inferSchema=True, comment="#")
def load(data, output_uri):
data.coalesce(1).write.mode("overwrite").csv(output_uri, header=True)
def data_pipeline(input_uri, output_uri):
spark = SparkSession.builder.appName("Prepare Iris Data").getOrCreate()
input = extract(spark, input_uri)
data = transform(input)
load(data, output_uri)
spark.stop()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_uri")
parser.add_argument("--output_uri")
args = parser.parse_args()
data_pipeline(args.input_uri, args.output_uri)
|
27348cd7b60346b6f2386f6b367d994cb913da30
|
2cdfd562d865036e6f0862c9f377dd1e88e2eb32
|
/bin/anthology/formatter.py
|
ad787246905ce95c84e8c6c3bda102598de4b897
|
[
"Apache-2.0"
] |
permissive
|
acl-org/acl-anthology
|
8ded455ca44a5d286325f8f2530735b3bdf79225
|
395fc4114a560776215a2e1c4a6cc5ce07dc348c
|
refs/heads/master
| 2023-09-02T07:14:09.651667
| 2023-09-01T20:05:48
| 2023-09-01T20:05:48
| 20,092,585
| 290
| 274
|
Apache-2.0
| 2023-09-14T14:54:32
| 2014-05-23T08:37:21
|
Python
|
UTF-8
|
Python
| false
| false
| 7,554
|
py
|
formatter.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2019-2022 Marcel Bollmann <marcel@bollmann.me>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging as log
from copy import deepcopy
from lxml import etree
import citeproc
from citeproc.source.json import CiteProcJSON
import citeproc_styles
import codecs
import re
from . import latexcodec
from .texmath import TexMath
from .utils import stringify_children, remove_extra_whitespace
latexcodec.register()
_BIBTEX_MONTHS = {
"january": "jan",
"february": "feb",
"march": "mar",
"april": "apr",
"may": "may",
"june": "jun",
"july": "jul",
"august": "aug",
"september": "sep",
"october": "oct",
"november": "nov",
"december": "dec",
}
class CiteprocFormatter:
"""Formatter using Citeproc and CSL files to produce citations.
cf. https://github.com/citation-style-language/styles for possible citation
styles
"""
styles = {}
@classmethod
def load_style(cls, style):
"""Loads and returns a CSL style."""
if style not in cls.styles:
cls.styles[style] = citeproc.CitationStylesStyle(
citeproc_styles.get_style_filepath(style)
)
return cls.styles[style]
@classmethod
def render_html_citation(
cls, paper, style="association-for-computational-linguistics"
):
"""Render a bibliography entry for a paper with a given CSL style.
Returns HTML encoded as a single string.
"""
data = paper.as_citeproc_json()
source = CiteProcJSON(data)
item = citeproc.CitationItem(data[0]["id"])
bib = citeproc.CitationStylesBibliography(
cls.load_style(style), source, citeproc.formatter.html
)
bib.register(citeproc.Citation([item]))
return str(bib.style.render_bibliography([item])[0])
def bibtex_encode(text):
"""Encodes a text string for use in BibTeX.
Assumes that the text does *not* contain any LaTeX commands!
"""
if text is None:
return ""
text = codecs.encode(text, "latex")
return text
def bibtex_convert_quotes(text):
if re.match(r"(?<!\\)\"", text):
log.warning(
f'Straight quote (") found in text field ({text}); converting automatically, but please fix in XML'
)
text = re.sub(r"(?<!\\)\"\b", "``", text)
text = re.sub(r"(?<!\\)\"", "''", text)
return text
def bibtex_convert_month(text):
"""Converts a month string to BibTeX macros.
If the string contains digits or is otherwise not parseable, it is returned
unchanged with quotes around it.
"""
text = text.lower()
if text in _BIBTEX_MONTHS: # most common case; map e.g. march -> mar
return _BIBTEX_MONTHS[text]
if text in _BIBTEX_MONTHS.values(): # already a month spec
return text
# Find embedded month strings
text = f'"{text}"'
for month, macro in _BIBTEX_MONTHS.items():
if month in text:
text = text.replace(month, f'" # {macro} # "')
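    # Drop empty "" fragments produced by the replacement, then re-join the pieces with BibTeX's # concatenation operator.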
text = " # ".join(filter(lambda k: k != '""', text.split(" # ")))
return text
def bibtex_make_entry(bibkey, bibtype, fields):
lines = [f"@{bibtype}{{{bibkey},"]
for key, value in fields:
if key == "author" and bibtype == "proceedings":
key = "editor"
if key in ("author", "editor") and " and " in value:
# Print each author on a separate line
value = " and\n ".join(value.split(" and "))
if key == "month":
value = bibtex_convert_month(value)
elif value is None:
log.warning(f"Skipping empty value for {bibkey}/{key}")
continue
elif has_unbalanced_braces(value):
log.error(f"Unbalanced braces in {key} field for {bibkey}; skipping!")
continue
elif '"' in value:
# Make sure not to use "" to quote values when they contain "
value = f"{{{value}}}"
else:
# quote value
value = f'"{value}"'
lines.append(f" {key} = {value},")
lines.append("}")
return "\n".join(lines)
def has_unbalanced_braces(string):
c = 0
for char in string:
if char == "{":
c += 1
elif char == "}":
c -= 1
if c < 0:
return True
return c != 0
class MarkupFormatter:
def __init__(self):
self.texmath = TexMath()
def as_xml(self, element):
return remove_extra_whitespace(stringify_children(element))
def as_text(self, element):
element = deepcopy(element)
for sub in element.iterfind(".//tex-math"):
sub.text = self.texmath.to_unicode(sub)
retval = etree.tostring(element, encoding="unicode", method="text")
return remove_extra_whitespace(retval)
def as_html(self, element, allow_url=False):
element = deepcopy(element)
# Transform elements to valid HTML
for sub in element.iterfind(".//url"):
if allow_url:
sub.tag = "a"
sub.attrib["href"] = sub.text
else:
sub.tag = "span"
sub.attrib["class"] = "acl-markup-url"
for sub in element.iterfind(".//fixed-case"):
sub.tag = "span"
sub.attrib["class"] = "acl-fixed-case"
for sub in element.iterfind(".//tex-math"):
parsed_elem = self.texmath.to_html(sub)
parsed_elem.tail = sub.tail
sub.getparent().replace(sub, parsed_elem)
retval = stringify_children(element)
return remove_extra_whitespace(retval)
def as_latex(self, element):
# following convert_xml_text_markup in anth2bib.py
if element.tag in ["tex-math", "url"]:
if len(element) > 0:
log.warning(f"<{element.tag}> element has children")
text = element.text
else:
text = bibtex_encode(element.text)
for nested_element in element:
text += self.as_latex(nested_element)
text += bibtex_encode(nested_element.tail)
if element.tag == "fixed-case":
text = f"{{{text}}}"
elif element.tag == "b":
text = f"\\textbf{{{text}}}"
elif element.tag == "i":
text = f"\\textit{{{text}}}"
elif element.tag == "tex-math":
text = f"${text}$"
elif element.tag == "url":
text = f"\\url{{{text}}}"
text = bibtex_convert_quotes(text)
return remove_extra_whitespace(text)
def __call__(self, element, form, **kwargs):
if element is None:
return ""
if form == "xml":
return self.as_xml(element)
elif form in ("plain", "text"):
return self.as_text(element)
elif form == "html":
return self.as_html(element, **kwargs)
elif form == "latex":
return self.as_latex(element)
raise ValueError(f"Unknown format: {form}")
|
e31c7bb56132a2e474daf5091d977120186639ed
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/homeassistant/triggers/time.py
|
5b3cd8590a77ff8036d55d3d2eca9c246ad3d5bb
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 6,552
|
py
|
time.py
|
"""Offer time listening automation rules."""
from datetime import datetime
from functools import partial
import voluptuous as vol
from homeassistant.components import sensor
from homeassistant.const import (
ATTR_DEVICE_CLASS,
CONF_AT,
CONF_PLATFORM,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import CALLBACK_TYPE, HassJob, HomeAssistant, State, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import (
EventStateChangedData,
async_track_point_in_time,
async_track_state_change_event,
async_track_time_change,
)
from homeassistant.helpers.trigger import TriggerActionType, TriggerInfo
from homeassistant.helpers.typing import ConfigType, EventType
import homeassistant.util.dt as dt_util
_TIME_TRIGGER_SCHEMA = vol.Any(
cv.time,
vol.All(str, cv.entity_domain(["input_datetime", "sensor"])),
msg=(
"Expected HH:MM, HH:MM:SS or Entity ID with domain 'input_datetime' or 'sensor'"
),
)
TRIGGER_SCHEMA = cv.TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_PLATFORM): "time",
vol.Required(CONF_AT): vol.All(cv.ensure_list, [_TIME_TRIGGER_SCHEMA]),
}
)
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: TriggerActionType,
trigger_info: TriggerInfo,
) -> CALLBACK_TYPE:
"""Listen for state changes based on configuration."""
trigger_data = trigger_info["trigger_data"]
entities: dict[str, CALLBACK_TYPE] = {}
removes: list[CALLBACK_TYPE] = []
job = HassJob(action, f"time trigger {trigger_info}")
@callback
def time_automation_listener(description, now, *, entity_id=None):
"""Listen for time changes and calls action."""
hass.async_run_hass_job(
job,
{
"trigger": {
**trigger_data,
"platform": "time",
"now": now,
"description": description,
"entity_id": entity_id,
}
},
)
@callback
def update_entity_trigger_event(event: EventType[EventStateChangedData]) -> None:
"""update_entity_trigger from the event."""
return update_entity_trigger(event.data["entity_id"], event.data["new_state"])
@callback
def update_entity_trigger(entity_id: str, new_state: State | None = None) -> None:
"""Update the entity trigger for the entity_id."""
# If a listener was already set up for entity, remove it.
if remove := entities.pop(entity_id, None):
remove()
remove = None
if not new_state:
return
trigger_dt: datetime | None
# Check state of entity. If valid, set up a listener.
if new_state.domain == "input_datetime":
if has_date := new_state.attributes["has_date"]:
year = new_state.attributes["year"]
month = new_state.attributes["month"]
day = new_state.attributes["day"]
if has_time := new_state.attributes["has_time"]:
hour = new_state.attributes["hour"]
minute = new_state.attributes["minute"]
second = new_state.attributes["second"]
else:
# If no time then use midnight.
hour = minute = second = 0
if has_date:
# If input_datetime has date, then track point in time.
trigger_dt = datetime(
year,
month,
day,
hour,
minute,
second,
tzinfo=dt_util.DEFAULT_TIME_ZONE,
)
# Only set up listener if time is now or in the future.
if trigger_dt >= dt_util.now():
remove = async_track_point_in_time(
hass,
partial(
time_automation_listener,
f"time set in {entity_id}",
entity_id=entity_id,
),
trigger_dt,
)
elif has_time:
# Else if it has time, then track time change.
remove = async_track_time_change(
hass,
partial(
time_automation_listener,
f"time set in {entity_id}",
entity_id=entity_id,
),
hour=hour,
minute=minute,
second=second,
)
elif (
new_state.domain == "sensor"
and new_state.attributes.get(ATTR_DEVICE_CLASS)
== sensor.SensorDeviceClass.TIMESTAMP
and new_state.state not in (STATE_UNAVAILABLE, STATE_UNKNOWN)
):
trigger_dt = dt_util.parse_datetime(new_state.state)
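            # Only schedule a point-in-time listener if the timestamp parsed and lies in the future.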
if trigger_dt is not None and trigger_dt > dt_util.utcnow():
remove = async_track_point_in_time(
hass,
partial(
time_automation_listener,
f"time set in {entity_id}",
entity_id=entity_id,
),
trigger_dt,
)
# Was a listener set up?
if remove:
entities[entity_id] = remove
to_track: list[str] = []
for at_time in config[CONF_AT]:
if isinstance(at_time, str):
# entity
to_track.append(at_time)
update_entity_trigger(at_time, new_state=hass.states.get(at_time))
else:
# datetime.time
removes.append(
async_track_time_change(
hass,
partial(time_automation_listener, "time"),
hour=at_time.hour,
minute=at_time.minute,
second=at_time.second,
)
)
# Track state changes of any entities.
removes.append(
async_track_state_change_event(hass, to_track, update_entity_trigger_event)
)
@callback
def remove_track_time_changes():
"""Remove tracked time changes."""
for remove in entities.values():
remove()
for remove in removes:
remove()
return remove_track_time_changes
|
c9260385bb8552b7b061f1c9b00a6292694365ee
|
5ecd6c73e60e15f5d426ae8dfcc5fb34540010f7
|
/tryalgo/horn_sat.py
|
388553e3a64927c70182f558d8d06b02572e0c60
|
[
"MIT"
] |
permissive
|
jilljenn/tryalgo
|
736568f223d9a08db9ec392a2420b478aff6039a
|
634645707ebf2489356009a6f91f012b55b1ee39
|
refs/heads/master
| 2023-08-24T21:30:57.871068
| 2023-07-09T18:08:33
| 2023-07-09T18:08:33
| 50,119,000
| 390
| 124
|
MIT
| 2023-01-29T09:47:45
| 2016-01-21T16:05:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,776
|
py
|
horn_sat.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""\
Solving Horn SAT
christoph dürr - 2016-2020
clauses are numbered starting from 0
variables are strings (identifier)
solution : set of variables that are set to true
posvar_in_clause : maps clause to the unique positive variable in clause
(or None)
clause_with_negvar : maps variable v to all clauses that contain not(v)
every clause has a score: number of its negative variables
that are not in solution sol
pool : maps score to clauses of that score
"""
from collections import defaultdict
import sys
# To mock it, https://stackoverflow.com/a/44677646/827989
def read(filename):
""" reads a Horn SAT formula from a text file
:file format:
# comment
A # clause with unique positive literal
:- A # clause with unique negative literal
A :- B, C, D # clause where A is positive and B,C,D negative
# variables are strings without spaces
"""
formula = []
for line in open(filename, 'r'):
line = line.strip()
if line[0] == "#":
continue
lit = line.split(":-")
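        # A line without ":-" is a fact: a single positive literal with no negative variables.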
if len(lit) == 1:
posvar = lit[0]
negvars = []
else:
assert len(lit) == 2
posvar = lit[0].strip()
if posvar == '':
posvar = None
negvars = lit[1].split(',')
for i, _ in enumerate(negvars):
negvars[i] = negvars[i].strip()
formula.append((posvar, negvars))
return formula
# pylint: disable=line-too-long
def horn_sat(formula):
""" Solving a HORN Sat formula
:param formula: list of couple(posvar, negvars).
negvars is a list of the negative variables (can be empty)
posvar is the positive variable (can be None)
Variables can be any hashable objects: integers, strings...
:returns: None if formula is not satisfiable, else a minimal set of vars
that have to be set to true in order to satisfy the formula.
:complexity: linear
"""
# --- construct data structures
CLAUSES = range(len(formula))
score = [0 for c in CLAUSES] # number of neg vars not yet in solution
# the unique positive variable of a clause (if any)
posvar_in_clause = [None for c in CLAUSES]
# all clauses where a variable appears negatively
clauses_with_negvar = defaultdict(set)
for c in CLAUSES:
posvar, negvars = formula[c]
score[c] = len(set(negvars)) # do not count twice negative variables
posvar_in_clause[c] = posvar
for v in negvars:
clauses_with_negvar[v].add(c)
pool = [set() for s in range(max(score) + 1)] # create the pool
for c in CLAUSES:
pool[score[c]].add(c) # pool[s] = set of clauses with score s
# --- solve Horn SAT formula
solution = set() # contains all variables set to True
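    # A clause reaches score 0 when all its negative variables are already true,
    # so its positive variable (if any) is forced to true; no positive variable means a contradiction.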
while pool[0]:
curr = pool[0].pop() # arbitrary zero score clause
v = posvar_in_clause[curr]
if v is None: # formula is not satisfiable
return None
if v in solution or curr in clauses_with_negvar[v]:
continue # clause is already satisfied
solution.add(v)
for c in clauses_with_negvar[v]: # update score
pool[score[c]].remove(c)
score[c] -= 1
pool[score[c]].add(c) # change c to lower score in pool
return solution
if __name__ == "__main__":
F = read(sys.argv[1])
sol = horn_sat(F)
if sol is None:
print("No solution")
else:
print("Minimal solution:")
for x in sorted(sol):
print(x)
|
576407a90ed6e38cbcf8f912970469a218377e13
|
481d8268f533c0b5527112f9b7d709aaa22ab053
|
/scripts/zmail.py
|
7ce7992e2dfcbdecd0d72d0dacca54ff9eae6b81
|
[
"Apache-2.0"
] |
permissive
|
intelxed/xed
|
590f60c564dc75004e51d95773fc1d4d4bfba1d3
|
01a6da8090af84cd52f6c1070377ae6e885b078f
|
refs/heads/main
| 2023-08-25T01:30:27.421743
| 2023-08-21T17:19:26
| 2023-08-21T17:19:26
| 75,980,044
| 1,390
| 175
|
Apache-2.0
| 2023-08-07T11:12:00
| 2016-12-08T22:21:22
|
Python
|
UTF-8
|
Python
| false
| false
| 6,948
|
py
|
zmail.py
|
#!/usr/bin/env python3
#BEGIN_LEGAL
#
#Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
import re
import os
import smtplib
import sys
import argparse
def msg(s):
sys.stdout.write(s + "\n")
def msgb(s, t=''):
sys.stdout.write('[{}] {}\n'.format(s, t))
def die(s):
msgb('FAILING',s)
sys.exit(1)
def warn(m):
msg("[WARNING] " + m)
############################################################################
# Stuff for sending mail or just a making an note file
def _check_muttrc():
if 'HOME' in os.environ:
home = os.environ['HOME']
muttrc = os.path.join(home,'.muttrc')
if os.path.exists(muttrc):
f = open(muttrc,"r")
lines = f.readlines()
f.close()
for line in lines:
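                # Look for a line of the form: set from = user@example.com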
g= re.search(r'set[ ]+from[ ]*[=][ ]*(?P<email>[A-Za-z0-9_.@]+)',
line)
if g:
sender = g.group('email')
return sender
warn("Cannot find \'from\' setting in .muttrc." +
" Please set it to your email address.")
return None
else:
warn("Cannot find " + muttrc + " file where you " +
"should set your email address.")
return None
warn("Cannot find your HOME environment variable. " +
"Cannot look for your .muttrc file.")
return None
def check_smtp_host_env_var():
servers = []
if 'SMTP_HOST' in os.environ:
servers.append(os.environ['SMTP_HOST'])
else:
servers.append('ecsmtp.hd.intel.com')
servers.append('ecsmtp.iil.intel.com')
return servers
def check_reply_to_env_var():
if 'REPLYTO' in os.environ:
return os.environ['REPLYTO']
sender = _check_muttrc()
if sender:
return sender
die("Please either set your REPLYTO environment variable " +
"to your @intel.com\n " +
"email address or have a\n\tset " +
"from=YOUR-EMAIL-ADDRESS@intel.com\n" +
"in your $HOME/.muttrc.\n\n")
def find_sender():
return check_reply_to_env_var()
############################################################
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def _send_email(recipients_list,
sender,
subject,
body,
attachments_list = [],
cc_recipients_list = [],
verbose = 0):
"""Send the body string and any attachments to the list of
recipients. The attachments_list is a list of tuples (real_name,
attachment_name)"""
if verbose>50:
msgb('email 0.0')
all_recipients = recipients_list + cc_recipients_list
#recipients = ", ".join(all_recipients)
recipients = ", ".join(recipients_list)
cc_recipients = ", ".join(cc_recipients_list)
# Create the enclosing (outer) message
outer = MIMEMultipart()
outer['Subject'] = subject
outer['To'] = recipients
outer['Cc'] = cc_recipients
outer['From'] = sender
if verbose:
msgb("FROM", sender)
msgb("TO", recipients)
msgb("CC", cc_recipients)
outer.preamble = 'You will not see this in a MIME-aware mail reader.\n'
if verbose > 50:
msgb('email 0.5')
msg = MIMEText(body, _subtype='plain')
msg.add_header('Content-Disposition', 'inline')
outer.attach(msg)
if verbose > 50:
msgb('email 1.0')
for (real_name,attachment_name) in attachments_list:
if verbose > 50:
msgb('email 2.0', real_name + " " + attachment_name )
if not os.path.exists(real_name):
die("Cannot read attachment named: " + real_name)
fp = open(real_name,'r')
msg = MIMEText(fp.read(), _subtype='plain')
fp.close()
if verbose > 50:
msgb('3.0')
msg.add_header('Content-Disposition', 'attachment',
filename=attachment_name)
outer.attach(msg)
if verbose > 50:
msgb('4.0')
# Now send or store the message
composed = outer.as_string()
s = smtplib.SMTP()
if verbose > 50:
s.set_debuglevel(1)
# try connecting to a server from the list
mail_server_list = check_smtp_host_env_var()
connected = False
for outgoing_mail_server in mail_server_list:
try:
msgb("MAIL SERVER", outgoing_mail_server)
s.connect(outgoing_mail_server)
connected = True
break
except:
continue
# last resort try the default
if not connected:
s.connect()
rdict = s.sendmail(sender, all_recipients, composed)
s.quit()
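    # smtplib.sendmail returns a dict of refused recipients; a non-empty dict means some deliveries failed.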
if rdict and len(rdict) > 0:
die("MAIL FAILED FOR " + str(rdict))
def mail(note,
sender,
recipients,
cc_recipients=[],
attachments=[],
subject = '',
verbosity = 0):
"""mail note to the to_recipient and the cc_recipient"""
if verbosity > 1:
msgb("SENDING EMAIL")
note = [x.rstrip() for x in note]
body = '\n'.join(note)
att = []
for attachment in attachments:
att.append( (attachment, os.path.basename(attachment)) )
try:
_send_email(recipients,
sender,
subject,
body,
att,
cc_recipients,
verbosity)
except:
die("Sending email failed")
return 0
def getargs():
parser = argparse.ArgumentParser()
parser.add_argument("-m",
dest="message",
default=[],
action="append",
help="Message to send")
parser.add_argument("-f",
dest="sender",
help="Sender")
parser.add_argument("-t",
dest="recipients",
action="append",
default=[],
help="Recipient")
parser.add_argument("-v",
dest="verbosity",
default=0,
type=int,
help="Verbosity")
args = parser.parse_args()
return args
def main():
args = getargs()
r = mail(args.message, args.sender, args.recipients,
verbosity=args.verbosity)
return r
if __name__ == '__main__':
r = main()
sys.exit(r)
|
f23a42c7a886ec0b3eecda36197d58a1449923a8
|
a41e1498e3c080f47abd8e8e57157548df3ebbf1
|
/pandas/tests/io/test_common.py
|
a7ece6a6d7b08fcfa9ec737adcce3ce8aed0d9b5
|
[
"BSD-3-Clause"
] |
permissive
|
pandas-dev/pandas
|
e7e639454a298bebc272622e66faa9829ea393bb
|
c7325d7e7e77ecb4a4e57b48bc25265277c75712
|
refs/heads/main
| 2023-09-01T12:42:07.927176
| 2023-09-01T11:14:10
| 2023-09-01T11:14:10
| 858,127
| 36,166
| 18,728
|
BSD-3-Clause
| 2023-09-14T21:18:41
| 2010-08-24T01:37:33
|
Python
|
UTF-8
|
Python
| false
| false
| 22,455
|
py
|
test_common.py
|
"""
Tests for the pandas.io.common functionalities
"""
import codecs
import errno
from functools import partial
from io import (
BytesIO,
StringIO,
UnsupportedOperation,
)
import mmap
import os
from pathlib import Path
import pickle
import tempfile
import pytest
from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
import pandas.io.common as icom
class CustomFSPath:
"""For testing fspath on unknown objects"""
def __init__(self, path) -> None:
self.path = path
def __fspath__(self):
return self.path
# Functions that consume a string path and return a string or path-like object
path_types = [str, CustomFSPath, Path]
try:
from py.path import local as LocalPath
path_types.append(LocalPath)
except ImportError:
pass
HERE = os.path.abspath(os.path.dirname(__file__))
# https://github.com/cython/cython/issues/1720
class TestCommonIOCapabilities:
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_expand_user(self):
filename = "~/sometest"
expanded_name = icom._expand_user(filename)
assert expanded_name != filename
assert os.path.isabs(expanded_name)
assert os.path.expanduser(filename) == expanded_name
def test_expand_user_normal_path(self):
filename = "/somefolder/sometest"
expanded_name = icom._expand_user(filename)
assert expanded_name == filename
assert os.path.expanduser(filename) == expanded_name
def test_stringify_path_pathlib(self):
rel_path = icom.stringify_path(Path("."))
assert rel_path == "."
redundant_path = icom.stringify_path(Path("foo//bar"))
assert redundant_path == os.path.join("foo", "bar")
@td.skip_if_no("py.path")
def test_stringify_path_localpath(self):
path = os.path.join("foo", "bar")
abs_path = os.path.abspath(path)
lpath = LocalPath(path)
assert icom.stringify_path(lpath) == abs_path
def test_stringify_path_fspath(self):
p = CustomFSPath("foo/bar.csv")
result = icom.stringify_path(p)
assert result == "foo/bar.csv"
def test_stringify_file_and_path_like(self):
# GH 38125: do not stringify file objects that are also path-like
fsspec = pytest.importorskip("fsspec")
with tm.ensure_clean() as path:
with fsspec.open(f"file://{path}", mode="wb") as fsspec_obj:
assert fsspec_obj == icom.stringify_path(fsspec_obj)
@pytest.mark.parametrize("path_type", path_types)
def test_infer_compression_from_path(self, compression_format, path_type):
extension, expected = compression_format
path = path_type("foo/bar.csv" + extension)
compression = icom.infer_compression(path, compression="infer")
assert compression == expected
@pytest.mark.parametrize("path_type", [str, CustomFSPath, Path])
def test_get_handle_with_path(self, path_type):
# ignore LocalPath: it creates strange paths: /absolute/~/sometest
with tempfile.TemporaryDirectory(dir=Path.home()) as tmp:
filename = path_type("~/" + Path(tmp).name + "/sometest")
with icom.get_handle(filename, "w") as handles:
assert Path(handles.handle.name).is_absolute()
assert os.path.expanduser(filename) == handles.handle.name
def test_get_handle_with_buffer(self):
with StringIO() as input_buffer:
with icom.get_handle(input_buffer, "r") as handles:
assert handles.handle == input_buffer
assert not input_buffer.closed
assert input_buffer.closed
# Test that BytesIOWrapper(get_handle) returns correct amount of bytes every time
def test_bytesiowrapper_returns_correct_bytes(self):
# Test latin1, ucs-2, and ucs-4 chars
data = """a,b,c
1,2,3
©,®,®
Look,a snake,🐍"""
with icom.get_handle(StringIO(data), "rb", is_text=False) as handles:
result = b""
chunksize = 5
while True:
chunk = handles.handle.read(chunksize)
# Make sure each chunk is correct amount of bytes
assert len(chunk) <= chunksize
if len(chunk) < chunksize:
# Can be less amount of bytes, but only at EOF
# which happens when read returns empty
assert len(handles.handle.read()) == 0
result += chunk
break
result += chunk
assert result == data.encode("utf-8")
# Test that pyarrow can handle a file opened with get_handle
def test_get_handle_pyarrow_compat(self):
pa_csv = pytest.importorskip("pyarrow.csv")
# Test latin1, ucs-2, and ucs-4 chars
data = """a,b,c
1,2,3
©,®,®
Look,a snake,🐍"""
expected = pd.DataFrame(
{"a": ["1", "©", "Look"], "b": ["2", "®", "a snake"], "c": ["3", "®", "🐍"]}
)
s = StringIO(data)
with icom.get_handle(s, "rb", is_text=False) as handles:
df = pa_csv.read_csv(handles.handle).to_pandas()
tm.assert_frame_equal(df, expected)
assert not s.closed
def test_iterator(self):
with pd.read_csv(StringIO(self.data1), chunksize=1) as reader:
result = pd.concat(reader, ignore_index=True)
expected = pd.read_csv(StringIO(self.data1))
tm.assert_frame_equal(result, expected)
# GH12153
with pd.read_csv(StringIO(self.data1), chunksize=1) as it:
first = next(it)
tm.assert_frame_equal(first, expected.iloc[[0]])
tm.assert_frame_equal(pd.concat(it), expected.iloc[1:])
@pytest.mark.parametrize(
"reader, module, error_class, fn_ext",
[
(pd.read_csv, "os", FileNotFoundError, "csv"),
(pd.read_fwf, "os", FileNotFoundError, "txt"),
(pd.read_excel, "xlrd", FileNotFoundError, "xlsx"),
(pd.read_feather, "pyarrow", OSError, "feather"),
(pd.read_hdf, "tables", FileNotFoundError, "h5"),
(pd.read_stata, "os", FileNotFoundError, "dta"),
(pd.read_sas, "os", FileNotFoundError, "sas7bdat"),
(pd.read_json, "os", FileNotFoundError, "json"),
(pd.read_pickle, "os", FileNotFoundError, "pickle"),
],
)
def test_read_non_existent(self, reader, module, error_class, fn_ext):
pytest.importorskip(module)
path = os.path.join(HERE, "data", "does_not_exist." + fn_ext)
msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
msg2 = rf"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
msg3 = "Expected object or value"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (
rf"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
rf"'.+does_not_exist\.{fn_ext}'"
)
msg6 = rf"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
msg7 = (
rf"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
)
msg8 = rf"Failed to open local file.+does_not_exist\.{fn_ext}"
with pytest.raises(
error_class,
match=rf"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})",
):
reader(path)
@pytest.mark.parametrize(
"method, module, error_class, fn_ext",
[
(pd.DataFrame.to_csv, "os", OSError, "csv"),
(pd.DataFrame.to_html, "os", OSError, "html"),
(pd.DataFrame.to_excel, "xlrd", OSError, "xlsx"),
(pd.DataFrame.to_feather, "pyarrow", OSError, "feather"),
(pd.DataFrame.to_parquet, "pyarrow", OSError, "parquet"),
(pd.DataFrame.to_stata, "os", OSError, "dta"),
(pd.DataFrame.to_json, "os", OSError, "json"),
(pd.DataFrame.to_pickle, "os", OSError, "pickle"),
],
)
# NOTE: Missing parent directory for pd.DataFrame.to_hdf is handled by PyTables
def test_write_missing_parent_directory(self, method, module, error_class, fn_ext):
pytest.importorskip(module)
dummy_frame = pd.DataFrame({"a": [1, 2, 3], "b": [2, 3, 4], "c": [3, 4, 5]})
path = os.path.join(HERE, "data", "missing_folder", "does_not_exist." + fn_ext)
with pytest.raises(
error_class,
match=r"Cannot save file into a non-existent directory: .*missing_folder",
):
method(dummy_frame, path)
@pytest.mark.parametrize(
"reader, module, error_class, fn_ext",
[
(pd.read_csv, "os", FileNotFoundError, "csv"),
(pd.read_table, "os", FileNotFoundError, "csv"),
(pd.read_fwf, "os", FileNotFoundError, "txt"),
(pd.read_excel, "xlrd", FileNotFoundError, "xlsx"),
(pd.read_feather, "pyarrow", OSError, "feather"),
(pd.read_hdf, "tables", FileNotFoundError, "h5"),
(pd.read_stata, "os", FileNotFoundError, "dta"),
(pd.read_sas, "os", FileNotFoundError, "sas7bdat"),
(pd.read_json, "os", FileNotFoundError, "json"),
(pd.read_pickle, "os", FileNotFoundError, "pickle"),
],
)
def test_read_expands_user_home_dir(
self, reader, module, error_class, fn_ext, monkeypatch
):
pytest.importorskip(module)
path = os.path.join("~", "does_not_exist." + fn_ext)
monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x))
msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
msg2 = rf"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
msg3 = "Unexpected character found when decoding 'false'"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (
rf"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
rf"'.+does_not_exist\.{fn_ext}'"
)
msg6 = rf"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
msg7 = (
rf"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
)
msg8 = rf"Failed to open local file.+does_not_exist\.{fn_ext}"
with pytest.raises(
error_class,
match=rf"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})",
):
reader(path)
@pytest.mark.parametrize(
"reader, module, path",
[
(pd.read_csv, "os", ("io", "data", "csv", "iris.csv")),
(pd.read_table, "os", ("io", "data", "csv", "iris.csv")),
(
pd.read_fwf,
"os",
("io", "data", "fixed_width", "fixed_width_format.txt"),
),
(pd.read_excel, "xlrd", ("io", "data", "excel", "test1.xlsx")),
(
pd.read_feather,
"pyarrow",
("io", "data", "feather", "feather-0_3_1.feather"),
),
(
pd.read_hdf,
"tables",
("io", "data", "legacy_hdf", "datetimetz_object.h5"),
),
(pd.read_stata, "os", ("io", "data", "stata", "stata10_115.dta")),
(pd.read_sas, "os", ("io", "sas", "data", "test1.sas7bdat")),
(pd.read_json, "os", ("io", "json", "data", "tsframe_v012.json")),
(
pd.read_pickle,
"os",
("io", "data", "pickle", "categorical.0.25.0.pickle"),
),
],
)
def test_read_fspath_all(self, reader, module, path, datapath):
pytest.importorskip(module)
path = datapath(*path)
mypath = CustomFSPath(path)
result = reader(mypath)
expected = reader(path)
if path.endswith(".pickle"):
# categorical
tm.assert_categorical_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"writer_name, writer_kwargs, module",
[
("to_csv", {}, "os"),
("to_excel", {"engine": "openpyxl"}, "openpyxl"),
("to_feather", {}, "pyarrow"),
("to_html", {}, "os"),
("to_json", {}, "os"),
("to_latex", {}, "os"),
("to_pickle", {}, "os"),
("to_stata", {"time_stamp": pd.to_datetime("2019-01-01 00:00")}, "os"),
],
)
def test_write_fspath_all(self, writer_name, writer_kwargs, module):
if writer_name in ["to_latex"]: # uses Styler implementation
pytest.importorskip("jinja2")
p1 = tm.ensure_clean("string")
p2 = tm.ensure_clean("fspath")
df = pd.DataFrame({"A": [1, 2]})
with p1 as string, p2 as fspath:
pytest.importorskip(module)
mypath = CustomFSPath(fspath)
writer = getattr(df, writer_name)
writer(string, **writer_kwargs)
writer(mypath, **writer_kwargs)
with open(string, "rb") as f_str, open(fspath, "rb") as f_path:
if writer_name == "to_excel":
# the binary representation of an Excel file contains creation-time
# data that causes flaky CI failures
result = pd.read_excel(f_str, **writer_kwargs)
expected = pd.read_excel(f_path, **writer_kwargs)
tm.assert_frame_equal(result, expected)
else:
result = f_str.read()
expected = f_path.read()
assert result == expected
def test_write_fspath_hdf5(self):
# Same test as write_fspath_all, except HDF5 files aren't
# necessarily byte-for-byte identical for a given dataframe, so we'll
# have to read and compare equality
pytest.importorskip("tables")
df = pd.DataFrame({"A": [1, 2]})
p1 = tm.ensure_clean("string")
p2 = tm.ensure_clean("fspath")
with p1 as string, p2 as fspath:
mypath = CustomFSPath(fspath)
df.to_hdf(mypath, key="bar")
df.to_hdf(string, key="bar")
result = pd.read_hdf(fspath, key="bar")
expected = pd.read_hdf(string, key="bar")
tm.assert_frame_equal(result, expected)
@pytest.fixture
def mmap_file(datapath):
return datapath("io", "data", "csv", "test_mmap.csv")
class TestMMapWrapper:
def test_constructor_bad_file(self, mmap_file):
non_file = StringIO("I am not a file")
non_file.fileno = lambda: -1
# the error raised is different on Windows
if is_platform_windows():
msg = "The parameter is incorrect"
err = OSError
else:
msg = "[Errno 22]"
err = mmap.error
with pytest.raises(err, match=msg):
icom._maybe_memory_map(non_file, True)
with open(mmap_file, encoding="utf-8") as target:
pass
msg = "I/O operation on closed file"
with pytest.raises(ValueError, match=msg):
icom._maybe_memory_map(target, True)
def test_next(self, mmap_file):
with open(mmap_file, encoding="utf-8") as target:
lines = target.readlines()
with icom.get_handle(
target, "r", is_text=True, memory_map=True
) as wrappers:
wrapper = wrappers.handle
assert isinstance(wrapper.buffer.buffer, mmap.mmap)
for line in lines:
next_line = next(wrapper)
assert next_line.strip() == line.strip()
with pytest.raises(StopIteration, match=r"^$"):
next(wrapper)
def test_unknown_engine(self):
with tm.ensure_clean() as path:
df = tm.makeDataFrame()
df.to_csv(path)
with pytest.raises(ValueError, match="Unknown engine"):
pd.read_csv(path, engine="pyt")
def test_binary_mode(self):
"""
'encoding' shouldn't be passed to 'open' in binary mode.
GH 35058
"""
with tm.ensure_clean() as path:
df = tm.makeDataFrame()
df.to_csv(path, mode="w+b")
tm.assert_frame_equal(df, pd.read_csv(path, index_col=0))
@pytest.mark.parametrize("encoding", ["utf-16", "utf-32"])
@pytest.mark.parametrize("compression_", ["bz2", "xz"])
def test_warning_missing_utf_bom(self, encoding, compression_):
"""
bz2 and xz do not write the byte order mark (BOM) for utf-16/32.
https://stackoverflow.com/questions/55171439
GH 35681
"""
df = tm.makeDataFrame()
with tm.ensure_clean() as path:
with tm.assert_produces_warning(UnicodeWarning):
df.to_csv(path, compression=compression_, encoding=encoding)
# reading should fail (otherwise we wouldn't need the warning)
msg = r"UTF-\d+ stream does not start with BOM"
with pytest.raises(UnicodeError, match=msg):
pd.read_csv(path, compression=compression_, encoding=encoding)
def test_is_fsspec_url():
assert icom.is_fsspec_url("gcs://pandas/somethingelse.com")
assert icom.is_fsspec_url("gs://pandas/somethingelse.com")
# the following is the only remote URL that is handled without fsspec
assert not icom.is_fsspec_url("http://pandas/somethingelse.com")
assert not icom.is_fsspec_url("random:pandas/somethingelse.com")
assert not icom.is_fsspec_url("/local/path")
assert not icom.is_fsspec_url("relative/local/path")
# fsspec URL in string should not be recognized
assert not icom.is_fsspec_url("this is not fsspec://url")
assert not icom.is_fsspec_url("{'url': 'gs://pandas/somethingelse.com'}")
# accept everything that conforms to RFC 3986 schema
assert icom.is_fsspec_url("RFC-3986+compliant.spec://something")
@pytest.mark.parametrize("encoding", [None, "utf-8"])
@pytest.mark.parametrize("format", ["csv", "json"])
def test_codecs_encoding(encoding, format):
# GH39247
expected = tm.makeDataFrame()
with tm.ensure_clean() as path:
with codecs.open(path, mode="w", encoding=encoding) as handle:
getattr(expected, f"to_{format}")(handle)
with codecs.open(path, mode="r", encoding=encoding) as handle:
if format == "csv":
df = pd.read_csv(handle, index_col=0)
else:
df = pd.read_json(handle)
tm.assert_frame_equal(expected, df)
def test_codecs_get_writer_reader():
# GH39247
expected = tm.makeDataFrame()
with tm.ensure_clean() as path:
with open(path, "wb") as handle:
with codecs.getwriter("utf-8")(handle) as encoded:
expected.to_csv(encoded)
with open(path, "rb") as handle:
with codecs.getreader("utf-8")(handle) as encoded:
df = pd.read_csv(encoded, index_col=0)
tm.assert_frame_equal(expected, df)
@pytest.mark.parametrize(
"io_class,mode,msg",
[
(BytesIO, "t", "a bytes-like object is required, not 'str'"),
(StringIO, "b", "string argument expected, got 'bytes'"),
],
)
def test_explicit_encoding(io_class, mode, msg):
# GH39247; this test makes sure that if a user provides mode="*t" or "*b",
# it is used. In the case of this test it leads to an error as intentionally the
# wrong mode is requested
expected = tm.makeDataFrame()
with io_class() as buffer:
with pytest.raises(TypeError, match=msg):
expected.to_csv(buffer, mode=f"w{mode}")
@pytest.mark.parametrize("encoding_errors", [None, "strict", "replace"])
@pytest.mark.parametrize("format", ["csv", "json"])
def test_encoding_errors(encoding_errors, format):
# GH39450
msg = "'utf-8' codec can't decode byte"
bad_encoding = b"\xe4"
if format == "csv":
content = b"," + bad_encoding + b"\n" + bad_encoding * 2 + b"," + bad_encoding
reader = partial(pd.read_csv, index_col=0)
else:
content = (
b'{"'
+ bad_encoding * 2
+ b'": {"'
+ bad_encoding
+ b'":"'
+ bad_encoding
+ b'"}}'
)
reader = partial(pd.read_json, orient="index")
with tm.ensure_clean() as path:
file = Path(path)
file.write_bytes(content)
if encoding_errors != "replace":
with pytest.raises(UnicodeDecodeError, match=msg):
reader(path, encoding_errors=encoding_errors)
else:
df = reader(path, encoding_errors=encoding_errors)
decoded = bad_encoding.decode(errors=encoding_errors)
expected = pd.DataFrame({decoded: [decoded]}, index=[decoded * 2])
tm.assert_frame_equal(df, expected)
def test_bad_encdoing_errors():
# GH 39777
with tm.ensure_clean() as path:
with pytest.raises(LookupError, match="unknown error handler name"):
icom.get_handle(path, "w", errors="bad")
def test_errno_attribute():
# GH 13872
with pytest.raises(FileNotFoundError, match="\\[Errno 2\\]") as err:
pd.read_csv("doesnt_exist")
assert err.errno == errno.ENOENT
def test_fail_mmap():
with pytest.raises(UnsupportedOperation, match="fileno"):
with BytesIO() as buffer:
icom.get_handle(buffer, "rb", memory_map=True)
def test_close_on_error():
# GH 47136
class TestError:
def close(self):
raise OSError("test")
with pytest.raises(OSError, match="test"):
with BytesIO() as buffer:
with icom.get_handle(buffer, "rb") as handles:
handles.created_handles.append(TestError())
@pytest.mark.parametrize(
"reader",
[
pd.read_csv,
pd.read_fwf,
pd.read_excel,
pd.read_feather,
pd.read_hdf,
pd.read_stata,
pd.read_sas,
pd.read_json,
pd.read_pickle,
],
)
def test_pickle_reader(reader):
# GH 22265
with BytesIO() as buffer:
pickle.dump(reader, buffer)
|
8fef98c2654ba9af7518648c1307f2dfd67c0ac1
|
8246092010e656920e7199f889f9cbf54b83a729
|
/pycoin/symbols/ric.py
|
0cb5179b08fc317c81336784f54f585757c5fe15
|
[
"MIT"
] |
permissive
|
richardkiss/pycoin
|
5717411a11445773ac922c1d1c1b7dbe4835cd77
|
b41ad7d02e52d9869a8c9f0dbd7d3b2b496c98c0
|
refs/heads/main
| 2023-08-07T12:14:04.974934
| 2023-04-18T02:27:15
| 2023-04-18T02:27:15
| 10,917,677
| 1,306
| 489
|
MIT
| 2023-06-03T23:24:50
| 2013-06-24T19:17:52
|
Python
|
UTF-8
|
Python
| false
| false
| 319
|
py
|
ric.py
|
from pycoin.networks.bitcoinish import create_bitcoinish_network
network = create_bitcoinish_network(
symbol="RIC", network_name="Riecoin", subnet_name="mainnet",
wif_prefix_hex="80", address_prefix_hex="3c", pay_to_script_prefix_hex="05",
bip32_prv_prefix_hex="0488ade4", bip32_pub_prefix_hex="0488b21e")
|
fbdfa49a33e38a4996637be08063956a52c38232
|
e23229e1f3f932d05cb8dd38eec7ad9f7b7bfe12
|
/myfitnesspal/types.py
|
3b802efc9ab4d491ffa77c9eacbf62e9635dc821
|
[
"MIT"
] |
permissive
|
coddingtonbear/python-myfitnesspal
|
292b182afe51f00ad7d7e05e566f1e6c33fdbdcf
|
596970d18c26d82e03a30a420a9864e148c52686
|
refs/heads/master
| 2023-08-31T16:21:50.396794
| 2023-06-16T03:40:28
| 2023-06-16T03:40:28
| 11,441,467
| 773
| 163
|
MIT
| 2023-08-07T20:17:40
| 2013-07-16T05:31:10
|
Python
|
UTF-8
|
Python
| false
| false
| 4,531
|
py
|
types.py
|
from __future__ import annotations
from typing import Any, Callable, Dict, List, Optional
from typing_extensions import Literal, TypedDict
class CommandDefinition(TypedDict):
function: Callable
description: str
is_alias: bool
aliases: List[str]
MyfitnesspalUserId = str
class GoalDisplayDict(TypedDict):
id: str
display_type: str
nutrients: List[str]
class UnitPreferenceDict(TypedDict):
energy: str
weight: str
distance: str
height: str
water: str
class DiaryPreferencesDict(TypedDict):
default_foot_view: str
meal_names: List[str]
tracked_nutrients: List[str]
class UnitValueContainer(TypedDict):
unit: str
value: float
class GoalPreferencesDict(TypedDict):
workouts_per_week: int
weekly_workout_duration: int
weekly_exercise_energy: UnitValueContainer
weight_change_goal: UnitValueContainer
weight_goal: UnitValueContainer
diary_goal_display: str
home_goal_display: str
macro_goal_format: str
class LocationPreferencesDict(TypedDict):
time_zone: str
country_code: str
locale: str
postal_code: str
state: str
city: str
IsoDateStr = str
class AdminFlagDict(TypedDict):
status: str
has_changed_username: bool
forgot_password_or_username: bool
warnings: int
strikes: int
revoked_privileges: List
class AccountDict(TypedDict):
created_at: IsoDateStr
updated_at: IsoDateStr
last_login: IsoDateStr
valid_email: bool
registration_source: str
roles: List[str]
admin_flags: AdminFlagDict
class SystemDataDict(TypedDict):
login_streak: int
unseen_notifications: int
Unknown = Any
class UserProfile(TypedDict):
type: str
starting_weight_date: str
starting_weight: UnitValueContainer
main_image_url: str
main_image_id: Optional[Unknown]
birthdate: str
height: UnitValueContainer
first_name: Optional[str]
last_name: Optional[str]
sex: Literal["M", "F"]
activity_factor: str
headline: Optional[str]
about: Optional[str]
why: Optional[str]
inspirations: List
class UserMetadata(TypedDict):
id: MyfitnesspalUserId
username: str
email: str
goal_displays: List[GoalDisplayDict]
unit_preferences: UnitPreferenceDict
diary_preferences: DiaryPreferencesDict
goal_preferences: GoalPreferencesDict
location_preferences: LocationPreferencesDict
account: AccountDict
system_data: SystemDataDict
step_sources: List
profiles: List[UserProfile]
class AuthData(TypedDict):
token_type: str
access_token: str
expires_in: int
refresh_token: str
user_id: MyfitnesspalUserId
NutritionDict = Dict[str, float]
class MealEntry(TypedDict):
name: str
nutrition_information: NutritionDict
class NoteDataDict(TypedDict):
body: str
type: str
date: str
class FoodItemNutritionDict(TypedDict):
calcium: float
carbohydrates: float
cholesterol: float
fat: float
fiber: float
iron: float
monounsaturated_fat: float
polyunsaturated_fat: float
potassium: float
protein: float
saturated_fat: float
sodium: float
sugar: float
trans_fat: float
vitamin_a: float
vitamin_c: float
class ServingSizeDict(TypedDict):
id: str
nutrition_multiplier: float
value: float
unit: str
index: int
class FoodItemDetailsResponse(TypedDict):
description: str
brand_name: Optional[str]
verified: bool
nutrition: FoodItemNutritionDict
calories: float
confirmations: int
serving_sizes: List[ServingSizeDict]
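# Note (illustrative): the schema.org-style dicts below use the functional
# TypedDict syntax because keys such as "@type" and "@context" are not valid
# Python identifiers.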
NutritionInformation = TypedDict(
"NutritionInformation",
{
"@type": Literal["NutritionInformation"],
"calories": str,
"carbohydrateContent": str,
"fiberContent": str,
"sugarContent": str,
"sodiumContent": str,
"proteinContent": str,
"fatContent": str,
"saturatedFatContent": str,
"monunsaturatedFatContent": str,
"polyunsaturatedFatContent": str,
"unsaturatedFatContent": str,
"transFatContent": str,
},
)
Recipe = TypedDict(
"Recipe",
{
"@context": Literal["https://schema.org"],
"@type": Literal["Recipe"],
"author": str,
"org_url": str,
"name": str,
"recipeYield": str,
"recipeIngredient": List[str],
"nutrition": NutritionInformation,
"recipeInstructions": str,
"tags": List[str],
},
)
|
40fd824b2f3b11f11f607939722471fc2578fa86
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/sa/profiles/DCN/DCWL/ping.py
|
5d584ef13d5569f473d8b06564922fe073befecc
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,269
|
py
|
ping.py
|
# ---------------------------------------------------------------------
# DCN.DCWL.ping
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.iping import IPing
class Script(BaseScript):
name = "DCN.DCWL.ping"
interface = IPing
rx_result = re.compile(
r"^(?P<count>\d+) packets transmitted, (?P<success>\d+) "
r"(packets received|received),(?:\s|\s\S+ (errors|duplicates), )\d+% packet loss$",
re.MULTILINE,
)
rx_stat = re.compile(
r"^round-trip min/avg/max = (?P<min>.+)/(?P<avg>.+)/(?P<max>.+)\s.", re.MULTILINE
)
rx_count = re.compile(r"^\d+ bytes from \d\S+\d+: seq=(\d+) ttl=\d+ time=\S+ ms", re.MULTILINE)
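# Illustrative lines the patterns above are meant to match (assumed output format):
#   rx_result: "5 packets transmitted, 5 received, 0% packet loss"
#   rx_stat:   "round-trip min/avg/max = 15.265/15.355/15.436 ms"
#   rx_count:  "64 bytes from 10.0.0.1: seq=0 ttl=61 time=15.436 ms"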
def execute(self, address, count=None, source_address=None, size=None, df=None):
if count is None:
count = 5
cmd = "ping %s -c %d" % (address, int(count))
if size:
cmd += " -s %d" % int(size)
if source_address:
cmd += " -I %s" % source_address
result = None
ping = ""  # guard against the CLI call raising before `ping` is assigned
try:
ping = self.cli(cmd, ignore_errors=True)
result = self.rx_result.search(ping)
except self.CLISyntaxError:
pass
"""
Workaround for this incident
PING 10.218.217.227 (10.218.217.227): 56 data bytes
64 bytes from 10.218.217.227: seq=0 ttl=61 time=15.436 ms
64 bytes from 10.218.217.227: seq=1 ttl=61 time=15.265 ms
64 bytes from 10.218.217.227: seq=2 ttl=61 time=15.365 ms
ping: sendto: Network is unreachable
Invalid command.
"""
if not result and "Network is unreachable" in ping:
result = self.rx_count.findall(ping)
return {"success": len(result), "count": count}
r = {"success": result.group("success"), "count": result.group("count")}
stat = self.rx_stat.search(ping)
if stat:
r.update({"min": stat.group("min"), "avg": stat.group("avg"), "max": stat.group("max")})
return r
|
e5f5d3337bd825acf6792735329a0242f344cd9f
|
21f35d6b81c94bd1ed07b923482c1a9e17423d4c
|
/sphinx_gallery/tests/tinybuild/examples/plot_numpy_matplotlib.py
|
5c37908ba2e6c5f465435136be1605fb645450b2
|
[] |
permissive
|
sphinx-gallery/sphinx-gallery
|
06378c2ecedf9cb306b3958327a1ba294c8d6725
|
4e298a6ccee1c4ff8b33cd65371127118f626032
|
refs/heads/master
| 2023-08-17T06:30:59.195322
| 2023-08-15T16:00:25
| 2023-08-15T16:00:25
| 25,860,190
| 382
| 235
|
BSD-3-Clause
| 2023-09-12T15:29:13
| 2014-10-28T08:41:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
plot_numpy_matplotlib.py
|
"""
======================
Link to other packages
======================
Use :mod:`sphinx_gallery` to link to other packages, like
:mod:`numpy`, :mod:`matplotlib.colors`, and :mod:`matplotlib.pyplot`.
FYI this gallery uses :obj:`sphinx_gallery.sorting.FileNameSortKey`.
"""
from warnings import warn
import numpy as np
from matplotlib.colors import is_color_like
from matplotlib.figure import Figure
from itertools import compress # noqa
import matplotlib
import matplotlib.pyplot as plt
import sphinx_gallery.backreferences
from local_module import N # N = 1000
t = np.arange(N) / float(N)
win = np.hanning(N)
print(is_color_like("r"))
fig, ax = plt.subplots()
ax.plot(t, win, color="r")
ax.text(0, 1, "png", size=40, va="top")
fig.tight_layout()
orig_dpi = 80.0 if matplotlib.__version__[0] < "2" else 100.0
assert plt.rcParams["figure.dpi"] == orig_dpi
plt.rcParams["figure.dpi"] = 70.0
assert plt.rcParams["figure.dpi"] == 70.0
listy = [0, 1]
compress("abc", [0, 0, 1])
warn("This warning should show up in the output", RuntimeWarning)
x = Figure() # plt.Figure should be decorated (class), x shouldn't (inst)
# nested resolution resolves to numpy.random.mtrand.RandomState:
rng = np.random.RandomState(0)
# test Issue 583
sphinx_gallery.backreferences.identify_names(
[("text", "Text block", 1)],
sphinx_gallery.backreferences._make_ref_regex({"default_role": None}),
)
# 583: methods don't link properly
dc = sphinx_gallery.backreferences.DummyClass()
dc.run()
print(dc.prop)
|
bd6f647615de9a21f860b6e419883ccc19958779
|
09a6d8dbad5b92f93791948b5bf9b75f5cb2e5ce
|
/tests/qinfo/test_reduced_dm.py
|
da76fd5a256132c14aedfc5f14ab7f1885977164
|
[
"Apache-2.0"
] |
permissive
|
PennyLaneAI/pennylane
|
458efd5d9457e90ada31ca2ef0fb6bb96a24e9a7
|
0843183ff15a013c2622af5e61fea431d18076d3
|
refs/heads/master
| 2023-09-03T17:00:43.105784
| 2023-09-01T16:15:07
| 2023-09-01T16:15:07
| 129,936,360
| 1,431
| 410
|
Apache-2.0
| 2023-09-14T21:30:56
| 2018-04-17T16:45:42
|
Python
|
UTF-8
|
Python
| false
| false
| 7,855
|
py
|
test_reduced_dm.py
|
# Copyright 2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the (reduced) density matrix transform."""
# pylint: disable=too-many-arguments
import pytest
import pennylane as qml
from pennylane import numpy as np
pytestmark = pytest.mark.all_interfaces
tf = pytest.importorskip("tensorflow", minversion="2.1")
torch = pytest.importorskip("torch")
jax = pytest.importorskip("jax")
angle_values = [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4, np.pi]
devices = [
"default.qubit",
"default.mixed",
]
interfaces = [
"autograd",
"torch",
"tensorflow",
"jax",
]
wires_list = [[0], [1], [0, 1], [1, 0]]
class TestDensityMatrixQNode:
"""Tests for the (reduced) density matrix for QNodes returning states."""
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("interface", interfaces)
@pytest.mark.parametrize("angle", angle_values)
@pytest.mark.parametrize("wires", wires_list)
def test_density_matrix_from_qnode(self, device, wires, angle, interface, tol):
"""Test the density matrix from matrix for single wires."""
dev = qml.device(device, wires=2)
@qml.qnode(dev, interface=interface)
def circuit(x):
qml.PauliX(0)
qml.IsingXX(x, wires=[0, 1])
return qml.state()
density_matrix = qml.qinfo.reduced_dm(circuit, wires=wires)(angle)
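# Sanity note (illustrative): the circuit prepares cos(x/2)|10> - i*sin(x/2)|01>,
# so tracing out either qubit leaves a diagonal 2x2 reduced density matrix,
# which is what expected_density_matrix returns for the single-wire cases.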
def expected_density_matrix(x, wires):
if wires == [0]:
return [[np.sin(x / 2) ** 2, 0], [0, np.cos(x / 2) ** 2]]
if wires == [1]:
return [[np.cos(x / 2) ** 2, 0], [0, np.sin(x / 2) ** 2]]
if wires == [0, 1]:
return [
[0, 0, 0, 0],
[0, np.sin(x / 2) ** 2, 0.0 - np.cos(x / 2) * np.sin(x / 2) * 1j, 0],
[0, 0.0 + np.cos(x / 2) * np.sin(x / 2) * 1j, np.cos(x / 2) ** 2, 0],
[0, 0, 0, 0],
]
if wires == [1, 0]:
return [
[0, 0, 0, 0],
[0, np.cos(x / 2) ** 2, 0.0 + np.cos(x / 2) * np.sin(x / 2) * 1j, 0],
[0, 0.0 - np.cos(x / 2) * np.sin(x / 2) * 1j, np.sin(x / 2) ** 2, 0],
[0, 0, 0, 0],
]
return None
assert np.allclose(expected_density_matrix(angle, wires), density_matrix, atol=tol, rtol=0)
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("angle", angle_values)
def test_density_matrix_wire_labels(self, device, angle, tol):
"""Test that density matrix is correct with custom wire labels"""
wires = ["a", 8]
dev = qml.device(device, wires=wires)
@qml.qnode(dev)
def circuit(x):
qml.PauliX(wires=wires[0])
qml.IsingXX(x, wires=wires)
return qml.state()
dm0 = qml.qinfo.reduced_dm(circuit, wires=[wires[0]])(angle)
dm1 = qml.qinfo.reduced_dm(circuit, wires=[wires[1]])(angle)
exp0 = np.array([[np.sin(angle / 2) ** 2, 0], [0, np.cos(angle / 2) ** 2]])
exp1 = np.array([[np.cos(angle / 2) ** 2, 0], [0, np.sin(angle / 2) ** 2]])
assert np.allclose(exp0, dm0, atol=tol)
assert np.allclose(exp1, dm1, atol=tol)
def test_qnode_not_returning_state(self):
"""Test that the QNode of reduced_dm function must return state."""
dev = qml.device("default.qubit", wires=1)
@qml.qnode(dev)
def circuit():
qml.RZ(0, wires=[0])
return qml.expval(qml.PauliX(wires=0))
with pytest.raises(ValueError, match="The qfunc measurement needs to be State"):
qml.qinfo.reduced_dm(circuit, wires=[0])()
def test_density_matrix_qnode_jax_jit(self, tol):
"""Test reduced_dm jitting for QNode."""
import jax.numpy as jnp
from jax import jit
angle = jnp.array(0.1)
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev, interface="jax-jit")
def circuit(x):
qml.IsingXX(x, wires=[0, 1])
return qml.state()
density_matrix = jit(qml.qinfo.reduced_dm(circuit, wires=[0]))(angle)
expected_density_matrix = [[np.cos(angle / 2) ** 2, 0], [0, np.sin(angle / 2) ** 2]]
assert np.allclose(density_matrix, expected_density_matrix, atol=tol, rtol=0)
def test_density_matrix_qnode_tf_jit(self):
"""Test jitting the density matrix from state vector function with Tf."""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev, interface="tf")
def circuit(x):
qml.IsingXX(x, wires=[0, 1])
return qml.state()
density_matrix = tf.function(
qml.qinfo.reduced_dm(circuit, wires=[0]),
jit_compile=True,
input_signature=(tf.TensorSpec(shape=(), dtype=tf.float32),),
)
density_matrix = density_matrix(tf.Variable(0.0, dtype=tf.float32))
assert np.allclose(density_matrix, [[1, 0], [0, 0]])
c_dtypes = [np.complex64, np.complex128]
@pytest.mark.parametrize("c_dtype", c_dtypes)
@pytest.mark.parametrize("wires", wires_list)
def test_density_matrix_c_dtype(self, wires, c_dtype):
"""Test different complex dtype."""
dev = qml.device("default.qubit", wires=2, c_dtype=c_dtype)
@qml.qnode(dev, diff_method=None)
def circuit(x):
qml.IsingXX(x, wires=[0, 1])
return qml.state()
density_matrix = qml.qinfo.reduced_dm(circuit, wires=wires)(0.5)
assert density_matrix.dtype == c_dtype
class TestBroadcasting:
"""Test that the reduced_dm transform supports broadcasting"""
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("interface", interfaces)
def test_sv_broadcast(self, device, interface, tol):
"""Test that broadcasting works for circuits returning state vectors"""
dev = qml.device(device, wires=2)
@qml.qnode(dev, interface=interface)
def circuit(x):
qml.PauliX(0)
qml.IsingXX(x, wires=[0, 1])
return qml.state()
x = qml.math.asarray([0.4, 0.6, 0.8], like=interface)
density_matrix = qml.qinfo.reduced_dm(circuit, wires=[0])(x)
expected = np.zeros((3, 2, 2))
expected[:, 0, 0] = np.sin(x / 2) ** 2
expected[:, 1, 1] = np.cos(x / 2) ** 2
assert qml.math.allclose(expected, density_matrix, atol=tol)
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("interface", interfaces)
def test_dm_broadcast(self, device, interface, tol):
"""Test that broadcasting works for circuits returning density matrices"""
dev = qml.device(device, wires=2)
@qml.qnode(dev, interface=interface)
def circuit(x):
qml.PauliX(0)
qml.IsingXX(x, wires=[0, 1])
return qml.density_matrix(wires=[0, 1])
x = qml.math.asarray([0.4, 0.6, 0.8], like=interface)
density_matrix = qml.qinfo.reduced_dm(circuit, wires=[0])(x)
expected = np.zeros((3, 2, 2))
expected[:, 0, 0] = np.sin(x / 2) ** 2
expected[:, 1, 1] = np.cos(x / 2) ** 2
assert qml.math.allclose(expected, density_matrix, atol=tol)
|
ca8938605410b7dbb8bd99f505c9f702ebcd1323
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/completion/notImportedQualifiedName/UseImportPriorityWhenAddingImport/main.after.py
|
768d86d912199c069a8fba86d93933fd628608d4
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
main.after.py
|
import subprocess
import sys
import django.conf
import django.utils.encoding
import matplotlib.pyplot as plt
subprocess.Popen
sys.argv
plt.func()
|
be9b250199a3dc2b257edc7aeeaec89c413cbc53
|
753cd066a9bd26b6c37c8d53a86c7a9c659ec18c
|
/tutorials/tutorials/pytorch/efficient_data_loading/tests/test_tuto_data_loading.py
|
f52cc2ee3bbacc437ffde0130935d0f6048b96f5
|
[
"MIT"
] |
permissive
|
graphcore/examples
|
ac872015808ed2a913d4d7bf0d63202ce15ebbae
|
e2f834dd60e7939672c1795b4ac62e89ad0bca49
|
refs/heads/master
| 2023-08-05T02:08:12.341836
| 2023-07-27T11:13:10
| 2023-07-27T11:13:10
| 143,977,106
| 311
| 80
|
MIT
| 2023-09-11T16:42:56
| 2018-08-08T07:29:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,530
|
py
|
test_tuto_data_loading.py
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
from pathlib import Path
import pytest
from tutorials_tests import testing_util
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
working_path = Path(__file__).parent.parent
@pytest.mark.category2
@pytest.mark.ipus(1)
def test_run_default_ipu():
# Check default params
testing_util.run_command("python tuto_data_loading.py", working_path, "IPU throughput")
@pytest.mark.category1
@pytest.mark.ipus(1)
def test_run_synthetic_ipu():
# Check synthetic data params
testing_util.run_command(
"python tuto_data_loading.py --synthetic-data",
working_path,
"IPU throughput",
)
@pytest.mark.category2
@pytest.mark.ipus(2)
def test_run_replication_ipu():
# Check replication
testing_util.run_command("python tuto_data_loading.py --replicas 2", working_path, "IPU throughput")
@pytest.mark.category1
@pytest.mark.ipus(2)
def test_run_replication_synthetic_ipu():
# Check synthetic data with replication
testing_util.run_command(
"python tuto_data_loading.py --replicas 2 --synthetic-data",
working_path,
"IPU throughput",
)
@pytest.mark.ipus(4)
@pytest.mark.category1
def test_notebook():
notebook_filename = working_path / "walkthrough.ipynb"
with open(notebook_filename) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
ep.preprocess(nb, {"metadata": {"path": f"{working_path}"}})
|
db54daf48ef545ab4ed13c88b39c333e3d6fc257
|
9ed3b16b3da72e4c47a04f2f2e3ef395e9fd9f20
|
/contrib/clucene/update.py
|
ad95e537560031502e0602d132fc0666849ab3c6
|
[
"BSD-2-Clause"
] |
permissive
|
chimera-linux/cports
|
fdae59dc25856942be3041e10e3533dbf8f883c3
|
714680161cd719dd047452c95fbb9b447bc23a86
|
refs/heads/master
| 2023-09-03T19:30:40.720670
| 2023-09-03T15:07:40
| 2023-09-03T15:07:40
| 374,000,317
| 118
| 37
|
BSD-2-Clause
| 2023-09-14T20:31:08
| 2021-06-05T02:07:34
|
Python
|
UTF-8
|
Python
| false
| false
| 39
|
py
|
update.py
|
pattern = r"clucene-core-([\d.]+).tar"
|
541566589f7efddcd0f5a7aa42e50c8c30240313
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/net/py-python-twitter/files/patch-setup.py
|
81702979369ecb98b5786362736463e1e5636fcf
|
[
"BSD-2-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
patch-setup.py
|
--- setup.py.orig 2016-12-23 00:11:09 UTC
+++ setup.py
@@ -57,7 +57,6 @@ setup(
packages=find_packages(exclude=('tests', 'docs')),
platforms=['Any'],
install_requires=['future', 'requests', 'requests-oauthlib'],
- setup_requires=['pytest-runner'],
tests_require=['pytest'],
keywords='twitter api',
classifiers=[
|
d545e16c28150003ee82cbb5a3d45f63d8978b50
|
dcbef06d5a00f07756339b9e62c684dec2fee425
|
/tests/PyPI-pytest/run_all.py
|
3e97addf752558660f29cdf2fa4d614e30d50c6d
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Nuitka/Nuitka
|
f9543d8d95bfa0b81d4e60af0dfad99fb72893a4
|
d87faf2f7e1d6ed9bfe4cf8c1d648f34307e33f2
|
refs/heads/develop
| 2023-08-28T14:00:32.861328
| 2023-08-27T09:16:45
| 2023-08-27T09:16:45
| 9,626,741
| 8,573
| 599
|
Apache-2.0
| 2023-09-13T02:49:41
| 2013-04-23T15:40:33
|
Python
|
UTF-8
|
Python
| false
| false
| 11,141
|
py
|
run_all.py
|
#!/usr/bin/env python
# Copyright 2023, Tommy Li, mailto:<tommyli3318@gmail.com>
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Runner for PyPI Pytest comparison
This script automates comparing the pytest results of a Nuitka-compiled wheel
using `python setup.py bdist_nuitka` to the pytest results of an uncompiled wheel
built using `python setup.py bdist_wheel` for the most popular PyPI packages.
Testing is done to ensure that nuitka is building the wheel correctly. If the
pytests pass/fail in the same way, that means Nuitka built the wheel properly.
If the tests differ, something is wrong.
Virtualenv is used to create a clean environment with no outside pollution.
"""
import json
import os
import sys
# Find nuitka package relative to us.
sys.path.insert(
0,
os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..")
),
)
# isort:start
import nuitka
from nuitka.tools.environments.Virtualenv import withVirtualenv
from nuitka.tools.testing.Common import (
createSearchMode,
my_print,
reportSkip,
setup,
)
from nuitka.tools.testing.OutputComparison import compareOutput
from nuitka.utils.AppDirs import getCacheDir
from nuitka.utils.FileOperations import getFileContents
def executeCommand(command):
my_print("Executing:", command, style="blue")
return os.system(command) == 0
def gitClone(package, url, directory):
"""
Update package with git if already existing in directory
else git clone the package into directory
"""
os.chdir(directory)
if not executeCommand(
"cd %s && git fetch -q && git reset -q --hard origin && git clean -q -dfx"
% package
):
assert executeCommand(
"git clone %s %s --depth 1 --single-branch --no-tags" % (url, package)
), ("Error while git cloning package %s, aborting..." % package)
def main():
# pylint: disable=broad-except,too-many-branches,too-many-locals,too-many-statements
setup(suite="pypi")
# cache_dir is where the git clones are cached
cache_dir = os.path.join(getCacheDir(), "pypi-git-clones")
base_dir = os.getcwd()
if not os.path.isdir(cache_dir):
os.mkdir(cache_dir)
search_mode = createSearchMode()
results = []
# load json
packages = json.loads(getFileContents("packages.json"))
for package_name, details in sorted(packages.items()):
active = search_mode.consider(dirname=None, filename=package_name)
if not active:
continue
if str is not bytes:
# running on python3
if package_name in ("futures", "future"):
reportSkip("Does not run on Python3", ".", package_name)
continue
if os.name == "nt":
if package_name in ("cryptography",):
reportSkip("Not working on Windows", ".", package_name)
continue
if package_name == "pyyaml":
reportSkip("Not yet supported, see Issue #476", ".", package_name)
continue
if package_name in ("pycparser", "numpy"):
reportSkip("Not yet supported, see Issue #477", ".", package_name)
continue
if package_name in (
"google-auth", # bdist_nuitka fails AttributeError: single_version_externally_managed
"jinja2", # ModuleNotFoundError: No module named 'jinja2.tests'
"pandas", # ModuleNotFoundError: No module named 'Cython'
"pytz", # need to 'make build'
"rsa", # Now uses Poetry (no setup.py)
):
continue
package_dir = os.path.join(cache_dir, package_name)
try:
gitClone(package_name, details["url"], cache_dir)
os.chdir(base_dir)
with withVirtualenv(
"venv_%s" % package_name, delete=False, style="blue"
) as venv:
dist_dir = os.path.join(package_dir, "dist")
# delete ignored tests if any
if details["ignored_tests"]:
for test in details["ignored_tests"]:
venv.runCommand("rm -rf %s" % os.path.join(package_dir, test))
# setup for pytest
cmds = [
"python -m pip install pytest",
"cd %s" % os.path.join(os.path.dirname(nuitka.__file__), ".."),
"python setup.py develop",
"cd %s" % package_dir,
]
if details["requirements_file"]:
cmds.append(
"python -m pip install -r %s" % details["requirements_file"]
)
if details.get("extra_commands"):
cmds += details["extra_commands"]
# build uncompiled .whl
cmds.append("python setup.py bdist_wheel")
venv.runCommand(commands=cmds)
# install and print out if the active .whl is compiled or not
venv.runCommand(
commands=[
"python -m pip install -U %s"
% os.path.join(dist_dir, os.listdir(dist_dir)[0]),
# use triple quotes for linux
"""python -c "print(getattr(__import__('%s'),'__compiled__','__uncompiled_version__'))" """
% details.get("package_name", package_name),
]
)
# get uncompiled pytest results
uncompiled_stdout, uncompiled_stderr = venv.runCommandWithOutput(
commands=[
"cd %s" % package_dir,
"python -m pytest --disable-warnings",
],
style="blue",
)
# clean up before building compiled .whl
cmds = ["cd %s" % package_dir, "git clean -dfx"]
if details.get("extra_commands"):
cmds += details["extra_commands"]
# build nuitka compiled .whl
cmds.append("python setup.py bdist_nuitka")
venv.runCommand(commands=cmds)
# install and print out if the active .whl is compiled or not
venv.runCommand(
commands=[
"python -m pip install -U %s"
% os.path.join(dist_dir, os.listdir(dist_dir)[0]),
# use triple quotes for linux
"""python -c "print(getattr(__import__('%s'),'__compiled__','__uncompiled_version__'))" """
% details.get("package_name", package_name),
]
)
# get compiled pytest results, may fail some tests.
(
compiled_stdout,
compiled_stderr,
_exit_code,
) = venv.runCommandWithOutput(
commands=[
"cd %s" % package_dir,
"python -m pytest --disable-warnings",
],
style="blue",
)
venv.runCommand(commands=["cd %s" % package_dir, "git clean -q -dfx"])
except Exception as e:
my_print(
"Package",
package_name,
"ran into an exception during execution, traceback: ",
)
my_print(e)
results.append((package_name, "ERROR", "ERROR"))
continue
# compare outputs
stdout_diff = compareOutput(
"stdout",
uncompiled_stdout,
compiled_stdout,
ignore_warnings=True,
syntax_errors=True,
)
stderr_diff = compareOutput(
"stderr",
uncompiled_stderr,
compiled_stderr,
ignore_warnings=True,
syntax_errors=True,
)
results.append((package_name, stdout_diff, stderr_diff))
exit_code = stdout_diff or stderr_diff
my_print(
"\n=================================================================================",
"\n--- %s ---" % package_name,
"exit_stdout:",
stdout_diff,
"exit_stderr:",
stderr_diff,
"\nError, outputs differed for package %s." % package_name
if exit_code
else "\nNo differences found for package %s." % package_name,
"\n=================================================================================\n",
style="red" if exit_code else "green",
)
if exit_code != 0 and search_mode.abortOnFinding(
dirname=None, filename=package_name
):
break
search_mode.finish()
# give a summary of all packages
my_print(
"\n\n=====================================SUMMARY=====================================",
style="yellow",
)
for package_name, stdout_diff, stderr_diff in results:
my_print(
package_name,
"-",
end=" ",
style="red" if (stdout_diff or stderr_diff) else "green",
)
my_print(
"stdout:", stdout_diff, end=" ", style="red" if stdout_diff else "green"
)
my_print(
"stderr:", stderr_diff, end="", style="red" if stderr_diff else "green"
)
my_print(
"\n---------------------------------------------------------------------------------"
)
my_print("TOTAL NUMBER OF PACKAGES TESTED: %s" % len(results), style="yellow")
num_failed = 0
num_errors = 0
# tally the number of errors and failed
for _, y, z in results:
if type(y) is str:
# this means the package ran into an exception
num_errors += 1
elif y or z:
num_failed += 1
my_print(
"TOTAL PASSED: %s" % (len(results) - num_failed - num_errors), style="green"
)
my_print("TOTAL FAILED (differences): %s" % num_failed, style="red")
my_print("TOTAL ERRORS (exceptions): %s" % num_errors, style="red")
if __name__ == "__main__":
main()
|
762358d740041add7f8078db3fb76eab6af6b7dc
|
020cfb93254c0e85a4e6eb406fd8c34d686cbfad
|
/gubernator/main_test.py
|
01603ac5f1a9357a86349c822523466d28f8c625
|
[
"Apache-2.0"
] |
permissive
|
kubernetes/test-infra
|
4108be9a19b1b95ce13a57a584886ca7df56eb57
|
7a9e69f0550788b4192a8d20259c56b79dee44a3
|
refs/heads/master
| 2023-09-04T06:27:37.988969
| 2023-09-04T04:03:48
| 2023-09-04T04:03:48
| 57,333,709
| 3,886
| 2,715
|
Apache-2.0
| 2023-09-14T20:44:33
| 2016-04-28T21:05:35
|
Go
|
UTF-8
|
Python
| false
| false
| 9,151
|
py
|
main_test.py
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
To run these tests:
$ pip install webtest nosegae
$ nosetests --with-gae --gae-lib-root ~/google_appengine/
"""
import unittest
import webtest
import cloudstorage as gcs
import main
import gcs_async
import gcs_async_test
write = gcs_async_test.write
app = webtest.TestApp(main.app)
JUNIT_SUITE = """<testsuite tests="8" failures="0" time="1000.24">
<testcase name="First" classname="Example e2e suite" time="0">
<skipped/>
</testcase>
<testcase name="Second" classname="Example e2e suite" time="36.49"/>
<testcase name="Third" classname="Example e2e suite" time="96.49">
<failure>/go/src/k8s.io/kubernetes/test.go:123
Error Goes Here</failure>
</testcase>
</testsuite>"""
def init_build(build_dir, started=True, finished=True,
finished_has_version=False):
"""Create faked files for a build."""
start_json = {'timestamp': 1406535800}
finish_json = {'passed': True, 'result': 'SUCCESS', 'timestamp': 1406536800}
(finish_json if finished_has_version else start_json)['revision'] = 'v1+56'
if started:
write(build_dir + 'started.json', start_json)
if finished:
write(build_dir + 'finished.json', finish_json)
write(build_dir + 'artifacts/junit_01.xml', JUNIT_SUITE)
class TestBase(unittest.TestCase):
def init_stubs(self):
self.testbed.init_memcache_stub()
self.testbed.init_app_identity_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_blobstore_stub()
self.testbed.init_datastore_v3_stub()
self.testbed.init_app_identity_stub()
# redirect GCS calls to the local proxy
gcs_async.GCS_API_URL = gcs.common.local_api_url()
class AppTest(TestBase):
# pylint: disable=too-many-public-methods
BUILD_DIR = '/kubernetes-jenkins/logs/somejob/1234/'
def setUp(self):
self.init_stubs()
init_build(self.BUILD_DIR)
def test_index(self):
"""Test that the index works."""
response = app.get('/')
self.assertIn('kubernetes-e2e-gce', response)
def test_nodelog_missing_files(self):
"""Test that a missing all files gives a 404."""
build_dir = self.BUILD_DIR + 'nodelog?pod=abc'
response = app.get('/build' + build_dir, status=404)
self.assertIn('Unable to find', response)
def test_nodelog_kubelet(self):
"""Test for a kubelet file with junit file.
- missing the default kube-apiserver"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
response = app.get('/build' + nodelog_url)
self.assertIn("Wrap line", response)
def test_nodelog_apiserver(self):
"""Test for default apiserver file
- no kubelet file to find objrefdict
- no file with junit file"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log',
'apiserver pod abc\n')
response = app.get('/build' + nodelog_url)
self.assertIn("Wrap line", response)
def test_nodelog_no_junit(self):
"""Test for when no junit in same folder
- multiple folders"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log',
'apiserver pod abc\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-2/kube-apiserver.log',
'apiserver pod abc\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
response = app.get('/build' + nodelog_url)
self.assertIn("tmp-node-2", response)
def test_nodelog_no_junit_apiserver(self):
"""Test for when no junit in same folder
- multiple folders
- no kube-apiserver.log"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/docker.log',
'Containers\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-2/kubelet.log',
'apiserver pod abc\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
response = app.get('/build' + nodelog_url)
self.assertIn("tmp-node-2", response)
def test_no_failed_pod(self):
"""Test that filtering page still loads when no failed pod name is given"""
nodelog_url = self.BUILD_DIR + 'nodelog?junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"} failed)\n')
response = app.get('/build' + nodelog_url)
self.assertIn("Wrap line", response)
def test_parse_by_timestamp(self):
"""Test parse_by_timestamp and get_woven_logs
- Weave separate logs together by timestamp
- Check that lines without timestamp are combined
- Test different timestamp formats"""
kubelet_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log'
kubeapi_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log'
query_string = 'nodelog?pod=abc&junit=junit_01.xml&weave=on&logfiles=%s&logfiles=%s' % (
kubelet_filepath, kubeapi_filepath)
nodelog_url = self.BUILD_DIR + query_string
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(kubelet_filepath,
'abc\n0101 01:01:01.001 Event(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
write(kubeapi_filepath,
'0101 01:01:01.000 kubeapi\n0101 01:01:01.002 pod\n01-01T01:01:01.005Z last line')
expected = ('0101 01:01:01.000 kubeapi\n'
'<span class="highlight">abc0101 01:01:01.001 Event(api.ObjectReference{Name:'
'"<span class="keyword">abc</span>", UID:"podabc"})</span>\n'
'0101 01:01:01.002 pod\n'
'01-01T01:01:01.005Z last line')
response = app.get('/build' + nodelog_url)
print(response)
self.assertIn(expected, response)
def test_timestamp_no_apiserver(self):
"""Test parse_by_timestamp and get_woven_logs without an apiserver file
- Weave separate logs together by timestamp
- Check that lines without timestamp are combined
- Test different timestamp formats
- no kube-apiserver.log"""
kubelet_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log'
proxy_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kube-proxy.log'
query_string = 'nodelog?pod=abc&junit=junit_01.xml&weave=on&logfiles=%s&logfiles=%s' % (
kubelet_filepath, proxy_filepath)
nodelog_url = self.BUILD_DIR + query_string
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(kubelet_filepath,
'abc\n0101 01:01:01.001 Event(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
write(proxy_filepath,
'0101 01:01:01.000 proxy\n0101 01:01:01.002 pod\n01-01T01:01:01.005Z last line')
expected = ('0101 01:01:01.000 proxy\n'
'<span class="highlight">abc0101 01:01:01.001 Event(api.ObjectReference{Name:'
'"<span class="keyword">abc</span>", UID:"podabc"})</span>\n'
'0101 01:01:01.002 pod\n'
'01-01T01:01:01.005Z last line')
response = app.get('/build' + nodelog_url)
self.assertIn(expected, response)
|
e5c8a942b01727b298d2e340e0294553d6e5ae21
|
67cc5db4593e2cdd109e589e13fb07074bcff5d9
|
/tests/numpy/npdot_test.py
|
7fcd1805f30c6cb05bf13411ea1071f6fe5fdf6e
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
spcl/dace
|
39849b1488e8f59f880fc0e2572687556c51847d
|
c5ca99ad37e7ceef6da71026c3c8bb579f64117f
|
refs/heads/master
| 2023-08-31T10:45:09.480018
| 2023-08-30T06:05:10
| 2023-08-30T06:05:10
| 172,703,996
| 402
| 114
|
BSD-3-Clause
| 2023-09-14T15:18:29
| 2019-02-26T12:05:50
|
Python
|
UTF-8
|
Python
| false
| false
| 341
|
py
|
npdot_test.py
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
import pytest
from common import compare_numpy_output
@compare_numpy_output(check_dtype=True)
def test_dot_simple(A: dace.float32[10], B: dace.float32[10]):
return np.dot(A, B)
if __name__ == "__main__":
test_dot_simple()
|
2db5c90d6e9ff5333e73a4ac7a1815011776e4b7
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/wpt_tools/wpt/tools/third_party/pywebsocket3/mod_pywebsocket/handshake/base.py
|
ffad0614d6bc42fdf75c2ee4885212304dcbcb90
|
[
"BSD-3-Clause",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 14,521
|
py
|
base.py
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Common functions and exceptions used by WebSocket opening handshake
processors.
"""
from __future__ import absolute_import
from mod_pywebsocket import common
from mod_pywebsocket import http_header_util
from mod_pywebsocket.extensions import get_extension_processor
from mod_pywebsocket.stream import StreamOptions
from mod_pywebsocket.stream import Stream
from mod_pywebsocket import util
from six.moves import map
from six.moves import range
# Defining aliases for values used frequently.
_VERSION_LATEST = common.VERSION_HYBI_LATEST
_VERSION_LATEST_STRING = str(_VERSION_LATEST)
_SUPPORTED_VERSIONS = [
_VERSION_LATEST,
]
class AbortedByUserException(Exception):
"""Exception for aborting a connection intentionally.
If this exception is raised in do_extra_handshake handler, the connection
will be abandoned. No other WebSocket or HTTP(S) handler will be invoked.
If this exception is raised in transfer_data_handler, the connection will
be closed without closing handshake. No other WebSocket or HTTP(S) handler
will be invoked.
"""
pass
class HandshakeException(Exception):
"""This exception will be raised when an error occurred while processing
WebSocket initial handshake.
"""
def __init__(self, name, status=None):
super(HandshakeException, self).__init__(name)
self.status = status
class VersionException(Exception):
"""This exception will be raised when a version of client request does not
match with version the server supports.
"""
def __init__(self, name, supported_versions=''):
"""Construct an instance.
Args:
supported_versions: a str object to show supported hybi versions.
(e.g. '13')
"""
super(VersionException, self).__init__(name)
self.supported_versions = supported_versions
def get_default_port(is_secure):
if is_secure:
return common.DEFAULT_WEB_SOCKET_SECURE_PORT
else:
return common.DEFAULT_WEB_SOCKET_PORT
def validate_subprotocol(subprotocol):
"""Validate a value in the Sec-WebSocket-Protocol field.
See the Section 4.1., 4.2.2., and 4.3. of RFC 6455.
"""
if not subprotocol:
raise HandshakeException('Invalid subprotocol name: empty')
# Parameter should be encoded HTTP token.
state = http_header_util.ParsingState(subprotocol)
token = http_header_util.consume_token(state)
rest = http_header_util.peek(state)
# If |rest| is not None, |subprotocol| is not one token or invalid. If
# |rest| is None, |token| must not be None because |subprotocol| is
# concatenation of |token| and |rest| and is not None.
if rest is not None:
raise HandshakeException('Invalid non-token string in subprotocol '
'name: %r' % rest)
def parse_host_header(request):
fields = request.headers_in[common.HOST_HEADER].split(':', 1)
if len(fields) == 1:
return fields[0], get_default_port(request.is_https())
try:
return fields[0], int(fields[1])
except ValueError as e:
raise HandshakeException('Invalid port number format: %r' % e)
def get_mandatory_header(request, key):
value = request.headers_in.get(key)
if value is None:
raise HandshakeException('Header %s is not defined' % key)
return value
def validate_mandatory_header(request, key, expected_value, fail_status=None):
value = get_mandatory_header(request, key)
if value.lower() != expected_value.lower():
raise HandshakeException(
'Expected %r for header %s but found %r (case-insensitive)' %
(expected_value, key, value),
status=fail_status)
def parse_token_list(data):
"""Parses a header value which follows 1#token and returns parsed elements
as a list of strings.
Leading LWSes must be trimmed.
"""
state = http_header_util.ParsingState(data)
token_list = []
while True:
token = http_header_util.consume_token(state)
if token is not None:
token_list.append(token)
http_header_util.consume_lwses(state)
if http_header_util.peek(state) is None:
break
if not http_header_util.consume_string(state, ','):
raise HandshakeException('Expected a comma but found %r' %
http_header_util.peek(state))
http_header_util.consume_lwses(state)
if len(token_list) == 0:
raise HandshakeException('No valid token found')
return token_list
class HandshakerBase(object):
def __init__(self, request, dispatcher):
self._logger = util.get_class_logger(self)
self._request = request
self._dispatcher = dispatcher
""" subclasses must implement the five following methods """
def _protocol_rfc(self):
""" Return the name of the RFC that the handshake class is implementing.
"""
raise AssertionError("subclasses should implement this method")
def _transform_header(self, header):
"""
:param header: header name
transform the header name if needed. For example, HTTP/2 subclass will
return the name of the header in lower case.
"""
raise AssertionError("subclasses should implement this method")
def _validate_request(self):
""" validate that all the mandatory fields are set """
raise AssertionError("subclasses should implement this method")
def _set_accept(self):
""" Computes accept value based on Sec-WebSocket-Accept if needed. """
raise AssertionError("subclasses should implement this method")
def _send_handshake(self):
""" Prepare and send the response after it has been parsed and processed.
"""
raise AssertionError("subclasses should implement this method")
def do_handshake(self):
self._request.ws_close_code = None
self._request.ws_close_reason = None
# Parsing.
self._validate_request()
self._request.ws_resource = self._request.uri
self._request.ws_version = self._check_version()
try:
self._get_origin()
self._set_protocol()
self._parse_extensions()
self._set_accept()
self._logger.debug('Protocol version is ' + self._protocol_rfc())
# Setup extension processors.
self._request.ws_extension_processors = self._get_extension_processors_requested(
)
# List of extra headers. The extra handshake handler may add header
# data as name/value pairs to this list and pywebsocket appends
# them to the WebSocket handshake.
self._request.extra_headers = []
# Extra handshake handler may modify/remove processors.
self._dispatcher.do_extra_handshake(self._request)
stream_options = StreamOptions()
self._process_extensions(stream_options)
self._request.ws_stream = Stream(self._request, stream_options)
if self._request.ws_requested_protocols is not None:
if self._request.ws_protocol is None:
raise HandshakeException(
'do_extra_handshake must choose one subprotocol from '
'ws_requested_protocols and set it to ws_protocol')
validate_subprotocol(self._request.ws_protocol)
self._logger.debug('Subprotocol accepted: %r',
self._request.ws_protocol)
else:
if self._request.ws_protocol is not None:
raise HandshakeException(
'ws_protocol must be None when the client didn\'t '
'request any subprotocol')
self._send_handshake()
except HandshakeException as e:
if not e.status:
# Fallback to 400 bad request by default.
e.status = common.HTTP_STATUS_BAD_REQUEST
raise e
def _check_version(self):
sec_websocket_version_header = self._transform_header(
common.SEC_WEBSOCKET_VERSION_HEADER)
version = get_mandatory_header(self._request,
sec_websocket_version_header)
if version == _VERSION_LATEST_STRING:
return _VERSION_LATEST
if version.find(',') >= 0:
raise HandshakeException(
'Multiple versions (%r) are not allowed for header %s' %
(version, sec_websocket_version_header),
status=common.HTTP_STATUS_BAD_REQUEST)
raise VersionException('Unsupported version %r for header %s' %
(version, sec_websocket_version_header),
supported_versions=', '.join(
map(str, _SUPPORTED_VERSIONS)))
def _get_origin(self):
origin_header = self._transform_header(common.ORIGIN_HEADER)
origin = self._request.headers_in.get(origin_header)
if origin is None:
self._logger.debug('Client request does not have origin header')
self._request.ws_origin = origin
def _set_protocol(self):
self._request.ws_protocol = None
sec_websocket_protocol_header = self._transform_header(
common.SEC_WEBSOCKET_PROTOCOL_HEADER)
protocol_header = self._request.headers_in.get(
sec_websocket_protocol_header)
if protocol_header is None:
self._request.ws_requested_protocols = None
return
self._request.ws_requested_protocols = parse_token_list(
protocol_header)
self._logger.debug('Subprotocols requested: %r',
self._request.ws_requested_protocols)
def _parse_extensions(self):
sec_websocket_extensions_header = self._transform_header(
common.SEC_WEBSOCKET_EXTENSIONS_HEADER)
extensions_header = self._request.headers_in.get(
sec_websocket_extensions_header)
if not extensions_header:
self._request.ws_requested_extensions = None
return
try:
self._request.ws_requested_extensions = common.parse_extensions(
extensions_header)
except common.ExtensionParsingException as e:
raise HandshakeException(
'Failed to parse sec-websocket-extensions header: %r' % e)
self._logger.debug(
'Extensions requested: %r',
list(
map(common.ExtensionParameter.name,
self._request.ws_requested_extensions)))
def _get_extension_processors_requested(self):
processors = []
if self._request.ws_requested_extensions is not None:
for extension_request in self._request.ws_requested_extensions:
processor = get_extension_processor(extension_request)
# Unknown extension requests are just ignored.
if processor is not None:
processors.append(processor)
return processors
def _process_extensions(self, stream_options):
processors = [
processor for processor in self._request.ws_extension_processors
if processor is not None
]
        # Ask each processor if there are extensions on the request which
        # cannot co-exist. When a processor decides that other processors
        # cannot co-exist with it, it marks them (or itself) as "inactive".
        # The first extension processor has the right to make the final call.
for processor in reversed(processors):
if processor.is_active():
processor.check_consistency_with_other_processors(processors)
processors = [
processor for processor in processors if processor.is_active()
]
accepted_extensions = []
for index, processor in enumerate(processors):
if not processor.is_active():
continue
extension_response = processor.get_extension_response()
if extension_response is None:
# Rejected.
continue
accepted_extensions.append(extension_response)
processor.setup_stream_options(stream_options)
# Inactivate all of the following compression extensions.
for j in range(index + 1, len(processors)):
processors[j].set_active(False)
if len(accepted_extensions) > 0:
self._request.ws_extensions = accepted_extensions
self._logger.debug(
'Extensions accepted: %r',
list(map(common.ExtensionParameter.name, accepted_extensions)))
else:
self._request.ws_extensions = None
# vi:sts=4 sw=4 et
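# --- Editor's illustrative sketch (not part of pywebsocket) ---
# A hypothetical minimal HandshakerBase subclass, showing only which of the
# five abstract methods above a concrete handshaker has to provide. The
# bodies are placeholders, not the real RFC 6455 logic.
class _SketchHandshaker(HandshakerBase):
    def _protocol_rfc(self):
        return 'RFC 6455'
    def _transform_header(self, header):
        # HTTP/1.1 header lookup is case-insensitive, so the name is returned
        # unchanged; an HTTP/2 variant would lower-case it here.
        return header
    def _validate_request(self):
        # Check mandatory headers (e.g. via get_mandatory_header) and raise
        # HandshakeException on failure.
        pass
    def _set_accept(self):
        # Derive the Sec-WebSocket-Accept value from Sec-WebSocket-Key.
        pass
    def _send_handshake(self):
        # Write the 101 Switching Protocols response to the client.
        pass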
|
ab11877b82d031c2cb823edd45a30ccfe5891b9a
|
da1721d2783ea4d67ff4e73cee6eee71292f2ef7
|
/otp/ai/GarbageLeakServerEventAggregator.py
|
5a92d744ff1cce22075a36f0f035e8bcd29a5611
|
[
"BSD-3-Clause"
] |
permissive
|
open-toontown/open-toontown
|
bbdeb1b7bf0fb2861eba2df5483738c0112090ca
|
464c2d45f60551c31397bd03561582804e760b4a
|
refs/heads/develop
| 2023-07-07T01:34:31.959657
| 2023-05-30T23:49:10
| 2023-05-30T23:49:10
| 219,221,570
| 143
| 104
|
BSD-3-Clause
| 2023-09-11T09:52:34
| 2019-11-02T22:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,450
|
py
|
GarbageLeakServerEventAggregator.py
|
from direct.showbase.DirectObject import DirectObject
from direct.showbase import GarbageReport
class GarbageLeakServerEventAggregator(DirectObject):
def __init__(self, cr):
self.cr = cr
self._doLaterName = None
self._sentLeakDesc2num = {}
self._curLeakDesc2num = {}
self.accept(GarbageReport.GarbageCycleCountAnnounceEvent, self._handleCycleCounts)
return
def destroy(self):
self._stopSending()
self.ignoreAll()
del self.cr
def _handleCycleCounts(self, desc2num):
self._curLeakDesc2num = desc2num
self._startSending()
def _startSending(self):
if not self._doLaterName:
self._sendLeaks()
self._doLaterName = uniqueName('%s-sendGarbageLeakInfo' % self.__class__.__name__)
self.doMethodLater(60 * 60.0, self._sendLeaks, self._doLaterName)
def _stopSending(self):
if self._doLaterName:
self.removeTask(self._doLaterName)
self._doLaterName = None
return
def _sendLeaks(self, task = None):
for desc, curNum in self._curLeakDesc2num.items():
self._sentLeakDesc2num.setdefault(desc, 0)
num = curNum - self._sentLeakDesc2num[desc]
if num > 0:
base.cr.timeManager.d_setClientGarbageLeak(num, desc)
self._sentLeakDesc2num[desc] = curNum
if task:
return task.again
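# --- Editor's illustrative sketch (not part of the Toontown source) ---
# The delta logic used by _sendLeaks above, restated as a free function so it
# can be exercised without Panda3D. 'sent' and 'current' are hypothetical
# stand-ins for self._sentLeakDesc2num and self._curLeakDesc2num.
def _leak_deltas(sent, current):
    """Return {description: newly observed cycle count} for counts that grew."""
    deltas = {}
    for desc, cur_num in current.items():
        num = cur_num - sent.get(desc, 0)
        if num > 0:
            deltas[desc] = num
    return deltas
# Example: _leak_deltas({'cycleA': 2}, {'cycleA': 5, 'cycleB': 1}) returns
# {'cycleA': 3, 'cycleB': 1} -- only growth since the last report would be
# forwarded to the server via d_setClientGarbageLeak.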
|
ecdfc2da450f7c6630a58cd939aede901aaa730f
|
f21ce03617ded558b3cbff2b5fe5319505df0309
|
/src/packages/stylevar.py
|
00ca9649bd973dfed0a557238e28aa3638a117cb
|
[] |
no_license
|
BEEmod/BEE2.4
|
cabff349c8f734dbfec3aeaa8bcc0bca08529a9f
|
9f9219934b8f4af3c03d0080fad6078a18f3d530
|
refs/heads/master
| 2023-08-23T20:49:11.514180
| 2023-07-31T08:40:20
| 2023-07-31T08:40:20
| 22,973,515
| 276
| 85
| null | 2023-08-22T21:10:23
| 2014-08-15T00:54:22
|
Python
|
UTF-8
|
Python
| false
| false
| 4,438
|
py
|
stylevar.py
|
"""Style specific features which can be enabled or disabled."""
from __future__ import annotations
from typing import Iterator
from transtoken import TransToken, TransTokenSource
from packages import PakObject, Style, ParseData, ExportData
from srctools import Property, bool_as_int
class StyleVar(PakObject, allow_mult=True, needs_foreground=True):
"""Style specific features which can be enabled or disabled."""
def __init__(
self,
var_id: str,
name: TransToken,
styles: list[str],
desc: TransToken,
*,
unstyled: bool,
inherit: bool,
default: bool,
) -> None:
self.id = var_id
self.name = name
self.default = default
self.enabled = default
self.desc = desc
self.inherit = inherit
self.styles = None if unstyled else styles
@classmethod
def unstyled(cls, id: str, name: TransToken, default: bool, desc: TransToken) -> StyleVar:
"""For builtin variables, define it as fully unstyled."""
return cls(id, name, [], desc, unstyled=True, inherit=False, default=default)
@property
def is_unstyled(self) -> bool:
"""check if the variable is unstyled."""
return self.styles is None
@classmethod
async def parse(cls, data: ParseData) -> StyleVar:
"""Parse StyleVars from configs."""
name = TransToken.parse(data.pak_id, data.info['name', ''])
styles = [
prop.value
for prop in
data.info.find_all('Style')
]
desc = TransToken.parse(data.pak_id, '\n'.join(
prop.value
for prop in
data.info.find_all('description')
))
return cls(
data.id,
name,
styles,
desc,
unstyled=data.info.bool('unstyled'),
inherit=data.info.bool('inherit', True),
default=data.info.bool('enabled'),
)
def add_over(self, override: StyleVar) -> None:
"""Override a stylevar to add more compatible styles."""
# Setting it to be unstyled overrides any other values!
if self.styles is None:
return
elif override.styles is None:
self.styles = None
else:
self.styles.extend(override.styles)
if not self.name:
self.name = override.name
# If they both have descriptions, add them together.
# Don't do it if they're both identical though.
# bool(strip()) = has a non-whitespace character
stripped_over = override.desc.token.strip()
if stripped_over and stripped_over not in self.desc.token:
if self.desc.token.strip():
self.desc = TransToken.untranslated('{a}\n\n{b}').format(a=self.desc, b=override.desc)
else:
self.desc = override.desc
def __repr__(self) -> str:
return (
f'<Stylevar "{self.id}", name="{self.name}", '
f'default={self.default}, '
f'styles={self.styles}>:\n{self.desc}'
)
def iter_trans_tokens(self) -> Iterator[TransTokenSource]:
"""Yield translation tokens used by this stylevar."""
yield self.name, self.id + '.name'
yield self.desc, self.id + '.desc'
def applies_to_style(self, style: Style) -> bool:
"""Check to see if this will apply for the given style.
"""
if self.is_unstyled:
return True
if style.id in self.styles:
return True
return self.inherit and any(
base.id in self.styles
for base in
style.bases
)
def applies_to_all(self) -> bool:
"""Check if this applies to all styles."""
if self.is_unstyled:
return True
for style in Style.all():
if not self.applies_to_style(style):
return False
return True
@staticmethod
def export(exp_data: ExportData) -> None:
"""Export style var selections into the config.
The .selected attribute is a dict mapping ids to the boolean value.
"""
# Add the StyleVars block, containing each style_var.
exp_data.vbsp_conf.append(Property('StyleVars', [
Property(key, bool_as_int(val))
for key, val in
exp_data.selected.items()
]))
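# --- Editor's illustrative sketch (not part of BEE2.4) ---
# The matching rule used by applies_to_style above, restated over plain data
# so it can be tried without the package framework. 'allowed' mirrors
# self.styles (None meaning unstyled), 'style_bases' stands in for the ids of
# Style.bases, and the style ids in the example are purely illustrative.
def _style_allowed(allowed, inherit, style_id, style_bases):
    if allowed is None:
        return True  # Unstyled vars apply to every style.
    if style_id in allowed:
        return True
    # Otherwise the var applies only if it may be inherited from a base style.
    return inherit and any(base_id in allowed for base_id in style_bases)
# Example: a var listing only 'STYLE_BASE' also applies to a derived style
# whose base chain contains 'STYLE_BASE', provided inherit is set:
# _style_allowed(['STYLE_BASE'], True, 'STYLE_CHILD', ['STYLE_BASE']) -> True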
|