hexsha (stringlengths 40-40) | size (int64 4-996k) | ext (stringclasses 8 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-40) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 4-245) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-40) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 4-245) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-40) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 4-996k) | avg_line_length (float64 1.33-58.2k) | max_line_length (int64 2-323k) | alphanum_fraction (float64 0-0.97) | content_no_comment (stringlengths 0-946k) | is_comment_constant_removed (bool 2 classes) | is_sharp_comment_removed (bool 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
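Each row below follows this schema: repository metadata (file path, repo name, head commit, licenses, and star/issue/fork counts with event timestamps), the raw file content, per-file text statistics, and a content_no_comment variant with two comment-removal flags. As a minimal, self-contained sketch of filtering such rows on the statistics columns (the dicts are illustrative stand-ins echoing two of the sample rows, not an assumed loading API):

```python
# Minimal sketch: filter rows of this schema by their text statistics.
# The dicts below are illustrative stand-ins echoing two of the sample rows.
rows = [
    {"hexsha": "79054b38...", "ext": "py", "size": 3057,
     "avg_line_length": 39.192308, "max_line_length": 93, "alphanum_fraction": 0.499836},
    {"hexsha": "79054c2d...", "ext": "py", "size": 15469,
     "avg_line_length": 38.866834, "max_line_length": 107, "alphanum_fraction": 0.739156},
]

def looks_like_source(row, max_line=120, min_alnum=0.25):
    """Heuristic filter: modest line lengths and a reasonable alphanumeric fraction."""
    return row["max_line_length"] <= max_line and row["alphanum_fraction"] >= min_alnum

kept = [r for r in rows if looks_like_source(r)]
print(f"kept {len(kept)} of {len(rows)} rows")
```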
79054b38a3d510ea8635524228aef924775d9f25 | 3,057 | py | Python | tests/logictest/http_runner.py | LiuYuHui/databend | 87ad8f1233eee079175dd06a0143ebaa66d5f6d4 | ["Apache-2.0"] | null | null | null | tests/logictest/http_runner.py | LiuYuHui/databend | 87ad8f1233eee079175dd06a0143ebaa66d5f6d4 | ["Apache-2.0"] | null | null | null | tests/logictest/http_runner.py | LiuYuHui/databend | 87ad8f1233eee079175dd06a0143ebaa66d5f6d4 | ["Apache-2.0"] | null | null | null |
from abc import ABC
from types import NoneType
import logictest
import http_connector
from log import log
class TestHttp(logictest.SuiteRunner, ABC):
def __init__(self, kind, pattern):
super().__init__(kind, pattern)
self._http = None
def get_connection(self):
if self._http is None:
self._http = http_connector.HttpConnector()
self._http.connect(**self.driver)
return self._http
def reset_connection(self):
self._http.reset_session()
def batch_execute(self, statement_list):
for statement in statement_list:
self.execute_statement(statement)
self.reset_connection()
def execute_ok(self, statement):
self.get_connection().query_with_session(statement)
return None
def execute_error(self, statement):
resp = self.get_connection().query_with_session(statement)
return http_connector.get_error(resp)
def execute_query(self, statement):
results = self.get_connection().fetch_all(statement.text)
query_type = statement.s_type.query_type
vals = []
for (ri, row) in enumerate(results):
for (i, v) in enumerate(row):
if isinstance(v, NoneType):
vals.append("NULL")
continue
if query_type[i] == 'I':
if not isinstance(v, int):
log.error(
"Expected int, got type {} in query {} row {} col {} value {}"
.format(type(v), statement.text, ri, i, v))
elif query_type[i] == 'F' or query_type[i] == 'R':
if not isinstance(v, float):
log.error(
"Expected float, got type {} in query {} row {} col {} value {}"
.format(type(v), statement.text, ri, i, v))
elif query_type[i] == 'T':
# include data, timestamp, dict, list ...
if not (isinstance(v, str) or isinstance(v, dict) or
isinstance(v, list)):
log.error(
"Expected string, got type {} in query {} row {} col {} value {}"
.format(type(v), statement.text, ri, i, v))
elif query_type[i] == 'B':
if not isinstance(v, bool):
log.error(
"Expected bool, got type {} in query {} row {} col {} value {}"
.format(type(v), statement.text, ri, i, v))
else:
log.error(
"Unknown type {} in query {} row {} col {} value {}".
format(query_type[i], statement.text, ri, i, v))
if isinstance(v, bool):
v = str(v).lower(
) # bool to string in python will be True/False
vals.append(str(v))
return vals
| 39.192308 | 93 | 0.499836 |
from abc import ABC
from types import NoneType
import logictest
import http_connector
from log import log
class TestHttp(logictest.SuiteRunner, ABC):
def __init__(self, kind, pattern):
super().__init__(kind, pattern)
self._http = None
def get_connection(self):
if self._http is None:
self._http = http_connector.HttpConnector()
self._http.connect(**self.driver)
return self._http
def reset_connection(self):
self._http.reset_session()
def batch_execute(self, statement_list):
for statement in statement_list:
self.execute_statement(statement)
self.reset_connection()
def execute_ok(self, statement):
self.get_connection().query_with_session(statement)
return None
def execute_error(self, statement):
resp = self.get_connection().query_with_session(statement)
return http_connector.get_error(resp)
def execute_query(self, statement):
results = self.get_connection().fetch_all(statement.text)
query_type = statement.s_type.query_type
vals = []
for (ri, row) in enumerate(results):
for (i, v) in enumerate(row):
if isinstance(v, NoneType):
vals.append("NULL")
continue
if query_type[i] == 'I':
if not isinstance(v, int):
log.error(
"Expected int, got type {} in query {} row {} col {} value {}"
.format(type(v), statement.text, ri, i, v))
elif query_type[i] == 'F' or query_type[i] == 'R':
if not isinstance(v, float):
log.error(
"Expected float, got type {} in query {} row {} col {} value {}"
.format(type(v), statement.text, ri, i, v))
elif query_type[i] == 'T':
if not (isinstance(v, str) or isinstance(v, dict) or
isinstance(v, list)):
log.error(
"Expected string, got type {} in query {} row {} col {} value {}"
.format(type(v), statement.text, ri, i, v))
elif query_type[i] == 'B':
if not isinstance(v, bool):
log.error(
"Expected bool, got type {} in query {} row {} col {} value {}"
.format(type(v), statement.text, ri, i, v))
else:
log.error(
"Unknown type {} in query {} row {} col {} value {}".
format(query_type[i], statement.text, ri, i, v))
if isinstance(v, bool):
v = str(v).lower(
)
vals.append(str(v))
return vals
| true | true |
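Each row pairs content with a content_no_comment rendering; the is_sharp_comment_removed and is_comment_constant_removed flags indicate what was stripped. The tool that produced that column is not shown here; as a rough sketch under that caveat, '#' comments can be dropped from Python source with the standard tokenize module:

```python
import io
import tokenize

def strip_sharp_comments(source: str) -> str:
    # Drop COMMENT tokens and rebuild the remaining source text.
    tokens = [
        tok for tok in tokenize.generate_tokens(io.StringIO(source).readline)
        if tok.type != tokenize.COMMENT
    ]
    return tokenize.untokenize(tokens)

example = "x = 1  # inline comment\n# full-line comment\ny = 2\n"
print(strip_sharp_comments(example))
```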
79054b71c4a1bf5b14634dfdc74224ffed211aa3 | 1,779 | py | Python | WeatherDashboardCW.py | kuzned/rpi_weather | 6e4102e0fd73d88f2bec01e0252919a05106767e | ["MIT"] | null | null | null | WeatherDashboardCW.py | kuzned/rpi_weather | 6e4102e0fd73d88f2bec01e0252919a05106767e | ["MIT"] | null | null | null | WeatherDashboardCW.py | kuzned/rpi_weather | 6e4102e0fd73d88f2bec01e0252919a05106767e | ["MIT"] | null | null | null |
from gpiozero import Servo
from gpiozero import LED
from time import sleep
from WeatherDataCW import WeatherData
class WeatherDashboard:
servo_pin = 17
led_pin = 14
servoCorrection=0.5
maxPW=(2.0+servoCorrection)/1000
minPW=(1.0-servoCorrection)/1000
def __init__(self, servo_position=0, led_status=0):
self.servo = Servo(self.servo_pin, min_pulse_width=self.minPW, max_pulse_width=self.maxPW)
self.led = LED(self.led_pin)
self.move_servo(servo_position)
self.set_led_status(led_status)
def move_servo(self, servo_position=0):
self.servo.value = self.convert_percentage_to_integer(servo_position)
def turnOffServo(self):
sleep(2)
self.servo.close()
def set_led_status(self, led_status=0):
if(led_status==0):
self.led.off()
elif (led_status==1):
self.led.on()
else:
self.led.blink()
def convert_percentage_to_integer(self, percentage_amount):
#adjust for servos that turn counter clockwise by default
adjusted_percentage_amount = 100 - percentage_amount
return (adjusted_percentage_amount*0.02)-1
if __name__=="__main__":
weather_data = WeatherData('Yekaterinburg')
print("%s %sC %s wind speed %s km/h"
%(weather_data.getCity(),
weather_data.getTemperature(),
weather_data.getWeatherConditions(),
weather_data.getWindSpeed()))
print(weather_data.getServoValue())
print(weather_data.getLEDValue())
weather_dashboard = WeatherDashboard(
weather_data.getServoValue(),
weather_data.getLEDValue())
weather_dashboard.turnOffServo()
| 30.672414 | 98 | 0.649241 |
from gpiozero import Servo
from gpiozero import LED
from time import sleep
from WeatherDataCW import WeatherData
class WeatherDashboard:
servo_pin = 17
led_pin = 14
servoCorrection=0.5
maxPW=(2.0+servoCorrection)/1000
minPW=(1.0-servoCorrection)/1000
def __init__(self, servo_position=0, led_status=0):
self.servo = Servo(self.servo_pin, min_pulse_width=self.minPW, max_pulse_width=self.maxPW)
self.led = LED(self.led_pin)
self.move_servo(servo_position)
self.set_led_status(led_status)
def move_servo(self, servo_position=0):
self.servo.value = self.convert_percentage_to_integer(servo_position)
def turnOffServo(self):
sleep(2)
self.servo.close()
def set_led_status(self, led_status=0):
if(led_status==0):
self.led.off()
elif (led_status==1):
self.led.on()
else:
self.led.blink()
def convert_percentage_to_integer(self, percentage_amount):
adjusted_percentage_amount = 100 - percentage_amount
return (adjusted_percentage_amount*0.02)-1
if __name__=="__main__":
weather_data = WeatherData('Yekaterinburg')
print("%s %sC %s wind speed %s km/h"
%(weather_data.getCity(),
weather_data.getTemperature(),
weather_data.getWeatherConditions(),
weather_data.getWindSpeed()))
print(weather_data.getServoValue())
print(weather_data.getLEDValue())
weather_dashboard = WeatherDashboard(
weather_data.getServoValue(),
weather_data.getLEDValue())
weather_dashboard.turnOffServo()
| true | true |
79054c2d4fec67af7bb797077ea3cddc2bd2c334 | 15,469 | py | Python | tests/integ/test_auto_ml.py | bstriner/sagemaker-python-sdk | cc98dd057ccd4a38d9a0e44de05e2b38fc8f9526 | ["Apache-2.0"] | 1 | 2020-09-16T12:18:03.000Z | 2020-09-16T12:18:03.000Z | tests/integ/test_auto_ml.py | bstriner/sagemaker-python-sdk | cc98dd057ccd4a38d9a0e44de05e2b38fc8f9526 | ["Apache-2.0"] | null | null | null | tests/integ/test_auto_ml.py | bstriner/sagemaker-python-sdk | cc98dd057ccd4a38d9a0e44de05e2b38fc8f9526 | ["Apache-2.0"] | null | null | null |
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import pytest
import tests.integ
from sagemaker import AutoML, CandidateEstimator, AutoMLInput
from botocore.exceptions import ClientError
from sagemaker.utils import unique_name_from_base
from tests.integ import DATA_DIR, AUTO_ML_DEFAULT_TIMEMOUT_MINUTES, auto_ml_utils
from tests.integ.timeout import timeout
ROLE = "SageMakerRole"
PREFIX = "sagemaker/beta-automl-xgboost"
AUTO_ML_INSTANCE_TYPE = "ml.m5.2xlarge"
INSTANCE_COUNT = 1
RESOURCE_POOLS = [{"InstanceType": AUTO_ML_INSTANCE_TYPE, "PoolSize": INSTANCE_COUNT}]
TARGET_ATTRIBUTE_NAME = "virginica"
DATA_DIR = os.path.join(DATA_DIR, "automl", "data")
TRAINING_DATA = os.path.join(DATA_DIR, "iris_training.csv")
TEST_DATA = os.path.join(DATA_DIR, "iris_test.csv")
TRANSFORM_DATA = os.path.join(DATA_DIR, "iris_transform.csv")
PROBLEM_TYPE = "MultiClassClassification"
BASE_JOB_NAME = "auto-ml"
# use a succeeded AutoML job to test describe and list candidates method, otherwise tests will run too long
AUTO_ML_JOB_NAME = "python-sdk-integ-test-base-job"
DEFAULT_MODEL_NAME = "python-sdk-automl"
EXPECTED_DEFAULT_JOB_CONFIG = {
"CompletionCriteria": {"MaxCandidates": 3},
"SecurityConfig": {"EnableInterContainerTrafficEncryption": False},
}
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
@pytest.mark.canary_quick
def test_auto_ml_fit(sagemaker_session):
auto_ml = AutoML(
role=ROLE,
target_attribute_name=TARGET_ATTRIBUTE_NAME,
sagemaker_session=sagemaker_session,
max_candidates=3,
)
job_name = unique_name_from_base("auto-ml", max_length=32)
inputs = sagemaker_session.upload_data(path=TRAINING_DATA, key_prefix=PREFIX + "/input")
with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
auto_ml.fit(inputs, job_name=job_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_fit_local_input(sagemaker_session):
auto_ml = AutoML(
role=ROLE,
target_attribute_name=TARGET_ATTRIBUTE_NAME,
sagemaker_session=sagemaker_session,
max_candidates=1,
)
inputs = TRAINING_DATA
job_name = unique_name_from_base("auto-ml", max_length=32)
with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
auto_ml.fit(inputs, job_name=job_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_input_object_fit(sagemaker_session):
auto_ml = AutoML(
role=ROLE,
target_attribute_name=TARGET_ATTRIBUTE_NAME,
sagemaker_session=sagemaker_session,
max_candidates=1,
)
job_name = unique_name_from_base("auto-ml", max_length=32)
s3_input = sagemaker_session.upload_data(path=TRAINING_DATA, key_prefix=PREFIX + "/input")
inputs = AutoMLInput(inputs=s3_input, target_attribute_name=TARGET_ATTRIBUTE_NAME)
with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
auto_ml.fit(inputs, job_name=job_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_fit_optional_args(sagemaker_session):
output_path = "s3://{}/{}".format(sagemaker_session.default_bucket(), "specified_ouput_path")
problem_type = "MulticlassClassification"
job_objective = {"MetricName": "Accuracy"}
auto_ml = AutoML(
role=ROLE,
target_attribute_name=TARGET_ATTRIBUTE_NAME,
sagemaker_session=sagemaker_session,
max_candidates=1,
output_path=output_path,
problem_type=problem_type,
job_objective=job_objective,
)
inputs = TRAINING_DATA
with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
auto_ml.fit(inputs, job_name=unique_name_from_base(BASE_JOB_NAME))
auto_ml_desc = auto_ml.describe_auto_ml_job(job_name=auto_ml.latest_auto_ml_job.job_name)
assert auto_ml_desc["AutoMLJobStatus"] == "Completed"
assert auto_ml_desc["AutoMLJobName"] == auto_ml.latest_auto_ml_job.job_name
assert auto_ml_desc["AutoMLJobObjective"] == job_objective
assert auto_ml_desc["ProblemType"] == problem_type
assert auto_ml_desc["OutputDataConfig"]["S3OutputPath"] == output_path
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_invalid_target_attribute(sagemaker_session):
auto_ml = AutoML(
role=ROLE, target_attribute_name="y", sagemaker_session=sagemaker_session, max_candidates=1
)
job_name = unique_name_from_base("auto-ml", max_length=32)
inputs = sagemaker_session.upload_data(path=TRAINING_DATA, key_prefix=PREFIX + "/input")
with pytest.raises(
ClientError,
match=r"An error occurred \(ValidationException\) when calling the CreateAutoMLJob "
"operation: Target attribute name y does not exist in header.",
):
auto_ml.fit(inputs, job_name=job_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_describe_auto_ml_job(sagemaker_session):
expected_default_input_config = [
{
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "s3://{}/{}/input/iris_training.csv".format(
sagemaker_session.default_bucket(), PREFIX
),
}
},
"TargetAttributeName": TARGET_ATTRIBUTE_NAME,
}
]
expected_default_output_config = {
"S3OutputPath": "s3://{}/".format(sagemaker_session.default_bucket())
}
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML(
role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
)
desc = auto_ml.describe_auto_ml_job(job_name=AUTO_ML_JOB_NAME)
assert desc["AutoMLJobName"] == AUTO_ML_JOB_NAME
assert desc["AutoMLJobStatus"] == "Completed"
assert isinstance(desc["BestCandidate"], dict)
assert desc["InputDataConfig"] == expected_default_input_config
assert desc["AutoMLJobConfig"] == EXPECTED_DEFAULT_JOB_CONFIG
assert desc["OutputDataConfig"] == expected_default_output_config
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_attach(sagemaker_session):
expected_default_input_config = [
{
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "s3://{}/{}/input/iris_training.csv".format(
sagemaker_session.default_bucket(), PREFIX
),
}
},
"TargetAttributeName": TARGET_ATTRIBUTE_NAME,
}
]
expected_default_output_config = {
"S3OutputPath": "s3://{}/".format(sagemaker_session.default_bucket())
}
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
attached_automl_job = AutoML.attach(
auto_ml_job_name=AUTO_ML_JOB_NAME, sagemaker_session=sagemaker_session
)
attached_desc = attached_automl_job.describe_auto_ml_job()
assert attached_desc["AutoMLJobName"] == AUTO_ML_JOB_NAME
assert attached_desc["AutoMLJobStatus"] == "Completed"
assert isinstance(attached_desc["BestCandidate"], dict)
assert attached_desc["InputDataConfig"] == expected_default_input_config
assert attached_desc["AutoMLJobConfig"] == EXPECTED_DEFAULT_JOB_CONFIG
assert attached_desc["OutputDataConfig"] == expected_default_output_config
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_list_candidates(sagemaker_session):
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML(
role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
)
candidates = auto_ml.list_candidates(job_name=AUTO_ML_JOB_NAME)
assert len(candidates) == 3
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_best_candidate(sagemaker_session):
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML(
role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
)
best_candidate = auto_ml.best_candidate(job_name=AUTO_ML_JOB_NAME)
assert len(best_candidate["InferenceContainers"]) == 3
assert len(best_candidate["CandidateSteps"]) == 4
assert best_candidate["CandidateStatus"] == "Completed"
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
@pytest.mark.canary_quick
def test_deploy_best_candidate(sagemaker_session, cpu_instance_type):
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML(
role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
)
best_candidate = auto_ml.best_candidate(job_name=AUTO_ML_JOB_NAME)
endpoint_name = unique_name_from_base("sagemaker-auto-ml-best-candidate-test")
with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
auto_ml.deploy(
candidate=best_candidate,
initial_instance_count=INSTANCE_COUNT,
instance_type=cpu_instance_type,
endpoint_name=endpoint_name,
)
endpoint_status = sagemaker_session.sagemaker_client.describe_endpoint(
EndpointName=endpoint_name
)["EndpointStatus"]
assert endpoint_status == "InService"
sagemaker_session.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_create_model_best_candidate(sagemaker_session, cpu_instance_type):
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML.attach(auto_ml_job_name=AUTO_ML_JOB_NAME, sagemaker_session=sagemaker_session)
best_candidate = auto_ml.best_candidate()
with timeout(minutes=5):
pipeline_model = auto_ml.create_model(
name=DEFAULT_MODEL_NAME,
candidate=best_candidate,
sagemaker_session=sagemaker_session,
vpc_config=None,
enable_network_isolation=False,
model_kms_key=None,
predictor_cls=None,
)
inputs = sagemaker_session.upload_data(
path=TRANSFORM_DATA, key_prefix=PREFIX + "/transform_input"
)
pipeline_model.transformer(
instance_count=1,
instance_type=cpu_instance_type,
assemble_with="Line",
output_path="s3://{}/{}".format(sagemaker_session.default_bucket(), "transform_test"),
accept="text/csv",
).transform(data=inputs, content_type="text/csv", split_type="Line", join_source="Input")
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_candidate_estimator_default_rerun_and_deploy(sagemaker_session, cpu_instance_type):
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML(
role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
)
candidates = auto_ml.list_candidates(job_name=AUTO_ML_JOB_NAME)
candidate = candidates[1]
candidate_estimator = CandidateEstimator(candidate, sagemaker_session)
inputs = sagemaker_session.upload_data(path=TEST_DATA, key_prefix=PREFIX + "/input")
endpoint_name = unique_name_from_base("sagemaker-auto-ml-rerun-candidate-test")
with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
candidate_estimator.fit(inputs)
auto_ml.deploy(
initial_instance_count=INSTANCE_COUNT,
instance_type=cpu_instance_type,
candidate=candidate,
endpoint_name=endpoint_name,
)
endpoint_status = sagemaker_session.sagemaker_client.describe_endpoint(
EndpointName=endpoint_name
)["EndpointStatus"]
assert endpoint_status == "InService"
sagemaker_session.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_candidate_estimator_rerun_with_optional_args(sagemaker_session, cpu_instance_type):
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML(
role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
)
candidates = auto_ml.list_candidates(job_name=AUTO_ML_JOB_NAME)
candidate = candidates[1]
candidate_estimator = CandidateEstimator(candidate, sagemaker_session)
inputs = sagemaker_session.upload_data(path=TEST_DATA, key_prefix=PREFIX + "/input")
endpoint_name = unique_name_from_base("sagemaker-auto-ml-rerun-candidate-test")
with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
candidate_estimator.fit(inputs, encrypt_inter_container_traffic=True)
auto_ml.deploy(
initial_instance_count=INSTANCE_COUNT,
instance_type=cpu_instance_type,
candidate=candidate,
endpoint_name=endpoint_name,
)
endpoint_status = sagemaker_session.sagemaker_client.describe_endpoint(
EndpointName=endpoint_name
)["EndpointStatus"]
assert endpoint_status == "InService"
sagemaker_session.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_candidate_estimator_get_steps(sagemaker_session):
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML(
role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
)
candidates = auto_ml.list_candidates(job_name=AUTO_ML_JOB_NAME)
candidate = candidates[1]
candidate_estimator = CandidateEstimator(candidate, sagemaker_session)
steps = candidate_estimator.get_steps()
assert len(steps) == 3
| 38.866834 | 107 | 0.739156 |
from __future__ import absolute_import
import os
import pytest
import tests.integ
from sagemaker import AutoML, CandidateEstimator, AutoMLInput
from botocore.exceptions import ClientError
from sagemaker.utils import unique_name_from_base
from tests.integ import DATA_DIR, AUTO_ML_DEFAULT_TIMEMOUT_MINUTES, auto_ml_utils
from tests.integ.timeout import timeout
ROLE = "SageMakerRole"
PREFIX = "sagemaker/beta-automl-xgboost"
AUTO_ML_INSTANCE_TYPE = "ml.m5.2xlarge"
INSTANCE_COUNT = 1
RESOURCE_POOLS = [{"InstanceType": AUTO_ML_INSTANCE_TYPE, "PoolSize": INSTANCE_COUNT}]
TARGET_ATTRIBUTE_NAME = "virginica"
DATA_DIR = os.path.join(DATA_DIR, "automl", "data")
TRAINING_DATA = os.path.join(DATA_DIR, "iris_training.csv")
TEST_DATA = os.path.join(DATA_DIR, "iris_test.csv")
TRANSFORM_DATA = os.path.join(DATA_DIR, "iris_transform.csv")
PROBLEM_TYPE = "MultiClassClassification"
BASE_JOB_NAME = "auto-ml"
AUTO_ML_JOB_NAME = "python-sdk-integ-test-base-job"
DEFAULT_MODEL_NAME = "python-sdk-automl"
EXPECTED_DEFAULT_JOB_CONFIG = {
"CompletionCriteria": {"MaxCandidates": 3},
"SecurityConfig": {"EnableInterContainerTrafficEncryption": False},
}
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
@pytest.mark.canary_quick
def test_auto_ml_fit(sagemaker_session):
auto_ml = AutoML(
role=ROLE,
target_attribute_name=TARGET_ATTRIBUTE_NAME,
sagemaker_session=sagemaker_session,
max_candidates=3,
)
job_name = unique_name_from_base("auto-ml", max_length=32)
inputs = sagemaker_session.upload_data(path=TRAINING_DATA, key_prefix=PREFIX + "/input")
with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
auto_ml.fit(inputs, job_name=job_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_fit_local_input(sagemaker_session):
auto_ml = AutoML(
role=ROLE,
target_attribute_name=TARGET_ATTRIBUTE_NAME,
sagemaker_session=sagemaker_session,
max_candidates=1,
)
inputs = TRAINING_DATA
job_name = unique_name_from_base("auto-ml", max_length=32)
with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
auto_ml.fit(inputs, job_name=job_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_input_object_fit(sagemaker_session):
auto_ml = AutoML(
role=ROLE,
target_attribute_name=TARGET_ATTRIBUTE_NAME,
sagemaker_session=sagemaker_session,
max_candidates=1,
)
job_name = unique_name_from_base("auto-ml", max_length=32)
s3_input = sagemaker_session.upload_data(path=TRAINING_DATA, key_prefix=PREFIX + "/input")
inputs = AutoMLInput(inputs=s3_input, target_attribute_name=TARGET_ATTRIBUTE_NAME)
with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
auto_ml.fit(inputs, job_name=job_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_fit_optional_args(sagemaker_session):
output_path = "s3://{}/{}".format(sagemaker_session.default_bucket(), "specified_ouput_path")
problem_type = "MulticlassClassification"
job_objective = {"MetricName": "Accuracy"}
auto_ml = AutoML(
role=ROLE,
target_attribute_name=TARGET_ATTRIBUTE_NAME,
sagemaker_session=sagemaker_session,
max_candidates=1,
output_path=output_path,
problem_type=problem_type,
job_objective=job_objective,
)
inputs = TRAINING_DATA
with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
auto_ml.fit(inputs, job_name=unique_name_from_base(BASE_JOB_NAME))
auto_ml_desc = auto_ml.describe_auto_ml_job(job_name=auto_ml.latest_auto_ml_job.job_name)
assert auto_ml_desc["AutoMLJobStatus"] == "Completed"
assert auto_ml_desc["AutoMLJobName"] == auto_ml.latest_auto_ml_job.job_name
assert auto_ml_desc["AutoMLJobObjective"] == job_objective
assert auto_ml_desc["ProblemType"] == problem_type
assert auto_ml_desc["OutputDataConfig"]["S3OutputPath"] == output_path
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_invalid_target_attribute(sagemaker_session):
auto_ml = AutoML(
role=ROLE, target_attribute_name="y", sagemaker_session=sagemaker_session, max_candidates=1
)
job_name = unique_name_from_base("auto-ml", max_length=32)
inputs = sagemaker_session.upload_data(path=TRAINING_DATA, key_prefix=PREFIX + "/input")
with pytest.raises(
ClientError,
match=r"An error occurred \(ValidationException\) when calling the CreateAutoMLJob "
"operation: Target attribute name y does not exist in header.",
):
auto_ml.fit(inputs, job_name=job_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_describe_auto_ml_job(sagemaker_session):
expected_default_input_config = [
{
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "s3://{}/{}/input/iris_training.csv".format(
sagemaker_session.default_bucket(), PREFIX
),
}
},
"TargetAttributeName": TARGET_ATTRIBUTE_NAME,
}
]
expected_default_output_config = {
"S3OutputPath": "s3://{}/".format(sagemaker_session.default_bucket())
}
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML(
role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
)
desc = auto_ml.describe_auto_ml_job(job_name=AUTO_ML_JOB_NAME)
assert desc["AutoMLJobName"] == AUTO_ML_JOB_NAME
assert desc["AutoMLJobStatus"] == "Completed"
assert isinstance(desc["BestCandidate"], dict)
assert desc["InputDataConfig"] == expected_default_input_config
assert desc["AutoMLJobConfig"] == EXPECTED_DEFAULT_JOB_CONFIG
assert desc["OutputDataConfig"] == expected_default_output_config
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_attach(sagemaker_session):
expected_default_input_config = [
{
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "s3://{}/{}/input/iris_training.csv".format(
sagemaker_session.default_bucket(), PREFIX
),
}
},
"TargetAttributeName": TARGET_ATTRIBUTE_NAME,
}
]
expected_default_output_config = {
"S3OutputPath": "s3://{}/".format(sagemaker_session.default_bucket())
}
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
attached_automl_job = AutoML.attach(
auto_ml_job_name=AUTO_ML_JOB_NAME, sagemaker_session=sagemaker_session
)
attached_desc = attached_automl_job.describe_auto_ml_job()
assert attached_desc["AutoMLJobName"] == AUTO_ML_JOB_NAME
assert attached_desc["AutoMLJobStatus"] == "Completed"
assert isinstance(attached_desc["BestCandidate"], dict)
assert attached_desc["InputDataConfig"] == expected_default_input_config
assert attached_desc["AutoMLJobConfig"] == EXPECTED_DEFAULT_JOB_CONFIG
assert attached_desc["OutputDataConfig"] == expected_default_output_config
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_list_candidates(sagemaker_session):
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML(
role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
)
candidates = auto_ml.list_candidates(job_name=AUTO_ML_JOB_NAME)
assert len(candidates) == 3
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_best_candidate(sagemaker_session):
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML(
role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
)
best_candidate = auto_ml.best_candidate(job_name=AUTO_ML_JOB_NAME)
assert len(best_candidate["InferenceContainers"]) == 3
assert len(best_candidate["CandidateSteps"]) == 4
assert best_candidate["CandidateStatus"] == "Completed"
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
@pytest.mark.canary_quick
def test_deploy_best_candidate(sagemaker_session, cpu_instance_type):
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML(
role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
)
best_candidate = auto_ml.best_candidate(job_name=AUTO_ML_JOB_NAME)
endpoint_name = unique_name_from_base("sagemaker-auto-ml-best-candidate-test")
with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
auto_ml.deploy(
candidate=best_candidate,
initial_instance_count=INSTANCE_COUNT,
instance_type=cpu_instance_type,
endpoint_name=endpoint_name,
)
endpoint_status = sagemaker_session.sagemaker_client.describe_endpoint(
EndpointName=endpoint_name
)["EndpointStatus"]
assert endpoint_status == "InService"
sagemaker_session.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_create_model_best_candidate(sagemaker_session, cpu_instance_type):
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML.attach(auto_ml_job_name=AUTO_ML_JOB_NAME, sagemaker_session=sagemaker_session)
best_candidate = auto_ml.best_candidate()
with timeout(minutes=5):
pipeline_model = auto_ml.create_model(
name=DEFAULT_MODEL_NAME,
candidate=best_candidate,
sagemaker_session=sagemaker_session,
vpc_config=None,
enable_network_isolation=False,
model_kms_key=None,
predictor_cls=None,
)
inputs = sagemaker_session.upload_data(
path=TRANSFORM_DATA, key_prefix=PREFIX + "/transform_input"
)
pipeline_model.transformer(
instance_count=1,
instance_type=cpu_instance_type,
assemble_with="Line",
output_path="s3://{}/{}".format(sagemaker_session.default_bucket(), "transform_test"),
accept="text/csv",
).transform(data=inputs, content_type="text/csv", split_type="Line", join_source="Input")
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_candidate_estimator_default_rerun_and_deploy(sagemaker_session, cpu_instance_type):
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML(
role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
)
candidates = auto_ml.list_candidates(job_name=AUTO_ML_JOB_NAME)
candidate = candidates[1]
candidate_estimator = CandidateEstimator(candidate, sagemaker_session)
inputs = sagemaker_session.upload_data(path=TEST_DATA, key_prefix=PREFIX + "/input")
endpoint_name = unique_name_from_base("sagemaker-auto-ml-rerun-candidate-test")
with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
candidate_estimator.fit(inputs)
auto_ml.deploy(
initial_instance_count=INSTANCE_COUNT,
instance_type=cpu_instance_type,
candidate=candidate,
endpoint_name=endpoint_name,
)
endpoint_status = sagemaker_session.sagemaker_client.describe_endpoint(
EndpointName=endpoint_name
)["EndpointStatus"]
assert endpoint_status == "InService"
sagemaker_session.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_candidate_estimator_rerun_with_optional_args(sagemaker_session, cpu_instance_type):
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML(
role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
)
candidates = auto_ml.list_candidates(job_name=AUTO_ML_JOB_NAME)
candidate = candidates[1]
candidate_estimator = CandidateEstimator(candidate, sagemaker_session)
inputs = sagemaker_session.upload_data(path=TEST_DATA, key_prefix=PREFIX + "/input")
endpoint_name = unique_name_from_base("sagemaker-auto-ml-rerun-candidate-test")
with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
candidate_estimator.fit(inputs, encrypt_inter_container_traffic=True)
auto_ml.deploy(
initial_instance_count=INSTANCE_COUNT,
instance_type=cpu_instance_type,
candidate=candidate,
endpoint_name=endpoint_name,
)
endpoint_status = sagemaker_session.sagemaker_client.describe_endpoint(
EndpointName=endpoint_name
)["EndpointStatus"]
assert endpoint_status == "InService"
sagemaker_session.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_candidate_estimator_get_steps(sagemaker_session):
auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
auto_ml = AutoML(
role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
)
candidates = auto_ml.list_candidates(job_name=AUTO_ML_JOB_NAME)
candidate = candidates[1]
candidate_estimator = CandidateEstimator(candidate, sagemaker_session)
steps = candidate_estimator.get_steps()
assert len(steps) == 3
| true | true |
79054c3ceaa1df22b14cc922282eeb246d615aa6 | 9,405 | py | Python | sdk/python/pulumi_aws/s3/analytics_configuration.py | michael-golden/pulumi-aws | 165e876e166ecab1870e857822247585d78aef64 | ["ECL-2.0", "Apache-2.0"] | null | null | null | sdk/python/pulumi_aws/s3/analytics_configuration.py | michael-golden/pulumi-aws | 165e876e166ecab1870e857822247585d78aef64 | ["ECL-2.0", "Apache-2.0"] | null | null | null | sdk/python/pulumi_aws/s3/analytics_configuration.py | michael-golden/pulumi-aws | 165e876e166ecab1870e857822247585d78aef64 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class AnalyticsConfiguration(pulumi.CustomResource):
bucket: pulumi.Output[str]
"""
The name of the bucket this analytics configuration is associated with.
"""
filter: pulumi.Output[dict]
"""
Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below).
* `prefix` (`str`) - Object prefix for filtering.
* `tags` (`dict`) - Set of object tags for filtering.
"""
name: pulumi.Output[str]
"""
Unique identifier of the analytics configuration for the bucket.
"""
storage_class_analysis: pulumi.Output[dict]
"""
Configuration for the analytics data export (documented below).
* `dataExport` (`dict`) - Data export configuration (documented below).
* `destination` (`dict`) - Specifies the destination for the exported analytics data (documented below).
* `s3BucketDestination` (`dict`) - Analytics data export currently only supports an S3 bucket destination (documented below).
* `bucketAccountId` (`str`) - The account ID that owns the destination bucket.
* `bucketArn` (`str`) - The ARN of the destination bucket.
* `format` (`str`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`.
* `prefix` (`str`) - Object prefix for filtering.
* `outputSchemaVersion` (`str`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`.
"""
def __init__(__self__, resource_name, opts=None, bucket=None, filter=None, name=None, storage_class_analysis=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a S3 bucket [analytics configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) resource.
## Example Usage
### Add analytics configuration for entire S3 bucket and export results to a second S3 bucket
```python
import pulumi
import pulumi_aws as aws
example = aws.s3.Bucket("example")
analytics = aws.s3.Bucket("analytics")
example_entire_bucket = aws.s3.AnalyticsConfiguration("example-entire-bucket",
bucket=example.bucket,
storage_class_analysis={
"dataExport": {
"destination": {
"s3BucketDestination": {
"bucketArn": analytics.arn,
},
},
},
})
```
### Add analytics configuration with S3 bucket object filter
```python
import pulumi
import pulumi_aws as aws
example = aws.s3.Bucket("example")
example_filtered = aws.s3.AnalyticsConfiguration("example-filtered",
bucket=example.bucket,
filter={
"prefix": "documents/",
"tags": {
"priority": "high",
"class": "blue",
},
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] bucket: The name of the bucket this analytics configuration is associated with.
:param pulumi.Input[dict] filter: Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below).
:param pulumi.Input[str] name: Unique identifier of the analytics configuration for the bucket.
:param pulumi.Input[dict] storage_class_analysis: Configuration for the analytics data export (documented below).
The **filter** object supports the following:
* `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.
* `tags` (`pulumi.Input[dict]`) - Set of object tags for filtering.
The **storage_class_analysis** object supports the following:
* `dataExport` (`pulumi.Input[dict]`) - Data export configuration (documented below).
* `destination` (`pulumi.Input[dict]`) - Specifies the destination for the exported analytics data (documented below).
* `s3BucketDestination` (`pulumi.Input[dict]`) - Analytics data export currently only supports an S3 bucket destination (documented below).
* `bucketAccountId` (`pulumi.Input[str]`) - The account ID that owns the destination bucket.
* `bucketArn` (`pulumi.Input[str]`) - The ARN of the destination bucket.
* `format` (`pulumi.Input[str]`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`.
* `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.
* `outputSchemaVersion` (`pulumi.Input[str]`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if bucket is None:
raise TypeError("Missing required property 'bucket'")
__props__['bucket'] = bucket
__props__['filter'] = filter
__props__['name'] = name
__props__['storage_class_analysis'] = storage_class_analysis
super(AnalyticsConfiguration, __self__).__init__(
'aws:s3/analyticsConfiguration:AnalyticsConfiguration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, bucket=None, filter=None, name=None, storage_class_analysis=None):
"""
Get an existing AnalyticsConfiguration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] bucket: The name of the bucket this analytics configuration is associated with.
:param pulumi.Input[dict] filter: Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below).
:param pulumi.Input[str] name: Unique identifier of the analytics configuration for the bucket.
:param pulumi.Input[dict] storage_class_analysis: Configuration for the analytics data export (documented below).
The **filter** object supports the following:
* `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.
* `tags` (`pulumi.Input[dict]`) - Set of object tags for filtering.
The **storage_class_analysis** object supports the following:
* `dataExport` (`pulumi.Input[dict]`) - Data export configuration (documented below).
* `destination` (`pulumi.Input[dict]`) - Specifies the destination for the exported analytics data (documented below).
* `s3BucketDestination` (`pulumi.Input[dict]`) - Analytics data export currently only supports an S3 bucket destination (documented below).
* `bucketAccountId` (`pulumi.Input[str]`) - The account ID that owns the destination bucket.
* `bucketArn` (`pulumi.Input[str]`) - The ARN of the destination bucket.
* `format` (`pulumi.Input[str]`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`.
* `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.
* `outputSchemaVersion` (`pulumi.Input[str]`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["bucket"] = bucket
__props__["filter"] = filter
__props__["name"] = name
__props__["storage_class_analysis"] = storage_class_analysis
return AnalyticsConfiguration(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 51.11413 | 165 | 0.645933 |
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class AnalyticsConfiguration(pulumi.CustomResource):
bucket: pulumi.Output[str]
filter: pulumi.Output[dict]
name: pulumi.Output[str]
storage_class_analysis: pulumi.Output[dict]
def __init__(__self__, resource_name, opts=None, bucket=None, filter=None, name=None, storage_class_analysis=None, __props__=None, __name__=None, __opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if bucket is None:
raise TypeError("Missing required property 'bucket'")
__props__['bucket'] = bucket
__props__['filter'] = filter
__props__['name'] = name
__props__['storage_class_analysis'] = storage_class_analysis
super(AnalyticsConfiguration, __self__).__init__(
'aws:s3/analyticsConfiguration:AnalyticsConfiguration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, bucket=None, filter=None, name=None, storage_class_analysis=None):
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["bucket"] = bucket
__props__["filter"] = filter
__props__["name"] = name
__props__["storage_class_analysis"] = storage_class_analysis
return AnalyticsConfiguration(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
79054c7e3af2a9f01df54f5e119515bb0908e283 | 627 | py | Python | institution/migrations/0017_auto_20180906_1349.py | mmesiti/cogs3 | c48cd48629570f418b93aec73de49bc2fb59edc2 | ["MIT"] | null | null | null | institution/migrations/0017_auto_20180906_1349.py | mmesiti/cogs3 | c48cd48629570f418b93aec73de49bc2fb59edc2 | ["MIT"] | 9 | 2019-08-01T09:50:34.000Z | 2019-08-14T16:24:31.000Z | institution/migrations/0017_auto_20180906_1349.py | mmesiti/cogs3 | c48cd48629570f418b93aec73de49bc2fb59edc2 | ["MIT"] | null | null | null |
# Generated by Django 2.0.2 on 2018-09-06 13:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('institution', '0016_institution_funding_document_email'),
]
operations = [
migrations.AddField(
model_name='institution',
name='funding_document_receiver',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='institution',
name='funding_document_template',
field=models.CharField(max_length=100, null=True),
),
]
| 26.125 | 67 | 0.623604 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('institution', '0016_institution_funding_document_email'),
]
operations = [
migrations.AddField(
model_name='institution',
name='funding_document_receiver',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='institution',
name='funding_document_template',
field=models.CharField(max_length=100, null=True),
),
]
| true | true |
79054d417a1b3f0beba404cdee72f6a85ccd0081 | 8,929 | py | Python | changedetectionio/tests/test_notification.py | Pritam-Patra/changedetection.io | eeba8c864d8375775ac04720856e537036ca643e | ["Apache-2.0"] | 1 | 2022-01-21T06:25:24.000Z | 2022-01-21T06:25:24.000Z | changedetectionio/tests/test_notification.py | Pritam-Patra/changedetection.io | eeba8c864d8375775ac04720856e537036ca643e | ["Apache-2.0"] | null | null | null | changedetectionio/tests/test_notification.py | Pritam-Patra/changedetection.io | eeba8c864d8375775ac04720856e537036ca643e | ["Apache-2.0"] | null | null | null |
import os
import time
import re
from flask import url_for
from . util import set_original_response, set_modified_response, live_server_setup
import logging
from changedetectionio.notification import default_notification_body, default_notification_title
# Hard to just add more live server URLs when one test is already running (I think)
# So we add our test here (was in a different file)
def test_check_notification(client, live_server):
live_server_setup(live_server)
set_original_response()
# Give the endpoint time to spin up
time.sleep(3)
# Re 360 - new install should have defaults set
res = client.get(url_for("settings_page"))
assert default_notification_body.encode() in res.data
assert default_notification_title.encode() in res.data
# When test mode is in BASE_URL env mode, we should see this already configured
env_base_url = os.getenv('BASE_URL', '').strip()
if len(env_base_url):
logging.debug(">>> BASE_URL enabled, looking for %s", env_base_url)
res = client.get(url_for("settings_page"))
assert bytes(env_base_url.encode('utf-8')) in res.data
else:
logging.debug(">>> SKIPPING BASE_URL check")
# re #242 - when you edited an existing new entry, it would not correctly show the notification settings
# Add our URL to the import page
test_url = url_for('test_endpoint', _external=True)
res = client.post(
url_for("api_watch_add"),
data={"url": test_url, "tag": ''},
follow_redirects=True
)
assert b"Watch added" in res.data
# Give the thread time to pick up the first version
time.sleep(3)
# Goto the edit page, add our ignore text
# Add our URL to the import page
url = url_for('test_notification_endpoint', _external=True)
notification_url = url.replace('http', 'json')
print (">>>> Notification URL: "+notification_url)
res = client.post(
url_for("edit_page", uuid="first"),
data={"notification_urls": notification_url,
"notification_title": "New ChangeDetection.io Notification - {watch_url}",
"notification_body": "BASE URL: {base_url}\n"
"Watch URL: {watch_url}\n"
"Watch UUID: {watch_uuid}\n"
"Watch title: {watch_title}\n"
"Watch tag: {watch_tag}\n"
"Preview: {preview_url}\n"
"Diff URL: {diff_url}\n"
"Snapshot: {current_snapshot}\n"
"Diff: {diff}\n"
"Diff Full: {diff_full}\n"
":-)",
"notification_format": "Text",
"url": test_url,
"tag": "my tag",
"title": "my title",
"headers": "",
"fetch_backend": "html_requests",
"trigger_check": "y"},
follow_redirects=True
)
assert b"Updated watch." in res.data
assert b"Test notification queued" in res.data
# Hit the edit page, be sure that we saved it
res = client.get(
url_for("edit_page", uuid="first"))
assert bytes(notification_url.encode('utf-8')) in res.data
# Re #242 - wasnt saving?
assert bytes("New ChangeDetection.io Notification".encode('utf-8')) in res.data
# Because we hit 'send test notification on save'
time.sleep(3)
notification_submission = None
# Verify what was sent as a notification, this file should exist
with open("test-datastore/notification.txt", "r") as f:
notification_submission = f.read()
# Did we see the URL that had a change, in the notification?
assert test_url in notification_submission
os.unlink("test-datastore/notification.txt")
set_modified_response()
# Trigger a check
client.get(url_for("api_watch_checknow"), follow_redirects=True)
# Give the thread time to pick it up
time.sleep(3)
# Did the front end see it?
res = client.get(
url_for("index"))
assert bytes("just now".encode('utf-8')) in res.data
notification_submission=None
# Verify what was sent as a notification
with open("test-datastore/notification.txt", "r") as f:
notification_submission = f.read()
# Did we see the URL that had a change, in the notification?
assert test_url in notification_submission
# Diff was correctly executed
assert "Diff Full: Some initial text" in notification_submission
assert "Diff: (changed) Which is across multiple lines" in notification_submission
assert "(-> into) which has this one new line" in notification_submission
if env_base_url:
# Re #65 - did we see our BASE_URl ?
logging.debug (">>> BASE_URL checking in notification: %s", env_base_url)
assert env_base_url in notification_submission
else:
logging.debug(">>> Skipping BASE_URL check")
## Now configure something clever, we go into custom config (non-default) mode, this is returned by the endpoint
with open("test-datastore/endpoint-content.txt", "w") as f:
f.write(";jasdhflkjadshf kjhsdfkjl ahslkjf haslkjd hfaklsj hf\njl;asdhfkasj stuff we will detect\n")
res = client.post(
url_for("settings_page"),
data={"notification_title": "New ChangeDetection.io Notification - {watch_url}",
"notification_urls": "json://foobar.com", #Re #143 should not see that it sent without [test checkbox]
"minutes_between_check": 180,
"fetch_backend": "html_requests",
},
follow_redirects=True
)
assert b"Settings updated." in res.data
# Re #143 - should not see this if we didnt hit the test box
assert b"Test notification queued" not in res.data
# Trigger a check
client.get(url_for("api_watch_checknow"), follow_redirects=True)
# Give the thread time to pick it up
time.sleep(3)
# Did the front end see it?
res = client.get(
url_for("index"))
assert bytes("just now".encode('utf-8')) in res.data
with open("test-datastore/notification.txt", "r") as f:
notification_submission = f.read()
print ("Notification submission was:", notification_submission)
# Re #342 - check for accidental python byte encoding of non-utf8/string
assert "b'" not in notification_submission
assert re.search('Watch UUID: [0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}', notification_submission, re.IGNORECASE)
assert "Watch title: my title" in notification_submission
assert "Watch tag: my tag" in notification_submission
assert "diff/" in notification_submission
assert "preview/" in notification_submission
assert ":-)" in notification_submission
assert "New ChangeDetection.io Notification - {}".format(test_url) in notification_submission
# This should insert the {current_snapshot}
assert "stuff we will detect" in notification_submission
# Prove that "content constantly being marked as Changed with no Updating causes notification" is not a thing
# https://github.com/dgtlmoon/changedetection.io/discussions/192
os.unlink("test-datastore/notification.txt")
# Trigger a check
client.get(url_for("api_watch_checknow"), follow_redirects=True)
time.sleep(3)
client.get(url_for("api_watch_checknow"), follow_redirects=True)
time.sleep(3)
client.get(url_for("api_watch_checknow"), follow_redirects=True)
time.sleep(3)
assert os.path.exists("test-datastore/notification.txt") == False
# Now adding a wrong token should give us an error
res = client.post(
url_for("settings_page"),
data={"notification_title": "New ChangeDetection.io Notification - {watch_url}",
"notification_body": "Rubbish: {rubbish}\n",
"notification_format": "Text",
"notification_urls": "json://foobar.com",
"minutes_between_check": 180,
"fetch_backend": "html_requests"
},
follow_redirects=True
)
assert bytes("is not a valid token".encode('utf-8')) in res.data
# Re #360 some validation
res = client.post(
url_for("edit_page", uuid="first"),
data={"notification_urls": notification_url,
"notification_title": "",
"notification_body": "",
"notification_format": "Text",
"url": test_url,
"tag": "my tag",
"title": "my title",
"headers": "",
"fetch_backend": "html_requests",
"trigger_check": "y"},
follow_redirects=True
)
assert b"Notification Body and Title is required when a Notification URL is used" in res.data
| 38.991266
| 121
| 0.634786
|
import os
import time
import re
from flask import url_for
from . util import set_original_response, set_modified_response, live_server_setup
import logging
from changedetectionio.notification import default_notification_body, default_notification_title
def test_check_notification(client, live_server):
live_server_setup(live_server)
set_original_response()
time.sleep(3)
res = client.get(url_for("settings_page"))
assert default_notification_body.encode() in res.data
assert default_notification_title.encode() in res.data
env_base_url = os.getenv('BASE_URL', '').strip()
if len(env_base_url):
logging.debug(">>> BASE_URL enabled, looking for %s", env_base_url)
res = client.get(url_for("settings_page"))
assert bytes(env_base_url.encode('utf-8')) in res.data
else:
logging.debug(">>> SKIPPING BASE_URL check")
r("api_watch_add"),
data={"url": test_url, "tag": ''},
follow_redirects=True
)
assert b"Watch added" in res.data
time.sleep(3)
url = url_for('test_notification_endpoint', _external=True)
notification_url = url.replace('http', 'json')
print (">>>> Notification URL: "+notification_url)
res = client.post(
url_for("edit_page", uuid="first"),
data={"notification_urls": notification_url,
"notification_title": "New ChangeDetection.io Notification - {watch_url}",
"notification_body": "BASE URL: {base_url}\n"
"Watch URL: {watch_url}\n"
"Watch UUID: {watch_uuid}\n"
"Watch title: {watch_title}\n"
"Watch tag: {watch_tag}\n"
"Preview: {preview_url}\n"
"Diff URL: {diff_url}\n"
"Snapshot: {current_snapshot}\n"
"Diff: {diff}\n"
"Diff Full: {diff_full}\n"
":-)",
"notification_format": "Text",
"url": test_url,
"tag": "my tag",
"title": "my title",
"headers": "",
"fetch_backend": "html_requests",
"trigger_check": "y"},
follow_redirects=True
)
assert b"Updated watch." in res.data
assert b"Test notification queued" in res.data
res = client.get(
url_for("edit_page", uuid="first"))
assert bytes(notification_url.encode('utf-8')) in res.data
    assert bytes("New ChangeDetection.io Notification".encode('utf-8')) in res.data
time.sleep(3)
notification_submission = None
with open("test-datastore/notification.txt", "r") as f:
notification_submission = f.read()
assert test_url in notification_submission
os.unlink("test-datastore/notification.txt")
set_modified_response()
client.get(url_for("api_watch_checknow"), follow_redirects=True)
time.sleep(3)
res = client.get(
url_for("index"))
assert bytes("just now".encode('utf-8')) in res.data
notification_submission=None
with open("test-datastore/notification.txt", "r") as f:
notification_submission = f.read()
assert test_url in notification_submission
assert "Diff Full: Some initial text" in notification_submission
assert "Diff: (changed) Which is across multiple lines" in notification_submission
assert "(-> into) which has this one new line" in notification_submission
if env_base_url:
        logging.debug(">>> BASE_URL checking in notification: %s", env_base_url)
assert env_base_url in notification_submission
else:
logging.debug(">>> Skipping BASE_URL check")
    with open("test-datastore/endpoint-content.txt", "w") as f:
        f.write(";jasdhflkjadshf kjhsdfkjl ahslkjf haslkjd hfaklsj hf\njl;asdhfkasj stuff we will detect\n")
res = client.post(
url_for("settings_page"),
data={"notification_title": "New ChangeDetection.io Notification - {watch_url}",
"notification_urls": "json://foobar.com", "fetch_backend": "html_requests",
},
follow_redirects=True
)
assert b"Settings updated." in res.data
client.get(url_for("api_watch_checknow"), follow_redirects=True)
time.sleep(3)
res = client.get(
url_for("index"))
assert bytes("just now".encode('utf-8')) in res.data
with open("test-datastore/notification.txt", "r") as f:
notification_submission = f.read()
print ("Notification submission was:", notification_submission)
    assert "b'" not in notification_submission
    assert re.search('Watch UUID: [0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}', notification_submission, re.IGNORECASE)
assert "Watch title: my title" in notification_submission
assert "Watch tag: my tag" in notification_submission
assert "diff/" in notification_submission
assert "preview/" in notification_submission
assert ":-)" in notification_submission
assert "New ChangeDetection.io Notification - {}".format(test_url) in notification_submission
# This should insert the {current_snapshot}
assert "stuff we will detect" in notification_submission
# Prove that "content constantly being marked as Changed with no Updating causes notification" is not a thing
# https://github.com/dgtlmoon/changedetection.io/discussions/192
os.unlink("test-datastore/notification.txt")
# Trigger a check
client.get(url_for("api_watch_checknow"), follow_redirects=True)
time.sleep(3)
client.get(url_for("api_watch_checknow"), follow_redirects=True)
time.sleep(3)
client.get(url_for("api_watch_checknow"), follow_redirects=True)
time.sleep(3)
assert os.path.exists("test-datastore/notification.txt") == False
# Now adding a wrong token should give us an error
res = client.post(
url_for("settings_page"),
data={"notification_title": "New ChangeDetection.io Notification - {watch_url}",
"notification_body": "Rubbish: {rubbish}\n",
"notification_format": "Text",
"notification_urls": "json://foobar.com",
"minutes_between_check": 180,
"fetch_backend": "html_requests"
},
follow_redirects=True
)
assert bytes("is not a valid token".encode('utf-8')) in res.data
# Re #360 some validation
res = client.post(
url_for("edit_page", uuid="first"),
data={"notification_urls": notification_url,
"notification_title": "",
"notification_body": "",
"notification_format": "Text",
"url": test_url,
"tag": "my tag",
"title": "my title",
"headers": "",
"fetch_backend": "html_requests",
"trigger_check": "y"},
follow_redirects=True
)
assert b"Notification Body and Title is required when a Notification URL is used" in res.data
| true
| true
|
79054d8f0181b9529f319719809883d247168857
| 1,732
|
py
|
Python
|
kachery/_temporarydirectory.py
|
flatironinstitute/kachery
|
d1076f6e8e2df26d3440fdb89f366ec44a502b9b
|
[
"Apache-2.0"
] | 8
|
2020-03-05T19:41:03.000Z
|
2021-11-19T04:40:10.000Z
|
kachery/_temporarydirectory.py
|
flatironinstitute/kachery
|
d1076f6e8e2df26d3440fdb89f366ec44a502b9b
|
[
"Apache-2.0"
] | 8
|
2019-11-15T03:40:07.000Z
|
2020-09-08T22:14:07.000Z
|
kachery/_temporarydirectory.py
|
flatironinstitute/kachery
|
d1076f6e8e2df26d3440fdb89f366ec44a502b9b
|
[
"Apache-2.0"
] | 2
|
2020-08-06T19:56:12.000Z
|
2021-09-23T01:05:24.000Z
|
import os
import shutil
import tempfile
import time
class TemporaryDirectory():
def __init__(self, remove: bool=True, prefix: str='tmp'):
self._remove = remove
self._prefix = prefix
def __enter__(self) -> str:
if 'KACHERY_STORAGE_DIR' in os.environ:
storage_dir = os.getenv('KACHERY_STORAGE_DIR')
else:
storage_dir = None
if storage_dir is not None:
dirpath = os.path.join(storage_dir, 'tmp')
if not os.path.exists(dirpath):
try:
os.mkdir(dirpath)
except:
# maybe somebody else created this directory
                    if not os.path.exists(dirpath):
raise Exception(f'Unexpected problem creating temporary directory: {dirpath}')
else:
dirpath = None
self._path = str(tempfile.mkdtemp(prefix=self._prefix, dir=dirpath))
return self._path
def __exit__(self, exc_type, exc_val, exc_tb):
if self._remove:
_rmdir_with_retries(self._path, num_retries=5)
def path(self):
return self._path
def _rmdir_with_retries(dirname: str, num_retries: int, delay_between_tries: float=1):
for retry_num in range(1, num_retries + 1):
if not os.path.exists(dirname):
return
try:
shutil.rmtree(dirname)
break
except: # pragma: no cover
if retry_num < num_retries:
print('Retrying to remove directory: {}'.format(dirname))
time.sleep(delay_between_tries)
else:
raise Exception('Unable to remove directory after {} tries: {}'.format(num_retries, dirname))
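TemporaryDirectory is a context manager: __enter__ creates the directory (under KACHERY_STORAGE_DIR/tmp when that variable is set, otherwise in the system default temp location) and __exit__ removes it with retries when remove=True. A minimal usage sketch; the file name and contents are arbitrary:
import os

# Create a scratch directory, write into it, and let __exit__ clean it up.
with TemporaryDirectory(remove=True, prefix='example-') as tmpdir:
    with open(os.path.join(tmpdir, 'scratch.txt'), 'w') as f:
        f.write('intermediate data')
    assert os.path.exists(tmpdir)
# After the with-block the directory has been removed (remove=True).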
| 33.307692
| 109
| 0.586028
|
import os
import shutil
import tempfile
import time
class TemporaryDirectory():
def __init__(self, remove: bool=True, prefix: str='tmp'):
self._remove = remove
self._prefix = prefix
def __enter__(self) -> str:
if 'KACHERY_STORAGE_DIR' in os.environ:
storage_dir = os.getenv('KACHERY_STORAGE_DIR')
else:
storage_dir = None
if storage_dir is not None:
dirpath = os.path.join(storage_dir, 'tmp')
if not os.path.exists(dirpath):
try:
os.mkdir(dirpath)
except:
                    if not os.path.exists(dirpath):
raise Exception(f'Unexpected problem creating temporary directory: {dirpath}')
else:
dirpath = None
self._path = str(tempfile.mkdtemp(prefix=self._prefix, dir=dirpath))
return self._path
def __exit__(self, exc_type, exc_val, exc_tb):
if self._remove:
_rmdir_with_retries(self._path, num_retries=5)
def path(self):
return self._path
def _rmdir_with_retries(dirname: str, num_retries: int, delay_between_tries: float=1):
for retry_num in range(1, num_retries + 1):
if not os.path.exists(dirname):
return
try:
shutil.rmtree(dirname)
break
except:
if retry_num < num_retries:
print('Retrying to remove directory: {}'.format(dirname))
time.sleep(delay_between_tries)
else:
raise Exception('Unable to remove directory after {} tries: {}'.format(num_retries, dirname))
| true
| true
|
79054e2c58d5213eaf729dd2add06483f2c10192
| 16,145
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class SpbNetworkRange(Base):
"""The SPB Network Range.
The SpbNetworkRange class encapsulates a list of spbNetworkRange resources that are managed by the user.
A list of resources can be retrieved from the server using the SpbNetworkRange.find() method.
The list can be managed by using the SpbNetworkRange.add() and SpbNetworkRange.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'spbNetworkRange'
_SDM_ATT_MAP = {
'EnableAdvertiseNetworkRange': 'enableAdvertiseNetworkRange',
'EnableHostName': 'enableHostName',
'EntryColumn': 'entryColumn',
'EntryRow': 'entryRow',
'HostNamePrefix': 'hostNamePrefix',
'InterfaceMetric': 'interfaceMetric',
'NoOfColumns': 'noOfColumns',
'NoOfRows': 'noOfRows',
'StartSystemId': 'startSystemId',
'SystemIdIncrementBy': 'systemIdIncrementBy',
}
def __init__(self, parent):
super(SpbNetworkRange, self).__init__(parent)
@property
def SpbOutsideLinks(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spboutsidelinks_dfb7b1e816409cddb14e138ebc2096dc.SpbOutsideLinks): An instance of the SpbOutsideLinks class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spboutsidelinks_dfb7b1e816409cddb14e138ebc2096dc import SpbOutsideLinks
if self._properties.get('SpbOutsideLinks', None) is None:
return SpbOutsideLinks(self)
else:
return self._properties.get('SpbOutsideLinks')
@property
def SpbmNodeTopologyRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spbmnodetopologyrange_199093afa11cd9f4488faaa1ad3ec3a7.SpbmNodeTopologyRange): An instance of the SpbmNodeTopologyRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spbmnodetopologyrange_199093afa11cd9f4488faaa1ad3ec3a7 import SpbmNodeTopologyRange
if self._properties.get('SpbmNodeTopologyRange', None) is None:
return SpbmNodeTopologyRange(self)
else:
return self._properties.get('SpbmNodeTopologyRange')
@property
def EnableAdvertiseNetworkRange(self):
"""
Returns
-------
- bool: If true, this SPB ISIS Network Range is advertised.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableAdvertiseNetworkRange'])
@EnableAdvertiseNetworkRange.setter
def EnableAdvertiseNetworkRange(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableAdvertiseNetworkRange'], value)
@property
def EnableHostName(self):
"""
Returns
-------
- bool: If true, the host name of the router is activated.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableHostName'])
@EnableHostName.setter
def EnableHostName(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableHostName'], value)
@property
def EntryColumn(self):
"""
Returns
-------
- number: The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.
"""
return self._get_attribute(self._SDM_ATT_MAP['EntryColumn'])
@EntryColumn.setter
def EntryColumn(self, value):
self._set_attribute(self._SDM_ATT_MAP['EntryColumn'], value)
@property
def EntryRow(self):
"""
Returns
-------
- number: The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.
"""
return self._get_attribute(self._SDM_ATT_MAP['EntryRow'])
@EntryRow.setter
def EntryRow(self, value):
self._set_attribute(self._SDM_ATT_MAP['EntryRow'], value)
@property
def HostNamePrefix(self):
"""
Returns
-------
- str: The host name prefix information.
"""
return self._get_attribute(self._SDM_ATT_MAP['HostNamePrefix'])
@HostNamePrefix.setter
def HostNamePrefix(self, value):
self._set_attribute(self._SDM_ATT_MAP['HostNamePrefix'], value)
@property
def InterfaceMetric(self):
"""
Returns
-------
- number: The metric cost associated with this emulated SPB ISIS router.
"""
return self._get_attribute(self._SDM_ATT_MAP['InterfaceMetric'])
@InterfaceMetric.setter
def InterfaceMetric(self, value):
self._set_attribute(self._SDM_ATT_MAP['InterfaceMetric'], value)
@property
def NoOfColumns(self):
"""
Returns
-------
- number: The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).
"""
return self._get_attribute(self._SDM_ATT_MAP['NoOfColumns'])
@NoOfColumns.setter
def NoOfColumns(self, value):
self._set_attribute(self._SDM_ATT_MAP['NoOfColumns'], value)
@property
def NoOfRows(self):
"""
Returns
-------
- number: The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).
"""
return self._get_attribute(self._SDM_ATT_MAP['NoOfRows'])
@NoOfRows.setter
def NoOfRows(self, value):
self._set_attribute(self._SDM_ATT_MAP['NoOfRows'], value)
@property
def StartSystemId(self):
"""
Returns
-------
- str: The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.
"""
return self._get_attribute(self._SDM_ATT_MAP['StartSystemId'])
@StartSystemId.setter
def StartSystemId(self, value):
self._set_attribute(self._SDM_ATT_MAP['StartSystemId'], value)
@property
def SystemIdIncrementBy(self):
"""
Returns
-------
- str: This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.
"""
return self._get_attribute(self._SDM_ATT_MAP['SystemIdIncrementBy'])
@SystemIdIncrementBy.setter
def SystemIdIncrementBy(self, value):
self._set_attribute(self._SDM_ATT_MAP['SystemIdIncrementBy'], value)
def update(self, EnableAdvertiseNetworkRange=None, EnableHostName=None, EntryColumn=None, EntryRow=None, HostNamePrefix=None, InterfaceMetric=None, NoOfColumns=None, NoOfRows=None, StartSystemId=None, SystemIdIncrementBy=None):
"""Updates spbNetworkRange resource on the server.
Args
----
- EnableAdvertiseNetworkRange (bool): If true, this SPB ISIS Network Range is advertised.
- EnableHostName (bool): If true, the host name of the router is activated.
- EntryColumn (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.
- EntryRow (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.
- HostNamePrefix (str): The host name prefix information.
- InterfaceMetric (number): The metric cost associated with this emulated SPB ISIS router.
- NoOfColumns (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).
- NoOfRows (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).
- StartSystemId (str): The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.
- SystemIdIncrementBy (str): This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, EnableAdvertiseNetworkRange=None, EnableHostName=None, EntryColumn=None, EntryRow=None, HostNamePrefix=None, InterfaceMetric=None, NoOfColumns=None, NoOfRows=None, StartSystemId=None, SystemIdIncrementBy=None):
"""Adds a new spbNetworkRange resource on the server and adds it to the container.
Args
----
- EnableAdvertiseNetworkRange (bool): If true, this SPB ISIS Network Range is advertised.
- EnableHostName (bool): If true, the host name of the router is activated.
- EntryColumn (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.
- EntryRow (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.
- HostNamePrefix (str): The host name prefix information.
- InterfaceMetric (number): The metric cost associated with this emulated SPB ISIS router.
- NoOfColumns (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).
- NoOfRows (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).
- StartSystemId (str): The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.
- SystemIdIncrementBy (str): This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.
Returns
-------
- self: This instance with all currently retrieved spbNetworkRange resources using find and the newly added spbNetworkRange resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained spbNetworkRange resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, EnableAdvertiseNetworkRange=None, EnableHostName=None, EntryColumn=None, EntryRow=None, HostNamePrefix=None, InterfaceMetric=None, NoOfColumns=None, NoOfRows=None, StartSystemId=None, SystemIdIncrementBy=None):
"""Finds and retrieves spbNetworkRange resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve spbNetworkRange resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all spbNetworkRange resources from the server.
Args
----
- EnableAdvertiseNetworkRange (bool): If true, this SPB ISIS Network Range is advertised.
- EnableHostName (bool): If true, the host name of the router is activated.
- EntryColumn (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.
- EntryRow (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.
- HostNamePrefix (str): The host name prefix information.
- InterfaceMetric (number): The metric cost associated with this emulated SPB ISIS router.
- NoOfColumns (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).
- NoOfRows (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).
- StartSystemId (str): The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.
- SystemIdIncrementBy (str): This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.
Returns
-------
- self: This instance with matching spbNetworkRange resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of spbNetworkRange data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the spbNetworkRange resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
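As the class docstring notes, spbNetworkRange resources are managed through find(), add() and remove(). A minimal sketch, assuming an already-connected ixnetwork_restpy session and that isis_router is a parent object exposing the SpbNetworkRange container (the parent attribute path is illustrative):
# Assumes an existing parent object (here called isis_router) whose
# SpbNetworkRange property returns this container class.
network_ranges = isis_router.SpbNetworkRange

# Add a 3 x 3 emulated grid (9 virtual routers) and advertise it.
nr = network_ranges.add(NoOfRows=3, NoOfColumns=3,
                        EnableAdvertiseNetworkRange=True,
                        StartSystemId='00 00 00 00 00 01')

# Retrieve matching resources later; named parameters are evaluated as regex on the server.
found = network_ranges.find(EnableAdvertiseNetworkRange=True)

# Update an attribute, then delete all contained resources.
nr.update(InterfaceMetric=10)
nr.remove()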
| 52.080645
| 266
| 0.698235
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class SpbNetworkRange(Base):
__slots__ = ()
_SDM_NAME = 'spbNetworkRange'
_SDM_ATT_MAP = {
'EnableAdvertiseNetworkRange': 'enableAdvertiseNetworkRange',
'EnableHostName': 'enableHostName',
'EntryColumn': 'entryColumn',
'EntryRow': 'entryRow',
'HostNamePrefix': 'hostNamePrefix',
'InterfaceMetric': 'interfaceMetric',
'NoOfColumns': 'noOfColumns',
'NoOfRows': 'noOfRows',
'StartSystemId': 'startSystemId',
'SystemIdIncrementBy': 'systemIdIncrementBy',
}
def __init__(self, parent):
super(SpbNetworkRange, self).__init__(parent)
@property
def SpbOutsideLinks(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spboutsidelinks_dfb7b1e816409cddb14e138ebc2096dc import SpbOutsideLinks
if self._properties.get('SpbOutsideLinks', None) is None:
return SpbOutsideLinks(self)
else:
return self._properties.get('SpbOutsideLinks')
@property
def SpbmNodeTopologyRange(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spbmnodetopologyrange_199093afa11cd9f4488faaa1ad3ec3a7 import SpbmNodeTopologyRange
if self._properties.get('SpbmNodeTopologyRange', None) is None:
return SpbmNodeTopologyRange(self)
else:
return self._properties.get('SpbmNodeTopologyRange')
@property
def EnableAdvertiseNetworkRange(self):
return self._get_attribute(self._SDM_ATT_MAP['EnableAdvertiseNetworkRange'])
@EnableAdvertiseNetworkRange.setter
def EnableAdvertiseNetworkRange(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableAdvertiseNetworkRange'], value)
@property
def EnableHostName(self):
return self._get_attribute(self._SDM_ATT_MAP['EnableHostName'])
@EnableHostName.setter
def EnableHostName(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableHostName'], value)
@property
def EntryColumn(self):
return self._get_attribute(self._SDM_ATT_MAP['EntryColumn'])
@EntryColumn.setter
def EntryColumn(self, value):
self._set_attribute(self._SDM_ATT_MAP['EntryColumn'], value)
@property
def EntryRow(self):
return self._get_attribute(self._SDM_ATT_MAP['EntryRow'])
@EntryRow.setter
def EntryRow(self, value):
self._set_attribute(self._SDM_ATT_MAP['EntryRow'], value)
@property
def HostNamePrefix(self):
return self._get_attribute(self._SDM_ATT_MAP['HostNamePrefix'])
@HostNamePrefix.setter
def HostNamePrefix(self, value):
self._set_attribute(self._SDM_ATT_MAP['HostNamePrefix'], value)
@property
def InterfaceMetric(self):
return self._get_attribute(self._SDM_ATT_MAP['InterfaceMetric'])
@InterfaceMetric.setter
def InterfaceMetric(self, value):
self._set_attribute(self._SDM_ATT_MAP['InterfaceMetric'], value)
@property
def NoOfColumns(self):
return self._get_attribute(self._SDM_ATT_MAP['NoOfColumns'])
@NoOfColumns.setter
def NoOfColumns(self, value):
self._set_attribute(self._SDM_ATT_MAP['NoOfColumns'], value)
@property
def NoOfRows(self):
return self._get_attribute(self._SDM_ATT_MAP['NoOfRows'])
@NoOfRows.setter
def NoOfRows(self, value):
self._set_attribute(self._SDM_ATT_MAP['NoOfRows'], value)
@property
def StartSystemId(self):
return self._get_attribute(self._SDM_ATT_MAP['StartSystemId'])
@StartSystemId.setter
def StartSystemId(self, value):
self._set_attribute(self._SDM_ATT_MAP['StartSystemId'], value)
@property
def SystemIdIncrementBy(self):
return self._get_attribute(self._SDM_ATT_MAP['SystemIdIncrementBy'])
@SystemIdIncrementBy.setter
def SystemIdIncrementBy(self, value):
self._set_attribute(self._SDM_ATT_MAP['SystemIdIncrementBy'], value)
def update(self, EnableAdvertiseNetworkRange=None, EnableHostName=None, EntryColumn=None, EntryRow=None, HostNamePrefix=None, InterfaceMetric=None, NoOfColumns=None, NoOfRows=None, StartSystemId=None, SystemIdIncrementBy=None):
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, EnableAdvertiseNetworkRange=None, EnableHostName=None, EntryColumn=None, EntryRow=None, HostNamePrefix=None, InterfaceMetric=None, NoOfColumns=None, NoOfRows=None, StartSystemId=None, SystemIdIncrementBy=None):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
self._delete()
def find(self, EnableAdvertiseNetworkRange=None, EnableHostName=None, EntryColumn=None, EntryRow=None, HostNamePrefix=None, InterfaceMetric=None, NoOfColumns=None, NoOfRows=None, StartSystemId=None, SystemIdIncrementBy=None):
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
return self._read(href)
| true
| true
|
79054e5faf9527e861973e04364d0957d65a1099
| 1,237
|
py
|
Python
|
g-code-testing/g_code_test_data/http/modules/magdeck.py
|
y3rsh/opentrons
|
b446567910db218030fef40396ab2255cc074bba
|
[
"Apache-2.0"
] | 235
|
2017-10-27T20:37:27.000Z
|
2022-03-30T14:09:49.000Z
|
g-code-testing/g_code_test_data/http/modules/magdeck.py
|
y3rsh/opentrons
|
b446567910db218030fef40396ab2255cc074bba
|
[
"Apache-2.0"
] | 8,425
|
2017-10-26T15:25:43.000Z
|
2022-03-31T23:54:26.000Z
|
g-code-testing/g_code_test_data/http/modules/magdeck.py
|
y3rsh/opentrons
|
b446567910db218030fef40396ab2255cc074bba
|
[
"Apache-2.0"
] | 130
|
2017-11-09T21:02:37.000Z
|
2022-03-15T18:01:24.000Z
|
from functools import partial
from g_code_test_data.http.http_settings import HTTP_SETTINGS
from g_code_test_data.g_code_configuration import HTTPGCodeConfirmConfig
from robot_server.service.legacy.routers.modules import post_serial_command
from robot_server.service.legacy.models.modules import SerialCommand
from opentrons.hardware_control.emulation.magdeck import SERIAL as SERIAL_NUM
MAGDECK_CALIBRATE = HTTPGCodeConfirmConfig(
name='magdeck_calibrate',
executable=partial(
post_serial_command,
command=SerialCommand(command_type='calibrate'),
serial=SERIAL_NUM,
),
settings=HTTP_SETTINGS,
)
MAGDECK_DEACTIVATE = HTTPGCodeConfirmConfig(
name='magdeck_deactivate',
executable=partial(
post_serial_command,
command=SerialCommand(command_type='deactivate'),
serial=SERIAL_NUM,
),
settings=HTTP_SETTINGS,
)
MAGDECK_ENGAGE = HTTPGCodeConfirmConfig(
name='magdeck_engage',
executable=partial(
post_serial_command,
command=SerialCommand(command_type='engage', args=[5.1]),
serial=SERIAL_NUM,
),
settings=HTTP_SETTINGS,
)
MAGDECK_CONFIGURATIONS = [
MAGDECK_CALIBRATE,
MAGDECK_DEACTIVATE,
MAGDECK_ENGAGE,
]
| 29.452381
| 77
| 0.760711
|
from functools import partial
from g_code_test_data.http.http_settings import HTTP_SETTINGS
from g_code_test_data.g_code_configuration import HTTPGCodeConfirmConfig
from robot_server.service.legacy.routers.modules import post_serial_command
from robot_server.service.legacy.models.modules import SerialCommand
from opentrons.hardware_control.emulation.magdeck import SERIAL as SERIAL_NUM
MAGDECK_CALIBRATE = HTTPGCodeConfirmConfig(
name='magdeck_calibrate',
executable=partial(
post_serial_command,
command=SerialCommand(command_type='calibrate'),
serial=SERIAL_NUM,
),
settings=HTTP_SETTINGS,
)
MAGDECK_DEACTIVATE = HTTPGCodeConfirmConfig(
name='magdeck_deactivate',
executable=partial(
post_serial_command,
command=SerialCommand(command_type='deactivate'),
serial=SERIAL_NUM,
),
settings=HTTP_SETTINGS,
)
MAGDECK_ENGAGE = HTTPGCodeConfirmConfig(
name='magdeck_engage',
executable=partial(
post_serial_command,
command=SerialCommand(command_type='engage', args=[5.1]),
serial=SERIAL_NUM,
),
settings=HTTP_SETTINGS,
)
MAGDECK_CONFIGURATIONS = [
MAGDECK_CALIBRATE,
MAGDECK_DEACTIVATE,
MAGDECK_ENGAGE,
]
| true
| true
|
79054ecc4d68fd8489ac680e07dd052948e513d5
| 3,740
|
py
|
Python
|
django/basic_auth/example1/decorators.py
|
tullyrankin/python-frameworks
|
d4bccf6c537c26bc421afadc09b5c83c3c5a5f35
|
[
"MIT"
] | 2
|
2016-08-15T07:05:40.000Z
|
2017-04-03T14:50:10.000Z
|
django/basic_auth/example1/decorators.py
|
tullyrankin/python-frameworks
|
d4bccf6c537c26bc421afadc09b5c83c3c5a5f35
|
[
"MIT"
] | null | null | null |
django/basic_auth/example1/decorators.py
|
tullyrankin/python-frameworks
|
d4bccf6c537c26bc421afadc09b5c83c3c5a5f35
|
[
"MIT"
] | null | null | null |
import base64
from django.http import HttpResponse
from django.contrib.auth import authenticate, login
# Reference: https://www.djangosnippets.org/snippets/243/
def view_or_basicauth(view, request, test_func, realm="", *args, **kwargs):
"""
This is a helper function used by both 'logged_in_or_basicauth' and
'has_perm_or_basicauth' that does the nitty of determining if they
are already logged in or if they have provided proper http-authorization
and returning the view if all goes well, otherwise responding with a 401.
"""
if test_func(request.user):
# Already logged in, just return the view.
return view(request, *args, **kwargs)
# They are not logged in. See if they provided login credentials
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
# NOTE: We only support basic authentication for now.
if auth[0].lower() == "basic":
uname, passwd = base64.b64decode(auth[1]).split(':')
user = authenticate(username=uname, password=passwd)
if user is not None:
if user.is_active:
login(request, user)
request.user = user
return view(request, *args, **kwargs)
# Either they did not provide an authorization header or
# something in the authorization attempt failed. Send a 401
# back to them to ask them to authenticate.
response = HttpResponse()
response.status_code = 401
response['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return response
def logged_in_or_basicauth(realm=""):
"""
A simple decorator that requires a user to be logged in. If they are not
logged in the request is examined for a 'authorization' header.
If the header is present it is tested for basic authentication and
the user is logged in with the provided credentials.
If the header is not present a http 401 is sent back to the
requestor to provide credentials.
The purpose of this is that in several django projects I have needed
several specific views that need to support basic authentication, yet the
web site as a whole used django's provided authentication.
The uses for this are for urls that are access programmatically such as
by rss feed readers, yet the view requires a user to be logged in. Many rss
readers support supplying the authentication credentials via http basic
auth (and they do NOT support a redirect to a form where they post a
username/password.)
Usage is simple:
@logged_in_or_basicauth()
def your_view:
...
You can provide the name of the realm to ask for authentication within.
"""
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.is_authenticated(),
realm, *args, **kwargs)
return wrapper
return view_decorator
def has_perm_or_basicauth(perm, realm=""):
"""
This is similar to the above decorator 'logged_in_or_basicauth'
except that it requires the logged in user to have a specific
permission.
Use:
    @has_perm_or_basicauth('asforums.view_forumcollection')
def your_view:
...
"""
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.has_perm(perm),
realm, *args, **kwargs)
return wrapper
return view_decorator
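Both decorators wrap an ordinary Django view, as the docstrings describe. A minimal sketch of applying them; the view names, realm strings and the permission codename are illustrative:
from django.http import HttpResponse

# Any logged-in user, or a request carrying valid HTTP Basic credentials, may access this view.
@logged_in_or_basicauth(realm="feeds")
def private_feed(request):
    return HttpResponse("ok")

# Only users holding the given permission, authenticated via session or Basic auth.
@has_perm_or_basicauth('asforums.view_forumcollection', realm="forums")
def forum_collection(request):
    return HttpResponse("ok")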
| 37.4
| 79
| 0.649198
|
import base64
from django.http import HttpResponse
from django.contrib.auth import authenticate, login
def view_or_basicauth(view, request, test_func, realm="", *args, **kwargs):
if test_func(request.user):
return view(request, *args, **kwargs)
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
if auth[0].lower() == "basic":
uname, passwd = base64.b64decode(auth[1]).split(':')
user = authenticate(username=uname, password=passwd)
if user is not None:
if user.is_active:
login(request, user)
request.user = user
return view(request, *args, **kwargs)
response = HttpResponse()
response.status_code = 401
response['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return response
def logged_in_or_basicauth(realm=""):
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.is_authenticated(),
realm, *args, **kwargs)
return wrapper
return view_decorator
def has_perm_or_basicauth(perm, realm=""):
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.has_perm(perm),
realm, *args, **kwargs)
return wrapper
return view_decorator
| true
| true
|
79054f486c01712298f0bb79c509370968f8a559
| 4,124
|
py
|
Python
|
pydantic/validators.py
|
anentropic/pydantic
|
27887c6e997671ff0ea9d8f815e7628a40eb1134
|
[
"MIT"
] | null | null | null |
pydantic/validators.py
|
anentropic/pydantic
|
27887c6e997671ff0ea9d8f815e7628a40eb1134
|
[
"MIT"
] | null | null | null |
pydantic/validators.py
|
anentropic/pydantic
|
27887c6e997671ff0ea9d8f815e7628a40eb1134
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from enum import Enum
from pathlib import Path
from typing import Any
from uuid import UUID
from .datetime_parse import parse_date, parse_datetime, parse_duration, parse_time
from .exceptions import ConfigError, type_display
NoneType = type(None)
def display_as_type(v):
return type_display(type(v))
def not_none_validator(v):
if v is None:
        raise TypeError('None is not an allowed value')
return v
def str_validator(v) -> str:
if isinstance(v, (str, NoneType)):
return v
elif isinstance(v, (bytes, bytearray)):
return v.decode()
elif isinstance(v, (float, int, Decimal)):
# is there anything else we want to add here? If you think so, create an issue.
return str(v)
else:
raise TypeError(f'str or byte type expected not {display_as_type(v)}')
def bytes_validator(v) -> bytes:
if isinstance(v, (bytes, NoneType)):
return v
return str_validator(v).encode()
BOOL_STRINGS = {
'1',
'TRUE',
'ON',
'YES',
}
def bool_validator(v) -> bool:
if isinstance(v, bool):
return v
if isinstance(v, bytes):
v = v.decode()
if isinstance(v, str):
return v.upper() in BOOL_STRINGS
return bool(v)
def number_size_validator(v, config, **kwargs):
if config.min_number_size <= v <= config.max_number_size:
return v
raise ValueError(f'size not in range {config.min_number_size} to {config.max_number_size}')
def anystr_length_validator(v, config, **kwargs):
if v is None or config.min_anystr_length <= len(v) <= config.max_anystr_length:
return v
raise ValueError(f'length {len(v)} not in range {config.min_anystr_length} to {config.max_anystr_length}')
def ordered_dict_validator(v) -> OrderedDict:
if isinstance(v, OrderedDict):
return v
return OrderedDict(v)
def dict_validator(v) -> dict:
if isinstance(v, dict):
return v
try:
return dict(v)
except TypeError as e:
raise TypeError(f'value is not a valid dict, got {display_as_type(v)}') from e
def list_validator(v) -> list:
if isinstance(v, list):
return v
return list(v)
def tuple_validator(v) -> tuple:
if isinstance(v, tuple):
return v
return tuple(v)
def set_validator(v) -> set:
if isinstance(v, set):
return v
return set(v)
def enum_validator(v, field, config, **kwargs) -> Enum:
enum_v = field.type_(v)
return enum_v.value if config.use_enum_values else enum_v
def uuid_validator(v) -> UUID:
if isinstance(v, UUID):
return v
elif isinstance(v, str):
return UUID(v)
elif isinstance(v, (bytes, bytearray)):
return UUID(v.decode())
else:
raise ValueError(f'str, byte or native UUID type expected not {type(v)}')
# order is important here, for example: bool is a subclass of int so has to come first, datetime before date same
_VALIDATORS = [
(Enum, [enum_validator]),
(str, [not_none_validator, str_validator, anystr_length_validator]),
(bytes, [not_none_validator, bytes_validator, anystr_length_validator]),
(bool, [bool_validator]),
(int, [int, number_size_validator]),
(float, [float, number_size_validator]),
(Path, [Path]),
(datetime, [parse_datetime]),
(date, [parse_date]),
(time, [parse_time]),
(timedelta, [parse_duration]),
(OrderedDict, [ordered_dict_validator]),
(dict, [dict_validator]),
(list, [list_validator]),
(tuple, [tuple_validator]),
(set, [set_validator]),
(UUID, [not_none_validator, uuid_validator]),
]
def find_validators(type_):
if type_ is Any:
return []
for val_type, validators in _VALIDATORS:
try:
if issubclass(type_, val_type):
return validators
except TypeError as e:
raise TypeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})') from e
raise ConfigError(f'no validator found for {type_}')
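find_validators walks _VALIDATORS in order and returns the first matching chain, which is why bool precedes int and datetime precedes date in the list. A small sketch of looking up and applying a chain; the config object is a stand-in carrying only the attributes these validators read:
from types import SimpleNamespace

# Stand-in config with just the fields used by number_size_validator below.
config = SimpleNamespace(min_number_size=0, max_number_size=100,
                         min_anystr_length=0, max_anystr_length=255)

assert find_validators(bool) == [bool_validator]   # bool is matched before int
assert bool_validator('YES') is True               # matches BOOL_STRINGS
assert bool_validator(b'off') is False

chain = find_validators(int)                       # -> [int, number_size_validator]
v = int('42')
v = number_size_validator(v, config=config)
assert v == 42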
| 25.937107
| 113
| 0.660281
|
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from enum import Enum
from pathlib import Path
from typing import Any
from uuid import UUID
from .datetime_parse import parse_date, parse_datetime, parse_duration, parse_time
from .exceptions import ConfigError, type_display
NoneType = type(None)
def display_as_type(v):
return type_display(type(v))
def not_none_validator(v):
if v is None:
        raise TypeError('None is not an allowed value')
return v
def str_validator(v) -> str:
if isinstance(v, (str, NoneType)):
return v
elif isinstance(v, (bytes, bytearray)):
return v.decode()
elif isinstance(v, (float, int, Decimal)):
return str(v)
else:
raise TypeError(f'str or byte type expected not {display_as_type(v)}')
def bytes_validator(v) -> bytes:
if isinstance(v, (bytes, NoneType)):
return v
return str_validator(v).encode()
BOOL_STRINGS = {
'1',
'TRUE',
'ON',
'YES',
}
def bool_validator(v) -> bool:
if isinstance(v, bool):
return v
if isinstance(v, bytes):
v = v.decode()
if isinstance(v, str):
return v.upper() in BOOL_STRINGS
return bool(v)
def number_size_validator(v, config, **kwargs):
if config.min_number_size <= v <= config.max_number_size:
return v
raise ValueError(f'size not in range {config.min_number_size} to {config.max_number_size}')
def anystr_length_validator(v, config, **kwargs):
if v is None or config.min_anystr_length <= len(v) <= config.max_anystr_length:
return v
raise ValueError(f'length {len(v)} not in range {config.min_anystr_length} to {config.max_anystr_length}')
def ordered_dict_validator(v) -> OrderedDict:
if isinstance(v, OrderedDict):
return v
return OrderedDict(v)
def dict_validator(v) -> dict:
if isinstance(v, dict):
return v
try:
return dict(v)
except TypeError as e:
raise TypeError(f'value is not a valid dict, got {display_as_type(v)}') from e
def list_validator(v) -> list:
if isinstance(v, list):
return v
return list(v)
def tuple_validator(v) -> tuple:
if isinstance(v, tuple):
return v
return tuple(v)
def set_validator(v) -> set:
if isinstance(v, set):
return v
return set(v)
def enum_validator(v, field, config, **kwargs) -> Enum:
enum_v = field.type_(v)
return enum_v.value if config.use_enum_values else enum_v
def uuid_validator(v) -> UUID:
if isinstance(v, UUID):
return v
elif isinstance(v, str):
return UUID(v)
elif isinstance(v, (bytes, bytearray)):
return UUID(v.decode())
else:
raise ValueError(f'str, byte or native UUID type expected not {type(v)}')
_VALIDATORS = [
(Enum, [enum_validator]),
(str, [not_none_validator, str_validator, anystr_length_validator]),
(bytes, [not_none_validator, bytes_validator, anystr_length_validator]),
(bool, [bool_validator]),
(int, [int, number_size_validator]),
(float, [float, number_size_validator]),
(Path, [Path]),
(datetime, [parse_datetime]),
(date, [parse_date]),
(time, [parse_time]),
(timedelta, [parse_duration]),
(OrderedDict, [ordered_dict_validator]),
(dict, [dict_validator]),
(list, [list_validator]),
(tuple, [tuple_validator]),
(set, [set_validator]),
(UUID, [not_none_validator, uuid_validator]),
]
def find_validators(type_):
if type_ is Any:
return []
for val_type, validators in _VALIDATORS:
try:
if issubclass(type_, val_type):
return validators
except TypeError as e:
raise TypeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})') from e
raise ConfigError(f'no validator found for {type_}')
| true
| true
|
79055052ba0de8c87f991974cca41c422c24016a
| 8,572
|
py
|
Python
|
ItemList.py
|
mzxrules/MM-Randomizer
|
56260563e3737cbff8a2bbb98ff8bcb161f3440e
|
[
"MIT"
] | 1
|
2018-10-06T16:13:07.000Z
|
2018-10-06T16:13:07.000Z
|
ItemList.py
|
mzxrules/MM-Randomizer
|
56260563e3737cbff8a2bbb98ff8bcb161f3440e
|
[
"MIT"
] | null | null | null |
ItemList.py
|
mzxrules/MM-Randomizer
|
56260563e3737cbff8a2bbb98ff8bcb161f3440e
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
import logging
import random
from Items import ItemFactory
#This file sets the item pools for various modes. Timed modes and triforce hunt are enforced first, and then extra items are specified per mode to fill in the remaining space.
#Some basic items that various modes require are placed here, including pendants and crystals. Medallion requirements for the two relevant entrances are also decided.
alwaysitems = (['Kokiri Sword', 'Gilded Sword', 'Great Fairy Sword', 'Hylian Shield', 'Mirror Shield'] +
['Deku Mask', 'Goron Mask', 'Zora Mask', 'Fierce Deity Mask'] +
['Postmans Hat', 'Blast Mask', 'Great Fairy Mask', 'All Night Mask', 'Stone Mask'] +
['Keaton Mask', 'Bremen Mask', 'Bunny Hood', 'Don Geros Mask', 'Mask of Scents'] +
['Romani Mask', 'Circus Leader Mask', 'Couple Mask', 'Mask of Truth'] +
['Kamaros Mask', 'Garo Mask', 'Captains Hat', 'Gibdo Mask', 'Giant Mask'] +
['Bow', 'Large Quiver', 'Largest Quiver'] + ['Fire Arrows', 'Ice Arrows', 'Light Arrows'] +
['Powder Keg', 'Pictograph Box', 'Lens of Truth', 'Hookshot'] +
['Bomb Bag', 'Big Bomb Bag', ] + ['Bottle'] * 2 + ['Bottle with Gold Dust'] +
['Bottle with Red Potion'] + ['Bottle with Milk'] + ['Bottle with Chateau Romani'] +
['Piece of Heart'] * 52 + ['Heart Container'] * 4 + ['Adult Wallet', 'Giant Wallet'])
notmapcompass = ['Ice Trap'] * 8
rewardlist = ['Odolwa\'s Remains', 'Goht\'s Remains', 'Gyorg\'s Remains', 'Twinmold\'s Remains']
songlist = ['Song of Time', 'Song of Healing', 'Song of Soaring', 'Eponas Song','Song of Storms', 'Sonata of Awakening', 'Goron Lullaby', 'New Wave Bossa Nova', 'Elegy of Emptiness', 'Oath to Order']
# TODO: this may need to be aligned with the location_table
stray_fairy_locations = (['WF-SF1', 'WF-SF2', 'WF-SF3', 'WF-SF4', 'WF-SF5', 'WF-SF6', 'WF-SF7', 'WF-SF8', 'WF-SF9', 'WF-SF10', 'WF-SF11', 'WF-SF12', 'WF-SF13', 'WF-SF14', 'WF-SF15'] +
['SH-SF1', 'SH-SF2', 'SH-SF3', 'SH-SF4', 'SH-SF5', 'SH-SF6', 'SH-SF7', 'SH-SF8', 'SH-SF9', 'SH-SF10', 'SH-SF11', 'SH-SF12', 'SH-SF13', 'SH-SF14', 'SH-SF15'] +
['GB-SF1', 'GB-SF2', 'GB-SF3', 'GB-SF4', 'GB-SF5', 'GB-SF6', 'GB-SF7', 'GB-SF8', 'GB-SF9', 'GB-SF10', 'GB-SF11', 'GB-SF12', 'GB-SF13', 'GB-SF14', 'GB-SF15'] +
['ST-SF1', 'ST-SF2', 'ST-SF3', 'ST-SF4', 'ST-SF5', 'ST-SF6', 'ST-SF7', 'ST-SF8', 'ST-SF9', 'ST-SF10', 'ST-SF11', 'ST-SF12', 'ST-SF13', 'ST-SF14', 'ST-SF15'])
tradeitems = (['Moon Tear', 'Town Title Deed', 'Swamp Title Deed', 'Mountain Title Deed', 'Ocean Title Deed'])
WF_vanilla = (['Recovery Heart'] * 2)
SH_vanilla = (['Recovery Heart'] * 2)
GB_vanilla = (['Recovery Heart'] * 2)
ST_vanilla = (['Recovery Heart'] * 2)
PF_vanilla = (['Recovery Heart'] * 2)
normal_bottles = [
'Bottle',
'Bottle with Milk',
'Bottle with Red Potion',
'Bottle with Green Potion',
'Bottle with Blue Potion',
'Bottle with Fairy',
'Bottle with Fish',
'Bottle with Bugs',
'Bottle with Poe',
'Bottle with Big Poe']
normal_bottle_count = 6
normal_rupees = (
['Rupees (5)'] * 13
+ ['Rupees (20)'] * 5
+ ['Rupees (50)'] * 7
+ ['Rupees (200)'] * 3)
shopsanity_rupees = (
['Rupees (5)'] * 2
+ ['Rupees (20)'] * 10
+ ['Rupees (50)'] * 10
+ ['Rupees (200)'] * 5
+ ['Progressive Wallet'])
vanilla_shop_items = {
'Trading Post Item 1': 'Buy Hylian Shield',
# TODO: Fill out the rest
}
titledeeds = {
'Sad Moon Crater': 'Moon\'s Tear',
# TODO: fill out the rest
}
npc_items = {
# TODO: List all locations which give items by NPC, and set them to give that specific item
}
eventlocations = {
'Majora': 'Majora\'s Mask'
}
junk_pool = (
8 * ['Bombs (5)'] +
2 * ['Bombs (10)'] +
8 * ['Arrows (5)'] +
2 * ['Arrows (10)'] +
5 * ['Deku Stick (1)'] +
5 * ['Deku Nuts (5)'] +
10 * ['Rupees (5)'] +
4 * ['Rupees (20)'] +
20 * ['Ice Trap'])
def get_junk_item(count=1):
ret_junk = []
for _ in range(count):
ret_junk.append(random.choice(junk_pool))
return ret_junk
def generate_itempool(world):
# set up item pool
(pool, placed_items) = get_pool_core(world)
world.itempool = ItemFactory(pool, world)
for (location, item) in placed_items.items():
world.push_item(location, ItemFactory(item, world))
world.get_location(location).event = True
fill_bosses(world)
world.initialize_items()
'''
This is where we decide what items to place and how
'''
def get_pool_core(world):
pool = []
placed_items = {}
'''
# Used to place an item randomly into the pool
pool.append('Kokiri Sword')
# Used to place a specific item in a specific location
placed_items['Kokiri Sword Chest'] = 'Kokiri Sword'
# Adds x items to the pool which are not progression items
pool.extend(get_junk_item(37))
# locations_with_items is a list of key value pairs where
# the key is the location name for an item
# the value is the item being placed at that location
placed_items.update(locations_with_items)
# tells the logic that you start out with the given item
world.state.collect(item)
'''
pool.extend(songlist)
if world.shuffle_mapcompass == 'remove':
for item in [item for dungeon in world.dungeons for item in dungeon.dungeon_items]:
world.state.collect(item)
pool.extend(get_junk_item())
if world.shuffle_smallkeys == 'remove':
for item in [item for dungeon in world.dungeons for item in dungeon.small_keys]:
world.state.collect(item)
pool.extend(get_junk_item())
if world.shuffle_bosskeys == 'remove':
for item in [item for dungeon in world.dungeons for item in dungeon.boss_key]:
world.state.collect(item)
pool.extend(get_junk_item())
return (pool, placed_items)
def fill_songs(world, attempts=15):
songs = ItemFactory(songlist)
song_locations = [world.get_location('Song from Skull Kid'), world.get_location('Song from HMS'), world.get_location('Song from Owl Tablet'), world.get_location('Song from Romani'), world.get_location('Song at Grave'), world.get_location('Song from Monkey'), world.get_location('Song from Baby Goron'), world.get_location('Song from Goron Elder'), world.get_location('Song from Zora Eggs'), world.get_location('Song from Igos'), world.get_location('Song from the Giants')]
placed_prizes = [loc.item.name for loc in song_locations if loc.item is not None]
unplaced_prizes = [song for song in songs if song.name not in placed_prizes]
empty_song_locations = [loc for loc in song_locations if loc.item is None]
while attempts:
attempts -= 1
try:
prizepool = list(unplaced_prizes)
prize_locs = list(empty_song_locations)
random.shuffle(prizepool)
random.shuffle(prize_locs)
fill_restrictive(world, world.get_all_state(keys=True), prize_locs, prizepool) #TODO: Set keys to true once keys are properly implemented
except FillError:
logging.getLogger('').info("Failed to place songs. Will retry %s more times", attempts)
for location in empty_song_locations:
location.item = None
continue
break
else:
raise FillError('Unable to place songs')
def fill_bosses(world, bossCount=4):
boss_rewards = ItemFactory(rewardlist)
boss_locations = [world.get_location('Odolwa'), world.get_location('Goht'), world.get_location('Gyorg'), world.get_location('Twinmold')]
placed_prizes = [loc.item.name for loc in boss_locations if loc.item is not None]
unplaced_prizes = [item for item in boss_rewards if item.name not in placed_prizes]
empty_boss_locations = [loc for loc in boss_locations if loc.item is None]
prizepool = list(unplaced_prizes)
prize_locs = list(empty_boss_locations)
while bossCount:
bossCount -= 1
random.shuffle(prizepool)
random.shuffle(prize_locs)
item = prizepool.pop()
loc = prize_locs.pop()
world.push_item(loc, item, False)
world.get_location(loc).event = True
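get_junk_item draws uniformly from junk_pool, whose 64 entries include 20 Ice Traps, so Ice Trap is the most likely pick. A small sketch of calling it with the random seed fixed so the draws are reproducible:
import random

random.seed(0)                       # fix the seed so the draws are reproducible
junk = get_junk_item(3)              # three independent uniform draws from junk_pool
assert len(junk) == 3
assert all(item in junk_pool for item in junk)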
| 45.354497
| 477
| 0.615726
|
from collections import namedtuple
import logging
import random
from Items import ItemFactory
alwaysitems = (['Kokiri Sword', 'Gilded Sword', 'Great Fairy Sword', 'Hylian Shield', 'Mirror Shield'] +
['Deku Mask', 'Goron Mask', 'Zora Mask', 'Fierce Deity Mask'] +
['Postmans Hat', 'Blast Mask', 'Great Fairy Mask', 'All Night Mask', 'Stone Mask'] +
['Keaton Mask', 'Bremen Mask', 'Bunny Hood', 'Don Geros Mask', 'Mask of Scents'] +
['Romani Mask', 'Circus Leader Mask', 'Couple Mask', 'Mask of Truth'] +
['Kamaros Mask', 'Garo Mask', 'Captains Hat', 'Gibdo Mask', 'Giant Mask'] +
['Bow', 'Large Quiver', 'Largest Quiver'] + ['Fire Arrows', 'Ice Arrows', 'Light Arrows'] +
['Powder Keg', 'Pictograph Box', 'Lens of Truth', 'Hookshot'] +
['Bomb Bag', 'Big Bomb Bag', ] + ['Bottle'] * 2 + ['Bottle with Gold Dust'] +
['Bottle with Red Potion'] + ['Bottle with Milk'] + ['Bottle with Chateau Romani'] +
['Piece of Heart'] * 52 + ['Heart Container'] * 4 + ['Adult Wallet', 'Giant Wallet'])
notmapcompass = ['Ice Trap'] * 8
rewardlist = ['Odolwa\'s Remains', 'Goht\'s Remains', 'Gyorg\'s Remains', 'Twinmold\'s Remains']
songlist = ['Song of Time', 'Song of Healing', 'Song of Soaring', 'Eponas Song','Song of Storms', 'Sonata of Awakening', 'Goron Lullaby', 'New Wave Bossa Nova', 'Elegy of Emptiness', 'Oath to Order']
stray_fairy_locations = (['WF-SF1', 'WF-SF2', 'WF-SF3', 'WF-SF4', 'WF-SF5', 'WF-SF6', 'WF-SF7', 'WF-SF8', 'WF-SF9', 'WF-SF10', 'WF-SF11', 'WF-SF12', 'WF-SF13', 'WF-SF14', 'WF-SF15'] +
['SH-SF1', 'SH-SF2', 'SH-SF3', 'SH-SF4', 'SH-SF5', 'SH-SF6', 'SH-SF7', 'SH-SF8', 'SH-SF9', 'SH-SF10', 'SH-SF11', 'SH-SF12', 'SH-SF13', 'SH-SF14', 'SH-SF15'] +
['GB-SF1', 'GB-SF2', 'GB-SF3', 'GB-SF4', 'GB-SF5', 'GB-SF6', 'GB-SF7', 'GB-SF8', 'GB-SF9', 'GB-SF10', 'GB-SF11', 'GB-SF12', 'GB-SF13', 'GB-SF14', 'GB-SF15'] +
['ST-SF1', 'ST-SF2', 'ST-SF3', 'ST-SF4', 'ST-SF5', 'ST-SF6', 'ST-SF7', 'ST-SF8', 'ST-SF9', 'ST-SF10', 'ST-SF11', 'ST-SF12', 'ST-SF13', 'ST-SF14', 'ST-SF15'])
tradeitems = (['Moon Tear', 'Town Title Deed', 'Swamp Title Deed', 'Mountain Title Deed', 'Ocean Title Deed'])
WF_vanilla = (['Recovery Heart'] * 2)
SH_vanilla = (['Recovery Heart'] * 2)
GB_vanilla = (['Recovery Heart'] * 2)
ST_vanilla = (['Recovery Heart'] * 2)
PF_vanilla = (['Recovery Heart'] * 2)
normal_bottles = [
'Bottle',
'Bottle with Milk',
'Bottle with Red Potion',
'Bottle with Green Potion',
'Bottle with Blue Potion',
'Bottle with Fairy',
'Bottle with Fish',
'Bottle with Bugs',
'Bottle with Poe',
'Bottle with Big Poe']
normal_bottle_count = 6
normal_rupees = (
['Rupees (5)'] * 13
+ ['Rupees (20)'] * 5
+ ['Rupees (50)'] * 7
+ ['Rupees (200)'] * 3)
shopsanity_rupees = (
['Rupees (5)'] * 2
+ ['Rupees (20)'] * 10
+ ['Rupees (50)'] * 10
+ ['Rupees (200)'] * 5
+ ['Progressive Wallet'])
vanilla_shop_items = {
'Trading Post Item 1': 'Buy Hylian Shield',
}
titledeeds = {
'Sad Moon Crater': 'Moon\'s Tear',
# TODO: fill out the rest
}
npc_items = {
# TODO: List all locations which give items by NPC, and set them to give that specific item
}
eventlocations = {
'Majora': 'Majora\'s Mask'
}
junk_pool = (
8 * ['Bombs (5)'] +
2 * ['Bombs (10)'] +
8 * ['Arrows (5)'] +
2 * ['Arrows (10)'] +
5 * ['Deku Stick (1)'] +
5 * ['Deku Nuts (5)'] +
10 * ['Rupees (5)'] +
4 * ['Rupees (20)'] +
20 * ['Ice Trap'])
def get_junk_item(count=1):
ret_junk = []
for _ in range(count):
ret_junk.append(random.choice(junk_pool))
return ret_junk
def generate_itempool(world):
(pool, placed_items) = get_pool_core(world)
world.itempool = ItemFactory(pool, world)
for (location, item) in placed_items.items():
world.push_item(location, ItemFactory(item, world))
world.get_location(location).event = True
fill_bosses(world)
world.initialize_items()
def get_pool_core(world):
pool = []
placed_items = {}
pool.extend(songlist)
if world.shuffle_mapcompass == 'remove':
for item in [item for dungeon in world.dungeons for item in dungeon.dungeon_items]:
world.state.collect(item)
pool.extend(get_junk_item())
if world.shuffle_smallkeys == 'remove':
for item in [item for dungeon in world.dungeons for item in dungeon.small_keys]:
world.state.collect(item)
pool.extend(get_junk_item())
if world.shuffle_bosskeys == 'remove':
for item in [item for dungeon in world.dungeons for item in dungeon.boss_key]:
world.state.collect(item)
pool.extend(get_junk_item())
return (pool, placed_items)
def fill_songs(world, attempts=15):
songs = ItemFactory(songlist)
song_locations = [world.get_location('Song from Skull Kid'), world.get_location('Song from HMS'), world.get_location('Song from Owl Tablet'), world.get_location('Song from Romani'), world.get_location('Song at Grave'), world.get_location('Song from Monkey'), world.get_location('Song from Baby Goron'), world.get_location('Song from Goron Elder'), world.get_location('Song from Zora Eggs'), world.get_location('Song from Igos'), world.get_location('Song from the Giants')]
placed_prizes = [loc.item.name for loc in song_locations if loc.item is not None]
unplaced_prizes = [song for song in songs if song.name not in placed_prizes]
empty_song_locations = [loc for loc in song_locations if loc.item is None]
while attempts:
attempts -= 1
try:
prizepool = list(unplaced_prizes)
prize_locs = list(empty_song_locations)
random.shuffle(prizepool)
random.shuffle(prize_locs)
fill_restrictive(world, world.get_all_state(keys=True), prize_locs, prizepool)
except FillError:
logging.getLogger('').info("Failed to place songs. Will retry %s more times", attempts)
for location in empty_song_locations:
location.item = None
continue
break
else:
raise FillError('Unable to place songs')
def fill_bosses(world, bossCount=4):
boss_rewards = ItemFactory(rewardlist)
boss_locations = [world.get_location('Odolwa'), world.get_location('Goht'), world.get_location('Gyorg'), world.get_location('Twinmold')]
placed_prizes = [loc.item.name for loc in boss_locations if loc.item is not None]
unplaced_prizes = [item for item in boss_rewards if item.name not in placed_prizes]
empty_boss_locations = [loc for loc in boss_locations if loc.item is None]
prizepool = list(unplaced_prizes)
prize_locs = list(empty_boss_locations)
while bossCount:
bossCount -= 1
random.shuffle(prizepool)
random.shuffle(prize_locs)
item = prizepool.pop()
loc = prize_locs.pop()
world.push_item(loc, item, False)
world.get_location(loc).event = True
790550ef453b5f646ef2d87dc1f1cbab439ff425 | 21,321 | py | Python | pipeline/configs/grb-citeseer/config.py | sigeisler/grb | c89e21076dc05d1edb87dfe2eff20c29ba6bd0c1 | ["MIT"] | stars: null | issues: null | forks: null
"""Configuration for reproducing leaderboard of grb-citeseer dataset."""
import torch
import torch.nn.functional as F
from grb.evaluator import metric
model_list = ["gcn",
"gcn_ln",
"gcn_at",
"graphsage",
"graphsage_ln",
"graphsage_at",
"sgcn",
"sgcn_ln",
"sgcn_at",
"robustgcn",
"robustgcn_at",
"tagcn",
"tagcn_ln",
"tagcn_at",
"appnp",
"appnp_ln",
"appnp_at",
"gin",
"gin_ln",
"gin_at",
"gat",
"gat_ln",
"gat_at",
"gcnguard",
"gatguard",
"gcnsvd"]
model_list_basic = ["gcn",
"graphsage",
"sgcn",
"tagcn",
"appnp",
"gin",
"gat"]
modification_attack_list = ["dice",
"rand",
"flip",
"fga",
"nea",
"pgd",
"prbcd",
"stack"]
injection_attack_list = ["rand",
"fgsm",
"pgd",
"speit",
"tdgia"]
model_sur_list = ["gcn"]
def build_model(model_name, num_features, num_classes):
"""Hyper-parameters are determined by auto training, refer to grb.utils.trainer.AutoTrainer."""
if model_name in ["gcn", "gcn_ln", "gcn_at", "gcn_ln_at"]:
from grb.model.torch import GCN
model = GCN(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
layer_norm=True if "ln" in model_name else False,
dropout=0.7)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["graphsage", "graphsage_ln", "graphsage_at", "graphsage_ln_at"]:
from grb.model.torch import GraphSAGE
model = GraphSAGE(in_features=num_features,
out_features=num_classes,
hidden_features=256,
n_layers=5,
layer_norm=True if "ln" in model_name else False,
dropout=0.5)
train_params = {
"lr" : 0.0001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["sgcn", "sgcn_ln", "sgcn_at", "sgcn_ln_at"]:
from grb.model.torch import SGCN
model = SGCN(in_features=num_features,
out_features=num_classes,
hidden_features=256,
n_layers=4,
k=4,
layer_norm=True if "ln" in model_name else False,
dropout=0.5)
train_params = {
"lr" : 0.01,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["tagcn", "tagcn_ln", "tagcn_at", "tagcn_ln_at"]:
from grb.model.torch import TAGCN
model = TAGCN(in_features=num_features,
out_features=num_classes,
hidden_features=256,
n_layers=3,
k=2,
layer_norm=True if "ln" in model_name else False,
dropout=0.5)
train_params = {
"lr" : 0.005,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["appnp", "appnp_ln", "appnp_at", "appnp_ln_at"]:
from grb.model.torch import APPNP
model = APPNP(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
k=3,
layer_norm=True if "ln" in model_name else False,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gin", "gin_ln", "gin_at", "gin_ln_at"]:
from grb.model.torch import GIN
model = GIN(in_features=num_features,
out_features=num_classes,
hidden_features=256,
n_layers=2,
layer_norm=True if "ln" in model_name else False,
dropout=0.6)
train_params = {
"lr" : 0.0001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gat", "gat_ln", "gat_at", "gat_ln_at"]:
from grb.model.dgl import GAT
model = GAT(in_features=num_features,
out_features=num_classes,
hidden_features=64,
n_layers=3,
n_heads=6,
layer_norm=True if "ln" in model_name else False,
dropout=0.6)
train_params = {
"lr" : 0.005,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["robustgcn", "robustgcn_at"]:
from grb.defense import RobustGCN
model = RobustGCN(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gcnsvd", "gcnsvd_ln"]:
from grb.defense.gcnsvd import GCNSVD
model = GCNSVD(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gcnguard"]:
from grb.defense import GCNGuard
model = GCNGuard(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gatguard"]:
from grb.defense import GATGuard
model = GATGuard(in_features=num_features,
out_features=num_classes,
hidden_features=64,
n_heads=6,
n_layers=3,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
def build_optimizer(model, lr):
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
return optimizer
def build_loss():
return F.nll_loss
def build_metric():
return metric.eval_acc
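# Usage sketch (kept as a comment so nothing runs on import): the builders above are
# meant to be combined roughly like this, with num_features / num_classes taken from the
# loaded dataset (the call below is illustrative, not a value pulled from grb-citeseer):
#
#   model, train_params = build_model("gcn", num_features, num_classes)
#   optimizer = build_optimizer(model, lr=train_params["lr"])
#   loss_func = build_loss()
#   eval_metric = build_metric()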
def build_attack(attack_name, device="cpu", args=None, mode="modification"):
if mode == "modification":
if attack_name == "dice":
from grb.attack.modification import DICE
attack = DICE(n_edge_mod=args.n_edge_mod,
ratio_delete=0.6,
device=device)
return attack
if attack_name == "fga":
from grb.attack.modification import FGA
attack = FGA(n_edge_mod=args.n_edge_mod,
device=device)
return attack
if attack_name == "flip":
from grb.attack.modification import FLIP
attack = FLIP(n_edge_mod=args.n_edge_mod,
flip_type=args.flip_type,
mode="descend",
device=device)
return attack
if attack_name == "rand":
from grb.attack.modification import RAND
attack = RAND(n_edge_mod=args.n_edge_mod,
device=device)
return attack
if attack_name == "nea":
from grb.attack.modification import NEA
attack = NEA(n_edge_mod=args.n_edge_mod,
device=device)
return attack
if attack_name == "stack":
from grb.attack.modification import STACK
attack = STACK(n_edge_mod=args.n_edge_mod,
device=device)
return attack
if attack_name == "pgd":
from grb.attack.modification import PGD
attack = PGD(epsilon=args.epsilon,
n_epoch=args.attack_epoch,
n_node_mod=args.n_node_mod,
n_edge_mod=args.n_edge_mod,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
if attack_name == "prbcd":
from grb.attack.modification import PRBCD
attack = PRBCD(epsilon=args.epsilon,
n_epoch=args.attack_epoch,
n_node_mod=args.n_node_mod,
n_edge_mod=args.n_edge_mod,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
elif mode == "injection":
if attack_name == "rand":
from grb.attack.injection import RAND
attack = RAND(n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
device=device)
return attack
elif attack_name == "fgsm":
from grb.attack.injection import FGSM
attack = FGSM(epsilon=args.lr,
n_epoch=args.n_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
elif attack_name == "pgd":
from grb.attack.injection import PGD
attack = PGD(epsilon=args.lr,
n_epoch=args.n_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
elif attack_name == "speit":
from grb.attack.injection import SPEIT
attack = SPEIT(lr=args.lr,
n_epoch=args.n_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
elif attack_name == "tdgia":
from grb.attack.injection import TDGIA
attack = TDGIA(lr=args.lr,
n_epoch=args.n_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
inject_mode='random',
sequential_step=1.0,
device=device)
return attack
elif attack_name == "tdgia_random":
from grb.attack.injection.tdgia import TDGIA
attack = TDGIA(lr=args.lr,
n_epoch=args.n_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
inject_mode='random',
device=device)
return attack
elif attack_name == "tdgia_uniform":
from grb.attack.injection import TDGIA
attack = TDGIA(lr=args.lr,
n_epoch=args.n_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
inject_mode='uniform',
sequential_step=1.0,
device=device)
return attack
else:
raise NotImplementedError
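# Usage sketch for build_attack (comment only; the numeric values are hypothetical and the
# attribute names simply mirror what the branches above read from `args`):
#
#   from argparse import Namespace
#   args = Namespace(n_inject_max=60, n_edge_max=20, lr=0.01, n_epoch=500,
#                    feat_lim_min=-1.0, feat_lim_max=1.0, early_stop=True)
#   attack = build_attack("fgsm", device="cpu", args=args, mode="injection")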
def build_model_autotrain(model_name):
if model_name == "gcn":
from grb.model.torch import GCN
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return GCN, params_search
if model_name == "graphsage":
from grb.model.torch import GraphSAGE
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return GraphSAGE, params_search
if model_name == "sgcn":
from grb.model.torch import SGCN
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return SGCN, params_search
if model_name == "tagcn":
from grb.model.torch import TAGCN
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"k" : trial.suggest_categorical("k", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return TAGCN, params_search
if model_name == "appnp":
from grb.model.torch import APPNP
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"k" : trial.suggest_categorical("k", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return APPNP, params_search
if model_name == "gin":
from grb.model.torch import GIN
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return GIN, params_search
if model_name == "gat":
from grb.model.dgl import GAT
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"n_heads" : trial.suggest_categorical("n_heads", [2, 4, 6, 8]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return GAT, params_search
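# Note on params_search: the trial.suggest_categorical calls above follow the Optuna trial
# API, so a search callback could be exercised directly with Optuna as sketched below. This
# is illustrative only and is not the grb AutoTrainer interface, which is not defined here:
#
#   import optuna
#   Model, params_search = build_model_autotrain("gcn")
#   def objective(trial):
#       model_params, other_params = params_search(trial)
#       ...  # build Model(**model_params), train with other_params, return a validation score
#   optuna.create_study(direction="maximize").optimize(objective, n_trials=20)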
7905549ff257c755f3b337a529446a0563faa51d | 4,278 | py | Python | netpy/earl/__init__.py | stronklab/netpy | 0d22a6ce43d66c3355457e681b83f338ae806e0f | ["MIT"] | stars: null | issues: null | forks: null
from math import sqrt
import networkx as nx
import matplotlib.pyplot as plt
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
class Distribution:
from random import random
from random import gauss
from numpy.random import poisson
_h = [0]
h = property(lambda s: s._h[0])
drop_rate = 0
move_rate = 0
move_int = 600
tx_rate = 0
em_rate = 0
aw_rate = lambda s, n: 0
@classmethod
def aloha(cls, k, n):
r = cls.random()
return r
    @classmethod
    def tx_chn(cls, a, b, g):
        # Channel transmission delay between nodes a and b in graph g (stubbed out).
        return 0
    @classmethod
    def tx_awt(cls, a, b, g):
        # Awaiting time: timeouts of packets already queued at a, plus the access delay
        # associated with b's children. Signature matches the call in tx(cls, a, b, g).
        fold = sum(p.timeout for p in a.buffer)
        return fold + cls.aw_rate(len(b.children))
@classmethod
def emit(cls, k):
return cls.poisson(cls.em_rate*k)
@classmethod
def tx(cls, a, b, g):
return cls.tx_awt(a, b, g) + cls.tx_chn(a, b, g)
@classmethod
def mv(cls):
if cls.random() < cls.move_rate:
return cls.random()*cls.move_int
@classmethod
def drop(cls):
return cls.random() < cls.drop_rate
class Abonent(Distribution):
drop_rate = 1e-8
move_rate = 0
aw_rate = 1.0/1e9
em_rate = property(lambda s: s.h/100.0)
class MobileAbonent(Abonent):
move_rate = 0.5
class Operator(Distribution):
drop_rate = 1e-8
move_rate = 0
aw_rate = 1.0/1e10
em_rate = 0
class Server(Distribution):
drop_rate = 1e-8
move_rate = 0
aw_rate = 1.0/5e9
em_rate = property(lambda s: s.h/100.0)
class WiFi(Distribution):
mu, sigma = 2e-6, 1e-6
drop_rate = 0.005
tx_rate = 0.1
aw_rate = lambda s, n: s.aloha(s.mu, n)
class Fiber(Distribution):
mu, sigma = 2e-8, 1e-8
drop_rate = 1e-12
tx_rate = 10
aw_rate = lambda s, n: s.aloha(s.mu, n)
class Ethernet(Distribution):
mu = 2e-7
drop_rate = 1e-10
tx_rate = property(lambda s: 6 - s.random()*5)
aw_rate = lambda s, n: s.aloha(s.mu, 2)
class LTE(Distribution):
mu, sigma = 2e-7, 1e-7
drop_rate = 1e-10
tx_rate = property(lambda s: 6 - s.random()*5)
aw_rate = lambda s, n: s.gauss(s.mu*n, s.sigma*sqrt(n))
class Node:
def __init__(self, id, g):
self.id = id
self.g = g
def __getattr__(self, key):
return self.g.node[self.id][key]
@property
def buffer(self):
return filter(lambda p: p.curr == self, map(lambda e: e.obj, self.g.events))
class Graph(nx.DiGraph):
c = root = 12007
def iterate(self, r, n, d, node, channel):
for _ in xrange(0, n):
self.c += 1
self.add_node(self.c, deep=d, distr=node)
self.add_edge(r, self.c, distr=channel)
self.add_edge(self.c, r, distr=Ethernet)
yield self.c
    def paths(self, a, b):
        # all_shortest_paths is a module-level networkx function, not a DiGraph method.
        return nx.all_shortest_paths(self, a.id, b.id)
def __init__(self, deep=5, icount=3, operators=10):
nx.DiGraph.__init__(self)
q = [self.root + i for i in xrange(0, operators)]
self.c += operators - 1
self.deep = deep
for r in q:
self.add_node(r, distr=Operator, deep=0)
if operators > 1:
for u, v in zip(q[1:], q[:-1]):
self.add_edge(u, v, distr=Fiber)
for deep in xrange(1, deep+1):
q, last = [], q
for r in last:
for v in self.iterate(r, icount + 1 if deep == self.deep else icount, deep, Operator, Ethernet):
q.append(v)
@property
def operators(self):
return filter(lambda x: self.node[x]["deep"] != self.deep, self.nodes())
@property
def leaves(self):
return filter(lambda x: self.node[x]["deep"] == self.deep, self.nodes())
def show(self):
print len(self.nodes())
pos = graphviz_layout(self, prog="sfdp", args="")
plt.rcParams["axes.facecolor"] = "black"
nx.draw_networkx_nodes(self, pos, nodelist=self.operators, node_color="gray", node_size=10)
nx.draw_networkx_nodes(self, pos, nodelist=self.leaves, node_color="red", node_size=10)
nx.draw_networkx_edges(self, pos, edge_color="white", arrows=False)
plt.show()
if __name__ == "__main__":
Graph().show()
790555c66bd4daf274748bedfb9610ca07d0dad9 | 3,576 | py | Python | ctf/2020/nullcon/msg/solve.py | kamithanthanh/hacmao.github.io | 87b06df827cc65f737831301bae1d5f3a2d014ff | ["MIT"] | stars: 1 (2019-09-27) | issues: null | forks: 1 (2019-08-25)
#!/usr/bin/env python3
from Crypto.PublicKey import RSA, ECC
import json
from hashlib import sha256
from Crypto.Cipher import AES, PKCS1_OAEP
from base64 import b64decode
from Crypto.Signature import DSS
from Crypto.Hash import SHA256
import socket
from base64 import *
from server import *
# key = RSA.importKey(open("rsapubkey.pem", "r").read() )
# key = ECC.generate(curve='P-256')
# f = open("fakekey.pem", 'w')
# f.write(key.export_key(format='PEM'))
message = json.loads('{"aeskey": "nwmHkXTN/EjnoO5IzhpNwE3nXEUMHsNWFI7dcHnpxIIiXCO+dLCjR6TfqYfbL9Z6a7SNCKbeTFBLnipXcRoN6o56urZMWwCioVTsV7PHrlCU42cKX+c/ShcVFrA5aOTTjaO9rxTMxB1PxJqYyxlpNaUpRFslzj9LKH+g8hVEuP9lVMm7q4aniyOUgPrAxyn044mbuxPu6Kh+JHSt5dkmnPZGNfUDKCwvMKeilb5ZkLaW/EaoXXsJLh/wUinMROIqmD2dkiWnk10633sJIu1lEOUsiykYXtJcd3o/B2dfTx2/85C2J6IsIp3+jJne76AYryAONPSxuh+M0h1xCzNeQg==", "message": "6VCnnSOU1DBImyhlqt7SoEjRtmBxjmABFVmXYhlKDyc+NBlnZ3Hpj4EkLwydPGpHiAvr4R0zTXSyUnMk5N6fi0/BFZE=", "nonce": "Cems9uHF6mk=", "signature": "uhLCnBvGfdC1fVkGUKQ8zNp/fOXNnFxNuDEc7CDGEYSxnuZMoGqbEqMLguJqDdvHFSHoUrq2R9/+mfk8LHndhw==", "eccpubkey": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGww+NA3xHj4kCyztekLhmJVB62Hhq/oGDWwo4fxgZCgbODqD3vrMFFTGCWfO8ZyHtstuW+Yztpq94CnSNpJoug=="}')
def fake_signature(msg) :
eccpubkey = ECC.import_key(msg["eccpubkey"])
h = SHA256.new(msg["aeskey"] + msg["nonce"] + msg["message"])
sign = DSS.new(eccpubkey, 'fips-186-3')
msg['signature'] = sign.sign(h)
return msg
HOST = 'crypto1.ctf.nullcon.net' # The server's hostname or IP address
PORT = 5001 # The port used by the server
def sendMsg(msg) :
msg = fake_signature(msg)
msg["nonce"] = b64encode(msg["nonce"]).decode()
msg["message"] = b64encode(msg["message"]).decode()
msg["aeskey"] = b64encode(msg["aeskey"]).decode()
msg["signature"] = b64encode(msg["signature"]).decode()
msg["eccpubkey"] = b64encode(msg["eccpubkey"]).decode()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
s.recv(1024)
s.sendall(json.dumps(msg).encode() + b"\n")
recpt = s.recv(1024).split(b'\n')
assert recpt[0] == b'Here is your read receipt:'
return recpt[1]
"""
Recover the XOR keystream.
"""
def xor(a, b) :
return bytes([ai ^ bi for (ai, bi) in zip(a,b)])
ciphertext = b64decode(message['message'])
print(ciphertext)
flag = b"hackim20{digital_singatures_does_not_always_imp"
fake_message = xor(flag, ciphertext[:len(flag)])
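# How the recovery works (inferred from this script; server.py is not shown here): the server
# decrypts whatever "message" bytes are sent with its fixed AES key and nonce and returns
# SHA256 of the resulting plaintext as the read receipt. fake_message = flag_prefix XOR
# ciphertext_prefix is therefore the keystream prefix, and sending it back decrypts to all
# zero bytes. Each further byte is brute-forced below: guess the next plaintext character
# (assumed printable), derive the candidate keystream byte, and accept it when the receipt
# equals SHA256 of a zero string one byte longer (H).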
import progressbar
from string import ascii_lowercase , digits
printable = ascii_lowercase + "{}_" + digits
for _ in range(len(flag), len(ciphertext)) :
print(_)
H = SHA256.new(bytes(len(fake_message) + 1)).hexdigest().encode()
brute = list(map(lambda x : ord(x) ^ ciphertext[_], printable))
for i in progressbar.ProgressBar(widgets=[progressbar.Counter(), ' ', progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()])(brute) :
message["nonce"] = b64decode(message["nonce"])
message["aeskey"] = b64decode(message["aeskey"])
message["signature"] = b64decode(message["signature"])
message['eccpubkey'] = open("fakekey.pem","r").read().encode()
new_fake_message = fake_message + bytes([i])
message['message'] = new_fake_message
recpt = sendMsg(message)
if recpt == H :
fake_message += bytes([i])
flag = xor(fake_message, ciphertext[:_+1])
print(flag)
break
7905575a4a1a8ef4bbc139a7e7eb9cb22e8d7758 | 18,632 | py | Python | pandas/tests/groupby/aggregate/test_other.py | ajspera/pandas | f38020f33052ea9029b410d7fae79bc8f249c0ac | ["BSD-3-Clause"] | stars: 5 (2019-07-26 to 2021-09-28) | issues: null | forks: 3 (2019-07-26 to 2020-08-10)
"""
test all other .agg behavior
"""
from collections import OrderedDict
import datetime as dt
from functools import partial
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
date_range,
period_range,
)
from pandas.core.groupby.groupby import SpecificationError
import pandas.util.testing as tm
from pandas.io.formats.printing import pprint_thing
def test_agg_api():
# GH 6337
# http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
# different api for agg when passed custom function with mixed frame
df = DataFrame(
{
"data1": np.random.randn(5),
"data2": np.random.randn(5),
"key1": ["a", "a", "b", "b", "a"],
"key2": ["one", "two", "one", "two", "one"],
}
)
grouped = df.groupby("key1")
def peak_to_peak(arr):
return arr.max() - arr.min()
expected = grouped.agg([peak_to_peak])
expected.columns = ["data1", "data2"]
result = grouped.agg(peak_to_peak)
tm.assert_frame_equal(result, expected)
def test_agg_datetimes_mixed():
data = [[1, "2012-01-01", 1.0], [2, "2012-01-02", 2.0], [3, None, 3.0]]
df1 = DataFrame(
{
"key": [x[0] for x in data],
"date": [x[1] for x in data],
"value": [x[2] for x in data],
}
)
data = [
[
row[0],
(dt.datetime.strptime(row[1], "%Y-%m-%d").date() if row[1] else None),
row[2],
]
for row in data
]
df2 = DataFrame(
{
"key": [x[0] for x in data],
"date": [x[1] for x in data],
"value": [x[2] for x in data],
}
)
df1["weights"] = df1["value"] / df1["value"].sum()
gb1 = df1.groupby("date").aggregate(np.sum)
df2["weights"] = df1["value"] / df1["value"].sum()
gb2 = df2.groupby("date").aggregate(np.sum)
assert len(gb1) == len(gb2)
def test_agg_period_index():
prng = period_range("2012-1-1", freq="M", periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
assert isinstance(rs.index, PeriodIndex)
# GH 3579
index = period_range(start="1999-01", periods=5, freq="M")
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
series = [("s1", s1), ("s2", s2)]
df = DataFrame.from_dict(OrderedDict(series))
grouped = df.groupby(df.index.month)
list(grouped)
def test_agg_dict_parameter_cast_result_dtypes():
# GH 12821
df = DataFrame(
{
"class": ["A", "A", "B", "B", "C", "C", "D", "D"],
"time": date_range("1/1/2011", periods=8, freq="H"),
}
)
df.loc[[0, 1, 2, 5], "time"] = None
# test for `first` function
exp = df.loc[[0, 3, 4, 6]].set_index("class")
grouped = df.groupby("class")
tm.assert_frame_equal(grouped.first(), exp)
tm.assert_frame_equal(grouped.agg("first"), exp)
tm.assert_frame_equal(grouped.agg({"time": "first"}), exp)
tm.assert_series_equal(grouped.time.first(), exp["time"])
tm.assert_series_equal(grouped.time.agg("first"), exp["time"])
# test for `last` function
exp = df.loc[[0, 3, 4, 7]].set_index("class")
grouped = df.groupby("class")
tm.assert_frame_equal(grouped.last(), exp)
tm.assert_frame_equal(grouped.agg("last"), exp)
tm.assert_frame_equal(grouped.agg({"time": "last"}), exp)
tm.assert_series_equal(grouped.time.last(), exp["time"])
tm.assert_series_equal(grouped.time.agg("last"), exp["time"])
# count
exp = pd.Series([2, 2, 2, 2], index=Index(list("ABCD"), name="class"), name="time")
tm.assert_series_equal(grouped.time.agg(len), exp)
tm.assert_series_equal(grouped.time.size(), exp)
exp = pd.Series([0, 1, 1, 2], index=Index(list("ABCD"), name="class"), name="time")
tm.assert_series_equal(grouped.time.count(), exp)
def test_agg_cast_results_dtypes():
# similar to GH12821
# xref #11444
u = [dt.datetime(2015, x + 1, 1) for x in range(12)]
v = list("aaabbbbbbccd")
df = pd.DataFrame({"X": v, "Y": u})
result = df.groupby("X")["Y"].agg(len)
expected = df.groupby("X")["Y"].count()
tm.assert_series_equal(result, expected)
def test_aggregate_float64_no_int64():
# see gh-11199
df = DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 2, 2, 4, 5], "c": [1, 2, 3, 4, 5]})
expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a"]].mean()
tm.assert_frame_equal(result, expected)
expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a", "c"]].mean()
tm.assert_frame_equal(result, expected)
def test_aggregate_api_consistency():
# GH 9052
# make sure that the aggregates via dict
# are consistent
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
grouped = df.groupby(["A", "B"])
c_mean = grouped["C"].mean()
c_sum = grouped["C"].sum()
d_mean = grouped["D"].mean()
d_sum = grouped["D"].sum()
result = grouped["D"].agg(["sum", "mean"])
expected = pd.concat([d_sum, d_mean], axis=1)
expected.columns = ["sum", "mean"]
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg([np.sum, np.mean])
expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)
expected.columns = MultiIndex.from_product([["C", "D"], ["sum", "mean"]])
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped[["D", "C"]].agg([np.sum, np.mean])
expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)
expected.columns = MultiIndex.from_product([["D", "C"], ["sum", "mean"]])
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({"C": "mean", "D": "sum"})
expected = pd.concat([d_sum, c_mean], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({"C": ["mean", "sum"], "D": ["mean", "sum"]})
expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1)
expected.columns = MultiIndex.from_product([["C", "D"], ["mean", "sum"]])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = grouped[["D", "C"]].agg({"r": np.sum, "r2": np.mean})
expected = pd.concat([d_sum, c_sum, d_mean, c_mean], axis=1)
expected.columns = MultiIndex.from_product([["r", "r2"], ["D", "C"]])
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_dict_renaming_deprecation():
# 15931
df = pd.DataFrame({"A": [1, 1, 1, 2, 2], "B": range(5), "C": range(5)})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False) as w:
df.groupby("A").agg(
{"B": {"foo": ["sum", "max"]}, "C": {"bar": ["count", "min"]}}
)
assert "using a dict with renaming" in str(w[0].message)
assert "named aggregation" in str(w[0].message)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df.groupby("A")[["B", "C"]].agg({"ma": "max"})
with tm.assert_produces_warning(FutureWarning) as w:
df.groupby("A").B.agg({"foo": "count"})
assert "using a dict on a Series for aggregation" in str(w[0].message)
assert "named aggregation instead." in str(w[0].message)
def test_agg_compat():
# GH 12334
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
g = df.groupby(["A", "B"])
expected = pd.concat([g["D"].sum(), g["D"].std()], axis=1)
expected.columns = MultiIndex.from_tuples([("C", "sum"), ("C", "std")])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g["D"].agg({"C": ["sum", "std"]})
tm.assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([g["D"].sum(), g["D"].std()], axis=1)
expected.columns = ["C", "D"]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g["D"].agg({"C": "sum", "D": "std"})
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_nested_dicts():
# API change for disallowing these types of nested dicts
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
g = df.groupby(["A", "B"])
msg = r"cannot perform renaming for r[1-2] with a nested dictionary"
with pytest.raises(SpecificationError, match=msg):
g.aggregate({"r1": {"C": ["mean", "sum"]}, "r2": {"D": ["mean", "sum"]}})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g.agg({"C": {"ra": ["mean", "std"]}, "D": {"rb": ["mean", "std"]}})
expected = pd.concat(
[g["C"].mean(), g["C"].std(), g["D"].mean(), g["D"].std()], axis=1
)
expected.columns = pd.MultiIndex.from_tuples(
[("ra", "mean"), ("ra", "std"), ("rb", "mean"), ("rb", "std")]
)
tm.assert_frame_equal(result, expected, check_like=True)
# same name as the original column
# GH9052
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
expected = g["D"].agg({"result1": np.sum, "result2": np.mean})
expected = expected.rename(columns={"result1": "D"})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g["D"].agg({"D": np.sum, "result2": np.mean})
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_item_by_item_raise_typeerror():
df = DataFrame(np.random.randint(10, size=(20, 10)))
def raiseException(df):
pprint_thing("----------------------------------------")
pprint_thing(df.to_string())
raise TypeError("test")
with pytest.raises(TypeError, match="test"):
df.groupby(0).agg(raiseException)
def test_series_agg_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
def test_series_agg_multi_pure_python():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
def bad(x):
assert len(x.values.base) > 0
return "foo"
result = data.groupby(["A", "B"]).agg(bad)
expected = data.groupby(["A", "B"]).agg(lambda x: "foo")
tm.assert_frame_equal(result, expected)
def test_agg_consistency():
# agg with ([]) and () not consistent
# GH 6715
def P1(a):
try:
return np.percentile(a.dropna(), q=1)
except Exception:
return np.nan
df = DataFrame(
{
"col1": [1, 2, 3, 4],
"col2": [10, 25, 26, 31],
"date": [
dt.date(2013, 2, 10),
dt.date(2013, 2, 10),
dt.date(2013, 2, 11),
dt.date(2013, 2, 11),
],
}
)
g = df.groupby("date")
expected = g.agg([P1])
expected.columns = expected.columns.levels[0]
result = g.agg(P1)
tm.assert_frame_equal(result, expected)
def test_agg_callables():
# GH 7929
df = DataFrame({"foo": [1, 2], "bar": [3, 4]}).astype(np.int64)
class fn_class:
def __call__(self, x):
return sum(x)
equiv_callables = [
sum,
np.sum,
lambda x: sum(x),
lambda x: x.sum(),
partial(sum),
fn_class(),
]
expected = df.groupby("foo").agg(sum)
for ecall in equiv_callables:
result = df.groupby("foo").agg(ecall)
tm.assert_frame_equal(result, expected)
def test_agg_over_numpy_arrays():
# GH 3788
df = pd.DataFrame(
[
[1, np.array([10, 20, 30])],
[1, np.array([40, 50, 60])],
[2, np.array([20, 30, 40])],
],
columns=["category", "arraydata"],
)
result = df.groupby("category").agg(sum)
expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]
expected_index = pd.Index([1, 2], name="category")
expected_column = ["arraydata"]
expected = pd.DataFrame(
expected_data, index=expected_index, columns=expected_column
)
tm.assert_frame_equal(result, expected)
def test_agg_timezone_round_trip():
# GH 15426
ts = pd.Timestamp("2016-01-01 12:00:00", tz="US/Pacific")
df = pd.DataFrame(
{"a": 1, "b": [ts + dt.timedelta(minutes=nn) for nn in range(10)]}
)
result1 = df.groupby("a")["b"].agg(np.min).iloc[0]
result2 = df.groupby("a")["b"].agg(lambda x: np.min(x)).iloc[0]
result3 = df.groupby("a")["b"].min().iloc[0]
assert result1 == ts
assert result2 == ts
assert result3 == ts
dates = [
pd.Timestamp("2016-01-0%d 12:00:00" % i, tz="US/Pacific") for i in range(1, 5)
]
df = pd.DataFrame({"A": ["a", "b"] * 2, "B": dates})
grouped = df.groupby("A")
ts = df["B"].iloc[0]
assert ts == grouped.nth(0)["B"].iloc[0]
assert ts == grouped.head(1)["B"].iloc[0]
assert ts == grouped.first()["B"].iloc[0]
# GH#27110 applying iloc should return a DataFrame
assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 0]
ts = df["B"].iloc[2]
assert ts == grouped.last()["B"].iloc[0]
# GH#27110 applying iloc should return a DataFrame
assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 0]
def test_sum_uint64_overflow():
# see gh-14758
# Convert to uint64 and don't overflow
df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)
df = df + 9223372036854775807
index = pd.Index(
[9223372036854775808, 9223372036854775810, 9223372036854775812], dtype=np.uint64
)
expected = pd.DataFrame(
{1: [9223372036854775809, 9223372036854775811, 9223372036854775813]},
index=index,
)
expected.index.name = 0
result = df.groupby(0).sum()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"structure, expected",
[
(tuple, pd.DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})),
(list, pd.DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})),
(
lambda x: tuple(x),
pd.DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}}),
),
(
lambda x: list(x),
pd.DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}}),
),
],
)
def test_agg_structs_dataframe(structure, expected):
df = pd.DataFrame(
{"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
)
result = df.groupby(["A", "B"]).aggregate(structure)
expected.index.names = ["A", "B"]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"structure, expected",
[
(tuple, pd.Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
(list, pd.Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
(lambda x: tuple(x), pd.Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
(lambda x: list(x), pd.Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
],
)
def test_agg_structs_series(structure, expected):
# Issue #18079
df = pd.DataFrame(
{"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
)
result = df.groupby("A")["C"].aggregate(structure)
expected.index.name = "A"
tm.assert_series_equal(result, expected)
def test_agg_category_nansum(observed):
categories = ["a", "b", "c"]
df = pd.DataFrame(
{"A": pd.Categorical(["a", "a", "b"], categories=categories), "B": [1, 2, 3]}
)
result = df.groupby("A", observed=observed).B.agg(np.nansum)
expected = pd.Series(
[3, 3, 0],
index=pd.CategoricalIndex(["a", "b", "c"], categories=categories, name="A"),
name="B",
)
if observed:
expected = expected[expected != 0]
tm.assert_series_equal(result, expected)
def test_agg_list_like_func():
# GH 18473
df = pd.DataFrame(
{"A": [str(x) for x in range(3)], "B": [str(x) for x in range(3)]}
)
grouped = df.groupby("A", as_index=False, sort=False)
result = grouped.agg({"B": lambda x: list(x)})
expected = pd.DataFrame(
{"A": [str(x) for x in range(3)], "B": [[str(x)] for x in range(3)]}
)
tm.assert_frame_equal(result, expected)
def test_agg_lambda_with_timezone():
# GH 23683
df = pd.DataFrame(
{
"tag": [1, 1],
"date": [
pd.Timestamp("2018-01-01", tz="UTC"),
pd.Timestamp("2018-01-02", tz="UTC"),
],
}
)
result = df.groupby("tag").agg({"date": lambda e: e.head(1)})
expected = pd.DataFrame(
[pd.Timestamp("2018-01-01", tz="UTC")],
index=pd.Index([1], name="tag"),
columns=["date"],
)
tm.assert_frame_equal(result, expected)
| 30.695222
| 96
| 0.537784
|
from collections import OrderedDict
import datetime as dt
from functools import partial
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
date_range,
period_range,
)
from pandas.core.groupby.groupby import SpecificationError
import pandas.util.testing as tm
from pandas.io.formats.printing import pprint_thing
def test_agg_api():
df = DataFrame(
{
"data1": np.random.randn(5),
"data2": np.random.randn(5),
"key1": ["a", "a", "b", "b", "a"],
"key2": ["one", "two", "one", "two", "one"],
}
)
grouped = df.groupby("key1")
def peak_to_peak(arr):
return arr.max() - arr.min()
expected = grouped.agg([peak_to_peak])
expected.columns = ["data1", "data2"]
result = grouped.agg(peak_to_peak)
tm.assert_frame_equal(result, expected)
def test_agg_datetimes_mixed():
data = [[1, "2012-01-01", 1.0], [2, "2012-01-02", 2.0], [3, None, 3.0]]
df1 = DataFrame(
{
"key": [x[0] for x in data],
"date": [x[1] for x in data],
"value": [x[2] for x in data],
}
)
data = [
[
row[0],
(dt.datetime.strptime(row[1], "%Y-%m-%d").date() if row[1] else None),
row[2],
]
for row in data
]
df2 = DataFrame(
{
"key": [x[0] for x in data],
"date": [x[1] for x in data],
"value": [x[2] for x in data],
}
)
df1["weights"] = df1["value"] / df1["value"].sum()
gb1 = df1.groupby("date").aggregate(np.sum)
df2["weights"] = df1["value"] / df1["value"].sum()
gb2 = df2.groupby("date").aggregate(np.sum)
assert len(gb1) == len(gb2)
def test_agg_period_index():
prng = period_range("2012-1-1", freq="M", periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
assert isinstance(rs.index, PeriodIndex)
index = period_range(start="1999-01", periods=5, freq="M")
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
series = [("s1", s1), ("s2", s2)]
df = DataFrame.from_dict(OrderedDict(series))
grouped = df.groupby(df.index.month)
list(grouped)
def test_agg_dict_parameter_cast_result_dtypes():
df = DataFrame(
{
"class": ["A", "A", "B", "B", "C", "C", "D", "D"],
"time": date_range("1/1/2011", periods=8, freq="H"),
}
)
df.loc[[0, 1, 2, 5], "time"] = None
exp = df.loc[[0, 3, 4, 6]].set_index("class")
grouped = df.groupby("class")
tm.assert_frame_equal(grouped.first(), exp)
tm.assert_frame_equal(grouped.agg("first"), exp)
tm.assert_frame_equal(grouped.agg({"time": "first"}), exp)
tm.assert_series_equal(grouped.time.first(), exp["time"])
tm.assert_series_equal(grouped.time.agg("first"), exp["time"])
exp = df.loc[[0, 3, 4, 7]].set_index("class")
grouped = df.groupby("class")
tm.assert_frame_equal(grouped.last(), exp)
tm.assert_frame_equal(grouped.agg("last"), exp)
tm.assert_frame_equal(grouped.agg({"time": "last"}), exp)
tm.assert_series_equal(grouped.time.last(), exp["time"])
tm.assert_series_equal(grouped.time.agg("last"), exp["time"])
exp = pd.Series([2, 2, 2, 2], index=Index(list("ABCD"), name="class"), name="time")
tm.assert_series_equal(grouped.time.agg(len), exp)
tm.assert_series_equal(grouped.time.size(), exp)
exp = pd.Series([0, 1, 1, 2], index=Index(list("ABCD"), name="class"), name="time")
tm.assert_series_equal(grouped.time.count(), exp)
def test_agg_cast_results_dtypes():
u = [dt.datetime(2015, x + 1, 1) for x in range(12)]
v = list("aaabbbbbbccd")
df = pd.DataFrame({"X": v, "Y": u})
result = df.groupby("X")["Y"].agg(len)
expected = df.groupby("X")["Y"].count()
tm.assert_series_equal(result, expected)
def test_aggregate_float64_no_int64():
df = DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 2, 2, 4, 5], "c": [1, 2, 3, 4, 5]})
expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a"]].mean()
tm.assert_frame_equal(result, expected)
expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a", "c"]].mean()
tm.assert_frame_equal(result, expected)
def test_aggregate_api_consistency():
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
grouped = df.groupby(["A", "B"])
c_mean = grouped["C"].mean()
c_sum = grouped["C"].sum()
d_mean = grouped["D"].mean()
d_sum = grouped["D"].sum()
result = grouped["D"].agg(["sum", "mean"])
expected = pd.concat([d_sum, d_mean], axis=1)
expected.columns = ["sum", "mean"]
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg([np.sum, np.mean])
expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)
expected.columns = MultiIndex.from_product([["C", "D"], ["sum", "mean"]])
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped[["D", "C"]].agg([np.sum, np.mean])
expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)
expected.columns = MultiIndex.from_product([["D", "C"], ["sum", "mean"]])
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({"C": "mean", "D": "sum"})
expected = pd.concat([d_sum, c_mean], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({"C": ["mean", "sum"], "D": ["mean", "sum"]})
expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1)
expected.columns = MultiIndex.from_product([["C", "D"], ["mean", "sum"]])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = grouped[["D", "C"]].agg({"r": np.sum, "r2": np.mean})
expected = pd.concat([d_sum, c_sum, d_mean, c_mean], axis=1)
expected.columns = MultiIndex.from_product([["r", "r2"], ["D", "C"]])
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_dict_renaming_deprecation():
df = pd.DataFrame({"A": [1, 1, 1, 2, 2], "B": range(5), "C": range(5)})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False) as w:
df.groupby("A").agg(
{"B": {"foo": ["sum", "max"]}, "C": {"bar": ["count", "min"]}}
)
assert "using a dict with renaming" in str(w[0].message)
assert "named aggregation" in str(w[0].message)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df.groupby("A")[["B", "C"]].agg({"ma": "max"})
with tm.assert_produces_warning(FutureWarning) as w:
df.groupby("A").B.agg({"foo": "count"})
assert "using a dict on a Series for aggregation" in str(w[0].message)
assert "named aggregation instead." in str(w[0].message)
def test_agg_compat():
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
g = df.groupby(["A", "B"])
expected = pd.concat([g["D"].sum(), g["D"].std()], axis=1)
expected.columns = MultiIndex.from_tuples([("C", "sum"), ("C", "std")])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g["D"].agg({"C": ["sum", "std"]})
tm.assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([g["D"].sum(), g["D"].std()], axis=1)
expected.columns = ["C", "D"]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g["D"].agg({"C": "sum", "D": "std"})
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_nested_dicts():
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
g = df.groupby(["A", "B"])
msg = r"cannot perform renaming for r[1-2] with a nested dictionary"
with pytest.raises(SpecificationError, match=msg):
g.aggregate({"r1": {"C": ["mean", "sum"]}, "r2": {"D": ["mean", "sum"]}})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g.agg({"C": {"ra": ["mean", "std"]}, "D": {"rb": ["mean", "std"]}})
expected = pd.concat(
[g["C"].mean(), g["C"].std(), g["D"].mean(), g["D"].std()], axis=1
)
expected.columns = pd.MultiIndex.from_tuples(
[("ra", "mean"), ("ra", "std"), ("rb", "mean"), ("rb", "std")]
)
tm.assert_frame_equal(result, expected, check_like=True)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
expected = g["D"].agg({"result1": np.sum, "result2": np.mean})
expected = expected.rename(columns={"result1": "D"})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g["D"].agg({"D": np.sum, "result2": np.mean})
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_item_by_item_raise_typeerror():
df = DataFrame(np.random.randint(10, size=(20, 10)))
def raiseException(df):
pprint_thing("----------------------------------------")
pprint_thing(df.to_string())
raise TypeError("test")
with pytest.raises(TypeError, match="test"):
df.groupby(0).agg(raiseException)
def test_series_agg_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
def test_series_agg_multi_pure_python():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
def bad(x):
assert len(x.values.base) > 0
return "foo"
result = data.groupby(["A", "B"]).agg(bad)
expected = data.groupby(["A", "B"]).agg(lambda x: "foo")
tm.assert_frame_equal(result, expected)
def test_agg_consistency():
def P1(a):
try:
return np.percentile(a.dropna(), q=1)
except Exception:
return np.nan
df = DataFrame(
{
"col1": [1, 2, 3, 4],
"col2": [10, 25, 26, 31],
"date": [
dt.date(2013, 2, 10),
dt.date(2013, 2, 10),
dt.date(2013, 2, 11),
dt.date(2013, 2, 11),
],
}
)
g = df.groupby("date")
expected = g.agg([P1])
expected.columns = expected.columns.levels[0]
result = g.agg(P1)
tm.assert_frame_equal(result, expected)
def test_agg_callables():
df = DataFrame({"foo": [1, 2], "bar": [3, 4]}).astype(np.int64)
class fn_class:
def __call__(self, x):
return sum(x)
equiv_callables = [
sum,
np.sum,
lambda x: sum(x),
lambda x: x.sum(),
partial(sum),
fn_class(),
]
expected = df.groupby("foo").agg(sum)
for ecall in equiv_callables:
result = df.groupby("foo").agg(ecall)
tm.assert_frame_equal(result, expected)
def test_agg_over_numpy_arrays():
df = pd.DataFrame(
[
[1, np.array([10, 20, 30])],
[1, np.array([40, 50, 60])],
[2, np.array([20, 30, 40])],
],
columns=["category", "arraydata"],
)
result = df.groupby("category").agg(sum)
expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]
expected_index = pd.Index([1, 2], name="category")
expected_column = ["arraydata"]
expected = pd.DataFrame(
expected_data, index=expected_index, columns=expected_column
)
tm.assert_frame_equal(result, expected)
def test_agg_timezone_round_trip():
ts = pd.Timestamp("2016-01-01 12:00:00", tz="US/Pacific")
df = pd.DataFrame(
{"a": 1, "b": [ts + dt.timedelta(minutes=nn) for nn in range(10)]}
)
result1 = df.groupby("a")["b"].agg(np.min).iloc[0]
result2 = df.groupby("a")["b"].agg(lambda x: np.min(x)).iloc[0]
result3 = df.groupby("a")["b"].min().iloc[0]
assert result1 == ts
assert result2 == ts
assert result3 == ts
dates = [
pd.Timestamp("2016-01-0%d 12:00:00" % i, tz="US/Pacific") for i in range(1, 5)
]
df = pd.DataFrame({"A": ["a", "b"] * 2, "B": dates})
grouped = df.groupby("A")
ts = df["B"].iloc[0]
assert ts == grouped.nth(0)["B"].iloc[0]
assert ts == grouped.head(1)["B"].iloc[0]
assert ts == grouped.first()["B"].iloc[0]
assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 0]
ts = df["B"].iloc[2]
assert ts == grouped.last()["B"].iloc[0]
assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 0]
def test_sum_uint64_overflow():
df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)
df = df + 9223372036854775807
index = pd.Index(
[9223372036854775808, 9223372036854775810, 9223372036854775812], dtype=np.uint64
)
expected = pd.DataFrame(
{1: [9223372036854775809, 9223372036854775811, 9223372036854775813]},
index=index,
)
expected.index.name = 0
result = df.groupby(0).sum()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"structure, expected",
[
(tuple, pd.DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})),
(list, pd.DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})),
(
lambda x: tuple(x),
pd.DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}}),
),
(
lambda x: list(x),
pd.DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}}),
),
],
)
def test_agg_structs_dataframe(structure, expected):
df = pd.DataFrame(
{"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
)
result = df.groupby(["A", "B"]).aggregate(structure)
expected.index.names = ["A", "B"]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"structure, expected",
[
(tuple, pd.Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
(list, pd.Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
(lambda x: tuple(x), pd.Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
(lambda x: list(x), pd.Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
],
)
def test_agg_structs_series(structure, expected):
# Issue #18079
df = pd.DataFrame(
{"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
)
result = df.groupby("A")["C"].aggregate(structure)
expected.index.name = "A"
tm.assert_series_equal(result, expected)
def test_agg_category_nansum(observed):
categories = ["a", "b", "c"]
df = pd.DataFrame(
{"A": pd.Categorical(["a", "a", "b"], categories=categories), "B": [1, 2, 3]}
)
result = df.groupby("A", observed=observed).B.agg(np.nansum)
expected = pd.Series(
[3, 3, 0],
index=pd.CategoricalIndex(["a", "b", "c"], categories=categories, name="A"),
name="B",
)
if observed:
expected = expected[expected != 0]
tm.assert_series_equal(result, expected)
def test_agg_list_like_func():
# GH 18473
df = pd.DataFrame(
{"A": [str(x) for x in range(3)], "B": [str(x) for x in range(3)]}
)
grouped = df.groupby("A", as_index=False, sort=False)
result = grouped.agg({"B": lambda x: list(x)})
expected = pd.DataFrame(
{"A": [str(x) for x in range(3)], "B": [[str(x)] for x in range(3)]}
)
tm.assert_frame_equal(result, expected)
def test_agg_lambda_with_timezone():
# GH 23683
df = pd.DataFrame(
{
"tag": [1, 1],
"date": [
pd.Timestamp("2018-01-01", tz="UTC"),
pd.Timestamp("2018-01-02", tz="UTC"),
],
}
)
result = df.groupby("tag").agg({"date": lambda e: e.head(1)})
expected = pd.DataFrame(
[pd.Timestamp("2018-01-01", tz="UTC")],
index=pd.Index([1], name="tag"),
columns=["date"],
)
tm.assert_frame_equal(result, expected)
| true
| true
|
7905575b290cc37b57379ed96675626f5008d2cd
| 4,931
|
py
|
Python
|
seno/full_node/sync_store.py
|
emilson0407/seno-blockchain
|
fa73fc06639faaacbb82504a6c8698c3bcab57c0
|
[
"Apache-2.0"
] | 33
|
2021-06-26T22:50:48.000Z
|
2022-02-09T04:31:40.000Z
|
seno/full_node/sync_store.py
|
emilson0407/seno-blockchain
|
fa73fc06639faaacbb82504a6c8698c3bcab57c0
|
[
"Apache-2.0"
] | 18
|
2021-06-27T17:13:13.000Z
|
2022-01-04T11:45:56.000Z
|
seno/full_node/sync_store.py
|
emilson0407/seno-blockchain
|
fa73fc06639faaacbb82504a6c8698c3bcab57c0
|
[
"Apache-2.0"
] | 19
|
2021-06-26T00:17:08.000Z
|
2022-03-15T06:58:21.000Z
|
import asyncio
import logging
from typing import Dict, List, Optional, Set, Tuple
from seno.types.blockchain_format.sized_bytes import bytes32
from seno.util.ints import uint32, uint128
log = logging.getLogger(__name__)
class SyncStore:
# Whether or not we are syncing
sync_mode: bool
long_sync: bool
peak_to_peer: Dict[bytes32, Set[bytes32]] # Header hash : peer node id
peer_to_peak: Dict[bytes32, Tuple[bytes32, uint32, uint128]] # peer node id : [header_hash, height, weight]
sync_target_header_hash: Optional[bytes32] # Peak hash we are syncing towards
sync_target_height: Optional[uint32] # Peak height we are syncing towards
peers_changed: asyncio.Event
batch_syncing: Set[bytes32] # Set of nodes which we are batch syncing from
backtrack_syncing: Dict[bytes32, int] # Set of nodes which we are backtrack syncing from, and how many threads
@classmethod
async def create(cls):
self = cls()
self.sync_mode = False
self.long_sync = False
self.sync_target_header_hash = None
self.sync_target_height = None
self.peak_fork_point = {}
self.peak_to_peer = {}
self.peer_to_peak = {}
self.peers_changed = asyncio.Event()
self.batch_syncing = set()
self.backtrack_syncing = {}
return self
def set_peak_target(self, peak_hash: bytes32, target_height: uint32):
self.sync_target_header_hash = peak_hash
self.sync_target_height = target_height
def get_sync_target_hash(self) -> Optional[bytes32]:
return self.sync_target_header_hash
def get_sync_target_height(self) -> Optional[uint32]:
return self.sync_target_height
def set_sync_mode(self, sync_mode: bool):
self.sync_mode = sync_mode
def get_sync_mode(self) -> bool:
return self.sync_mode
def set_long_sync(self, long_sync: bool):
self.long_sync = long_sync
def get_long_sync(self) -> bool:
return self.long_sync
def peer_has_block(self, header_hash: bytes32, peer_id: bytes32, weight: uint128, height: uint32, new_peak: bool):
"""
Adds a record that a certain peer has a block.
"""
if header_hash == self.sync_target_header_hash:
self.peers_changed.set()
if header_hash in self.peak_to_peer:
self.peak_to_peer[header_hash].add(peer_id)
else:
self.peak_to_peer[header_hash] = {peer_id}
if new_peak:
self.peer_to_peak[peer_id] = (header_hash, height, weight)
def get_peers_that_have_peak(self, header_hashes: List[bytes32]) -> Set[bytes32]:
"""
Returns: peer ids of peers that have at least one of the header hashes.
"""
node_ids: Set[bytes32] = set()
for header_hash in header_hashes:
if header_hash in self.peak_to_peer:
for node_id in self.peak_to_peer[header_hash]:
node_ids.add(node_id)
return node_ids
def get_peak_of_each_peer(self) -> Dict[bytes32, Tuple[bytes32, uint32, uint128]]:
"""
Returns: dictionary of peer id to peak information.
"""
ret = {}
for peer_id, v in self.peer_to_peak.items():
if v[0] not in self.peak_to_peer:
continue
ret[peer_id] = v
return ret
def get_heaviest_peak(self) -> Optional[Tuple[bytes32, uint32, uint128]]:
"""
Returns: the header_hash, height, and weight of the heaviest block that one of our peers has notified
us of.
"""
if len(self.peer_to_peak) == 0:
return None
heaviest_peak_hash: Optional[bytes32] = None
heaviest_peak_weight: uint128 = uint128(0)
heaviest_peak_height: Optional[uint32] = None
for peer_id, (peak_hash, height, weight) in self.peer_to_peak.items():
if peak_hash not in self.peak_to_peer:
continue
if heaviest_peak_hash is None or weight > heaviest_peak_weight:
heaviest_peak_hash = peak_hash
heaviest_peak_weight = weight
heaviest_peak_height = height
assert heaviest_peak_hash is not None and heaviest_peak_weight is not None and heaviest_peak_height is not None
return heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight
async def clear_sync_info(self):
"""
Clears the peak_to_peer info which can get quite large.
"""
self.peak_to_peer = {}
def peer_disconnected(self, node_id: bytes32):
if node_id in self.peer_to_peak:
del self.peer_to_peak[node_id]
for peak, peers in self.peak_to_peer.items():
if node_id in peers:
self.peak_to_peer[peak].remove(node_id)
assert node_id not in self.peak_to_peer[peak]
self.peers_changed.set()
| 35.992701
| 119
| 0.653012
|
import asyncio
import logging
from typing import Dict, List, Optional, Set, Tuple
from seno.types.blockchain_format.sized_bytes import bytes32
from seno.util.ints import uint32, uint128
log = logging.getLogger(__name__)
class SyncStore:
sync_mode: bool
long_sync: bool
peak_to_peer: Dict[bytes32, Set[bytes32]]
peer_to_peak: Dict[bytes32, Tuple[bytes32, uint32, uint128]]
sync_target_header_hash: Optional[bytes32]
sync_target_height: Optional[uint32]
peers_changed: asyncio.Event
batch_syncing: Set[bytes32]
backtrack_syncing: Dict[bytes32, int]
@classmethod
async def create(cls):
self = cls()
self.sync_mode = False
self.long_sync = False
self.sync_target_header_hash = None
self.sync_target_height = None
self.peak_fork_point = {}
self.peak_to_peer = {}
self.peer_to_peak = {}
self.peers_changed = asyncio.Event()
self.batch_syncing = set()
self.backtrack_syncing = {}
return self
def set_peak_target(self, peak_hash: bytes32, target_height: uint32):
self.sync_target_header_hash = peak_hash
self.sync_target_height = target_height
def get_sync_target_hash(self) -> Optional[bytes32]:
return self.sync_target_header_hash
def get_sync_target_height(self) -> Optional[uint32]:
return self.sync_target_height
def set_sync_mode(self, sync_mode: bool):
self.sync_mode = sync_mode
def get_sync_mode(self) -> bool:
return self.sync_mode
def set_long_sync(self, long_sync: bool):
self.long_sync = long_sync
def get_long_sync(self) -> bool:
return self.long_sync
def peer_has_block(self, header_hash: bytes32, peer_id: bytes32, weight: uint128, height: uint32, new_peak: bool):
if header_hash == self.sync_target_header_hash:
self.peers_changed.set()
if header_hash in self.peak_to_peer:
self.peak_to_peer[header_hash].add(peer_id)
else:
self.peak_to_peer[header_hash] = {peer_id}
if new_peak:
self.peer_to_peak[peer_id] = (header_hash, height, weight)
def get_peers_that_have_peak(self, header_hashes: List[bytes32]) -> Set[bytes32]:
node_ids: Set[bytes32] = set()
for header_hash in header_hashes:
if header_hash in self.peak_to_peer:
for node_id in self.peak_to_peer[header_hash]:
node_ids.add(node_id)
return node_ids
def get_peak_of_each_peer(self) -> Dict[bytes32, Tuple[bytes32, uint32, uint128]]:
ret = {}
for peer_id, v in self.peer_to_peak.items():
if v[0] not in self.peak_to_peer:
continue
ret[peer_id] = v
return ret
def get_heaviest_peak(self) -> Optional[Tuple[bytes32, uint32, uint128]]:
if len(self.peer_to_peak) == 0:
return None
heaviest_peak_hash: Optional[bytes32] = None
heaviest_peak_weight: uint128 = uint128(0)
heaviest_peak_height: Optional[uint32] = None
for peer_id, (peak_hash, height, weight) in self.peer_to_peak.items():
if peak_hash not in self.peak_to_peer:
continue
if heaviest_peak_hash is None or weight > heaviest_peak_weight:
heaviest_peak_hash = peak_hash
heaviest_peak_weight = weight
heaviest_peak_height = height
assert heaviest_peak_hash is not None and heaviest_peak_weight is not None and heaviest_peak_height is not None
return heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight
async def clear_sync_info(self):
self.peak_to_peer = {}
def peer_disconnected(self, node_id: bytes32):
if node_id in self.peer_to_peak:
del self.peer_to_peak[node_id]
for peak, peers in self.peak_to_peer.items():
if node_id in peers:
self.peak_to_peer[peak].remove(node_id)
assert node_id not in self.peak_to_peer[peak]
self.peers_changed.set()
| true
| true
|
79055773e342b565349c3866c5c53ea28f9eb2a8
| 636
|
py
|
Python
|
pyffm/test/test_utils.py
|
mascaroa/pyffm
|
2445ed2c048347ebbfc76d39990065eb76a8d784
|
[
"MIT"
] | 4
|
2020-12-22T02:59:37.000Z
|
2022-03-28T20:54:40.000Z
|
pyffm/test/test_utils.py
|
mascaroa/pyffm
|
2445ed2c048347ebbfc76d39990065eb76a8d784
|
[
"MIT"
] | 1
|
2021-04-05T01:56:13.000Z
|
2021-11-10T02:40:31.000Z
|
pyffm/test/test_utils.py
|
mascaroa/pyffm
|
2445ed2c048347ebbfc76d39990065eb76a8d784
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
import string
from pyffm.util import Map
class TestMap(unittest.TestCase):
def test_basic(self):
map1 = Map()
map_size_to_test = 1000
all_letters = string.ascii_uppercase + string.ascii_lowercase
counter = 0
for char in "".join(
all_letters[np.random.choice(len(all_letters))]
for _ in range(map_size_to_test)
):
if char not in map1:
counter += 1
map_index = map1.add(char)
self.assertEqual(map_index, map1._map_dict[char])
self.assertEqual(len(map1), counter)
| 26.5
| 69
| 0.610063
|
import unittest
import numpy as np
import string
from pyffm.util import Map
class TestMap(unittest.TestCase):
def test_basic(self):
map1 = Map()
map_size_to_test = 1000
all_letters = string.ascii_uppercase + string.ascii_lowercase
counter = 0
for char in "".join(
all_letters[np.random.choice(len(all_letters))]
for _ in range(map_size_to_test)
):
if char not in map1:
counter += 1
map_index = map1.add(char)
self.assertEqual(map_index, map1._map_dict[char])
self.assertEqual(len(map1), counter)
| true
| true
|
790559a4a8ab6d684e8ef5b88798f1797fc0fa6e
| 200
|
py
|
Python
|
tests/util.py
|
popravich/rdbtools3
|
c2b097f58e7d3a3b12e6671aa413c263c1fb96cf
|
[
"MIT"
] | 3
|
2016-01-12T23:14:47.000Z
|
2019-07-10T05:36:22.000Z
|
tests/util.py
|
popravich/rdbtools3
|
c2b097f58e7d3a3b12e6671aa413c263c1fb96cf
|
[
"MIT"
] | null | null | null |
tests/util.py
|
popravich/rdbtools3
|
c2b097f58e7d3a3b12e6671aa413c263c1fb96cf
|
[
"MIT"
] | null | null | null |
import io
import os.path
_DUMPS = os.path.join(os.path.dirname(__file__), 'dumps')
def load_dump(fname):
with open(os.path.join(_DUMPS, fname), 'rb') as f:
return io.BytesIO(f.read())
| 18.181818
| 57
| 0.665
|
import io
import os.path
_DUMPS = os.path.join(os.path.dirname(__file__), 'dumps')
def load_dump(fname):
with open(os.path.join(_DUMPS, fname), 'rb') as f:
return io.BytesIO(f.read())
| true
| true
|
79055abec64a4aaf513564325568d9bd7fc1157d
| 544
|
py
|
Python
|
bot/utils/prometheus_tools.py
|
trilleplay/kanelbulle
|
1e715dced4f63437b287078108d651155824429e
|
[
"MIT"
] | 4
|
2018-09-23T10:13:16.000Z
|
2018-10-31T19:07:53.000Z
|
bot/utils/prometheus_tools.py
|
trilleplay/kanelbulle
|
1e715dced4f63437b287078108d651155824429e
|
[
"MIT"
] | 5
|
2018-09-30T08:34:54.000Z
|
2018-10-27T09:04:53.000Z
|
bot/utils/prometheus_tools.py
|
trilleplay/kanelbulle
|
1e715dced4f63437b287078108d651155824429e
|
[
"MIT"
] | 2
|
2018-09-29T22:32:43.000Z
|
2019-07-18T15:15:51.000Z
|
from prometheus_client import start_http_server, Gauge, Counter
all_users = Gauge('users_in_all_guilds', 'All users the bot is able to see.')
all_guilds = Gauge('guilds_bot_is_in', 'The amount of guilds the bot is in.')
ready_events = Counter('ready_events', 'Amount of READY events recieved during uptime.')
message_events = Counter('message_events', 'Amount of messages sent during uptime.')
reconnects = Counter('reconnects', 'Amount of reconnects the bot has done to Discords API.')
def startup_prometheus():
start_http_server(9091)
| 45.333333
| 92
| 0.775735
|
from prometheus_client import start_http_server, Gauge, Counter
all_users = Gauge('users_in_all_guilds', 'All users the bot is able to see.')
all_guilds = Gauge('guilds_bot_is_in', 'The amount of guilds the bot is in.')
ready_events = Counter('ready_events', 'Amount of READY events recieved during uptime.')
message_events = Counter('message_events', 'Amount of messages sent during uptime.')
reconnects = Counter('reconnects', 'Amount of reconnects the bot has done to Discords API.')
def startup_prometheus():
start_http_server(9091)
| true
| true
|
79055b2ff650728675ac64f5f2d9b12e54f1cd39
| 23,655
|
py
|
Python
|
tests/test_book.py
|
nilfoer/mangadb
|
860d7de310002735631ea26810b4df5b6bc08d7b
|
[
"MIT"
] | 3
|
2021-01-14T16:22:41.000Z
|
2022-02-21T03:31:22.000Z
|
tests/test_book.py
|
nilfoer/mangadb
|
860d7de310002735631ea26810b4df5b6bc08d7b
|
[
"MIT"
] | 13
|
2021-01-14T10:34:19.000Z
|
2021-05-20T08:47:54.000Z
|
tests/test_book.py
|
nilfoer/mangadb
|
860d7de310002735631ea26810b4df5b6bc08d7b
|
[
"MIT"
] | 1
|
2022-02-24T03:10:04.000Z
|
2022-02-24T03:10:04.000Z
|
import os
import datetime
import logging
import sqlite3
import pytest
from utils import setup_mdb_dir, all_book_info, load_db_from_sql_file, TESTS_DIR
from manga_db.manga_db import MangaDB
from manga_db.manga import Book
from manga_db.ext_info import ExternalInfo
from manga_db.constants import LANG_IDS
@pytest.mark.parametrize("title_eng, title_foreign, expected", [
("English", "Foreign", "English / Foreign"),
("English", None, "English"),
(None, "Foreign", "Foreign")])
def test_build_title(title_eng, title_foreign, expected):
assert Book.build_title(title_eng, title_foreign) == expected
def test_fetch_extinfo(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
b = Book(mdb, in_db=False, id=16)
assert b.ext_infos == []
db_con = memdb
ei_rows_man = db_con.execute("SELECT * FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
ei1 = ExternalInfo(mdb, b, **ei_rows_man[0])
ei2 = ExternalInfo(mdb, b, **ei_rows_man[1])
assert b._fetch_external_infos() == [ei1, ei2]
def test_fetch_assoc_col(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
b = Book(mdb, in_db=False, id=14)
tags = ["Ahegao", "Anal", "Collar", "Large Breasts", "Maid", "Mind Break",
"Mind Control", "Nakadashi", "Office Lady", "Pantyhose", "Rape", "Stockings",
"X-ray"]
assert sorted(b._fetch_associated_column("tag")) == sorted(tags)
assert b._fetch_associated_column("character") == []
assert b._fetch_associated_column("artist") == ["Fan no Hitori"]
def test_upd_assoc_col(monkeypatch, setup_mdb_dir):
# update_assoc_columns/get_assoc_cols
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
# pass last_change kwarg so it doesnt get auto set and counts as change
b = Book(mdb, in_db=False, id=12, last_change=datetime.date.today())
ei_row = db_con.execute("SELECT * FROM ExternalInfo WHERE id = 12").fetchone()
ei = ExternalInfo(mdb, b, **ei_row)
tags = ("Anal;Femdom;Large Breasts;Nakadashi;Straight Shota;Big Ass;Short Hair;Hat"
";Royalty;Dark Skin;Huge Penis;Big Areola;Defloration;Double Penetration;"
"Elder Sister;Tall Girl".split(";"))
artists = ["Kaneda Asou"]
category = ["Doujinshi"]
groups = ["Dokumushi Shokeitai"]
lists = ["to-read"]
assoc_cols = b.get_associated_columns()
assert assoc_cols["tag"] == tags
assert assoc_cols["artist"] == artists
assert assoc_cols["category"] == category
assert assoc_cols["groups"] == groups
assert assoc_cols["list"] == lists
assert assoc_cols["character"] == []
assert assoc_cols["collection"] == []
assert assoc_cols["parody"] == []
assert assoc_cols["ext_infos"] == [ei]
# upd
# changes
b.tag = ["delchange1", "delchange"]
b.category = ["testcat"]
b.update_assoc_columns_from_db()
# changes should be reset
assert not b._committed_state
assert b.tag == tags
assert b.artist == artists
assert b.category == category
assert b.groups == groups
assert b.list == lists
assert b.character == []
assert b.collection == []
assert b.parody == []
assert b.ext_infos == [ei]
b = Book(mdb, in_db=False, id=16, last_change=datetime.date.today())
ei_rows = db_con.execute("SELECT * FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
ei1 = ExternalInfo(mdb, b, **ei_rows[0])
ei2 = ExternalInfo(mdb, b, **ei_rows[1])
tags = ("Blowjob;Ahegao;Megane;Happy Sex;Threesome;Group Sex;Layer Cake;Selfcest".split(";"))
artists = ["bariun"]
category = ["Doujinshi"]
characters = ["Akira Kurusu", "Futaba Sakura"]
parodies = ["Persona 5 / ペルソナ5"]
lists = ["to-read"]
assoc_cols = b.get_associated_columns()
assert assoc_cols["tag"] == tags
assert assoc_cols["artist"] == artists
assert assoc_cols["category"] == category
assert assoc_cols["groups"] == []
assert assoc_cols["list"] == lists
assert assoc_cols["character"] == characters
assert assoc_cols["collection"] == []
assert assoc_cols["parody"] == parodies
assert assoc_cols["ext_infos"] == [ei1, ei2]
# upd
# changes
b.groups = ["delchange1", "delchange"]
b.artist = ["tartist"]
b.update_assoc_columns_from_db()
# changes should be reset
assert not b._committed_state
assert b.tag == tags
assert b.artist == artists
assert b.category == category
assert b.groups == []
assert b.list == lists
assert b.character == characters
assert b.collection == []
assert b.parody == parodies
assert b.ext_infos == [ei1, ei2]
def test_diff(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
# not testing change_str
b1_data = dict(
id=None,
title_eng="Same",
title_foreign="Different1",
language_id=1,
pages=25,
status_id=1,
my_rating=4.3,
category=["Manga"],
collection=["Diff collection1"],
groups=["Artistgroup"],
artist=["Diff1", "Diff2"],
parody=["Blabla"],
character=["Char1", "Char2", "Char3"],
list=["to-read", "to-download"],
tag=["Tag1", "Tag2", "Tag3"],
ext_infos=None,
last_change=datetime.date(2018, 6, 3),
note=None,
favorite=0
)
b1 = Book(mdb, **b1_data)
b2_data = dict(
id=None,
title_eng="Same",
title_foreign="Different2",
language_id=1,
pages=27,
status_id=1,
my_rating=None,
category=["Manga"],
collection=["Diff collection2"],
groups=["Artistgroup"],
artist=["Diff", "Diff2", "Diff3"],
parody=["Blabla"],
character=["Char1", "Char5", "Char3"],
list=["to-read", "to-download"],
tag=["Tag1", "Tag2", "Tag3"],
ext_infos=None,
last_change=datetime.date(2018, 4, 3),
note=None,
favorite=1
)
b2 = Book(mdb, **b2_data)
changes, change_str = b1.diff(b2)
changes_expected = dict(
title_foreign="Different2",
pages=27,
my_rating=None,
# added removed
collection=({"Diff collection2"}, {"Diff collection1"}),
artist=({"Diff", "Diff3"}, {"Diff1"}),
character=({"Char5"}, {"Char2"}),
last_change=datetime.date(2018, 4, 3),
favorite=1
)
assert changes == changes_expected
def test_add_rem_assoc(monkeypatch, setup_mdb_dir):
# _add/_remove assoc col
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
b = mdb.get_book(5)
tag_before = b.tag.copy()
tag_change = ["Test1", "Test2", "Blabla"]
# _add_associated_column_values doesnt commit
with mdb.db_con:
b._add_associated_column_values("tag", tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 5""").fetchone()
assert tag[0].split(";")[-3:] == tag_change
with mdb.db_con:
b._remove_associated_column_values("tag", tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 5""").fetchone()
assert tag[0].split(";") == tag_before
def test_static_db_methods(monkeypatch, setup_mdb_dir):
# static db methods
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
tag_before = "Large Breasts;Nakadashi;Blowjob;Threesome;Bikini;Group Sex;Swimsuit".split(";")
tag_change = ["Test1", "Test2", "Blabla"]
# before is last arg so staticmethod can set attr on book if its loaded (in id_map)
Book.add_assoc_col_on_book_id(mdb, 13, "tag", tag_change, tag_before)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 13""").fetchone()
assert tag[0].split(";")[-3:] == tag_change
Book.remove_assoc_col_on_book_id(mdb, 13, "tag", tag_change, tag_before + tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 13""").fetchone()
assert tag[0].split(";") == tag_before
# load book so its in id_map and make sure add_remove_assoc also sets attr on book
b = mdb.get_book(16)
tag_before = ("Blowjob;Ahegao;Megane;Happy Sex;Threesome;Group Sex;"
"Layer Cake;Selfcest".split(";"))
tag_change = ["Test3", "Test4", "Blablabla"]
# before is last arg so staticmethod can set attr on book if its loaded (in id_map)
Book.add_assoc_col_on_book_id(mdb, 16, "tag", tag_change, tag_before)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 16""").fetchone()
assert tag[0].split(";")[-3:] == tag_change
# also set attr on book
assert b.tag[-3:] == tag_change
Book.remove_assoc_col_on_book_id(mdb, 16, "tag", tag_change, tag_before + tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 16""").fetchone()
assert tag[0].split(";") == tag_before
# also set attr on book
assert b.tag == tag_before
Book.set_favorite_id(mdb, 2, 1)
fav = db_con.execute("SELECT favorite FROM Books WHERE id = 2").fetchone()
assert 1 == fav[0]
b = mdb.get_book(7)
Book.set_favorite_id(mdb, 7, 1)
fav = db_con.execute("SELECT favorite FROM Books WHERE id = 7").fetchone()
assert 1 == fav[0]
# also set on book
assert b.favorite == 1
Book.rate_book_id(mdb, 3, 3.5)
rat = db_con.execute("SELECT my_rating FROM Books WHERE id = 3").fetchone()
assert 3.5 == rat[0]
b = mdb.get_book(8)
Book.rate_book_id(mdb, 8, 4.25)
rat = db_con.execute("SELECT my_rating FROM Books WHERE id = 8").fetchone()
assert 4.25 == rat[0]
# also set on book
assert b.my_rating == 4.25
def test_remove_book(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
import shutil
# copy cover
os.makedirs(os.path.join(tmpdir, "thumbs"))
cover_path = os.path.join(tmpdir, "thumbs", "16")
shutil.copyfile(os.path.join(tmpdir, os.pardir, "book_test_files", "16"), cover_path)
db_con = memdb
# book removed and all ext infos
b = mdb.get_book(16)
b.remove()
assert b._in_db is False
# deleted from id map
with pytest.raises(KeyError):
mdb.id_map[b.key]
b_row = db_con.execute("SELECT id FROM Books WHERE id = 16").fetchall()
assert not b_row
ei_rows = db_con.execute("SELECT id FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
assert not ei_rows
# cover deleted
assert not os.path.exists(cover_path)
def test_remove_extinfo(monkeypatch, setup_mdb_dir, caplog):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
b = mdb.get_book(16)
caplog.clear()
assert b.remove_ext_info(99) is None
assert caplog.record_tuples == [
("manga_db.manga", logging.ERROR, "No external info with id 99 found!")
]
assert b.remove_ext_info(18) == "https://www.tsumino.com/entry/43454"
assert len(b.ext_infos) == 1
assert b.ext_infos[0].id == 16
assert b.remove_ext_info(16)
assert not b.ext_infos
caplog.clear()
assert b.remove_ext_info(4939) is None
assert caplog.record_tuples == [
("manga_db.manga", logging.WARNING, "No external infos on book with id 16 or not"
" fetched from DB yet!")
]
def test_save_book(monkeypatch, setup_mdb_dir, caplog):
# save: _add _update
# incl! _update_assoc_cols -> "
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
# _add
ei_data = dict(
id=None,
book_id=None,
url="http://test1.com",
id_onpage='1111',
imported_from=1,
upload_date=datetime.date(2018, 4, 13),
uploader="Uploader",
censor_id=1,
rating=4.19,
ratings=165,
favorites=300,
downloaded=None,
last_update=None,
outdated=None,
)
b1_data = dict(
id=None,
title_eng="Add1",
title_foreign="Foreign1",
language_id=1,
pages=25,
chapter_status="Vol. 2 Ch. 14",
read_status=13,
status_id=1,
my_rating=None,
category=["Manga"],
collection=None,
groups=["Artistgroup"],
artist=["Diff1", "Diff2"],
parody=["Blabla"],
character=["Char1", "Char2", "Char3"],
list=["to-read", "to-download"],
tag=["Tag1", "Tag2", "Tag3"],
ext_infos=None,
last_change=datetime.date(2018, 6, 3),
note=None,
favorite=None,
cover_timestamp=None,
nsfw=1
)
b1 = Book(mdb, **b1_data)
# since we later check that cover_timestamp gets saved as 0.0 if None
b1_data['cover_timestamp'] = 0.0
ei1 = ExternalInfo(mdb, b1, **ei_data)
ei2 = ExternalInfo(mdb, b1, **ei_data)
# will outdate extinfo 8
ei2.id_onpage = '43506'
b1.ext_infos = [ei1, ei2]
assert b1._in_db is False
bid, outdated = b1.save()
assert bid == 18
assert b1.id == 18
# in_db + id_map, committed reset
assert b1._in_db is True
assert mdb.id_map[b1.key] is b1
assert not b1._committed_state
book_info_db = all_book_info(db_con, 18, include_id=True)
assert len(book_info_db) == 2
# fav set correctly
assert book_info_db[0]["favorite"] == 0
assert b1.favorite == 0
compare_cols_row_book_data(b1, book_info_db[0], b1_data, special={"favorite": 0})
# outdated, list of ext info ids that outdated others
assert outdated == [20]
# extinfo saved
eis = db_con.execute("SELECT id, book_id, id_onpage FROM ExternalInfo "
"WHERE id > 18").fetchall()
assert len(eis) == 2
assert eis[0]["book_id"] == 18
assert eis[1]["book_id"] == 18
assert eis[0]["id_onpage"] == '1111'
assert eis[1]["id_onpage"] == '43506'
# add book with new lang
b2 = Book(mdb, title_eng="Test2", favorite=1, pages=11, status_id=1, nsfw=0)
b2.language = "Krababbl"
bid, _ = b2.save()
assert bid == 19
assert b2.id == 19
# /2 since we have double indirection id->name name->id
expected_lang_id = len(LANG_IDS) / 2 + 1
assert b2.language_id == expected_lang_id
lang = db_con.execute("SELECT id FROM Languages WHERE name = 'Krababbl'").fetchall()
assert lang
assert lang[0][0] == expected_lang_id
brow = db_con.execute("SELECT title_eng, favorite FROM Books WHERE id = 19").fetchone()
assert brow[0] == "Test2"
assert brow["favorite"] == 1
assert b2.favorite == 1
assert b2._in_db is True
assert not b2._committed_state
assert mdb.id_map[b2.key] is b2
# _update
bu1 = Book(mdb, id=None, title_eng="Kangofu-san ni Kintama Sakusei Saremashita",
title_foreign="看護婦さんにキンタマ搾精されました", in_db=False)
bu1.in_db = True
# test not updating when block_update kwarg is true
caplog.clear()
assert bu1.save(block_update=True) == (None, None)
assert caplog.record_tuples == [
("manga_db.manga", logging.DEBUG,
f"Book was found in DB(id 15) but saving was blocked due to "
"block_update option!")
]
bu2 = mdb.get_book(11)
# dont do anything if no changes
caplog.clear()
assert not bu2._committed_state
assert bu2.save() == (11, None)
assert caplog.record_tuples == [
("manga_db.manga", logging.DEBUG, "No changes to save for book with id 11")
]
assert not bu2._committed_state
before = bu2.export_for_db()
# empty assoc list to None
before.update({col: getattr(bu2, col) if getattr(bu2, col) else None
for col in bu2.ASSOCIATED_COLUMNS})
bu2.language = "adlalad"
change = {
"title_eng": "Altered",
"language_id": 3,
"my_rating": 4.75,
"favorite": 1,
# removed and added
"tag": ("Large Breasts;Test33;Nakadashi;Ahegao;Gender Bender;Dark Skin;Elf;Body Swap"
";Bondage;Filming;Test Tag".split(";")),
# added
"artist": ["Taniguchi-san", "Newartist"],
# same
"category": ["Manga"],
# none added
"character": ["Char111", "Char222"]
}
bu2.update_from_dict(change)
before.update(change)
bid, _ = bu2.save()
book_info_db = all_book_info(db_con, 11, include_id=True)
compare_cols_row_book_data(bu2, book_info_db, before,
special={"last_change": datetime.date.today()})
# committed reset
assert not bu2._committed_state
# last_change
assert bu2.last_change == datetime.date.today()
assert book_info_db["last_change"] == datetime.date.today()
bu3 = mdb.get_book(7)
assert not bu3._committed_state
before = bu3.export_for_db()
# empty assoc list to None
before.update({col: getattr(bu3, col) if getattr(bu3, col) else None
for col in bu3.ASSOCIATED_COLUMNS})
change = {
"title_foreign": "ForeignAltered",
"pages": 13,
"note": "Note blabla",
# set None
"tag": None,
# set None
"artist": None,
# changed
"category": ["Manga"],
# none added
"collection": ["Col1", "Col2"],
"groups": ["Grp1", "Grp2", "Senpenbankashiki"]
}
bu3.update_from_dict(change)
before.update(change)
bid, _ = bu3.save()
book_info_db = all_book_info(db_con, 7, include_id=True)
compare_cols_row_book_data(bu3, book_info_db, before,
special={"last_change": datetime.date.today()})
# committed reset
assert not bu3._committed_state
# last_change
assert bu3.last_change == datetime.date.today()
assert book_info_db["last_change"] == datetime.date.today()
assoc_concat = {
"tag": "tags", "artist": "artists", "category": "categories", "character": "characters",
"collection": "collections", "groups": "groups", "list": "lists", "parody": "parodies"
}
def compare_cols_row_book_data(book, row, data, special=None):
if special is None:
special = {}
for col in Book.COLUMNS:
row_val = row[col]
data_val = data[col]
if col in special:
# specific values that are incorrect in data
assert row_val == special[col]
assert getattr(book, col) == special[col]
elif data_val is None:
# use is comparison for None
assert row_val is None
assert getattr(book, col) is None
else:
assert row_val == data_val
assert getattr(book, col) == data_val
for col in Book.ASSOCIATED_COLUMNS:
if col == "ext_infos":
continue
# look up plural of col to get name of concat assoc col
col_assoc_concat = assoc_concat[col]
row_val = row[col_assoc_concat]
if row_val is not None:
# row_val is concatted values
# need sorted to compare (or use set)
row_val = sorted(row_val.split(";")) if ";" in row_val else [row_val]
# need sorted to compare (or use set)
data_val = sorted(data[col]) if data[col] else None
book_val = getattr(book, col)
book_val = sorted(book_val) if book_val else book_val
if col in special:
# specific values that are incorrect in data
assert row_val == special[col]
assert book_val == special[col]
elif data_val is None:
# assoc col doesnt return None only empty trackable
assert row_val is None
assert book_val == []
else:
assert row_val == data_val
assert book_val == data_val
| 36.960938
| 97
| 0.61023
|
import os
import datetime
import logging
import sqlite3
import pytest
from utils import setup_mdb_dir, all_book_info, load_db_from_sql_file, TESTS_DIR
from manga_db.manga_db import MangaDB
from manga_db.manga import Book
from manga_db.ext_info import ExternalInfo
from manga_db.constants import LANG_IDS
@pytest.mark.parametrize("title_eng, title_foreign, expected", [
("English", "Foreign", "English / Foreign"),
("English", None, "English"),
(None, "Foreign", "Foreign")])
def test_build_title(title_eng, title_foreign, expected):
assert Book.build_title(title_eng, title_foreign) == expected
def test_fetch_extinfo(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
b = Book(mdb, in_db=False, id=16)
assert b.ext_infos == []
db_con = memdb
ei_rows_man = db_con.execute("SELECT * FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
ei1 = ExternalInfo(mdb, b, **ei_rows_man[0])
ei2 = ExternalInfo(mdb, b, **ei_rows_man[1])
assert b._fetch_external_infos() == [ei1, ei2]
def test_fetch_assoc_col(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
b = Book(mdb, in_db=False, id=14)
tags = ["Ahegao", "Anal", "Collar", "Large Breasts", "Maid", "Mind Break",
"Mind Control", "Nakadashi", "Office Lady", "Pantyhose", "Rape", "Stockings",
"X-ray"]
assert sorted(b._fetch_associated_column("tag")) == sorted(tags)
assert b._fetch_associated_column("character") == []
assert b._fetch_associated_column("artist") == ["Fan no Hitori"]
def test_upd_assoc_col(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
b = Book(mdb, in_db=False, id=12, last_change=datetime.date.today())
ei_row = db_con.execute("SELECT * FROM ExternalInfo WHERE id = 12").fetchone()
ei = ExternalInfo(mdb, b, **ei_row)
tags = ("Anal;Femdom;Large Breasts;Nakadashi;Straight Shota;Big Ass;Short Hair;Hat"
";Royalty;Dark Skin;Huge Penis;Big Areola;Defloration;Double Penetration;"
"Elder Sister;Tall Girl".split(";"))
artists = ["Kaneda Asou"]
category = ["Doujinshi"]
groups = ["Dokumushi Shokeitai"]
lists = ["to-read"]
assoc_cols = b.get_associated_columns()
assert assoc_cols["tag"] == tags
assert assoc_cols["artist"] == artists
assert assoc_cols["category"] == category
assert assoc_cols["groups"] == groups
assert assoc_cols["list"] == lists
assert assoc_cols["character"] == []
assert assoc_cols["collection"] == []
assert assoc_cols["parody"] == []
assert assoc_cols["ext_infos"] == [ei]
b.tag = ["delchange1", "delchange"]
b.category = ["testcat"]
b.update_assoc_columns_from_db()
assert not b._committed_state
assert b.tag == tags
assert b.artist == artists
assert b.category == category
assert b.groups == groups
assert b.list == lists
assert b.character == []
assert b.collection == []
assert b.parody == []
assert b.ext_infos == [ei]
b = Book(mdb, in_db=False, id=16, last_change=datetime.date.today())
ei_rows = db_con.execute("SELECT * FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
ei1 = ExternalInfo(mdb, b, **ei_rows[0])
ei2 = ExternalInfo(mdb, b, **ei_rows[1])
tags = ("Blowjob;Ahegao;Megane;Happy Sex;Threesome;Group Sex;Layer Cake;Selfcest".split(";"))
artists = ["bariun"]
category = ["Doujinshi"]
characters = ["Akira Kurusu", "Futaba Sakura"]
parodies = ["Persona 5 / ペルソナ5"]
lists = ["to-read"]
assoc_cols = b.get_associated_columns()
assert assoc_cols["tag"] == tags
assert assoc_cols["artist"] == artists
assert assoc_cols["category"] == category
assert assoc_cols["groups"] == []
assert assoc_cols["list"] == lists
assert assoc_cols["character"] == characters
assert assoc_cols["collection"] == []
assert assoc_cols["parody"] == parodies
assert assoc_cols["ext_infos"] == [ei1, ei2]
b.groups = ["delchange1", "delchange"]
b.artist = ["tartist"]
b.update_assoc_columns_from_db()
assert not b._committed_state
assert b.tag == tags
assert b.artist == artists
assert b.category == category
assert b.groups == []
assert b.list == lists
assert b.character == characters
assert b.collection == []
assert b.parody == parodies
assert b.ext_infos == [ei1, ei2]
def test_diff(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
b1_data = dict(
id=None,
title_eng="Same",
title_foreign="Different1",
language_id=1,
pages=25,
status_id=1,
my_rating=4.3,
category=["Manga"],
collection=["Diff collection1"],
groups=["Artistgroup"],
artist=["Diff1", "Diff2"],
parody=["Blabla"],
character=["Char1", "Char2", "Char3"],
list=["to-read", "to-download"],
tag=["Tag1", "Tag2", "Tag3"],
ext_infos=None,
last_change=datetime.date(2018, 6, 3),
note=None,
favorite=0
)
b1 = Book(mdb, **b1_data)
b2_data = dict(
id=None,
title_eng="Same",
title_foreign="Different2",
language_id=1,
pages=27,
status_id=1,
my_rating=None,
category=["Manga"],
collection=["Diff collection2"],
groups=["Artistgroup"],
artist=["Diff", "Diff2", "Diff3"],
parody=["Blabla"],
character=["Char1", "Char5", "Char3"],
list=["to-read", "to-download"],
tag=["Tag1", "Tag2", "Tag3"],
ext_infos=None,
last_change=datetime.date(2018, 4, 3),
note=None,
favorite=1
)
b2 = Book(mdb, **b2_data)
changes, change_str = b1.diff(b2)
changes_expected = dict(
title_foreign="Different2",
pages=27,
my_rating=None,
collection=({"Diff collection2"}, {"Diff collection1"}),
artist=({"Diff", "Diff3"}, {"Diff1"}),
character=({"Char5"}, {"Char2"}),
last_change=datetime.date(2018, 4, 3),
favorite=1
)
assert changes == changes_expected
def test_add_rem_assoc(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
b = mdb.get_book(5)
tag_before = b.tag.copy()
tag_change = ["Test1", "Test2", "Blabla"]
with mdb.db_con:
b._add_associated_column_values("tag", tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 5""").fetchone()
assert tag[0].split(";")[-3:] == tag_change
with mdb.db_con:
b._remove_associated_column_values("tag", tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 5""").fetchone()
assert tag[0].split(";") == tag_before
def test_static_db_methods(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
tag_before = "Large Breasts;Nakadashi;Blowjob;Threesome;Bikini;Group Sex;Swimsuit".split(";")
tag_change = ["Test1", "Test2", "Blabla"]
Book.add_assoc_col_on_book_id(mdb, 13, "tag", tag_change, tag_before)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 13""").fetchone()
assert tag[0].split(";")[-3:] == tag_change
Book.remove_assoc_col_on_book_id(mdb, 13, "tag", tag_change, tag_before + tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 13""").fetchone()
assert tag[0].split(";") == tag_before
b = mdb.get_book(16)
tag_before = ("Blowjob;Ahegao;Megane;Happy Sex;Threesome;Group Sex;"
"Layer Cake;Selfcest".split(";"))
tag_change = ["Test3", "Test4", "Blablabla"]
Book.add_assoc_col_on_book_id(mdb, 16, "tag", tag_change, tag_before)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 16""").fetchone()
assert tag[0].split(";")[-3:] == tag_change
assert b.tag[-3:] == tag_change
Book.remove_assoc_col_on_book_id(mdb, 16, "tag", tag_change, tag_before + tag_change)
tag = db_con.execute("""
SELECT group_concat(Tag.name, ';')
FROM Books, BookTag bt, Tag
WHERE Books.id = bt.book_id
AND Tag.id = bt.tag_id
AND Books.id = 16""").fetchone()
assert tag[0].split(";") == tag_before
assert b.tag == tag_before
Book.set_favorite_id(mdb, 2, 1)
fav = db_con.execute("SELECT favorite FROM Books WHERE id = 2").fetchone()
assert 1 == fav[0]
b = mdb.get_book(7)
Book.set_favorite_id(mdb, 7, 1)
fav = db_con.execute("SELECT favorite FROM Books WHERE id = 7").fetchone()
assert 1 == fav[0]
assert b.favorite == 1
Book.rate_book_id(mdb, 3, 3.5)
rat = db_con.execute("SELECT my_rating FROM Books WHERE id = 3").fetchone()
assert 3.5 == rat[0]
b = mdb.get_book(8)
Book.rate_book_id(mdb, 8, 4.25)
rat = db_con.execute("SELECT my_rating FROM Books WHERE id = 8").fetchone()
assert 4.25 == rat[0]
assert b.my_rating == 4.25
def test_remove_book(monkeypatch, setup_mdb_dir):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
import shutil
os.makedirs(os.path.join(tmpdir, "thumbs"))
cover_path = os.path.join(tmpdir, "thumbs", "16")
shutil.copyfile(os.path.join(tmpdir, os.pardir, "book_test_files", "16"), cover_path)
db_con = memdb
b = mdb.get_book(16)
b.remove()
assert b._in_db is False
with pytest.raises(KeyError):
mdb.id_map[b.key]
b_row = db_con.execute("SELECT id FROM Books WHERE id = 16").fetchall()
assert not b_row
ei_rows = db_con.execute("SELECT id FROM ExternalInfo WHERE id IN (16, 18)").fetchall()
assert not ei_rows
assert not os.path.exists(cover_path)
def test_remove_extinfo(monkeypatch, setup_mdb_dir, caplog):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
b = mdb.get_book(16)
caplog.clear()
assert b.remove_ext_info(99) is None
assert caplog.record_tuples == [
("manga_db.manga", logging.ERROR, "No external info with id 99 found!")
]
assert b.remove_ext_info(18) == "https://www.tsumino.com/entry/43454"
assert len(b.ext_infos) == 1
assert b.ext_infos[0].id == 16
assert b.remove_ext_info(16)
assert not b.ext_infos
caplog.clear()
assert b.remove_ext_info(4939) is None
assert caplog.record_tuples == [
("manga_db.manga", logging.WARNING, "No external infos on book with id 16 or not"
" fetched from DB yet!")
]
def test_save_book(monkeypatch, setup_mdb_dir, caplog):
tmpdir = setup_mdb_dir
os.chdir(tmpdir)
mdb_file = os.path.join(TESTS_DIR, "all_test_files", "manga_db.sqlite.sql")
memdb = load_db_from_sql_file(mdb_file, ":memory:", True)
monkeypatch.setattr("manga_db.manga_db.MangaDB._load_or_create_sql_db",
lambda x, y, z: (memdb, None))
mdb = MangaDB(tmpdir, mdb_file)
db_con = memdb
# _add
ei_data = dict(
id=None,
book_id=None,
url="http://test1.com",
id_onpage='1111',
imported_from=1,
upload_date=datetime.date(2018, 4, 13),
uploader="Uploader",
censor_id=1,
rating=4.19,
ratings=165,
favorites=300,
downloaded=None,
last_update=None,
outdated=None,
)
b1_data = dict(
id=None,
title_eng="Add1",
title_foreign="Foreign1",
language_id=1,
pages=25,
chapter_status="Vol. 2 Ch. 14",
read_status=13,
status_id=1,
my_rating=None,
category=["Manga"],
collection=None,
groups=["Artistgroup"],
artist=["Diff1", "Diff2"],
parody=["Blabla"],
character=["Char1", "Char2", "Char3"],
list=["to-read", "to-download"],
tag=["Tag1", "Tag2", "Tag3"],
ext_infos=None,
last_change=datetime.date(2018, 6, 3),
note=None,
favorite=None,
cover_timestamp=None,
nsfw=1
)
b1 = Book(mdb, **b1_data)
# since we later check that cover_timestamp gets saved as 0.0 if None
b1_data['cover_timestamp'] = 0.0
ei1 = ExternalInfo(mdb, b1, **ei_data)
ei2 = ExternalInfo(mdb, b1, **ei_data)
# will outdate extinfo 8
ei2.id_onpage = '43506'
b1.ext_infos = [ei1, ei2]
assert b1._in_db is False
bid, outdated = b1.save()
assert bid == 18
assert b1.id == 18
# in_db + id_map, committed reset
assert b1._in_db is True
assert mdb.id_map[b1.key] is b1
assert not b1._committed_state
book_info_db = all_book_info(db_con, 18, include_id=True)
assert len(book_info_db) == 2
# fav set correctly
assert book_info_db[0]["favorite"] == 0
assert b1.favorite == 0
compare_cols_row_book_data(b1, book_info_db[0], b1_data, special={"favorite": 0})
# outdated, list of ext info ids that outdated others
assert outdated == [20]
# extinfo saved
eis = db_con.execute("SELECT id, book_id, id_onpage FROM ExternalInfo "
"WHERE id > 18").fetchall()
assert len(eis) == 2
assert eis[0]["book_id"] == 18
assert eis[1]["book_id"] == 18
assert eis[0]["id_onpage"] == '1111'
assert eis[1]["id_onpage"] == '43506'
# add book with new lang
b2 = Book(mdb, title_eng="Test2", favorite=1, pages=11, status_id=1, nsfw=0)
b2.language = "Krababbl"
bid, _ = b2.save()
assert bid == 19
assert b2.id == 19
# /2 since we have double indirection id->name name->id
expected_lang_id = len(LANG_IDS) / 2 + 1
assert b2.language_id == expected_lang_id
lang = db_con.execute("SELECT id FROM Languages WHERE name = 'Krababbl'").fetchall()
assert lang
assert lang[0][0] == expected_lang_id
brow = db_con.execute("SELECT title_eng, favorite FROM Books WHERE id = 19").fetchone()
assert brow[0] == "Test2"
assert brow["favorite"] == 1
assert b2.favorite == 1
assert b2._in_db is True
assert not b2._committed_state
assert mdb.id_map[b2.key] is b2
# _update
bu1 = Book(mdb, id=None, title_eng="Kangofu-san ni Kintama Sakusei Saremashita",
title_foreign="看護婦さんにキンタマ搾精されました", in_db=False)
bu1.in_db = True
# test not updating when block_update kwarg is true
caplog.clear()
assert bu1.save(block_update=True) == (None, None)
assert caplog.record_tuples == [
("manga_db.manga", logging.DEBUG,
f"Book was found in DB(id 15) but saving was blocked due to "
"block_update option!")
]
bu2 = mdb.get_book(11)
    # don't do anything if there are no changes
caplog.clear()
assert not bu2._committed_state
assert bu2.save() == (11, None)
assert caplog.record_tuples == [
("manga_db.manga", logging.DEBUG, "No changes to save for book with id 11")
]
assert not bu2._committed_state
before = bu2.export_for_db()
# empty assoc list to None
before.update({col: getattr(bu2, col) if getattr(bu2, col) else None
for col in bu2.ASSOCIATED_COLUMNS})
bu2.language = "adlalad"
change = {
"title_eng": "Altered",
"language_id": 3,
"my_rating": 4.75,
"favorite": 1,
# removed and added
"tag": ("Large Breasts;Test33;Nakadashi;Ahegao;Gender Bender;Dark Skin;Elf;Body Swap"
";Bondage;Filming;Test Tag".split(";")),
# added
"artist": ["Taniguchi-san", "Newartist"],
# same
"category": ["Manga"],
# none added
"character": ["Char111", "Char222"]
}
bu2.update_from_dict(change)
before.update(change)
bid, _ = bu2.save()
book_info_db = all_book_info(db_con, 11, include_id=True)
compare_cols_row_book_data(bu2, book_info_db, before,
special={"last_change": datetime.date.today()})
# committed reset
assert not bu2._committed_state
# last_change
assert bu2.last_change == datetime.date.today()
assert book_info_db["last_change"] == datetime.date.today()
bu3 = mdb.get_book(7)
assert not bu3._committed_state
before = bu3.export_for_db()
# empty assoc list to None
before.update({col: getattr(bu3, col) if getattr(bu3, col) else None
for col in bu3.ASSOCIATED_COLUMNS})
change = {
"title_foreign": "ForeignAltered",
"pages": 13,
"note": "Note blabla",
# set None
"tag": None,
# set None
"artist": None,
# changed
"category": ["Manga"],
# none added
"collection": ["Col1", "Col2"],
"groups": ["Grp1", "Grp2", "Senpenbankashiki"]
}
bu3.update_from_dict(change)
before.update(change)
bid, _ = bu3.save()
book_info_db = all_book_info(db_con, 7, include_id=True)
compare_cols_row_book_data(bu3, book_info_db, before,
special={"last_change": datetime.date.today()})
# committed reset
assert not bu3._committed_state
# last_change
assert bu3.last_change == datetime.date.today()
assert book_info_db["last_change"] == datetime.date.today()
assoc_concat = {
"tag": "tags", "artist": "artists", "category": "categories", "character": "characters",
"collection": "collections", "groups": "groups", "list": "lists", "parody": "parodies"
}
def compare_cols_row_book_data(book, row, data, special=None):
if special is None:
special = {}
for col in Book.COLUMNS:
row_val = row[col]
data_val = data[col]
if col in special:
# specific values that are incorrect in data
assert row_val == special[col]
assert getattr(book, col) == special[col]
elif data_val is None:
# use is comparison for None
assert row_val is None
assert getattr(book, col) is None
else:
assert row_val == data_val
assert getattr(book, col) == data_val
for col in Book.ASSOCIATED_COLUMNS:
if col == "ext_infos":
continue
# look up plural of col to get name of concat assoc col
col_assoc_concat = assoc_concat[col]
row_val = row[col_assoc_concat]
if row_val is not None:
# row_val is concatted values
# need sorted to compare (or use set)
row_val = sorted(row_val.split(";")) if ";" in row_val else [row_val]
# need sorted to compare (or use set)
data_val = sorted(data[col]) if data[col] else None
book_val = getattr(book, col)
book_val = sorted(book_val) if book_val else book_val
if col in special:
# specific values that are incorrect in data
assert row_val == special[col]
assert book_val == special[col]
elif data_val is None:
            # assoc col doesn't return None, only an empty trackable
assert row_val is None
assert book_val == []
else:
assert row_val == data_val
assert book_val == data_val
| true | true |
79055b433e8de7ec996a07b3b57b7d4a49623c67 | 40,771 | py | Python | utils/analyzer/exploded-graph-rewriter.py | Alan-love/clang | aa231e4be75ac4759c236b755c57876f76e3cf05 | [ "Apache-2.0" ] | 3,102 | 2015-01-04T02:28:35.000Z | 2022-03-30T12:53:41.000Z | utils/analyzer/exploded-graph-rewriter.py | Alan-love/clang | aa231e4be75ac4759c236b755c57876f76e3cf05 | [ "Apache-2.0" ] | 31 | 2015-01-27T20:39:41.000Z | 2020-04-23T16:24:20.000Z | utils/analyzer/exploded-graph-rewriter.py | Alan-love/clang | aa231e4be75ac4759c236b755c57876f76e3cf05 | [ "Apache-2.0" ] | 1,868 | 2015-01-03T04:27:11.000Z | 2022-03-25T13:37:35.000Z |
#!/usr/bin/env python
#
#===- exploded-graph-rewriter.py - ExplodedGraph dump tool -----*- python -*--#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===-----------------------------------------------------------------------===#
from __future__ import print_function
import argparse
import collections
import difflib
import json
import logging
import os
import re
#===-----------------------------------------------------------------------===#
# These data structures represent a deserialized ExplodedGraph.
#===-----------------------------------------------------------------------===#
# A helper function for finding the difference between two dictionaries.
def diff_dicts(curr, prev):
removed = [k for k in prev if k not in curr or curr[k] != prev[k]]
added = [k for k in curr if k not in prev or curr[k] != prev[k]]
return (removed, added)
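# Illustrative example (not part of the original tool): for
#   curr = {'x': 1, 'y': 2} and prev = {'x': 1, 'y': 3}
# diff_dicts(curr, prev) returns (['y'], ['y']) -- key 'y' is reported both
# as removed (its old value 3 is gone) and as added (its new value is 2).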
# Represents any program state trait that is a dictionary of key-value pairs.
class GenericMap(object):
def __init__(self, items):
self.generic_map = collections.OrderedDict(items)
def diff(self, prev):
return diff_dicts(self.generic_map, prev.generic_map)
def is_different(self, prev):
removed, added = self.diff(prev)
return len(removed) != 0 or len(added) != 0
# A deserialized source location.
class SourceLocation(object):
def __init__(self, json_loc):
super(SourceLocation, self).__init__()
logging.debug('json: %s' % json_loc)
self.line = json_loc['line']
self.col = json_loc['column']
self.filename = os.path.basename(json_loc['file']) \
if 'file' in json_loc else '(main file)'
self.spelling = SourceLocation(json_loc['spelling']) \
if 'spelling' in json_loc else None
def is_macro(self):
return self.spelling is not None
# A deserialized program point.
class ProgramPoint(object):
def __init__(self, json_pp):
super(ProgramPoint, self).__init__()
self.kind = json_pp['kind']
self.tag = json_pp['tag']
self.node_id = json_pp['node_id']
self.is_sink = bool(json_pp['is_sink'])
self.has_report = bool(json_pp['has_report'])
if self.kind == 'Edge':
self.src_id = json_pp['src_id']
self.dst_id = json_pp['dst_id']
elif self.kind == 'Statement':
logging.debug(json_pp)
self.stmt_kind = json_pp['stmt_kind']
self.cast_kind = json_pp['cast_kind'] \
if 'cast_kind' in json_pp else None
self.stmt_point_kind = json_pp['stmt_point_kind']
self.stmt_id = json_pp['stmt_id']
self.pointer = json_pp['pointer']
self.pretty = json_pp['pretty']
self.loc = SourceLocation(json_pp['location']) \
if json_pp['location'] is not None else None
elif self.kind == 'BlockEntrance':
self.block_id = json_pp['block_id']
# A single expression acting as a key in a deserialized Environment.
class EnvironmentBindingKey(object):
def __init__(self, json_ek):
super(EnvironmentBindingKey, self).__init__()
# CXXCtorInitializer is not a Stmt!
self.stmt_id = json_ek['stmt_id'] if 'stmt_id' in json_ek \
else json_ek['init_id']
self.pretty = json_ek['pretty']
self.kind = json_ek['kind'] if 'kind' in json_ek else None
def _key(self):
return self.stmt_id
def __eq__(self, other):
return self._key() == other._key()
def __hash__(self):
return hash(self._key())
# Deserialized description of a location context.
class LocationContext(object):
def __init__(self, json_frame):
super(LocationContext, self).__init__()
self.lctx_id = json_frame['lctx_id']
self.caption = json_frame['location_context']
self.decl = json_frame['calling']
self.loc = SourceLocation(json_frame['location']) \
if json_frame['location'] is not None else None
def _key(self):
return self.lctx_id
def __eq__(self, other):
return self._key() == other._key()
def __hash__(self):
return hash(self._key())
# A group of deserialized Environment bindings that correspond to a specific
# location context.
class EnvironmentFrame(object):
def __init__(self, json_frame):
super(EnvironmentFrame, self).__init__()
self.location_context = LocationContext(json_frame)
self.bindings = collections.OrderedDict(
[(EnvironmentBindingKey(b),
b['value']) for b in json_frame['items']]
if json_frame['items'] is not None else [])
def diff_bindings(self, prev):
return diff_dicts(self.bindings, prev.bindings)
def is_different(self, prev):
removed, added = self.diff_bindings(prev)
return len(removed) != 0 or len(added) != 0
# A deserialized Environment. This class can also hold other entities that
# are similar to Environment, such as Objects Under Construction.
class GenericEnvironment(object):
def __init__(self, json_e):
super(GenericEnvironment, self).__init__()
self.frames = [EnvironmentFrame(f) for f in json_e]
def diff_frames(self, prev):
# TODO: It's difficult to display a good diff when frame numbers shift.
if len(self.frames) != len(prev.frames):
return None
updated = []
for i in range(len(self.frames)):
f = self.frames[i]
prev_f = prev.frames[i]
if f.location_context == prev_f.location_context:
if f.is_different(prev_f):
updated.append(i)
else:
# We have the whole frame replaced with another frame.
# TODO: Produce a nice diff.
return None
# TODO: Add support for added/removed.
return updated
def is_different(self, prev):
updated = self.diff_frames(prev)
return updated is None or len(updated) > 0
# A single binding key in a deserialized RegionStore cluster.
class StoreBindingKey(object):
def __init__(self, json_sk):
super(StoreBindingKey, self).__init__()
self.kind = json_sk['kind']
self.offset = json_sk['offset']
def _key(self):
return (self.kind, self.offset)
def __eq__(self, other):
return self._key() == other._key()
def __hash__(self):
return hash(self._key())
# A single cluster of the deserialized RegionStore.
class StoreCluster(object):
def __init__(self, json_sc):
super(StoreCluster, self).__init__()
self.base_region = json_sc['cluster']
self.bindings = collections.OrderedDict(
[(StoreBindingKey(b), b['value']) for b in json_sc['items']])
def diff_bindings(self, prev):
return diff_dicts(self.bindings, prev.bindings)
def is_different(self, prev):
removed, added = self.diff_bindings(prev)
return len(removed) != 0 or len(added) != 0
# A deserialized RegionStore.
class Store(object):
def __init__(self, json_s):
super(Store, self).__init__()
self.ptr = json_s['pointer']
self.clusters = collections.OrderedDict(
[(c['pointer'], StoreCluster(c)) for c in json_s['items']])
def diff_clusters(self, prev):
removed = [k for k in prev.clusters if k not in self.clusters]
added = [k for k in self.clusters if k not in prev.clusters]
updated = [k for k in prev.clusters if k in self.clusters
and prev.clusters[k].is_different(self.clusters[k])]
return (removed, added, updated)
def is_different(self, prev):
removed, added, updated = self.diff_clusters(prev)
return len(removed) != 0 or len(added) != 0 or len(updated) != 0
# Deserialized messages from a single checker in a single program state.
# Basically a list of raw strings.
class CheckerLines(object):
def __init__(self, json_lines):
super(CheckerLines, self).__init__()
self.lines = json_lines
def diff_lines(self, prev):
lines = difflib.ndiff(prev.lines, self.lines)
return [l.strip() for l in lines
if l.startswith('+') or l.startswith('-')]
def is_different(self, prev):
return len(self.diff_lines(prev)) > 0
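# Illustrative example (values are assumptions, not from a real dump): with
#   prev.lines = ['a', 'b'] and self.lines = ['a', 'c'],
# difflib.ndiff yields '  a', '- b', '+ c', so diff_lines(prev) returns
# ['- b', '+ c'] and is_different(prev) is True.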
# Deserialized messages of all checkers, separated by checker.
class CheckerMessages(object):
def __init__(self, json_m):
super(CheckerMessages, self).__init__()
self.items = collections.OrderedDict(
[(m['checker'], CheckerLines(m['messages'])) for m in json_m])
def diff_messages(self, prev):
removed = [k for k in prev.items if k not in self.items]
added = [k for k in self.items if k not in prev.items]
updated = [k for k in prev.items if k in self.items
and prev.items[k].is_different(self.items[k])]
return (removed, added, updated)
def is_different(self, prev):
removed, added, updated = self.diff_messages(prev)
return len(removed) != 0 or len(added) != 0 or len(updated) != 0
# A deserialized program state.
class ProgramState(object):
def __init__(self, state_id, json_ps):
super(ProgramState, self).__init__()
logging.debug('Adding ProgramState ' + str(state_id))
if json_ps is None:
json_ps = {
'store': None,
'environment': None,
'constraints': None,
'dynamic_types': None,
'constructing_objects': None,
'checker_messages': None
}
self.state_id = state_id
self.store = Store(json_ps['store']) \
if json_ps['store'] is not None else None
self.environment = \
GenericEnvironment(json_ps['environment']['items']) \
if json_ps['environment'] is not None else None
self.constraints = GenericMap([
(c['symbol'], c['range']) for c in json_ps['constraints']
]) if json_ps['constraints'] is not None else None
self.dynamic_types = GenericMap([
(t['region'], '%s%s' % (t['dyn_type'],
' (or a sub-class)'
if t['sub_classable'] else ''))
for t in json_ps['dynamic_types']]) \
if json_ps['dynamic_types'] is not None else None
self.constructing_objects = \
GenericEnvironment(json_ps['constructing_objects']) \
if json_ps['constructing_objects'] is not None else None
self.checker_messages = CheckerMessages(json_ps['checker_messages']) \
if json_ps['checker_messages'] is not None else None
# A deserialized exploded graph node. Has a default constructor because it
# may be referenced as part of an edge before its contents are deserialized,
# and at that point we already need room for predecessors and successors.
class ExplodedNode(object):
def __init__(self):
super(ExplodedNode, self).__init__()
self.predecessors = []
self.successors = []
def construct(self, node_id, json_node):
logging.debug('Adding ' + node_id)
self.ptr = node_id[4:]
self.points = [ProgramPoint(p) for p in json_node['program_points']]
self.node_id = self.points[-1].node_id
self.state = ProgramState(json_node['state_id'],
json_node['program_state']
                                  if json_node['program_state'] is not None else None)
assert self.node_name() == node_id
def node_name(self):
return 'Node' + self.ptr
# A deserialized ExplodedGraph. Constructed by consuming a .dot file
# line-by-line.
class ExplodedGraph(object):
# Parse .dot files with regular expressions.
node_re = re.compile(
'^(Node0x[0-9a-f]*) \\[shape=record,.*label="{(.*)\\\\l}"\\];$')
edge_re = re.compile(
'^(Node0x[0-9a-f]*) -> (Node0x[0-9a-f]*);$')
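    # Illustrative shapes of the lines these regexes are meant to match
    # (inferred from the patterns above, not copied from a real dump):
    #   Node0x7f3a2c -> Node0x7f3b10;                          (edge_re)
    #   Node0x7f3a2c [shape=record,label="{<json payload>\l}"];  (node_re, schematically)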
def __init__(self):
super(ExplodedGraph, self).__init__()
self.nodes = collections.defaultdict(ExplodedNode)
self.root_id = None
self.incomplete_line = ''
def add_raw_line(self, raw_line):
if raw_line.startswith('//'):
return
# Allow line breaks by waiting for ';'. This is not valid in
# a .dot file, but it is useful for writing tests.
if len(raw_line) > 0 and raw_line[-1] != ';':
self.incomplete_line += raw_line
return
raw_line = self.incomplete_line + raw_line
self.incomplete_line = ''
# Apply regexps one by one to see if it's a node or an edge
# and extract contents if necessary.
logging.debug('Line: ' + raw_line)
result = self.edge_re.match(raw_line)
if result is not None:
logging.debug('Classified as edge line.')
pred = result.group(1)
succ = result.group(2)
self.nodes[pred].successors.append(succ)
self.nodes[succ].predecessors.append(pred)
return
result = self.node_re.match(raw_line)
if result is not None:
logging.debug('Classified as node line.')
node_id = result.group(1)
if len(self.nodes) == 0:
self.root_id = node_id
# Note: when writing tests you don't need to escape everything,
# even though in a valid dot file everything is escaped.
node_label = result.group(2).replace('\\l', '') \
.replace(' ', '') \
.replace('\\"', '"') \
.replace('\\{', '{') \
.replace('\\}', '}') \
.replace('\\\\', '\\') \
.replace('\\|', '|') \
.replace('\\<', '\\\\<') \
.replace('\\>', '\\\\>') \
.rstrip(',')
logging.debug(node_label)
json_node = json.loads(node_label)
self.nodes[node_id].construct(node_id, json_node)
return
logging.debug('Skipping.')
#===-----------------------------------------------------------------------===#
# Visitors traverse a deserialized ExplodedGraph and do different things
# with every node and edge.
#===-----------------------------------------------------------------------===#
# A visitor that dumps the ExplodedGraph into a DOT file with fancy HTML-based
# syntax highlighting.
class DotDumpVisitor(object):
def __init__(self, do_diffs, dark_mode, gray_mode,
topo_mode, dump_dot_only):
super(DotDumpVisitor, self).__init__()
self._do_diffs = do_diffs
self._dark_mode = dark_mode
self._gray_mode = gray_mode
self._topo_mode = topo_mode
self._dump_dot_only = dump_dot_only
self._output = []
def _dump_raw(self, s):
if self._dump_dot_only:
print(s, end='')
else:
self._output.append(s)
def output(self):
assert not self._dump_dot_only
return ''.join(self._output)
def _dump(self, s):
s = s.replace('&', '&') \
.replace('{', '\\{') \
.replace('}', '\\}') \
.replace('\\<', '<') \
.replace('\\>', '>') \
.replace('\\l', '<br />') \
.replace('|', '\\|')
if self._gray_mode:
s = re.sub(r'<font color="[a-z0-9]*">', '', s)
s = re.sub(r'</font>', '', s)
self._dump_raw(s)
@staticmethod
def _diff_plus_minus(is_added):
if is_added is None:
return ''
if is_added:
return '<font color="forestgreen">+</font>'
return '<font color="red">-</font>'
@staticmethod
def _short_pretty(s):
if s is None:
return None
if len(s) < 20:
return s
left = s.find('{')
right = s.rfind('}')
if left == -1 or right == -1 or left >= right:
return s
candidate = s[0:left + 1] + ' ... ' + s[right:]
if len(candidate) >= len(s):
return s
return candidate
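    # Illustrative example (not from the original source):
    #   _short_pretty('{ int x = foo(); bar(x); baz(x); }')
    # returns '{ ... }': for long pretty-printed statements only the braces
    # and an ellipsis placeholder are kept.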
@staticmethod
def _make_sloc(loc):
if loc is None:
return '<i>Invalid Source Location</i>'
def make_plain_loc(loc):
return '%s:<b>%s</b>:<b>%s</b>' \
% (loc.filename, loc.line, loc.col)
if loc.is_macro():
return '%s <font color="royalblue1">' \
'(<i>spelling at </i> %s)</font>' \
% (make_plain_loc(loc), make_plain_loc(loc.spelling))
return make_plain_loc(loc)
def visit_begin_graph(self, graph):
self._graph = graph
self._dump_raw('digraph "ExplodedGraph" {\n')
if self._dark_mode:
self._dump_raw('bgcolor="gray10";\n')
self._dump_raw('label="";\n')
def visit_program_point(self, p):
if p.kind in ['Edge', 'BlockEntrance', 'BlockExit']:
color = 'gold3'
elif p.kind in ['PreStmtPurgeDeadSymbols',
'PostStmtPurgeDeadSymbols']:
color = 'red'
elif p.kind in ['CallEnter', 'CallExitBegin', 'CallExitEnd']:
color = 'dodgerblue' if self._dark_mode else 'blue'
elif p.kind in ['Statement']:
color = 'cyan4'
else:
color = 'forestgreen'
self._dump('<tr><td align="left">%s.</td>' % p.node_id)
if p.kind == 'Statement':
# This avoids pretty-printing huge statements such as CompoundStmt.
# Such statements show up only at [Pre|Post]StmtPurgeDeadSymbols
skip_pretty = 'PurgeDeadSymbols' in p.stmt_point_kind
stmt_color = 'cyan3'
self._dump('<td align="left" width="0">%s:</td>'
'<td align="left" width="0"><font color="%s">'
'%s</font> </td>'
'<td align="left"><i>S%s</i></td>'
'<td align="left"><font color="%s">%s</font></td>'
'<td align="left">%s</td></tr>'
% (self._make_sloc(p.loc), color,
'%s (%s)' % (p.stmt_kind, p.cast_kind)
if p.cast_kind is not None else p.stmt_kind,
p.stmt_id, stmt_color, p.stmt_point_kind,
self._short_pretty(p.pretty)
if not skip_pretty else ''))
elif p.kind == 'Edge':
self._dump('<td width="0"></td>'
'<td align="left" width="0">'
'<font color="%s">%s</font></td><td align="left">'
'[B%d] -\\> [B%d]</td></tr>'
% (color, 'BlockEdge', p.src_id, p.dst_id))
elif p.kind == 'BlockEntrance':
self._dump('<td width="0"></td>'
'<td align="left" width="0">'
'<font color="%s">%s</font></td>'
'<td align="left">[B%d]</td></tr>'
% (color, p.kind, p.block_id))
else:
# TODO: Print more stuff for other kinds of points.
self._dump('<td width="0"></td>'
'<td align="left" width="0" colspan="2">'
'<font color="%s">%s</font></td></tr>'
% (color, p.kind))
if p.tag is not None:
self._dump('<tr><td width="0"></td><td width="0"></td>'
'<td colspan="3" align="left">'
'<b>Tag: </b> <font color="crimson">'
'%s</font></td></tr>' % p.tag)
if p.has_report:
self._dump('<tr><td width="0"></td><td width="0"></td>'
'<td colspan="3" align="left">'
'<font color="red"><b>Bug Report Attached'
'</b></font></td></tr>')
if p.is_sink:
self._dump('<tr><td width="0"></td><td width="0"></td>'
'<td colspan="3" align="left">'
'<font color="cornflowerblue"><b>Sink Node'
'</b></font></td></tr>')
def visit_environment(self, e, prev_e=None):
self._dump('<table border="0">')
def dump_location_context(lc, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left"><b>%s</b></td>'
'<td align="left" colspan="2">'
'<font color="gray60">%s </font>'
'%s</td></tr>'
% (self._diff_plus_minus(is_added),
lc.caption, lc.decl,
('(%s)' % self._make_sloc(lc.loc))
if lc.loc is not None else ''))
def dump_binding(f, b, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left"><i>S%s</i></td>'
'%s'
'<td align="left">%s</td>'
'<td align="left">%s</td></tr>'
% (self._diff_plus_minus(is_added),
b.stmt_id,
'<td align="left"><font color="%s"><i>'
'%s</i></font></td>' % (
'lavender' if self._dark_mode else 'darkgreen',
('(%s)' % b.kind) if b.kind is not None else ' '
),
self._short_pretty(b.pretty), f.bindings[b]))
frames_updated = e.diff_frames(prev_e) if prev_e is not None else None
if frames_updated:
for i in frames_updated:
f = e.frames[i]
prev_f = prev_e.frames[i]
dump_location_context(f.location_context)
bindings_removed, bindings_added = f.diff_bindings(prev_f)
for b in bindings_removed:
dump_binding(prev_f, b, False)
for b in bindings_added:
dump_binding(f, b, True)
else:
for f in e.frames:
dump_location_context(f.location_context)
for b in f.bindings:
dump_binding(f, b)
self._dump('</table>')
def visit_environment_in_state(self, selector, title, s, prev_s=None):
e = getattr(s, selector)
prev_e = getattr(prev_s, selector) if prev_s is not None else None
if e is None and prev_e is None:
return
self._dump('<hr /><tr><td align="left"><b>%s: </b>' % title)
if e is None:
self._dump('<i> Nothing!</i>')
else:
if prev_e is not None:
if e.is_different(prev_e):
self._dump('</td></tr><tr><td align="left">')
self.visit_environment(e, prev_e)
else:
self._dump('<i> No changes!</i>')
else:
self._dump('</td></tr><tr><td align="left">')
self.visit_environment(e)
self._dump('</td></tr>')
def visit_store(self, s, prev_s=None):
self._dump('<table border="0">')
def dump_binding(s, c, b, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left">%s</td>'
'<td align="left">%s</td>'
'<td align="left">%s</td>'
'<td align="left">%s</td></tr>'
% (self._diff_plus_minus(is_added),
s.clusters[c].base_region, b.offset,
'(<i>Default</i>)' if b.kind == 'Default'
else '',
s.clusters[c].bindings[b]))
if prev_s is not None:
clusters_removed, clusters_added, clusters_updated = \
s.diff_clusters(prev_s)
for c in clusters_removed:
for b in prev_s.clusters[c].bindings:
dump_binding(prev_s, c, b, False)
for c in clusters_updated:
bindings_removed, bindings_added = \
s.clusters[c].diff_bindings(prev_s.clusters[c])
for b in bindings_removed:
dump_binding(prev_s, c, b, False)
for b in bindings_added:
dump_binding(s, c, b, True)
for c in clusters_added:
for b in s.clusters[c].bindings:
dump_binding(s, c, b, True)
else:
for c in s.clusters:
for b in s.clusters[c].bindings:
dump_binding(s, c, b)
self._dump('</table>')
def visit_store_in_state(self, s, prev_s=None):
st = s.store
prev_st = prev_s.store if prev_s is not None else None
if st is None and prev_st is None:
return
self._dump('<hr /><tr><td align="left"><b>Store: </b>')
if st is None:
self._dump('<i> Nothing!</i>')
else:
if self._dark_mode:
self._dump(' <font color="gray30">(%s)</font>' % st.ptr)
else:
self._dump(' <font color="gray">(%s)</font>' % st.ptr)
if prev_st is not None:
if s.store.is_different(prev_st):
self._dump('</td></tr><tr><td align="left">')
self.visit_store(st, prev_st)
else:
self._dump('<i> No changes!</i>')
else:
self._dump('</td></tr><tr><td align="left">')
self.visit_store(st)
self._dump('</td></tr>')
def visit_generic_map(self, m, prev_m=None):
self._dump('<table border="0">')
def dump_pair(m, k, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left">%s</td>'
'<td align="left">%s</td></tr>'
% (self._diff_plus_minus(is_added),
k, m.generic_map[k]))
if prev_m is not None:
removed, added = m.diff(prev_m)
for k in removed:
dump_pair(prev_m, k, False)
for k in added:
dump_pair(m, k, True)
else:
for k in m.generic_map:
dump_pair(m, k, None)
self._dump('</table>')
def visit_generic_map_in_state(self, selector, title, s, prev_s=None):
m = getattr(s, selector)
prev_m = getattr(prev_s, selector) if prev_s is not None else None
if m is None and prev_m is None:
return
self._dump('<hr />')
self._dump('<tr><td align="left">'
'<b>%s: </b>' % title)
if m is None:
self._dump('<i> Nothing!</i>')
else:
if prev_m is not None:
if m.is_different(prev_m):
self._dump('</td></tr><tr><td align="left">')
self.visit_generic_map(m, prev_m)
else:
self._dump('<i> No changes!</i>')
else:
self._dump('</td></tr><tr><td align="left">')
self.visit_generic_map(m)
self._dump('</td></tr>')
def visit_checker_messages(self, m, prev_m=None):
self._dump('<table border="0">')
def dump_line(l, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left">%s</td></tr>'
% (self._diff_plus_minus(is_added), l))
def dump_chk(chk, is_added=None):
dump_line('<i>%s</i>:' % chk, is_added)
if prev_m is not None:
removed, added, updated = m.diff_messages(prev_m)
for chk in removed:
dump_chk(chk, False)
for l in prev_m.items[chk].lines:
dump_line(l, False)
for chk in updated:
dump_chk(chk)
for l in m.items[chk].diff_lines(prev_m.items[chk]):
dump_line(l[1:], l.startswith('+'))
for chk in added:
dump_chk(chk, True)
for l in m.items[chk].lines:
dump_line(l, True)
else:
for chk in m.items:
dump_chk(chk)
for l in m.items[chk].lines:
dump_line(l)
self._dump('</table>')
def visit_checker_messages_in_state(self, s, prev_s=None):
m = s.checker_messages
prev_m = prev_s.checker_messages if prev_s is not None else None
if m is None and prev_m is None:
return
self._dump('<hr />')
self._dump('<tr><td align="left">'
'<b>Checker State: </b>')
if m is None:
self._dump('<i> Nothing!</i>')
else:
if prev_m is not None:
if m.is_different(prev_m):
self._dump('</td></tr><tr><td align="left">')
self.visit_checker_messages(m, prev_m)
else:
self._dump('<i> No changes!</i>')
else:
self._dump('</td></tr><tr><td align="left">')
self.visit_checker_messages(m)
self._dump('</td></tr>')
def visit_state(self, s, prev_s):
self.visit_store_in_state(s, prev_s)
self.visit_environment_in_state('environment', 'Expressions',
s, prev_s)
self.visit_generic_map_in_state('constraints', 'Ranges',
s, prev_s)
self.visit_generic_map_in_state('dynamic_types', 'Dynamic Types',
s, prev_s)
self.visit_environment_in_state('constructing_objects',
'Objects Under Construction',
s, prev_s)
self.visit_checker_messages_in_state(s, prev_s)
def visit_node(self, node):
self._dump('%s [shape=record,'
% (node.node_name()))
if self._dark_mode:
self._dump('color="white",fontcolor="gray80",')
self._dump('label=<<table border="0">')
self._dump('<tr><td bgcolor="%s"><b>State %s</b></td></tr>'
% ("gray20" if self._dark_mode else "gray70",
node.state.state_id
if node.state is not None else 'Unspecified'))
if not self._topo_mode:
self._dump('<tr><td align="left" width="0">')
if len(node.points) > 1:
self._dump('<b>Program points:</b></td></tr>')
else:
self._dump('<b>Program point:</b></td></tr>')
self._dump('<tr><td align="left" width="0">'
'<table border="0" align="left" width="0">')
for p in node.points:
self.visit_program_point(p)
self._dump('</table></td></tr>')
if node.state is not None and not self._topo_mode:
prev_s = None
# Do diffs only when we have a unique predecessor.
# Don't do diffs on the leaf nodes because they're
# the important ones.
if self._do_diffs and len(node.predecessors) == 1 \
and len(node.successors) > 0:
prev_s = self._graph.nodes[node.predecessors[0]].state
self.visit_state(node.state, prev_s)
self._dump_raw('</table>>];\n')
def visit_edge(self, pred, succ):
self._dump_raw('%s -> %s%s;\n' % (
pred.node_name(), succ.node_name(),
' [color="white"]' if self._dark_mode else ''
))
def visit_end_of_graph(self):
self._dump_raw('}\n')
if not self._dump_dot_only:
import sys
import tempfile
def write_temp_file(suffix, data):
fd, filename = tempfile.mkstemp(suffix=suffix)
print('Writing "%s"...' % filename)
with os.fdopen(fd, 'w') as fp:
fp.write(data)
print('Done! Please remember to remove the file.')
return filename
try:
import graphviz
except ImportError:
# The fallback behavior if graphviz is not installed!
print('Python graphviz not found. Please invoke')
print(' $ pip install graphviz')
print('in order to enable automatic conversion to HTML.')
print()
print('You may also convert DOT to SVG manually via')
print(' $ dot -Tsvg input.dot -o output.svg')
print()
write_temp_file('.dot', self.output())
return
svg = graphviz.pipe('dot', 'svg', self.output())
filename = write_temp_file(
'.html', '<html><body bgcolor="%s">%s</body></html>' % (
'#1a1a1a' if self._dark_mode else 'white', svg))
if sys.platform == 'win32':
os.startfile(filename)
elif sys.platform == 'darwin':
os.system('open "%s"' % filename)
else:
os.system('xdg-open "%s"' % filename)
#===-----------------------------------------------------------------------===#
# Explorers know how to traverse the ExplodedGraph in a certain order.
# They would invoke a Visitor on every node or edge they encounter.
#===-----------------------------------------------------------------------===#
# BasicExplorer explores the whole graph in no particular order.
class BasicExplorer(object):
def __init__(self):
super(BasicExplorer, self).__init__()
def explore(self, graph, visitor):
visitor.visit_begin_graph(graph)
for node in sorted(graph.nodes):
logging.debug('Visiting ' + node)
visitor.visit_node(graph.nodes[node])
for succ in sorted(graph.nodes[node].successors):
logging.debug('Visiting edge: %s -> %s ' % (node, succ))
visitor.visit_edge(graph.nodes[node], graph.nodes[succ])
visitor.visit_end_of_graph()
#===-----------------------------------------------------------------------===#
# Trimmers cut out parts of the ExplodedGraph so as to focus on other parts.
# Trimmers can be combined together by applying them sequentially.
#===-----------------------------------------------------------------------===#
# SinglePathTrimmer keeps only a single path - the leftmost path from the root.
# Useful when the trimmed graph is still too large.
class SinglePathTrimmer(object):
def __init__(self):
super(SinglePathTrimmer, self).__init__()
def trim(self, graph):
visited_nodes = set()
node_id = graph.root_id
while True:
visited_nodes.add(node_id)
node = graph.nodes[node_id]
if len(node.successors) > 0:
succ_id = node.successors[0]
succ = graph.nodes[succ_id]
node.successors = [succ_id]
succ.predecessors = [node_id]
if succ_id in visited_nodes:
break
node_id = succ_id
else:
break
graph.nodes = {node_id: graph.nodes[node_id]
for node_id in visited_nodes}
# TargetedTrimmer keeps paths that lead to specific nodes and discards all
# other paths. Useful when you cannot use -trim-egraph (e.g. when debugging
# a crash).
class TargetedTrimmer(object):
def __init__(self, target_nodes):
super(TargetedTrimmer, self).__init__()
self._target_nodes = target_nodes
@staticmethod
def parse_target_node(node, graph):
if node.startswith('0x'):
ret = 'Node' + node
assert ret in graph.nodes
return ret
else:
for other_id in graph.nodes:
other = graph.nodes[other_id]
if other.node_id == int(node):
return other_id
@staticmethod
def parse_target_nodes(target_nodes, graph):
return [TargetedTrimmer.parse_target_node(node, graph)
for node in target_nodes.split(',')]
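    # Illustrative example (node values are hypothetical): both raw pointers
    # and stable IDs are accepted, so
    #   TargetedTrimmer.parse_target_nodes('0x2e1ac40,4', graph)
    # resolves '0x2e1ac40' to 'Node0x2e1ac40' and looks up the node whose
    # stable node_id equals 4.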
def trim(self, graph):
queue = self._target_nodes
visited_nodes = set()
while len(queue) > 0:
node_id = queue.pop()
visited_nodes.add(node_id)
node = graph.nodes[node_id]
for pred_id in node.predecessors:
if pred_id not in visited_nodes:
queue.append(pred_id)
graph.nodes = {node_id: graph.nodes[node_id]
for node_id in visited_nodes}
for node_id in graph.nodes:
node = graph.nodes[node_id]
node.successors = [succ_id for succ_id in node.successors
if succ_id in visited_nodes]
node.predecessors = [succ_id for succ_id in node.predecessors
if succ_id in visited_nodes]
#===-----------------------------------------------------------------------===#
# The entry point to the script.
#===-----------------------------------------------------------------------===#
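# Example invocations (a hedged sketch; the .dot input is assumed to come from
# the analyzer, e.g. via an exploded-graph dump option such as
# -analyzer-dump-egraph, and the flags below are the ones defined in main()):
#   $ python exploded-graph-rewriter.py ExprEngine.dot
#   $ python exploded-graph-rewriter.py ExprEngine.dot -d --dark
#   $ python exploded-graph-rewriter.py ExprEngine.dot --to 0x2e1ac40 -s --dump-dot-only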
def main():
parser = argparse.ArgumentParser(
description='Display and manipulate Exploded Graph dumps.')
parser.add_argument('filename', type=str,
help='the .dot file produced by the Static Analyzer')
parser.add_argument('-v', '--verbose', action='store_const',
dest='loglevel', const=logging.DEBUG,
default=logging.WARNING,
help='enable info prints')
parser.add_argument('-d', '--diff', action='store_const', dest='diff',
const=True, default=False,
help='display differences between states')
parser.add_argument('-t', '--topology', action='store_const',
dest='topology', const=True, default=False,
help='only display program points, omit states')
parser.add_argument('-s', '--single-path', action='store_const',
dest='single_path', const=True, default=False,
help='only display the leftmost path in the graph '
'(useful for trimmed graphs that still '
'branch too much)')
parser.add_argument('--to', type=str, default=None,
help='only display execution paths from the root '
'to the given comma-separated list of nodes '
'identified by a pointer or a stable ID; '
'compatible with --single-path')
parser.add_argument('--dark', action='store_const', dest='dark',
const=True, default=False,
help='dark mode')
parser.add_argument('--gray', action='store_const', dest='gray',
const=True, default=False,
help='black-and-white mode')
parser.add_argument('--dump-dot-only', action='store_const',
dest='dump_dot_only', const=True, default=False,
help='instead of writing an HTML file and immediately '
'displaying it, dump the rewritten dot file '
'to stdout')
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
graph = ExplodedGraph()
with open(args.filename) as fd:
for raw_line in fd:
raw_line = raw_line.strip()
graph.add_raw_line(raw_line)
trimmers = []
if args.to is not None:
trimmers.append(TargetedTrimmer(
TargetedTrimmer.parse_target_nodes(args.to, graph)))
if args.single_path:
trimmers.append(SinglePathTrimmer())
explorer = BasicExplorer()
visitor = DotDumpVisitor(args.diff, args.dark, args.gray, args.topology,
args.dump_dot_only)
for trimmer in trimmers:
trimmer.trim(graph)
explorer.explore(graph, visitor)
if __name__ == '__main__':
main()
| 38.535917 | 79 | 0.522921 |
'<b>%s: </b>' % title)
if m is None:
self._dump('<i> Nothing!</i>')
else:
if prev_m is not None:
if m.is_different(prev_m):
self._dump('</td></tr><tr><td align="left">')
self.visit_generic_map(m, prev_m)
else:
self._dump('<i> No changes!</i>')
else:
self._dump('</td></tr><tr><td align="left">')
self.visit_generic_map(m)
self._dump('</td></tr>')
def visit_checker_messages(self, m, prev_m=None):
self._dump('<table border="0">')
def dump_line(l, is_added=None):
self._dump('<tr><td>%s</td>'
'<td align="left">%s</td></tr>'
% (self._diff_plus_minus(is_added), l))
def dump_chk(chk, is_added=None):
dump_line('<i>%s</i>:' % chk, is_added)
if prev_m is not None:
removed, added, updated = m.diff_messages(prev_m)
for chk in removed:
dump_chk(chk, False)
for l in prev_m.items[chk].lines:
dump_line(l, False)
for chk in updated:
dump_chk(chk)
for l in m.items[chk].diff_lines(prev_m.items[chk]):
dump_line(l[1:], l.startswith('+'))
for chk in added:
dump_chk(chk, True)
for l in m.items[chk].lines:
dump_line(l, True)
else:
for chk in m.items:
dump_chk(chk)
for l in m.items[chk].lines:
dump_line(l)
self._dump('</table>')
def visit_checker_messages_in_state(self, s, prev_s=None):
m = s.checker_messages
prev_m = prev_s.checker_messages if prev_s is not None else None
if m is None and prev_m is None:
return
self._dump('<hr />')
self._dump('<tr><td align="left">'
'<b>Checker State: </b>')
if m is None:
self._dump('<i> Nothing!</i>')
else:
if prev_m is not None:
if m.is_different(prev_m):
self._dump('</td></tr><tr><td align="left">')
self.visit_checker_messages(m, prev_m)
else:
self._dump('<i> No changes!</i>')
else:
self._dump('</td></tr><tr><td align="left">')
self.visit_checker_messages(m)
self._dump('</td></tr>')
def visit_state(self, s, prev_s):
self.visit_store_in_state(s, prev_s)
self.visit_environment_in_state('environment', 'Expressions',
s, prev_s)
self.visit_generic_map_in_state('constraints', 'Ranges',
s, prev_s)
self.visit_generic_map_in_state('dynamic_types', 'Dynamic Types',
s, prev_s)
self.visit_environment_in_state('constructing_objects',
'Objects Under Construction',
s, prev_s)
self.visit_checker_messages_in_state(s, prev_s)
def visit_node(self, node):
self._dump('%s [shape=record,'
% (node.node_name()))
if self._dark_mode:
self._dump('color="white",fontcolor="gray80",')
self._dump('label=<<table border="0">')
self._dump('<tr><td bgcolor="%s"><b>State %s</b></td></tr>'
% ("gray20" if self._dark_mode else "gray70",
node.state.state_id
if node.state is not None else 'Unspecified'))
if not self._topo_mode:
self._dump('<tr><td align="left" width="0">')
if len(node.points) > 1:
self._dump('<b>Program points:</b></td></tr>')
else:
self._dump('<b>Program point:</b></td></tr>')
self._dump('<tr><td align="left" width="0">'
'<table border="0" align="left" width="0">')
for p in node.points:
self.visit_program_point(p)
self._dump('</table></td></tr>')
if node.state is not None and not self._topo_mode:
prev_s = None
# Do diffs only when we have a unique predecessor.
# Don't do diffs on the leaf nodes because they're
# the important ones.
if self._do_diffs and len(node.predecessors) == 1 \
and len(node.successors) > 0:
prev_s = self._graph.nodes[node.predecessors[0]].state
self.visit_state(node.state, prev_s)
self._dump_raw('</table>>];\n')
def visit_edge(self, pred, succ):
self._dump_raw('%s -> %s%s;\n' % (
pred.node_name(), succ.node_name(),
' [color="white"]' if self._dark_mode else ''
))
def visit_end_of_graph(self):
self._dump_raw('}\n')
if not self._dump_dot_only:
import sys
import tempfile
def write_temp_file(suffix, data):
fd, filename = tempfile.mkstemp(suffix=suffix)
print('Writing "%s"...' % filename)
with os.fdopen(fd, 'w') as fp:
fp.write(data)
print('Done! Please remember to remove the file.')
return filename
try:
import graphviz
except ImportError:
# The fallback behavior if graphviz is not installed!
print('Python graphviz not found. Please invoke')
print(' $ pip install graphviz')
print('in order to enable automatic conversion to HTML.')
print()
print('You may also convert DOT to SVG manually via')
print(' $ dot -Tsvg input.dot -o output.svg')
print()
write_temp_file('.dot', self.output())
return
svg = graphviz.pipe('dot', 'svg', self.output())
filename = write_temp_file(
'.html', '<html><body bgcolor="%s">%s</body></html>' % (
                         '#1a1a1a' if self._dark_mode else 'white', svg))
if sys.platform == 'win32':
os.startfile(filename)
elif sys.platform == 'darwin':
os.system('open "%s"' % filename)
else:
os.system('xdg-open "%s"' % filename)
#===-----------------------------------------------------------------------===#
# Explorers know how to traverse the ExplodedGraph in a certain order.
# They would invoke a Visitor on every node or edge they encounter.
#===-----------------------------------------------------------------------===#
# BasicExplorer explores the whole graph in no particular order.
class BasicExplorer(object):
def __init__(self):
super(BasicExplorer, self).__init__()
def explore(self, graph, visitor):
visitor.visit_begin_graph(graph)
for node in sorted(graph.nodes):
logging.debug('Visiting ' + node)
visitor.visit_node(graph.nodes[node])
for succ in sorted(graph.nodes[node].successors):
logging.debug('Visiting edge: %s -> %s ' % (node, succ))
visitor.visit_edge(graph.nodes[node], graph.nodes[succ])
visitor.visit_end_of_graph()
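# A minimal sketch (not part of the original script) of the Visitor contract that
# Explorers drive: any object providing visit_begin_graph/visit_node/visit_edge/
# visit_end_of_graph can be handed to BasicExplorer.  This one only counts nodes.
class NodeCountingVisitor(object):
    def __init__(self):
        self.node_count = 0

    def visit_begin_graph(self, graph):
        self.node_count = 0

    def visit_node(self, node):
        self.node_count += 1

    def visit_edge(self, pred, succ):
        pass

    def visit_end_of_graph(self):
        logging.debug('Visited %d nodes' % self.node_count)
# Usage, assuming `graph` is an already-parsed ExplodedGraph:
#   BasicExplorer().explore(graph, NodeCountingVisitor())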
#===-----------------------------------------------------------------------===#
# Trimmers cut out parts of the ExplodedGraph so that to focus on other parts.
# Trimmers can be combined together by applying them sequentially.
#===-----------------------------------------------------------------------===#
# SinglePathTrimmer keeps only a single path - the leftmost path from the root.
# Useful when the trimmed graph is still too large.
class SinglePathTrimmer(object):
def __init__(self):
super(SinglePathTrimmer, self).__init__()
def trim(self, graph):
visited_nodes = set()
node_id = graph.root_id
while True:
visited_nodes.add(node_id)
node = graph.nodes[node_id]
if len(node.successors) > 0:
succ_id = node.successors[0]
succ = graph.nodes[succ_id]
node.successors = [succ_id]
succ.predecessors = [node_id]
if succ_id in visited_nodes:
break
node_id = succ_id
else:
break
graph.nodes = {node_id: graph.nodes[node_id]
for node_id in visited_nodes}
# TargetedTrimmer keeps paths that lead to specific nodes and discards all
# other paths. Useful when you cannot use -trim-egraph (e.g. when debugging
# a crash).
class TargetedTrimmer(object):
def __init__(self, target_nodes):
super(TargetedTrimmer, self).__init__()
self._target_nodes = target_nodes
@staticmethod
def parse_target_node(node, graph):
if node.startswith('0x'):
ret = 'Node' + node
assert ret in graph.nodes
return ret
else:
for other_id in graph.nodes:
other = graph.nodes[other_id]
if other.node_id == int(node):
return other_id
@staticmethod
def parse_target_nodes(target_nodes, graph):
return [TargetedTrimmer.parse_target_node(node, graph)
for node in target_nodes.split(',')]
def trim(self, graph):
queue = self._target_nodes
visited_nodes = set()
while len(queue) > 0:
node_id = queue.pop()
visited_nodes.add(node_id)
node = graph.nodes[node_id]
for pred_id in node.predecessors:
if pred_id not in visited_nodes:
queue.append(pred_id)
graph.nodes = {node_id: graph.nodes[node_id]
for node_id in visited_nodes}
for node_id in graph.nodes:
node = graph.nodes[node_id]
node.successors = [succ_id for succ_id in node.successors
if succ_id in visited_nodes]
node.predecessors = [succ_id for succ_id in node.predecessors
if succ_id in visited_nodes]
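# Sketch (not part of the original script) of the "trimmers can be combined by
# applying them sequentially" idea above: first keep only the paths that reach the
# target nodes, then collapse the result to a single path.  `raw_targets` is assumed
# to be the same comma-separated node list accepted by the --to option.
def trim_to_single_target_path(graph, raw_targets):
    trimmers = [
        TargetedTrimmer(TargetedTrimmer.parse_target_nodes(raw_targets, graph)),
        SinglePathTrimmer(),
    ]
    for trimmer in trimmers:
        trimmer.trim(graph)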
#===-----------------------------------------------------------------------===#
# The entry point to the script.
#===-----------------------------------------------------------------------===#
def main():
parser = argparse.ArgumentParser(
description='Display and manipulate Exploded Graph dumps.')
parser.add_argument('filename', type=str,
help='the .dot file produced by the Static Analyzer')
parser.add_argument('-v', '--verbose', action='store_const',
dest='loglevel', const=logging.DEBUG,
default=logging.WARNING,
help='enable info prints')
parser.add_argument('-d', '--diff', action='store_const', dest='diff',
const=True, default=False,
help='display differences between states')
parser.add_argument('-t', '--topology', action='store_const',
dest='topology', const=True, default=False,
help='only display program points, omit states')
parser.add_argument('-s', '--single-path', action='store_const',
dest='single_path', const=True, default=False,
help='only display the leftmost path in the graph '
'(useful for trimmed graphs that still '
'branch too much)')
parser.add_argument('--to', type=str, default=None,
help='only display execution paths from the root '
'to the given comma-separated list of nodes '
'identified by a pointer or a stable ID; '
'compatible with --single-path')
parser.add_argument('--dark', action='store_const', dest='dark',
const=True, default=False,
help='dark mode')
parser.add_argument('--gray', action='store_const', dest='gray',
const=True, default=False,
help='black-and-white mode')
parser.add_argument('--dump-dot-only', action='store_const',
dest='dump_dot_only', const=True, default=False,
help='instead of writing an HTML file and immediately '
'displaying it, dump the rewritten dot file '
'to stdout')
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
graph = ExplodedGraph()
with open(args.filename) as fd:
for raw_line in fd:
raw_line = raw_line.strip()
graph.add_raw_line(raw_line)
trimmers = []
if args.to is not None:
trimmers.append(TargetedTrimmer(
TargetedTrimmer.parse_target_nodes(args.to, graph)))
if args.single_path:
trimmers.append(SinglePathTrimmer())
explorer = BasicExplorer()
visitor = DotDumpVisitor(args.diff, args.dark, args.gray, args.topology,
args.dump_dot_only)
for trimmer in trimmers:
trimmer.trim(graph)
explorer.explore(graph, visitor)
if __name__ == '__main__':
main()
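# Example invocations (a sketch; the script and .dot file names are assumed
# placeholders, the flags are the ones defined in main() above):
#   $ python exploded-graph-rewriter.py ExprEngine-123456.dot
#   $ python exploded-graph-rewriter.py ExprEngine-123456.dot -d --dark
#   $ python exploded-graph-rewriter.py ExprEngine-123456.dot --to 0x12345,1 -s --dump-dot-only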
| true
| true
|
79055b89596cab0ff251e02f305cffa5b4924fa6
| 447
|
py
|
Python
|
app/email.py
|
ruthjelimo/Pitch-app
|
c70258bd5dfc99520ed662276ef405137597cb1f
|
[
"MIT"
] | null | null | null |
app/email.py
|
ruthjelimo/Pitch-app
|
c70258bd5dfc99520ed662276ef405137597cb1f
|
[
"MIT"
] | null | null | null |
app/email.py
|
ruthjelimo/Pitch-app
|
c70258bd5dfc99520ed662276ef405137597cb1f
|
[
"MIT"
] | null | null | null |
from flask_mail import Message
from flask import render_template
from . import mail
subject_pref = 'Pitches'
sender_email = "ruthjmimo@gmail.com"
def mail_message(subject,template,to,**kwargs):
sender_email = 'ruthjmimo@gmail.com'
email = Message(subject, sender=sender_email, recipients=[to])
email.body= render_template(template + ".txt",**kwargs)
email.html = render_template(template + ".html",**kwargs)
mail.send(email)
| 31.928571
| 66
| 0.740492
|
from flask_mail import Message
from flask import render_template
from . import mail
subject_pref = 'Pitches'
sender_email = "ruthjmimo@gmail.com"
def mail_message(subject,template,to,**kwargs):
sender_email = 'ruthjmimo@gmail.com'
email = Message(subject, sender=sender_email, recipients=[to])
email.body= render_template(template + ".txt",**kwargs)
email.html = render_template(template + ".html",**kwargs)
mail.send(email)
| true
| true
|
79055c9c0f17de54a96f1a37db4804abe6a4c55b
| 12,783
|
py
|
Python
|
examples/contrib/cifar10/main.py
|
nzare/ignite
|
002b595daa8a8345286c5e096c33e278948686a7
|
[
"BSD-3-Clause"
] | 1
|
2020-08-29T16:49:36.000Z
|
2020-08-29T16:49:36.000Z
|
examples/contrib/cifar10/main.py
|
M3L6H/ignite
|
002b595daa8a8345286c5e096c33e278948686a7
|
[
"BSD-3-Clause"
] | 5
|
2020-08-29T16:49:48.000Z
|
2020-08-29T17:05:54.000Z
|
examples/contrib/cifar10/main.py
|
M3L6H/ignite
|
002b595daa8a8345286c5e096c33e278948686a7
|
[
"BSD-3-Clause"
] | 1
|
2020-10-15T06:21:01.000Z
|
2020-10-15T06:21:01.000Z
|
from pathlib import Path
from datetime import datetime
import fire
import torch
import torch.nn as nn
import torch.optim as optim
import ignite
import ignite.distributed as idist
from ignite.engine import Events, Engine, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from ignite.handlers import Checkpoint, DiskSaver
from ignite.utils import manual_seed, setup_logger
from ignite.contrib.engines import common
from ignite.contrib.handlers import PiecewiseLinear
import utils
def training(local_rank, config):
rank = idist.get_rank()
manual_seed(config["seed"] + rank)
device = idist.device()
logger = setup_logger(name="CIFAR10-Training", distributed_rank=local_rank)
log_basic_info(logger, config)
output_path = config["output_path"]
if rank == 0:
if config["stop_iteration"] is None:
now = datetime.now().strftime("%Y%m%d-%H%M%S")
else:
now = "stop-on-{}".format(config["stop_iteration"])
folder_name = "{}_backend-{}-{}_{}".format(config["model"], idist.backend(), idist.get_world_size(), now)
output_path = Path(output_path) / folder_name
if not output_path.exists():
output_path.mkdir(parents=True)
config["output_path"] = output_path.as_posix()
logger.info("Output path: {}".format(config["output_path"]))
if "cuda" in device.type:
config["cuda device name"] = torch.cuda.get_device_name(local_rank)
if config["with_trains"]:
from trains import Task
task = Task.init("CIFAR10-Training", task_name=output_path.stem)
task.connect_configuration(config)
# Log hyper parameters
hyper_params = [
"model",
"batch_size",
"momentum",
"weight_decay",
"num_epochs",
"learning_rate",
"num_warmup_epochs",
]
task.connect({k: config[k] for k in hyper_params})
# Setup dataflow, model, optimizer, criterion
train_loader, test_loader = get_dataflow(config)
config["num_iters_per_epoch"] = len(train_loader)
model, optimizer, criterion, lr_scheduler = initialize(config)
# Create trainer for current task
trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger)
# Let's now setup evaluator engine to perform model's validation and compute metrics
metrics = {
"accuracy": Accuracy(),
"loss": Loss(criterion),
}
    # We define two evaluators as they won't have exactly similar roles:
# - `evaluator` will save the best model based on validation score
evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
def run_validation(engine):
epoch = trainer.state.epoch
state = train_evaluator.run(train_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
state = evaluator.run(test_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config["validate_every"]) | Events.COMPLETED, run_validation)
if rank == 0:
# Setup TensorBoard logging on trainer and evaluators. Logged values are:
# - Training metrics, e.g. running average loss values
# - Learning rate
# - Evaluation train/test metrics
evaluators = {"training": train_evaluator, "test": evaluator}
tb_logger = common.setup_tb_logging(output_path, trainer, optimizer, evaluators=evaluators)
# Store 3 best models by validation accuracy:
common.gen_save_best_models_by_val_score(
save_handler=get_save_handler(config),
evaluator=evaluator,
models={"model": model},
metric_name="accuracy",
n_saved=3,
trainer=trainer,
tag="test",
)
# In order to check training resuming we can stop training on a given iteration
if config["stop_iteration"] is not None:
@trainer.on(Events.ITERATION_STARTED(once=config["stop_iteration"]))
def _():
logger.info("Stop training on {} iteration".format(trainer.state.iteration))
trainer.terminate()
try:
trainer.run(train_loader, max_epochs=config["num_epochs"])
except Exception as e:
import traceback
print(traceback.format_exc())
if rank == 0:
tb_logger.close()
def run(
seed=543,
data_path="/tmp/cifar10",
output_path="/tmp/output-cifar10/",
model="resnet18",
batch_size=512,
momentum=0.9,
weight_decay=1e-4,
num_workers=12,
num_epochs=24,
learning_rate=0.4,
num_warmup_epochs=4,
validate_every=3,
checkpoint_every=200,
backend=None,
resume_from=None,
log_every_iters=15,
nproc_per_node=None,
stop_iteration=None,
with_trains=False,
**spawn_kwargs
):
"""Main entry to train an model on CIFAR10 dataset.
Args:
seed (int): random state seed to set. Default, 543.
data_path (str): input dataset path. Default, "/tmp/cifar10".
output_path (str): output path. Default, "/tmp/output-cifar10".
model (str): model name (from torchvision) to setup model to train. Default, "resnet18".
batch_size (int): total batch size. Default, 512.
momentum (float): optimizer's momentum. Default, 0.9.
weight_decay (float): weight decay. Default, 1e-4.
num_workers (int): number of workers in the data loader. Default, 12.
num_epochs (int): number of epochs to train the model. Default, 24.
learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.
num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.
validate_every (int): run model's validation every ``validate_every`` epochs. Default, 3.
checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 200.
backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu",
"gloo" etc. Default, None.
nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful,
when main python process is spawning training as child processes.
resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.
log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.
It can be 0 to disable it. Default, 15.
stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint.
with_trains (bool): if True, experiment Trains logger is setup. Default, False.
**spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes
"""
# catch all local parameters
config = locals()
config.update(config["spawn_kwargs"])
del config["spawn_kwargs"]
spawn_kwargs["nproc_per_node"] = nproc_per_node
with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
parallel.run(training, config)
def get_dataflow(config):
# - Get train/test datasets
if idist.get_rank() > 0:
        # Ensure that only rank 0 downloads the dataset
idist.barrier()
train_dataset, test_dataset = utils.get_train_test_datasets(config["data_path"])
if idist.get_rank() == 0:
        # Rank 0 has downloaded the dataset; release the other ranks
idist.barrier()
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
train_loader = idist.auto_dataloader(
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
)
test_loader = idist.auto_dataloader(
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
)
return train_loader, test_loader
def initialize(config):
model = utils.get_model(config["model"])
# Adapt model for distributed settings if configured
model = idist.auto_model(model)
optimizer = optim.SGD(
model.parameters(),
lr=config["learning_rate"],
momentum=config["momentum"],
weight_decay=config["weight_decay"],
nesterov=True,
)
optimizer = idist.auto_optim(optimizer)
criterion = nn.CrossEntropyLoss().to(idist.device())
le = config["num_iters_per_epoch"]
milestones_values = [
(0, 0.0),
(le * config["num_warmup_epochs"], config["learning_rate"]),
(le * config["num_epochs"], 0.0),
]
lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)
return model, optimizer, criterion, lr_scheduler
def log_metrics(logger, epoch, elapsed, tag, metrics):
logger.info(
"\nEpoch {} - elapsed: {} - {} metrics:\n {}".format(
epoch, elapsed, tag, "\n".join(["\t{}: {}".format(k, v) for k, v in metrics.items()])
)
)
def log_basic_info(logger, config):
logger.info("Train {} on CIFAR10".format(config["model"]))
logger.info("- PyTorch version: {}".format(torch.__version__))
logger.info("- Ignite version: {}".format(ignite.__version__))
logger.info("\n")
logger.info("Configuration:")
for key, value in config.items():
logger.info("\t{}: {}".format(key, value))
logger.info("\n")
if idist.get_world_size() > 1:
logger.info("\nDistributed setting:")
logger.info("\tbackend: {}".format(idist.backend()))
logger.info("\tworld size: {}".format(idist.get_world_size()))
logger.info("\n")
def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):
device = idist.device()
# Setup Ignite trainer:
# - let's define training step
# - add other common handlers:
# - TerminateOnNan,
# - handler to setup learning rate scheduling,
# - ModelCheckpoint
# - RunningAverage` on `train_step` output
# - Two progress bars on epochs and optionally on iterations
def train_step(engine, batch):
x, y = batch[0], batch[1]
if x.device != device:
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
model.train()
# Supervised part
y_pred = model(x)
loss = criterion(y_pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
        # This can be helpful for XLA to avoid a performance slowdown from fetching loss.item() every iteration
if config["log_every_iters"] > 0 and (engine.state.iteration - 1) % config["log_every_iters"] == 0:
batch_loss = loss.item()
engine.state.saved_batch_loss = batch_loss
else:
batch_loss = engine.state.saved_batch_loss
return {
"batch loss": batch_loss,
}
trainer = Engine(train_step)
trainer.state.saved_batch_loss = -1.0
trainer.state_dict_user_keys.append("saved_batch_loss")
trainer.logger = logger
to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
metric_names = [
"batch loss",
]
common.setup_common_training_handlers(
trainer=trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=config["checkpoint_every"],
save_handler=get_save_handler(config),
lr_scheduler=lr_scheduler,
output_names=metric_names if config["log_every_iters"] > 0 else None,
with_pbars=False,
clear_cuda_cache=False,
)
resume_from = config["resume_from"]
if resume_from is not None:
checkpoint_fp = Path(resume_from)
assert checkpoint_fp.exists(), "Checkpoint '{}' is not found".format(checkpoint_fp.as_posix())
logger.info("Resume from a checkpoint: {}".format(checkpoint_fp.as_posix()))
checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)
return trainer
def get_save_handler(config):
if config["with_trains"]:
from ignite.contrib.handlers.trains_logger import TrainsSaver
return TrainsSaver(dirname=config["output_path"])
return DiskSaver(config["output_path"], require_empty=False)
if __name__ == "__main__":
fire.Fire({"run": run})
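# Usage sketch: `fire` exposes `run` as a sub-command, so single-process training and
# idist-spawned distributed training look like the lines below (the torch.distributed
# launch command is an assumption based on common usage, not taken from this file):
#   $ python main.py run
#   $ python main.py run --backend="nccl" --nproc_per_node=2
#   $ python -u -m torch.distributed.launch --nproc_per_node=2 --use_env main.py run --backend="nccl"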
| 35.907303
| 120
| 0.662833
|
from pathlib import Path
from datetime import datetime
import fire
import torch
import torch.nn as nn
import torch.optim as optim
import ignite
import ignite.distributed as idist
from ignite.engine import Events, Engine, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from ignite.handlers import Checkpoint, DiskSaver
from ignite.utils import manual_seed, setup_logger
from ignite.contrib.engines import common
from ignite.contrib.handlers import PiecewiseLinear
import utils
def training(local_rank, config):
rank = idist.get_rank()
manual_seed(config["seed"] + rank)
device = idist.device()
logger = setup_logger(name="CIFAR10-Training", distributed_rank=local_rank)
log_basic_info(logger, config)
output_path = config["output_path"]
if rank == 0:
if config["stop_iteration"] is None:
now = datetime.now().strftime("%Y%m%d-%H%M%S")
else:
now = "stop-on-{}".format(config["stop_iteration"])
folder_name = "{}_backend-{}-{}_{}".format(config["model"], idist.backend(), idist.get_world_size(), now)
output_path = Path(output_path) / folder_name
if not output_path.exists():
output_path.mkdir(parents=True)
config["output_path"] = output_path.as_posix()
logger.info("Output path: {}".format(config["output_path"]))
if "cuda" in device.type:
config["cuda device name"] = torch.cuda.get_device_name(local_rank)
if config["with_trains"]:
from trains import Task
task = Task.init("CIFAR10-Training", task_name=output_path.stem)
task.connect_configuration(config)
hyper_params = [
"model",
"batch_size",
"momentum",
"weight_decay",
"num_epochs",
"learning_rate",
"num_warmup_epochs",
]
task.connect({k: config[k] for k in hyper_params})
train_loader, test_loader = get_dataflow(config)
config["num_iters_per_epoch"] = len(train_loader)
model, optimizer, criterion, lr_scheduler = initialize(config)
trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger)
metrics = {
"accuracy": Accuracy(),
"loss": Loss(criterion),
}
evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
def run_validation(engine):
epoch = trainer.state.epoch
state = train_evaluator.run(train_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
state = evaluator.run(test_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config["validate_every"]) | Events.COMPLETED, run_validation)
if rank == 0:
evaluators = {"training": train_evaluator, "test": evaluator}
tb_logger = common.setup_tb_logging(output_path, trainer, optimizer, evaluators=evaluators)
common.gen_save_best_models_by_val_score(
save_handler=get_save_handler(config),
evaluator=evaluator,
models={"model": model},
metric_name="accuracy",
n_saved=3,
trainer=trainer,
tag="test",
)
if config["stop_iteration"] is not None:
@trainer.on(Events.ITERATION_STARTED(once=config["stop_iteration"]))
def _():
logger.info("Stop training on {} iteration".format(trainer.state.iteration))
trainer.terminate()
try:
trainer.run(train_loader, max_epochs=config["num_epochs"])
except Exception as e:
import traceback
print(traceback.format_exc())
if rank == 0:
tb_logger.close()
def run(
seed=543,
data_path="/tmp/cifar10",
output_path="/tmp/output-cifar10/",
model="resnet18",
batch_size=512,
momentum=0.9,
weight_decay=1e-4,
num_workers=12,
num_epochs=24,
learning_rate=0.4,
num_warmup_epochs=4,
validate_every=3,
checkpoint_every=200,
backend=None,
resume_from=None,
log_every_iters=15,
nproc_per_node=None,
stop_iteration=None,
with_trains=False,
**spawn_kwargs
):
config = locals()
config.update(config["spawn_kwargs"])
del config["spawn_kwargs"]
spawn_kwargs["nproc_per_node"] = nproc_per_node
with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
parallel.run(training, config)
def get_dataflow(config):
if idist.get_rank() > 0:
idist.barrier()
train_dataset, test_dataset = utils.get_train_test_datasets(config["data_path"])
if idist.get_rank() == 0:
idist.barrier()
train_loader = idist.auto_dataloader(
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
)
test_loader = idist.auto_dataloader(
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
)
return train_loader, test_loader
def initialize(config):
model = utils.get_model(config["model"])
model = idist.auto_model(model)
optimizer = optim.SGD(
model.parameters(),
lr=config["learning_rate"],
momentum=config["momentum"],
weight_decay=config["weight_decay"],
nesterov=True,
)
optimizer = idist.auto_optim(optimizer)
criterion = nn.CrossEntropyLoss().to(idist.device())
le = config["num_iters_per_epoch"]
milestones_values = [
(0, 0.0),
(le * config["num_warmup_epochs"], config["learning_rate"]),
(le * config["num_epochs"], 0.0),
]
lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)
return model, optimizer, criterion, lr_scheduler
def log_metrics(logger, epoch, elapsed, tag, metrics):
logger.info(
"\nEpoch {} - elapsed: {} - {} metrics:\n {}".format(
epoch, elapsed, tag, "\n".join(["\t{}: {}".format(k, v) for k, v in metrics.items()])
)
)
def log_basic_info(logger, config):
logger.info("Train {} on CIFAR10".format(config["model"]))
logger.info("- PyTorch version: {}".format(torch.__version__))
logger.info("- Ignite version: {}".format(ignite.__version__))
logger.info("\n")
logger.info("Configuration:")
for key, value in config.items():
logger.info("\t{}: {}".format(key, value))
logger.info("\n")
if idist.get_world_size() > 1:
logger.info("\nDistributed setting:")
logger.info("\tbackend: {}".format(idist.backend()))
logger.info("\tworld size: {}".format(idist.get_world_size()))
logger.info("\n")
def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):
device = idist.device()
# - add other common handlers:
# - TerminateOnNan,
# - handler to setup learning rate scheduling,
# - ModelCheckpoint
# - RunningAverage` on `train_step` output
# - Two progress bars on epochs and optionally on iterations
def train_step(engine, batch):
x, y = batch[0], batch[1]
if x.device != device:
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
model.train()
# Supervised part
y_pred = model(x)
loss = criterion(y_pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
        # This can be helpful for XLA to avoid a performance slowdown from fetching loss.item() every iteration
if config["log_every_iters"] > 0 and (engine.state.iteration - 1) % config["log_every_iters"] == 0:
batch_loss = loss.item()
engine.state.saved_batch_loss = batch_loss
else:
batch_loss = engine.state.saved_batch_loss
return {
"batch loss": batch_loss,
}
trainer = Engine(train_step)
trainer.state.saved_batch_loss = -1.0
trainer.state_dict_user_keys.append("saved_batch_loss")
trainer.logger = logger
to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
metric_names = [
"batch loss",
]
common.setup_common_training_handlers(
trainer=trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=config["checkpoint_every"],
save_handler=get_save_handler(config),
lr_scheduler=lr_scheduler,
output_names=metric_names if config["log_every_iters"] > 0 else None,
with_pbars=False,
clear_cuda_cache=False,
)
resume_from = config["resume_from"]
if resume_from is not None:
checkpoint_fp = Path(resume_from)
assert checkpoint_fp.exists(), "Checkpoint '{}' is not found".format(checkpoint_fp.as_posix())
logger.info("Resume from a checkpoint: {}".format(checkpoint_fp.as_posix()))
checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)
return trainer
def get_save_handler(config):
if config["with_trains"]:
from ignite.contrib.handlers.trains_logger import TrainsSaver
return TrainsSaver(dirname=config["output_path"])
return DiskSaver(config["output_path"], require_empty=False)
if __name__ == "__main__":
fire.Fire({"run": run})
| true
| true
|
79055cc17652bc0b8bd56d2d115eac0ea2c2e3af
| 60
|
py
|
Python
|
q2_gamma/visualizers/__init__.py
|
ebolyen/q2-gamma
|
e2edd64dd9c1dfafe2c92ffedbab333df732c0d3
|
[
"BSD-3-Clause"
] | 1
|
2018-03-29T16:21:18.000Z
|
2018-03-29T16:21:18.000Z
|
q2_gamma/visualizers/__init__.py
|
ebolyen/q2-gamma
|
e2edd64dd9c1dfafe2c92ffedbab333df732c0d3
|
[
"BSD-3-Clause"
] | null | null | null |
q2_gamma/visualizers/__init__.py
|
ebolyen/q2-gamma
|
e2edd64dd9c1dfafe2c92ffedbab333df732c0d3
|
[
"BSD-3-Clause"
] | 1
|
2019-06-06T20:03:07.000Z
|
2019-06-06T20:03:07.000Z
|
from .plot import plot
from .simple_plot import simple_plot
| 20
| 36
| 0.833333
|
from .plot import plot
from .simple_plot import simple_plot
| true
| true
|
79055d29831e0a256347de5b208f925dca717bb1
| 6,994
|
py
|
Python
|
flask_app/utilities/DataInterfaces/SqlInterface.py
|
cliftbar/flask_app_template
|
e006b68adde6c86f8ee8c262eb0a51d7aac760b5
|
[
"MIT"
] | null | null | null |
flask_app/utilities/DataInterfaces/SqlInterface.py
|
cliftbar/flask_app_template
|
e006b68adde6c86f8ee8c262eb0a51d7aac760b5
|
[
"MIT"
] | null | null | null |
flask_app/utilities/DataInterfaces/SqlInterface.py
|
cliftbar/flask_app_template
|
e006b68adde6c86f8ee8c262eb0a51d7aac760b5
|
[
"MIT"
] | null | null | null |
import logging
import time
from abc import abstractmethod
from enum import Enum
from typing import Dict, Callable, Any, List
from schema import Schema
import sqlalchemy
from sqlalchemy.engine import ResultProxy
from sqlalchemy.orm import Query
from sqlalchemy.schema import Table
from sqlalchemy.engine.base import Engine
from sqlalchemy.engine.base import Connection
from contextlib import contextmanager
from flask_app.utilities.DataInterfaces import ConnectionOptions
logger = logging.getLogger(__name__)
class SqlDialect(Enum):
postgres = "postgres"
sqlite = "sqlite"
@classmethod
def has_value(cls, value) -> bool:
return any(value == item.value for item in cls)
# TODO: Connection Factory
class SqlConnectionOptions(ConnectionOptions):
@staticmethod
def factory(sql_connection_type: SqlDialect, **kwargs) -> 'SqlConnectionOptions':
"""
        Keyword-argument signatures for the factory method, by dialect:
        Postgres: (host: str, port: int, username: str, password: str,
                   database_name: str, timeout: int = None)
"""
return SqlConnectionFactories.get_factory(sql_connection_type)(**kwargs)
def __init__(self, dialect: SqlDialect, host: str, port: int, username: str, password: str, database_name: str
, timeout_s: int = None):
self.dialect: SqlDialect = dialect
self.host: str = host
self.port: int = port
self.username: str = username
self.password: str = password
self.database_name: str = database_name
self.timeout: int = timeout_s
self.connection_string: str = None
@classmethod
@abstractmethod
def schema_validate_arguments(cls, schema: Schema, parameters: Dict) -> Dict:
pass
class PostgresConnectionOptions(SqlConnectionOptions):
_factory_schema: Schema = Schema(
{
'host': str,
'port': int,
'username': str,
'password': str,
'database_name': str
# 'timeout': int
},
ignore_extra_keys=True
)
def __init__(self,
dialect: SqlDialect,
host: str,
port: int,
username: str,
password: str,
database_name: str,
timeout_s: int = None) -> None:
super().__init__(dialect, host, port, username, password, database_name, timeout_s)
self.connection_string = \
f"postgresql://{self.username}:{self.password}@{self.host}:{self.port}/{self.database_name}"
@classmethod
def schema_validate_arguments(cls, schema: Schema, parameters: Dict) -> Dict:
return schema.validate(parameters)
@classmethod
def factory(cls, **kwargs) -> 'PostgresConnectionOptions':
parameters: Dict = cls.schema_validate_arguments(cls._factory_schema, kwargs)
return cls(SqlDialect.postgres, parameters['host'], parameters['port']
, parameters['username'], parameters['password'], parameters['database_name']
, parameters.get('timeout'))
class SqlConnectionFactories:
_factories: Dict[SqlDialect, Callable] = {
SqlDialect.postgres: PostgresConnectionOptions.factory
# , SqlDialects.sqlite: SqliteConnectionOptions.factory
}
@classmethod
def get_factory(cls, factory_type: SqlDialect) -> Callable:
return cls._factories[factory_type]
class SqlInterface:
"""SQL methods to tack onto SQL based librarians"""
def __init__(self, connection_options: SqlConnectionOptions) -> None:
self.connection_options = connection_options
self.sql_engine: Engine = None
self.sql_metadata: sqlalchemy.MetaData = None
def update(self, schema: str, table: str, column: str, value: Any, sql_connection: Connection) -> None:
raise NotImplementedError
def select(self, schema: str, table: str, sql_connection: Connection) -> List[Dict[str, Any]]:
sql_table: Table = self._get_table_reflection(schema, table)
return self._execute_query(sql_connection, sql_table.select())
def insert(self, schema: str, table: str, values: List[Dict[str, Any]], sql_connection: Connection) -> None:
sql_table: Table = self._get_table_reflection(schema, table)
insert_query = sql_table.insert(values=values)
self._execute_query(sql_connection, insert_query)
def setup_pre_connection(self, connection_options) -> None:
self._build_engine(connection_options)
self._metadata_reflection(self.sql_engine)
def close_connection(self, sql_connection: Connection) -> None:
if sql_connection is not None:
sql_connection.close()
@contextmanager
def managed_connection(self, connection_options: SqlConnectionOptions = None) -> Connection:
if connection_options is None:
connection_options = self.connection_options
self.setup_pre_connection(connection_options)
connection: Connection = None
try:
connection = self.sql_engine.connect()
yield connection
finally:
self.close_connection(connection)
# SQLAlchemy internal methods
def _build_engine(self, connection_options: SqlConnectionOptions) -> None:
self.sql_engine = sqlalchemy.create_engine(connection_options.connection_string)
def _metadata_reflection(self, sql_engine) -> None:
self.sql_metadata = sqlalchemy.MetaData(bind=sql_engine)
def _get_table_reflection(self, schema: str, table: str) -> Table:
return Table(table, self.sql_metadata, schema=schema, autoload=True)
def _validate_write_schema(self, table: Table, values: Dict[str, Any]) -> bool:
table_columns = list(dict(table.columns).keys())
return list(values.keys()) == table_columns
def _parse_result_proxy(self, result) -> List[Dict[str, Any]]:
return list(map(lambda x: dict(x), result))
def _execute_query(self, sql_connection: Connection, sql_query: Query) -> List[Dict[str, Any]]:
start_time: float = time.time()
return_result: List[Dict[str, Any]] = None
try:
result: ResultProxy = sql_connection.execute(sql_query)
if result.returns_rows:
return_result: List[Dict[str, Any]] = self._parse_result_proxy(result)
except Exception as e:
logger.info(f"SQL query failed: {e}")
logger.debug(f"SQL query {str(sql_query.compile())}, connection: {sql_connection.engine} failed with exception {e}")
raise e
finally:
end_time: float = time.time()
query_time: float = end_time - start_time
logger.info(f"SQL execute time: {query_time}")
logger.debug(
f"SQL execute time: {query_time}, query: {str(sql_query.compile())}, connection: {sql_connection.engine}"
)
return return_result
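# Usage sketch (not part of the module above): build Postgres connection options via
# the factory and run a SELECT inside a managed connection.  The host, credentials and
# the "public"."users" table are illustrative assumptions.
def _example_select_users() -> List[Dict[str, Any]]:
    options: SqlConnectionOptions = SqlConnectionOptions.factory(
        SqlDialect.postgres,
        host="localhost",
        port=5432,
        username="app_user",
        password="app_password",
        database_name="app_db",
    )
    interface: SqlInterface = SqlInterface(options)
    with interface.managed_connection() as connection:
        return interface.select("public", "users", connection)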
| 37.602151
| 128
| 0.666285
|
import logging
import time
from abc import abstractmethod
from enum import Enum
from typing import Dict, Callable, Any, List
from schema import Schema
import sqlalchemy
from sqlalchemy.engine import ResultProxy
from sqlalchemy.orm import Query
from sqlalchemy.schema import Table
from sqlalchemy.engine.base import Engine
from sqlalchemy.engine.base import Connection
from contextlib import contextmanager
from flask_app.utilities.DataInterfaces import ConnectionOptions
logger = logging.getLogger(__name__)
class SqlDialect(Enum):
postgres = "postgres"
sqlite = "sqlite"
@classmethod
def has_value(cls, value) -> bool:
return any(value == item.value for item in cls)
class SqlConnectionOptions(ConnectionOptions):
@staticmethod
def factory(sql_connection_type: SqlDialect, **kwargs) -> 'SqlConnectionOptions':
return SqlConnectionFactories.get_factory(sql_connection_type)(**kwargs)
def __init__(self, dialect: SqlDialect, host: str, port: int, username: str, password: str, database_name: str
, timeout_s: int = None):
self.dialect: SqlDialect = dialect
self.host: str = host
self.port: int = port
self.username: str = username
self.password: str = password
self.database_name: str = database_name
self.timeout: int = timeout_s
self.connection_string: str = None
@classmethod
@abstractmethod
def schema_validate_arguments(cls, schema: Schema, parameters: Dict) -> Dict:
pass
class PostgresConnectionOptions(SqlConnectionOptions):
_factory_schema: Schema = Schema(
{
'host': str,
'port': int,
'username': str,
'password': str,
'database_name': str
},
ignore_extra_keys=True
)
def __init__(self,
dialect: SqlDialect,
host: str,
port: int,
username: str,
password: str,
database_name: str,
timeout_s: int = None) -> None:
super().__init__(dialect, host, port, username, password, database_name, timeout_s)
self.connection_string = \
f"postgresql://{self.username}:{self.password}@{self.host}:{self.port}/{self.database_name}"
@classmethod
def schema_validate_arguments(cls, schema: Schema, parameters: Dict) -> Dict:
return schema.validate(parameters)
@classmethod
def factory(cls, **kwargs) -> 'PostgresConnectionOptions':
parameters: Dict = cls.schema_validate_arguments(cls._factory_schema, kwargs)
return cls(SqlDialect.postgres, parameters['host'], parameters['port']
, parameters['username'], parameters['password'], parameters['database_name']
, parameters.get('timeout'))
class SqlConnectionFactories:
_factories: Dict[SqlDialect, Callable] = {
SqlDialect.postgres: PostgresConnectionOptions.factory
}
@classmethod
def get_factory(cls, factory_type: SqlDialect) -> Callable:
return cls._factories[factory_type]
class SqlInterface:
def __init__(self, connection_options: SqlConnectionOptions) -> None:
self.connection_options = connection_options
self.sql_engine: Engine = None
self.sql_metadata: sqlalchemy.MetaData = None
def update(self, schema: str, table: str, column: str, value: Any, sql_connection: Connection) -> None:
raise NotImplementedError
def select(self, schema: str, table: str, sql_connection: Connection) -> List[Dict[str, Any]]:
sql_table: Table = self._get_table_reflection(schema, table)
return self._execute_query(sql_connection, sql_table.select())
def insert(self, schema: str, table: str, values: List[Dict[str, Any]], sql_connection: Connection) -> None:
sql_table: Table = self._get_table_reflection(schema, table)
insert_query = sql_table.insert(values=values)
self._execute_query(sql_connection, insert_query)
def setup_pre_connection(self, connection_options) -> None:
self._build_engine(connection_options)
self._metadata_reflection(self.sql_engine)
def close_connection(self, sql_connection: Connection) -> None:
if sql_connection is not None:
sql_connection.close()
@contextmanager
def managed_connection(self, connection_options: SqlConnectionOptions = None) -> Connection:
if connection_options is None:
connection_options = self.connection_options
self.setup_pre_connection(connection_options)
connection: Connection = None
try:
connection = self.sql_engine.connect()
yield connection
finally:
self.close_connection(connection)
def _build_engine(self, connection_options: SqlConnectionOptions) -> None:
self.sql_engine = sqlalchemy.create_engine(connection_options.connection_string)
def _metadata_reflection(self, sql_engine) -> None:
self.sql_metadata = sqlalchemy.MetaData(bind=sql_engine)
def _get_table_reflection(self, schema: str, table: str) -> Table:
return Table(table, self.sql_metadata, schema=schema, autoload=True)
def _validate_write_schema(self, table: Table, values: Dict[str, Any]) -> bool:
table_columns = list(dict(table.columns).keys())
return list(values.keys()) == table_columns
def _parse_result_proxy(self, result) -> List[Dict[str, Any]]:
return list(map(lambda x: dict(x), result))
def _execute_query(self, sql_connection: Connection, sql_query: Query) -> List[Dict[str, Any]]:
start_time: float = time.time()
return_result: List[Dict[str, Any]] = None
try:
result: ResultProxy = sql_connection.execute(sql_query)
if result.returns_rows:
return_result: List[Dict[str, Any]] = self._parse_result_proxy(result)
except Exception as e:
logger.info(f"SQL query failed: {e}")
logger.debug(f"SQL query {str(sql_query.compile())}, connection: {sql_connection.engine} failed with exception {e}")
raise e
finally:
end_time: float = time.time()
query_time: float = end_time - start_time
logger.info(f"SQL execute time: {query_time}")
logger.debug(
f"SQL execute time: {query_time}, query: {str(sql_query.compile())}, connection: {sql_connection.engine}"
)
return return_result
| true
| true
|
79055df8ef88f547225e676f853952c2337d2462
| 1,454
|
py
|
Python
|
cyber_sdk/util/json.py
|
SaveTheAles/cyber.py
|
69211d4f9e861e3c64990725a4a483d2cbee0be1
|
[
"MIT"
] | null | null | null |
cyber_sdk/util/json.py
|
SaveTheAles/cyber.py
|
69211d4f9e861e3c64990725a4a483d2cbee0be1
|
[
"MIT"
] | null | null | null |
cyber_sdk/util/json.py
|
SaveTheAles/cyber.py
|
69211d4f9e861e3c64990725a4a483d2cbee0be1
|
[
"MIT"
] | null | null | null |
import copy
import json
from abc import ABC
from datetime import datetime
from typing import Any
from cyber_sdk.util.converter import to_isoformat
def to_data(x: Any) -> Any:
if "to_data" in dir(x):
return x.to_data()
if isinstance(x, list):
return [to_data(g) for g in x]
if isinstance(x, dict):
return dict_to_data(x)
return x
def to_amino(x: Any) -> Any:
if "to_amino" in dir(x):
return x.to_amino()
if isinstance(x, list):
return [to_data(g) for g in x]
if isinstance(x, dict):
return dict_to_amino(x)
if isinstance(x, int):
return str(x)
if isinstance(x, datetime):
return to_isoformat(x)
def dict_to_amino(d: dict):
return {key: to_amino(d[key]) for key in d}
def dict_to_data(d: dict) -> dict:
"""Recursively calls to_data on dict"""
return {key: to_data(d[key]) for key in d}
class JSONSerializable(ABC):
def to_data(self) -> Any:
"""Converts the object to its JSON-serializable Python data representation."""
return dict_to_data(copy.deepcopy(self.__dict__))
def to_json(self) -> str:
"""Marshals the object into a stringified JSON serialization. Keys are first sorted
and the JSON rendered removes all unnecessary whitespace.
Returns:
str: JSON string representation
"""
return json.dumps(self.to_data(), sort_keys=True, separators=(",", ":"))
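# Usage sketch (not part of the module above): a hypothetical value object showing how
# JSONSerializable combines with to_data()/to_json().
class _ExampleCoin(JSONSerializable):
    def __init__(self, denom: str, amount: int) -> None:
        self.denom = denom
        self.amount = amount

# _ExampleCoin("boot", 1000).to_json() -> '{"amount":1000,"denom":"boot"}'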
| 26.436364
| 91
| 0.644429
|
import copy
import json
from abc import ABC
from datetime import datetime
from typing import Any
from cyber_sdk.util.converter import to_isoformat
def to_data(x: Any) -> Any:
if "to_data" in dir(x):
return x.to_data()
if isinstance(x, list):
return [to_data(g) for g in x]
if isinstance(x, dict):
return dict_to_data(x)
return x
def to_amino(x: Any) -> Any:
if "to_amino" in dir(x):
return x.to_amino()
if isinstance(x, list):
return [to_data(g) for g in x]
if isinstance(x, dict):
return dict_to_amino(x)
if isinstance(x, int):
return str(x)
if isinstance(x, datetime):
return to_isoformat(x)
def dict_to_amino(d: dict):
return {key: to_amino(d[key]) for key in d}
def dict_to_data(d: dict) -> dict:
return {key: to_data(d[key]) for key in d}
class JSONSerializable(ABC):
def to_data(self) -> Any:
return dict_to_data(copy.deepcopy(self.__dict__))
def to_json(self) -> str:
return json.dumps(self.to_data(), sort_keys=True, separators=(",", ":"))
| true
| true
|
79055e5ce17be169760d14eb8f18661e58b1245d
| 89,128
|
py
|
Python
|
akshare/__init__.py
|
LoveRabbit007/akshare
|
725acc58b63fa2ce203f671a18c63713a3621c3b
|
[
"MIT"
] | null | null | null |
akshare/__init__.py
|
LoveRabbit007/akshare
|
725acc58b63fa2ce203f671a18c63713a3621c3b
|
[
"MIT"
] | null | null | null |
akshare/__init__.py
|
LoveRabbit007/akshare
|
725acc58b63fa2ce203f671a18c63713a3621c3b
|
[
"MIT"
] | null | null | null |
"""
AKShare is an open-source financial data interface library based on Python. It provides tools that take
price/volume data, fundamental data and alternative data for stocks, futures, options, funds, bonds,
foreign exchange and other financial products all the way from data collection and data cleaning to data
download, serving the data-acquisition needs of financial data scientists and data science enthusiasts.
Its distinguishing feature is that the data obtained through AKShare is raw data published by trustworthy
sources, so data scientists can post-process that raw data and draw scientific conclusions.
"""
"""
Version update history:
0.1.13
Updated all interfaces based on fushare
0.1.14
Updated the requirements.txt file
0.1.15
Automatically install the required packages
0.1.16
Fixed the naming of some functions
0.1.17
Updated automatic version-number management
0.1.18
Updated the documentation
0.1.19
Fixed a request error in cot.py
0.1.20
Fixed __doc__
0.1.21
Fixed __doc__
0.1.22
Fixed naming and plotting
0.1.23
Fixed the error-handling mechanism
0.1.24
Added interfaces to fetch all index data from 奇货可查
0.1.25
Fixed the qhck interface
0.1.26
Fixed code formatting issues
0.1.27
Fixed documentation formatting issues
0.1.28
Updated the documentation
0.1.29
Standardized the documentation format
0.1.30
Standardized the documentation format
0.1.31
Standardized the function docstrings in cot.py
0.1.32
update futures_basis.py
0.1.33
Added three 奇货可查 data interfaces:
get_qhkc_index, get_qhkc_index_trend, get_qhkc_index_profit_loss
See help(get_qhkc_index) for usage
0.1.34
Added three 奇货可查 fund-data interfaces:
get_qhkc_fund_position_change, get_qhkc_fund_bs, get_qhkc_fund_position
See help(get_qhkc_fund_position_change) for usage
0.1.35
Added the 奇货可查 tools interface for overseas price comparison:
get_qhkc_tool_foreign
See help(get_qhkc_tool_foreign) for usage
0.1.36
Added the 奇货可查 tools interface for regional economic data:
get_qhkc_tool_gdp
See help(get_qhkc_tool_gdp) for usage
0.1.37
Added the NAFMII (中国银行间市场交易商协会) bond interface
get_bond_bank
See help(get_bond_bank) for usage
0.1.38
Fixes
0.1.39
Modularization
0.1.40
Unified interface parameters: start --> start_day; end --> end_day
0.1.41
Updated the DCE styrene (EB) contract
0.1.42
Updated the SHFE/INE TSR 20 rubber (NR) contract
Updated the SHFE stainless steel (SS) contract
0.1.43
Fixed function calls in example --> test.py
0.1.44
Fixed function calls in example --> daily_run.py
0.1.45
Fixed the interface-call notes and acknowledgements in akdocker.md
0.1.46
Fixed image display in akdocker.md
0.1.47
Added an explanation section to akdocker.md
0.1.48
Updated the DCE japonica rice (RR) contract
0.1.49
Added the 智道智科 private fund index data interface
See help(get_zdzk_fund_index) for usage
0.1.50
Updated the akdocker.md file
0.1.51
Updated the official documentation: https://akshare.readthedocs.io
0.1.52
Added the quant strategy and quant platform sections
0.1.53
Added the futures contract list and glossary
0.1.54
Revised the motivation section of AKShare; added managed futures strategy indexes
0.1.55
Added the 99期货 (http://www.99qh.com/d/store.aspx) inventory data interface
0.1.56
Fixed the 99期货 (http://www.99qh.com/d/store.aspx) inventory data interface
0.1.57
Updated the data interfaces in the md files
0.1.58
Updated the data interfaces in the md files
0.1.59
Updated the data interfaces in the md files
0.1.60
Updated the acknowledgements; declared the packages referenced and borrowed from
0.1.61
Updated the documentation
0.1.62
Added the Investing.com (英为财情) global stock index and index futures data interface
https://cn.investing.com/indices/
0.1.63
Updated the documentation: acknowledged Investing.com (英为财情)
0.1.64
get_country_index now returns data indexed by date
0.1.65
get_country_index now returns open, close, high and low as floats
0.1.66
Added the Investing.com (英为财情) global bond data interface
https://cn.investing.com/rates-bonds/
New get_country_bond returns open, close, high and low as floats
0.1.67
Updated the documentation: notes on private fund index data
0.1.68
Updated the documentation: added images to the private fund index notes
0.1.69
Updated the documentation: adjusted the bond section format
0.1.70
Updated the DCE and CZCE commodity option data interfaces
0.1.71
Updated the DCE, CZCE and SHFE commodity option data interfaces
0.1.72
Revised the DCE, CZCE and SHFE commodity option data interfaces
Added function docstrings
Updated the options section of the documentation
0.1.73
Updated the options section of the documentation
0.1.74
Adjusted the documentation formatting
0.1.75
Added FX interfaces and interbank bond market quote interfaces
0.1.76
Updated the documentation
0.1.77
Added the global futures historical data query interface
0.1.78
Added global macro data - China macro data
Annual and monthly CPI data, annual M2 data
0.1.79
Updated the documentation
0.1.80
Refreshed the documentation
0.1.81
Added global macro data - China macro data
China annual PPI data
China annual PMI data
China annual GDP data
China annual Caixin PMI data
China foreign exchange reserves data
China electric power and energy data
China annual non-manufacturing PMI data
RMB central parity rate
0.1.82
Added global macro data - US macro data
Fed interest rate decision report
US non-farm payrolls report
US unemployment rate report
US EIA crude oil inventory report
0.1.83
Updated the documentation
0.1.84
Added global macro data - US macro data
US initial jobless claims report
US core PCE price index YoY report
US CPI MoM report
Fed labor market conditions index report
US ADP employment report
US gross domestic product (GDP) report
US crude oil production report
Added global macro data - European macro data
ECB decision report
Added global macro data - institutional macro data
Holdings report of the world's largest gold ETF, SPDR Gold Trust
Holdings report of the world's largest silver ETF, iShares Silver Trust
OPEC report
0.1.85
Added the futures warehouse-receipt validity interface
0.1.86
Updated the documentation
0.1.87
Added the Hexun (和讯财经) corporate social responsibility data interface
0.1.88
Updated the documentation
0.1.89
Updated requirements.txt
0.1.90
Updated setup.py
0.1.91
Added the Hexun (和讯财经) China concept stock quotes and daily history interface
0.1.92
Updated the documentation
0.1.93
Added the 交易法门 arbitrage tool: calendar spread (free spread) data interface
0.1.94
Added the 生意社 commodity and futures spot-futures chart data interface
Added 西本新干线 index data
0.1.95
Added the Sina Finance futures real-time data interface
0.1.96
Fixed the Sina Finance futures real-time interface so the current_price field is real-time
0.1.97
Fixed the Sina Finance futures real-time interface so the current_price and ask_price fields are real-time
0.1.98
Fixed a release error
0.1.99
Automatically install pillow
0.2.1
Added Hong Kong stock intraday quotes and historical data (forward and backward adjustment factors)
0.2.2
Added US stock intraday quotes and historical data (forward adjustment factor)
0.2.3
Added financial options
0.2.4
Added the cryptocurrency quote interface
0.2.5
Added the AKShare interface mind map
0.2.6
Updated the Hong Kong stock interfaces and documentation
0.2.7
Updated the qhkc_web interface comments and documentation
0.2.8
Updated the documentation
0.2.9
Updated A+H share real-time and historical quote data (backward adjusted)
0.2.10
Updated the documentation
0.2.11
Updated the documentation
0.2.12
Added A-share real-time and historical quotes
0.2.13
Unified the stock interface naming
0.2.14
Unified the stock interface naming; removed the get prefix
0.2.15
Added STAR Market real-time and historical quotes
0.2.16
Added CBIRC local-branch administrative penalty data
0.2.17
Updated the documentation
0.2.18
Fixed the field naming of the CBIRC administrative penalty data interface
0.2.19
Added Node.js installation instructions
0.2.20
Added the Realized Library interface
0.2.21
Updated the documentation
0.2.22
Updated the documentation
0.2.23
Updated the CBIRC administrative penalty interface after its anti-scraping upgrade
0.2.24
Added the FF multi-factor model data interface
0.2.25
Updated the documentation
0.2.26
Futures real-time quotes: fixed interface naming, added fields and limited the request rate
0.2.27
Added the Sina overseas futures real-time quote interface
0.2.28
Fixed the import of Sina overseas futures real-time quotes
Updated the documentation
0.2.29
Updated the documentation
0.2.30
Regulation - CBIRC: updated the interface for its changing anti-scraping measures
Fixed the subscription issue of the domestic futures real-time quote interface
0.2.31
Fixed the subscription issue of the domestic financial futures real-time quote interface
0.2.32
Updated the documentation
0.2.33
Updated the documentation: overseas futures
0.2.34
Added the Sina index real-time and historical quote interfaces
0.2.35
Addressed fetching the Sina index and A-share real-time quote lists
0.2.36
Added Tencent Finance A-share tick-by-tick historical data
0.2.37
Added the 金十数据 real-time monitoring interface
0.2.38
Updated the documentation
0.2.39
Updated the documentation table of contents
Added the tutorial series: pandas topics (serialized)
0.2.40
Updated the tutorials section
0.2.41
Updated the documentation
0.2.42
Updated the mind map
0.2.43
Restructured the documentation into modules; split the GitHub README from the online docs
Refactored the private fund index interface
0.2.44
Added the sunrise and sunset module
0.2.45
Added Hebei air quality index data
0.2.46
Updated requirements.txt
0.2.47
Added __init__ files
0.2.48
Added websocket-client
0.2.49
Nanhua Futures: Nanhua commodity index
0.2.50
Fixed the volume display in the Investing.com (英为财情) index section
0.2.51
Silenced some warnings
0.2.52
Improved the error message for missing basis data
0.2.53
Unified Nanhua commodity index history: return index
Added Nanhua commodity index history: price index
Added Nanhua commodity index history: volatility index
0.2.54
Added the numpy dependency
0.2.55
Updated the realized volatility documentation
Unified ff_crr --> article_ff_crr
0.2.56
Added the Economic Policy Uncertainty (EPU) data interface
Updated the documentation
Revised the example notes
0.2.57
Changed the air_hebei interface to return all cities by default
0.2.58
Added the Weibo index
0.2.59
Added 西本新干线 documentation
0.2.60
Added the Baidu index
0.2.61
Fixed the Hebei air quality code
0.2.62
Added the Baidu search index
Added the Baidu news index
Added the Baidu media index
0.2.63
Updated the index legend code
0.2.64
fix pillow>=6.2.0
0.2.65
Added the Google index
0.2.66
Fixed hard-coded URLs for the Nanhua index
0.2.67
Fixed the get_futures_index function for SHFE,
where CU contained cuefp records that broke index construction
0.2.68
Lowered the required Python version
0.2.69
Lowered the required Python version to 3.7.1
0.2.70
Adapted for use with VNPY
0.2.71
交易法门 data interfaces
0.2.72
Shenwan (申万) level-1 industry indexes: real-time
0.2.73
Updated the soda ash futures data interfaces
0.2.74
Added the AQI air quality data interface
0.2.75
Added the Shenwan (申万) level-1 index interface
0.2.76
Unified the 交易法门 login and data-fetch interfaces
0.2.77
Removed redundant functions
0.2.78
Python downgrade
0.2.79
Python downgrade
0.2.80
Python 3.6
0.2.81
html5lib
0.2.82
websockets-8.1
0.2.83
Fixed a date format issue in the weibo_index function
0.2.84
Fixed the baidu_index interface
0.2.85
Temporarily fixed the baidu_index interface
0.2.86
Downgraded lxml
0.2.87
Downgraded lxml
Updated the error handling during installation
0.2.88
Downgraded pypinyin
0.2.89
Nationwide air quality data is now returned as numeric types
0.2.90
Updated the registered warehouse receipt product parameters and error handling
0.2.91
Fortune Global 500 company ranking interface
0.2.92
Updated the China bond market quote data interface
0.2.93
Added the automated test model
0.2.94
Added the private fund manager information disclosure interface
0.2.95
Added AMAC (中国证券投资基金业协会) information disclosure
0.2.96
Fixed the 交易法门 login captcha
Because of data permissions on 交易法门, registration is required before use
0.2.97
Updated the documentation
0.2.98
Added methanol options and PTA options
0.2.99
Updated the FX data interfaces; standardized the formats
0.3.0
Maoyan Movies real-time box office
0.3.1
Updated the documentation
0.3.2
Updated the documentation
0.3.3
Overseas futures quote subscription: unified the field names with the web page
0.3.4
Added energy: carbon emission allowance data
0.3.5
Added cost-of-living data for major world cities
0.3.6
Commodity spot price indexes
0.3.7
Fixed a date issue in the Baidu index
0.3.8
Added China macro data interfaces and documentation
0.3.9
Added China macro leverage ratio data
0.3.10
Revised the financial option data interfaces
0.3.11
Fixed the real-time box office data interface
0.3.12
Added the Sina dominant continuous contract interface
0.3.13
Added the Sina dominant continuous contract list
0.3.14
List of failed Chinese companies
0.3.15
List of Chinese unicorn companies
List of Chinese 千里马 companies
0.3.16
East Money (东方财富): institutional research visits
0.3.17
East Money data center - featured data - institutional research visits
Institutional research visit statistics
Institutional research visit details
0.3.18
Fixed the automated test interface
0.3.19
Fixed field-name matching for margin trading data
Added East Money data center - featured data - stock pledges
0.3.20
East Money data center - featured data - equity pledges
East Money data center - featured data - equity pledges - market overview: http://data.eastmoney.com/gpzy/marketProfile.aspx
East Money data center - featured data - equity pledges - pledge ratios of listed companies: http://data.eastmoney.com/gpzy/pledgeRatio.aspx
East Money data center - featured data - equity pledges - pledge details of major shareholders: http://data.eastmoney.com/gpzy/pledgeDetail.aspx
East Money data center - featured data - equity pledges - distribution by pledgee institution - securities firms: http://data.eastmoney.com/gpzy/distributeStatistics.aspx
East Money data center - featured data - equity pledges - distribution by pledgee institution - banks: http://data.eastmoney.com/gpzy/distributeStatistics.aspx
East Money data center - featured data - equity pledges - industry data: http://data.eastmoney.com/gpzy/industryData.aspx
0.3.21
East Money data center - featured data - goodwill
East Money data center - featured data - goodwill - A-share goodwill market overview: http://data.eastmoney.com/sy/scgk.html
East Money data center - featured data - goodwill - expected goodwill impairment details: http://data.eastmoney.com/sy/yqlist.html
East Money data center - featured data - goodwill - per-stock goodwill impairment details: http://data.eastmoney.com/sy/jzlist.html
East Money data center - featured data - goodwill - per-stock goodwill details: http://data.eastmoney.com/sy/list.html
East Money data center - featured data - goodwill - goodwill by industry: http://data.eastmoney.com/sy/hylist.html
0.3.22
Futures rules: trading calendar table
Updated the 2020 trading calendar data
0.3.23
East Money data center - featured data - stock account statistics: http://data.eastmoney.com/cjsj/gpkhsj.html
0.3.24
Removed the legacy 交易法门 functions,
because the 交易法门 website now requires member login to access its data
0.3.25
Added 交易法门 tools: arbitrage analysis interface
Added 交易法门 tools: trading rules interface
0.3.26
Added 交易法门 data: agricultural products - soybean oil
Added 交易法门 data: ferrous - coking coal
Added 交易法门 tools: position analysis - futures analysis
Added 交易法门 tools: position analysis - position analysis
0.3.27
交易法门 documentation
0.3.28
Added the stock index constituents interface
0.3.29
Added code comments for the stock index constituents interface
0.3.30
Added the Yiwu small commodity index
0.3.31
Fixed the CBIRC local-branch administrative penalty data interface
Renamed the interface to: bank_fjcf_table_detail
0.3.32
Added the China thermal coal price index
0.3.33
Fixed the CBIRC administrative penalty interface to handle the fields added on 20200108
0.3.34
Added 交易法门 tools: term-structure analysis - basis daily report
Added 交易法门 tools: term-structure analysis - basis analysis
Added 交易法门 tools: term-structure analysis - term structure
Added 交易法门 tools: term-structure analysis - price seasonality
0.3.35
Updated the documentation
0.3.36
# 交易法门 tools: warehouse receipt analysis
Added 交易法门 tools: warehouse receipt analysis - warehouse receipt daily report
Added 交易法门 tools: warehouse receipt analysis - warehouse receipt query
Added 交易法门 tools: warehouse receipt analysis - virtual-to-physical ratio query
# 交易法门 tools: news aggregation
Added 交易法门 tools: news aggregation - research report query
Added 交易法门 tools: news aggregation - trading calendar
# 交易法门 tools: fund analysis
Added 交易法门 tools: fund analysis - fund flows
0.3.37
Updated the documentation
0.3.38
Revised the fields and documentation of the 交易法门 fund-flow function
0.3.39
金十数据 data center - economic indicators - central bank rates - major central bank rates
Fed interest rate decision report
ECB decision report
RBNZ decision report
PBoC decision report
SNB decision report
BoE decision report
RBA decision report
BoJ decision report
Bank of Russia decision report
RBI decision report
Central Bank of Brazil decision report
macro_euro_gdp_yoy  # 金十数据 data center - economic indicators - Eurozone - national economy - economic conditions - Eurozone quarterly GDP YoY report
macro_euro_cpi_mom  # 金十数据 data center - economic indicators - Eurozone - national economy - price levels - Eurozone CPI MoM report
macro_euro_cpi_yoy  # 金十数据 data center - economic indicators - Eurozone - national economy - price levels - Eurozone CPI YoY report
macro_euro_ppi_mom  # 金十数据 data center - economic indicators - Eurozone - national economy - price levels - Eurozone PPI MoM report
macro_euro_retail_sales_mom  # 金十数据 data center - economic indicators - Eurozone - national economy - price levels - Eurozone retail sales MoM report
macro_euro_employment_change_qoq  # 金十数据 data center - economic indicators - Eurozone - national economy - labor market - Eurozone seasonally adjusted employment change QoQ report
macro_euro_unemployment_rate_mom  # 金十数据 data center - economic indicators - Eurozone - national economy - labor market - Eurozone unemployment rate report
macro_euro_trade_balance  # 金十数据 data center - economic indicators - Eurozone - trade - Eurozone unadjusted trade balance report
macro_euro_current_account_mom  # 金十数据 data center - economic indicators - Eurozone - trade - Eurozone current account report
macro_euro_industrial_production_mom  # 金十数据 data center - economic indicators - Eurozone - industry indicators - Eurozone industrial production MoM report
macro_euro_manufacturing_pmi  # 金十数据 data center - economic indicators - Eurozone - industry indicators - Eurozone manufacturing PMI flash report
macro_euro_services_pmi  # 金十数据 data center - economic indicators - Eurozone - industry indicators - Eurozone services PMI final report
macro_euro_zew_economic_sentiment  # 金十数据 data center - economic indicators - Eurozone - leading indicators - Eurozone ZEW economic sentiment index report
macro_euro_sentix_investor_confidence  # 金十数据 data center - economic indicators - Eurozone - leading indicators - Eurozone Sentix investor confidence index report
0.3.40
Fixed the ECB decision report
0.3.41
Added East Money economic data: interbank lending rates
0.3.42
# China
macro_china_gdp_yearly  # 金十数据 data center - economic indicators - China - national economy - economic conditions - China GDP YoY report
macro_china_cpi_yearly  # 金十数据 data center - economic indicators - China - national economy - price levels - China CPI YoY report
macro_china_cpi_monthly  # 金十数据 data center - economic indicators - China - national economy - price levels - China CPI MoM report
macro_china_ppi_yearly  # 金十数据 data center - economic indicators - China - national economy - price levels - China PPI YoY report
macro_china_exports_yoy  # 金十数据 data center - economic indicators - China - trade - exports YoY in USD terms report
macro_china_imports_yoy  # 金十数据 data center - economic indicators - China - trade - imports YoY in USD terms
macro_china_trade_balance  # 金十数据 data center - economic indicators - China - trade - trade balance in USD terms (100 million USD)
macro_china_industrial_production_yoy  # 金十数据 data center - economic indicators - China - industry indicators - industrial value added YoY for enterprises above designated size
macro_china_pmi_yearly  # 金十数据 data center - economic indicators - China - industry indicators - official manufacturing PMI
macro_china_cx_pmi_yearly  # 金十数据 data center - economic indicators - China - industry indicators - Caixin manufacturing PMI final
macro_china_cx_services_pmi_yearly  # 金十数据 data center - economic indicators - China - industry indicators - Caixin services PMI
macro_china_non_man_pmi  # 金十数据 data center - economic indicators - China - industry indicators - China official non-manufacturing PMI
macro_china_fx_reserves_yearly  # 金十数据 data center - economic indicators - China - financial indicators - foreign exchange reserves (100 million USD)
macro_china_m2_yearly  # 金十数据 data center - economic indicators - China - financial indicators - M2 money supply YoY
macro_china_shibor_all  # 金十数据 data center - economic indicators - China - financial indicators - Shanghai interbank offered rate report
macro_china_hk_market_info  # 金十数据 data center - economic indicators - China - financial indicators - RMB Hong Kong interbank offered rate
macro_china_daily_energy  # 金十数据 data center - economic indicators - China - other - China daily coal inventory at the six major coastal power plants
macro_china_rmb  # 金十数据 data center - economic indicators - China - other - China RMB central parity rate report
macro_china_market_margin_sz  # 金十数据 data center - economic indicators - China - other - Shenzhen margin trading report
macro_china_market_margin_sh  # 金十数据 data center - economic indicators - China - other - Shanghai margin trading report
macro_china_au_report # 金十数据中心-经济指标-中国-其他-上海黄金交易所报告
macro_china_ctci # 发改委-中国电煤价格指数-全国综合电煤价格指数
macro_china_ctci_detail # 发改委-中国电煤价格指数-各价区电煤价格指数
macro_china_ctci_detail_hist # 发改委-中国电煤价格指数-历史电煤价格指数
macro_china_money_supply # 中国货币供应量
# 美国
macro_usa_gdp_monthly # 金十数据中心-经济指标-美国-经济状况-美国GDP
macro_usa_cpi_monthly # 金十数据中心-经济指标-美国-物价水平-美国CPI月率报告
macro_usa_core_cpi_monthly # 金十数据中心-经济指标-美国-物价水平-美国核心CPI月率报告
macro_usa_personal_spending # 金十数据中心-经济指标-美国-物价水平-美国个人支出月率报告
macro_usa_retail_sales # 金十数据中心-经济指标-美国-物价水平-美国零售销售月率报告
macro_usa_import_price # 金十数据中心-经济指标-美国-物价水平-美国进口物价指数报告
macro_usa_export_price # 金十数据中心-经济指标-美国-物价水平-美国出口价格指数报告
macro_usa_lmci # 金十数据中心-经济指标-美国-劳动力市场-LMCI
macro_usa_unemployment_rate # 金十数据中心-经济指标-美国-劳动力市场-失业率-美国失业率报告
macro_usa_job_cuts # 金十数据中心-经济指标-美国-劳动力市场-失业率-美国挑战者企业裁员人数报告
macro_usa_non_farm # 金十数据中心-经济指标-美国-劳动力市场-就业人口-美国非农就业人数报告
macro_usa_adp_employment # 金十数据中心-经济指标-美国-劳动力市场-就业人口-美国ADP就业人数报告
macro_usa_core_pce_price # 金十数据中心-经济指标-美国-劳动力市场-消费者收入与支出-美国核心PCE物价指数年率报告
macro_usa_real_consumer_spending # 金十数据中心-经济指标-美国-劳动力市场-消费者收入与支出-美国实际个人消费支出季率初值报告
macro_usa_trade_balance # 金十数据中心-经济指标-美国-贸易状况-美国贸易帐报告
macro_usa_current_account # 金十数据中心-经济指标-美国-贸易状况-美国经常帐报告
macro_usa_rig_count # 金十数据中心-经济指标-美国-产业指标-制造业-贝克休斯钻井报告
# 金十数据中心-经济指标-美国-产业指标-制造业-美国个人支出月率报告
macro_usa_ppi # 金十数据中心-经济指标-美国-产业指标-制造业-美国生产者物价指数(PPI)报告
macro_usa_core_ppi # 金十数据中心-经济指标-美国-产业指标-制造业-美国核心生产者物价指数(PPI)报告
macro_usa_api_crude_stock # 金十数据中心-经济指标-美国-产业指标-制造业-美国API原油库存报告
macro_usa_pmi # 金十数据中心-经济指标-美国-产业指标-制造业-美国Markit制造业PMI初值报告
macro_usa_ism_pmi # 金十数据中心-经济指标-美国-产业指标-制造业-美国ISM制造业PMI报告
macro_usa_nahb_house_market_index # 金十数据中心-经济指标-美国-产业指标-房地产-美国NAHB房产市场指数报告
macro_usa_house_starts # 金十数据中心-经济指标-美国-产业指标-房地产-美国新屋开工总数年化报告
macro_usa_new_home_sales # 金十数据中心-经济指标-美国-产业指标-房地产-美国新屋销售总数年化报告
macro_usa_building_permits # 金十数据中心-经济指标-美国-产业指标-房地产-美国营建许可总数报告
macro_usa_exist_home_sales # 金十数据中心-经济指标-美国-产业指标-房地产-美国成屋销售总数年化报告
macro_usa_house_price_index # 金十数据中心-经济指标-美国-产业指标-房地产-美国FHFA房价指数月率报告
macro_usa_spcs20 # 金十数据中心-经济指标-美国-产业指标-房地产-美国S&P/CS20座大城市房价指数年率报告
macro_usa_pending_home_sales # 金十数据中心-经济指标-美国-产业指标-房地产-美国成屋签约销售指数月率报告
macro_usa_cb_consumer_confidence # 金十数据中心-经济指标-美国-领先指标-美国谘商会消费者信心指数报告
macro_usa_nfib_small_business # 金十数据中心-经济指标-美国-领先指标-美国NFIB小型企业信心指数报告
macro_usa_michigan_consumer_sentiment # 金十数据中心-经济指标-美国-领先指标-美国密歇根大学消费者信心指数初值报告
macro_usa_eia_crude_rate # 金十数据中心-经济指标-美国-其他-美国EIA原油库存报告
macro_usa_initial_jobless # 金十数据中心-经济指标-美国-其他-美国初请失业金人数报告
macro_usa_crude_inner # 金十数据中心-经济指标-美国-其他-美国原油产量报告
0.3.43
增加-交易法门-数据-黑色系-焦煤
0.3.44
更新宏观数据
macro_cons_gold_volume # 全球最大黄金ETF—SPDR Gold Trust持仓报告
macro_cons_gold_change # 全球最大黄金ETF—SPDR Gold Trust持仓报告
macro_cons_gold_amount # 全球最大黄金ETF—SPDR Gold Trust持仓报告
macro_cons_silver_volume # 全球最大白银ETF--iShares Silver Trust持仓报告
macro_cons_silver_change # 全球最大白银ETF--iShares Silver Trust持仓报告
macro_cons_silver_amount # 全球最大白银ETF--iShares Silver Trust持仓报告
macro_cons_opec_month # 欧佩克报告-月度
0.3.45
增加中国证券投资基金业协会-信息公示
# 中国证券投资基金业协会-信息公示-会员信息
amac_member_info # 中国证券投资基金业协会-信息公示-会员信息-会员机构综合查询
# 中国证券投资基金业协会-信息公示-从业人员信息
amac_person_org_list # 中国证券投资基金业协会-信息公示-从业人员信息-基金从业人员资格注册信息
# 中国证券投资基金业协会-信息公示-私募基金管理人公示
amac_manager_info # 中国证券投资基金业协会-信息公示-私募基金管理人公示-私募基金管理人综合查询
amac_manager_classify_info # 中国证券投资基金业协会-信息公示-私募基金管理人公示-私募基金管理人分类公示
amac_member_sub_info # 中国证券投资基金业协会-信息公示-私募基金管理人公示-证券公司私募基金子公司管理人信息公示
# 中国证券投资基金业协会-信息公示-基金产品
amac_fund_info # 中国证券投资基金业协会-信息公示-基金产品-私募基金管理人基金产品
amac_securities_info # 中国证券投资基金业协会-信息公示-基金产品-证券公司集合资管产品公示
amac_aoin_info # 中国证券投资基金业协会-信息公示-基金产品-证券公司直投基金
amac_fund_sub_info # 中国证券投资基金业协会-信息公示-基金产品公示-证券公司私募投资基金
amac_fund_account_info # 中国证券投资基金业协会-信息公示-基金产品公示-基金公司及子公司集合资管产品公示
amac_fund_abs # 中国证券投资基金业协会-信息公示-基金产品公示-资产支持专项计划
amac_futures_info # 中国证券投资基金业协会-信息公示-基金产品公示-期货公司集合资管产品公示
# 中国证券投资基金业协会-信息公示-诚信信息
amac_manager_cancelled_info # 中国证券投资基金业协会-信息公示-诚信信息-已注销私募基金管理人名单
0.3.46
更新-商品期权-菜籽粕期权接口
修复 get_sector_futures 字段名问题
0.3.47
增加-商品期权-郑州商品交易所-期权-历史数据
0.3.48
修复 macro_cons_opec_month 接口数据更新问题
0.3.49
新增-交易法门-工具-仓单分析-虚实盘比日报接口
0.3.50
更新-说明文档
0.3.51
修复 macro_cons_opec_month 接口数据更新问题, 统一数据接口跟网页端统一
修复-百度指数-由用户输入cookie来访问数据及说明文档
0.3.52
新增-英为财情-外汇-货币对历史数据
0.3.53
修复-macro_usa_rig_count-接口返回数据
修复-rate_interbank-文档注释
0.3.54
新增-事件接口
新增-事件接口新型冠状病毒-网易
新增-事件接口新型冠状病毒-丁香园
0.3.55
更新-事件接口新型冠状病毒
0.3.56
更新-事件接口新型冠状病毒-全国疫情趋势图
0.3.57
更新-事件接口新型冠状病毒-分省地区
一些细节修复
0.3.58
新增-财富排行榜(英文版)
0.3.59
新增-currency_name_code-接口
0.3.60
修复-财富排行榜(英文版)-索引乱序问题
0.3.61
修复-事件接口新型冠状病毒-hospital-接口
0.3.62
修复-20200203交易日问题
0.3.63
修复-事件接口新型冠状病毒-网易接口
0.3.64
修复-事件接口新型冠状病毒-丁香园接口
0.3.65
修复-calendar.json 问题, 感谢 fxt0706
0.3.66
修复-epu_index-加载问题
0.3.67
修复-option_commodity-json数据加载问题
0.3.68
更名函数 movie_board -> box_office_spot
0.3.69
新增-epidemic_baidu
百度-新型冠状病毒肺炎-疫情实时大数据报告
0.3.70
修复-epidemic_dxy-字段问题
0.3.71
修复-epidemic_dxy-具体省份字段问题
0.3.72
新增-百度迁徙地图接口
0.3.73
修复文字表述
0.3.74
修复-epidemic_163-数据更新问题
0.3.75
修复-epidemic_dxy-图片显示问题
0.3.76
新增-stock_zh_index_daily_tx-补充新浪指数的数据缺失问题
0.3.77
修复-epidemic_163-数据更新问题
0.3.78
新增-bond_china_yield-中国债券信息网-国债及其他债券收益率曲线
0.3.79
修改-bond_china_yield-参数
0.3.80
新增-基金数据接口
0.3.81
新增-基金数据接口-净值
0.3.82
新增-小区查询
新增-相同行程查询
0.3.83
新增-交易法门-工具-套利分析-FullCarry
修改-交易法门-工具-期限分析-基差分析
0.3.84
新增-货币对-投机情绪报告
0.3.85
修复-epidemic_area_detail-增加下载进度提示
0.3.86
修复-epidemic_dxy-完善图片获取
0.3.87
新增-债券质押式回购成交明细数据
新增-细化到地市的疫情历史数据20200123至今
0.3.88
新增-交易法门-工具-持仓分析-持仓季节性
修复-epidemic_163
0.3.89
新增-epidemic_163-数据说明接口
0.3.90
修复-epidemic_dxy
0.3.91
修复-get_receipt-MA数值问题
0.3.92
新增-奇货可查接口测试
0.3.93
新增-奇货可查接口测试-代码补全
0.3.94
修复-epidemic_dxy
0.3.95
新增-债券-沪深债券
新增-债券-沪深可转债
0.3.96
修复-baidu_search_index-异常
0.3.97
新增-特许经营数据
0.3.98
修复-get_receipt-MA数值问题条件判断
0.3.99
修复-air_hebei-代码格式
0.4.1
修复-pandas-版本降级
0.4.2
修复-epidemic_baidu
0.4.3
新增-慈善中国
0.4.4
新增-epidemic_history-疫情所有历史数据
0.4.5
完善-慈善中国-类型注解
0.4.6
修复-charity_china_report
0.4.7
新增-测试接口
0.4.8
修复-epidemic_hist_all
修复-epidemic_hist_city
修复-epidemic_hist_province
0.4.9
新增-option_sina_cffex_hs300_list
新增-option_sina_cffex_hs300_spot
新增-option_sina_cffex_hs300_daily
新增-option_sina_sse_list
新增-option_sina_sse_expire_day
新增-option_sina_sse_codes
新增-option_sina_sse_spot_price
新增-option_sina_sse_underlying_spot_price
新增-option_sina_sse_greeks
新增-option_sina_sse_minute
新增-option_sina_sse_daily
0.4.10
修复-金十数据websocket接口
0.4.11
新增-交易法门-工具-资金分析-资金流向
新增-交易法门-工具-资金分析-沉淀资金
新增-交易法门-工具-资金分析-资金季节性
新增-交易法门-工具-资金分析-成交排名
0.4.12
新增-微博舆情报告
0.4.13
新增-Python3.8.1支持
0.4.14
修复-get_receipt-CZCE问题
0.4.15
修复-hf_subscribe_exchange_symbol-在Linux Python 3.8.1 报错问题
0.4.16
修复-get_js_dc_current
0.4.17
新增-知识图谱
0.4.18: fix: use tqdm to replace print hints
0.4.19: fix: use tqdm to replace print hints in energy_carbon.py and charity_china.py
0.4.20: add: jyfm_tools_position_structure and jyfm_tools_symbol_handbook
0.4.21: fix: macro_cons_opec_month print hints
0.4.22: fix: add tqdm desc
0.4.23: fix: add tqdm stock_zh_a_spot desc
0.4.24: fix: add get_us_stock_name to get the U.S. stock name
0.4.25: fix: upload setup.py file and set automate release and deploy
0.4.26: fix: bond_spot_quote and docs
0.4.27: test: automate test
0.4.28: test: automate test
0.4.29: feats: add currency interface
0.4.30: fix: futures_roll_yield.py/get_roll_yield: CUefp error
0.4.31: format: format currency.py
0.4.32: fix: china_bond.py
0.4.33: add: jyfm_tools_futures_arbitrage_matrix for jyfm futures
0.4.34: fix: get_czce_rank_table history-20171228 format
0.4.35: fix: get_czce_rank_table history-20071228 format
0.4.36: fix: macro_cons_opec_month
0.4.37: add: get_ine_daily to fetch SC and NR data
0.4.38: add: futures_sgx_daily to fetch futures data from sgx
0.4.39: refactor: covid.py/covid_19_163 interface
0.4.40: refactor: covid.py interface
0.4.41: fix: cot.py get_rank_sum_daily interface
0.4.42: add: wdbank.py test
0.4.43: add: wdbank.py dependencies
0.4.44: add: tool github
0.4.45: add: fund_public file and docs
0.4.46: add: macro_china_lpr
0.4.47: add: stock_em_analyst
0.4.48: add: stock_em_comment
0.4.49: add: stock_em_hsgt
0.4.50: fix: stock_em_sy_yq_list
0.4.51: add: stock_tfp_em
0.4.52: fix: covid.py
0.4.53: fix: futures_hq_sina.py
0.4.54: add: futures_foreign
0.4.55: fix: macro_constitute.py
0.4.56: add: index_vix
0.4.57: fix: covid-19; desc: remove picture display
0.4.58: add: qhkc api
0.4.59: add: jyfm_tools
0.4.60: fix: covid_19_dxy and cot.py
0.4.61: fix: cot.py dict's keys use strip
0.4.62: fix: add PG into cons.py map_dict
0.4.63: add: energy_oil to add energy_oil_hist and energy_oil_detail
0.4.64: add: futures_em_spot_stock
0.4.65: add: futures_global_commodity_name_url_map
0.4.66: fix: fund_em.py timezone transfer
0.4.67: fix: covid covid_19_area_detail
0.4.68: fix: marco_usa
0.4.69: add: futures_cfmmc
0.4.70: add: covid_19 CSSE 数据接口
0.4.71: add: argus
0.4.72: add: stock_zh_tick_163
0.4.73: add: stock_zh_tick_tx_js
0.4.74: fix: stock_zh_tick_163 return tips
0.4.75: fix: nh_index
0.4.76: add: fred_md
0.4.77: fix: get_dce_option_daily
0.4.78: add: internal_flow_history
0.4.79: add: stock_em_dxsyl
0.4.80: fix: covid and docs
0.4.81: add: stock_em_yjyg and stock_em_yysj
0.4.82: fix: futures_xgx_index
0.4.83: fix: fortune_500.py
0.4.84: fix: a and kcb stock return format
0.4.85: fix: a and kcb stock field
0.4.86: add: hf_sp_500
0.4.87: fix: jinshi data update
0.4.88: fix: macro_china
0.4.89: fix: macro_other
0.4.90: fix: stock_zh_a and stock_zh_kcb return adjusted stock price
0.4.91: add: futures_inventory_em
0.4.92: fix: adjust hk_stock_sina, us_stock_sina
0.4.93: fix: air_quality
0.4.94: fix: air_quality path
0.4.95: add: js file
0.4.96: fix: format air interface
0.4.97: fix: interbank_rate_em.py add need_page parameter to control update content
0.4.98: add: mplfinance package
0.4.99: add: fund_em
0.5.1: fix: add PG to futures list
0.5.2: fix: air_zhenqi.py rename air_city_dict to air_city_list
0.5.3: add: add two fields into covid_163
0.5.4: fix: fix request_fun timeout and error type
0.5.5: fix: fund_em_graded_fund_daily return fields
0.5.6: fix: stock_us_sina.py rename columns
0.5.7: fix: import akshare only load functions
0.5.8: add: macro_china_money_supply
0.5.9: add: macro_china_new_house_price, macro_china_enterprise_boom_index, macro_china_national_tax_receipts
0.5.10: fix: zh_stock_ah_tx
0.5.11: fix: fund_em return fields
0.5.12: fix: add date to fund_em daily function
0.5.13: add: stock_fund
0.5.14: add: stock_market_fund_flow, stock_sector_fund_flow, stock_individual_fund_flow_rank
0.5.15: fix: baidu_index
0.5.16: add: fund_em_value_estimation
0.5.17: fix: delete macro_euro zero value
0.5.18: add: stock_financial_abstract, stock_financial_analysis_indicator
0.5.19: add: stock_add_stock, stock_ipo_info, stock_history_dividend_detail, stock_history_dividend
0.5.20: add: stock_restricted_shares, stock_circulate_stock_holder
0.5.21: add: futures_dce_position_rank
0.5.22: fix: fix futures_dce_position_rank return format
0.5.23: add: stock_sector_spot, stock_sector_detail
0.5.24: fix: futures_dce_position_rank
0.5.25: fix: futures_dce_position_rank return fields
0.5.26: add: stock_info
0.5.27: add: stock_em_hsgt_hold_stock
0.5.28: add: stock_fund_stock_holder, stock_main_stock_holder
0.5.29: fix: stock_em_sy
0.5.30: fix: air_zhenqi.py
0.5.31: fix: add futures_dce_position_rank_other to fix futures_dce_position_rank at 20160104
0.5.32: fix: futures_dce_position_rank_other return format
0.5.33: add: zh_bond_cov_sina and set pandas version
0.5.34: fix: set pandas version > 0.25
0.5.35: add: bond_cov_comparison and bond_zh_cov
0.5.36: fix: stock_info_sz_name_code return code format
0.5.37: add: stock_hold
0.5.38: fix: futures_dce_position_rank_other exchange symbol and variety
0.5.39: add: stock_recommend
0.5.40: fix: stock_recommend output format
0.5.41: fix: deprecated requests-html module
0.5.42: fix: reformat investing interface
0.5.43: fix: qhck interface
0.5.44: add: LME holding and stock report
0.5.45: fix: transform the data type of stock_zh_a_spot output
0.5.46: add: CFTC holding and stock
0.5.47: fix: fix index_investing_global interface
0.5.48: fix: fix stock_info_a_code_name interface
0.5.49: fix: fix stock_zh_a_daily interface
0.5.50: fix: fix get_roll_yield_bar interface
0.5.51: add: stock_summary
0.5.52: fix: fix get_roll_yield_bar interface
0.5.53: add: add watch_jinshi_quotes interface
0.5.54: add: add stock_js_price interface
0.5.55: add: add futures_czce_warehouse_receipt interface
0.5.56: add: add futures_dce_warehouse_receipt, futures_shfe_warehouse_receipt interface
0.5.57: fix: fix macro data interface
0.5.58: add: add stock_em_qsjy interface
0.5.59: fix: fix fund interface
0.5.60: fix: add index_bloomberg_billionaires interface
0.5.61: fix: fix futures_rule interface
0.5.62: add: add stock_a_pe, stock_a_pb interface
0.5.63: add: add stock_a_lg_indicator interface
0.5.64: add: add stock_a_high_low_statistics interface
0.5.65: add: add stock_a_below_net_asset_statistics interface
0.5.66: fix: fix stock_zh_a_daily default return unadjusted data
0.5.67: fix: fix R and MATLAB compatibility issues
0.5.68: add: add option_commodity_sina interface
0.5.69: fix: fix option_commodity_sina interface
0.5.70: merge: merge #4048
0.5.71: add: add tool_trade_date_hist interface
0.5.72: add: add fund_etf_category_sina, fund_etf_hist_sina interface
0.5.73: add: add stock_report_disclosure interface
0.5.74: add: add stock_zh_a_minute interface
0.5.75: add: add futures_zh_minute_sina interface
0.5.76: add: add option_sina_finance_minute interface
0.5.77: fix: fix currency_hist interface return data format
0.5.78: add: add hold field in futures_zh_minute_sina interface
0.5.79: add: add stock_report_fund_hold interface
0.5.80: fix: fix PG to futures cons file
0.5.81: add: add stock_zh_index_hist_csindex interface
0.5.82: fix: fix LU to futures cons file
0.5.83: fix: fix qhkc broker_positions_process interface
0.5.84: fix: fix tool_trade_date_hist_sina interface and update calendar.json
0.5.85: add: add index_stock_hist interface
0.5.86: fix: fix code format
0.5.87: fix: fix cot interface
0.5.88: fix: fix stock_em_account interface
0.5.89: add: add macro_china_new_financial_credit interface
0.5.90: add: add stock_sina_lhb interface
0.5.91: fix: fix covid for python3.8
0.5.92: fix: fix futures_daily_bar interface
0.5.93: add: add macro_china_fx_gold interface
0.5.94: add: add stock_zh_index_daily_em, bond_cov_jsl interface
0.5.95: fix: fix get_dce_option_daily interface
0.5.96: add: add stock_em_hsgt_hist interface
0.5.97: fix: fix remove mplfinance package in requirements.txt
0.5.98: add: add stock_hk_eniu_indicator interface
0.5.99: fix: fix stock_zh_ah_daily interface
0.6.1: fix: fix stock_zh_ah_daily interface set default value
0.6.2: fix: fix stock_zh_a_minute interface and add adjust parameter
0.6.3: fix: fix stock_zh_a_minute interface
0.6.4: add: add macro_china interface
0.6.5: add: add macro_china_wbck interface
0.6.6: fix: fix macro_china_wbck interface
0.6.7: add: add index_stock_cons_sina interface
0.6.8: fix: fix option_commodity interface
0.6.9: fix: fix stock_em_gpzy_pledge_ratio interface
0.6.10: add: add macro_china_hb, macro_china_gksccz, macro_china_bond_public interface
0.6.11: fix: fix python version should be 3.7 later
0.6.12: fix: fix stock_em_gpzy_distribute_statistics_company interface
0.6.13: add: add stock_us_fundamental interface
0.6.14: fix: fix stock_us_fundamental interface
0.6.15: fix: fix macro_china_market_margin_sh interface
0.6.16: fix: fix stock_us_daily time period and adjust for specific stock
0.6.17: fix: fix stock_js_weibo_report interface
0.6.18: fix: fix get_shfe_option_daily interface column name
0.6.19: fix: fix stock_hk_daily interface to process non-dividend stock
0.6.20: fix: fix covid_baidu interface
0.6.21: fix: fix futures_hf_spot interface
0.6.22: fix: fix stock_zh_index_daily_tx interface
0.6.23: fix: fix currency_hist interface
0.6.24: fix: fix stock_zh_kcb_spot interface
0.6.25: add: add stock_register_kcb interface
0.6.26: add: add stock_em_sy_list interface
0.6.27: fix: fix stock_sector_detail interface
0.6.28: add: add stock_register_cyb interface
0.6.29: fix: fix stock_zh_a_daily interface
0.6.30: add: add energy interface
0.6.31: fix: fix energy interface
0.6.32: fix: fix docs interface
0.6.33: fix: fix get_roll_yield_bar interface
0.6.34: fix: fix currency_investing and futures_inventory_em interface and add index_stock_cons_csindex interface
0.6.35: fix: fix get_futures_daily interface
0.6.36: fix: fix stock_info_a_code_name interface
0.6.37: fix: fix stock_sector_detail interface
0.6.38: fix: fix get_futures_daily interface
0.6.39: add: add stock_em_xgsglb interface
0.6.40: add: add stock_zh_a_new interface
0.6.41: fix: fix get_ine_daily interface
0.6.42: add: add bond_futures_deliverable_coupons interface
0.6.43: fix: fix bond_futures_deliverable_coupons interface
0.6.44: add: add futures_comex_inventory interface
0.6.45: add: add macro_china_xfzxx interface
0.6.46: add: add macro_china_reserve_requirement_ratio interface
0.6.47: fix: fix franchise_china interface
0.6.48: fix: fix get_rank_sum interface
0.6.49: fix: fix get_dce_rank_table interface
0.6.50: add: add macro_china_hgjck, macro_china_consumer_goods_retail interface
0.6.51: fix: fix macro_china_hgjck interface
0.6.52: add: add macro_china_society_electricity interface
0.6.53: add: add macro_china_society_traffic_volume interface
0.6.54: add: add macro_china_postal_telecommunicational interface
0.6.55: add: add macro_china_international_tourism_fx interface
0.6.56: add: add macro_china_swap_rate interface
0.6.57: fix: fix stock_sina_lhb_detail_daily interface
0.6.58: add: add bond_china_close_return interface
0.6.59: add: add macro_china_passenger_load_factor interface
0.6.60: fix: fix stock_sina_lhb_ggtj interface
0.6.61: fix: fix option_czce_hist interface
0.6.62: fix: fix sunrise_daily interface
0.6.63: fix: fix get_roll_yield_bar interface
0.6.64: add: add macro_china_insurance interface
0.6.65: add: add macro_china_supply_of_money interface
0.6.66: add: add support for python 3.9.0
0.6.67: add: add macro_china_foreign_exchange_gold interface
0.6.68: add: add macro_china_retail_price_index interface
0.6.69: fix: fix box_office_spot interface
0.6.70: fix: fix bond_investing_global interface
0.6.71: fix: fix nh_return_index interface
0.6.72: fix: fix get_receipt interface
0.6.73: add: add news_cctv interface
0.6.74: fix: fix macro and acm interface
0.6.75: add: add movie_boxoffice interface
0.6.76: fix: fix remove execjs dependence
0.6.77: add: add macro_china_real_estate interface
0.6.78: fix: fix movie_boxoffice interface
0.6.79: fix: split movie_boxoffice to single interface
0.6.80: fix: movie_boxoffice interface
0.6.81: fix: fix stock_report_fund_hold interface
0.6.82: fix: fix stock_em_comment interface
0.6.83: add: add crypto_hist and crypto_name_map interface
0.6.84: fix: fix crypto_hist interface
0.6.85: fix: fix stock_a_pb and stock_a_pe interface
0.6.86: fix: fix stock_zh_a_minute interface
0.6.87: fix: remove email interface
0.6.88: fix: fix get_dce_rank_table interface
0.6.89: fix: fix get_dce_rank_table interface
0.6.90: add: add fund_em_rank interface
0.6.91: fix: fix get_futures_daily interface
0.6.92: add: add repo_rate_hist interface
0.6.93: fix: fix stock_report_fund_hold interface
0.6.94: fix: fix docs interface
0.6.95: fix: fix macro_china_market_margin_sh interface
0.6.96: fix: fix stock_zh_a_daily interface
0.6.97: add: add stock_em_hsgt_board_rank interface
0.6.98: fix: fix fortune_rank interface
0.6.99: add: add forbes_rank interface
0.7.1: fix: fix futures_dce_position_rank interface
0.7.2: add: add xincaifu_rank interface
0.7.3: add: add hurun_rank interface
0.7.4: fix: fix hurun_rank interface
0.7.5: add: add currency_pair_map interface
0.7.6: fix: fix stock_em_jgdy_detail interface
0.7.7: fix: fix stock_info interface
0.7.8: fix: fix bond_cov_jsl interface
0.7.9: fix: fix stock_em_jgdy_detail interface
0.7.10: fix: fix match_main_contract interface
0.7.11: fix: fix stock_em_analyst_rank and stock_em_analyst_detail interface
0.7.12: add: add stock_zh_a_cdr_daily interface
0.7.13: fix: fix stock_zh_a_cdr_daily and stock_zh_a_daily interface
0.7.14: fix: fix get_receipt interface
0.7.15: add: add futures_contract_detail interface
0.7.16: fix: fix futures_zh_spot interface
0.7.17: del: del zdzk interface
0.7.18: fix: fix stock_zh_a_daily interface
0.7.19: fix: fix stock_zh_a_daily interface
0.7.20: fix: fix stock_em_jgdy_tj interface
0.7.21: fix: fix zh_stock_kcb_report interface
0.7.22: fix: fix zh_stock_kcb_report interface
0.7.23: fix: fix fund_em_open_fund_info interface
0.7.24: fix: fix futures_spot_price_daily interface
0.7.25: add: add option_current_em interface
0.7.26: fix: fix option_current_em interface
0.7.27: add: add js_news interface
0.7.28: fix: fix js_news interface
0.7.29: fix: fix macro_china_market_margin_sh interface
0.7.30: add: add nlp_answer interface
0.7.31: fix: fix index_sw interface
0.7.32: add: add index_cni interface
0.7.33: add: add more index_cni interface
0.7.34: add: add stock_dzjy_sctj interface
0.7.35: add: add stock_dzjy_mrmx interface
0.7.36: add: add stock_dzjy_mrtj interface
0.7.37: add: add stock_dzjy_hygtj interface
0.7.38: add: add stock_dzjy_hyyybtj interface
0.7.39: add: add stock_dzjy_yybph interface
0.7.40: fix: fix js_news interface
0.7.41: add: add stock_em_yzxdr interface
0.7.42: fix: fix fund_em_etf_fund_daily interface
0.7.43: fix: fix match_main_contract interface
0.7.44: fix: fix stock_hk_daily interface
0.7.45: fix: fix stock_em_yzxdr interface
0.7.46: fix: fix option_czce_hist interface
0.7.47: fix: fix bond_zh_cov interface
0.7.48: fix: fix futures_dce_position_rank interface
0.7.49: fix: fix stock_us_zh_spot interface
0.7.50: fix: fix stock_em_hsgt_stock_statistics interface
0.7.51: fix: fix stock_us_daily interface
0.7.52: fix: fix stock_sector_fund_flow_rank interface
0.7.53: fix: fix stock_em_yzxdr interface
0.7.54: add: add stock_a_code_to_symbol interface
0.7.55: add: add stock_news_em interface
0.7.56: fix: fix stock_news_em interface
0.7.57: fix: fix xlrd support
0.7.58: fix: fix stock_zh_a_tick_tx_js support
0.7.59: fix: fix read_excel support
0.7.60: fix: fix fund_em_open_fund_daily interface
0.7.61: fix: fix calendar.json interface
0.7.62: fix: fix QQ group interface
0.7.63: add: add bond_summary_sse interface
0.7.64: fix: fix macro_cons_gold_volume interface
0.7.65: fix: fix fund_em_value_estimation interface
0.7.66: fix: fix fund_em_value_estimation interface
0.7.67: fix: fix get_dce_daily interface
0.7.68: fix: fix stock_zh_index_spot interface
0.7.69: fix: fix covid_19 interface
0.7.70: fix: fix get_dce_rank_table interface
0.7.71: fix: fix stock_us_daily interface
0.7.72: fix: fix get_ine_daily interface
0.7.73: add: add macro_china_money_supply interface
0.7.74: fix: fix stock_zh_a_minute interface
0.7.75: add: add bond_cash_summary_sse interface
0.7.76: fix: fix get_rank_sum_daily interface
0.7.77: fix: fix get_inventory_data interface
0.7.78: fix: fix futures_inventory_99 interface
0.7.79: fix: fix stock_a_below_net_asset_statistics interface
0.7.80: add: add bank_rank_banker interface
0.7.81: add: add macro_china_stock_market_cap interface
0.7.82: fix: fix macro_china_stock_market_cap interface
0.7.83: fix: fix stock_news_em interface
0.7.84: fix: fix covid_19_dxy interface
0.7.85: add: add futures_spot_price_previous interface
0.7.86: add: add fund_em_hk_rank interface
0.7.87: add: add fund_em_lcx_rank interface
0.7.88: fix: fix bond_repo_zh_tick interface
0.7.89: fix: fix stock_hk_daily interface
0.7.90: fix: fix stock_em_gpzy_pledge_ratio interface
0.7.91: fix: fix stock_report_disclosure interface
0.7.92: add: add fund_em_hk_fund_hist interface
0.7.93: add: add fund_portfolio_hold_em interface
0.7.94: fix: fix futures_spot_price_previous interface
0.7.95: add: add covid_19_trace interface
0.7.96: fix: fix bond_spot_quote interface
0.7.97: fix: fix bond_spot_deal interface
0.7.98: fix: fix stock_report_fund_hold interface
0.7.99: fix: fix stock_zh_a_daily interface
0.8.1: add: add stock_report_fund_hold_detail interface
0.8.2: fix: fix option_finance_board interface
0.8.3: fix: fix stock_zh_a_daily interface
0.8.4: fix: fix option interface
0.8.5: fix: fix bond_investing_global interface
0.8.6: add: add macro_china_shrzgm interface
0.8.7: add: add stock_zh_a_tick_163_now interface
0.8.8: fix: fix add PK to CZCE
0.8.9: add: add futures delivery and spot interface
0.8.10: fix: fix fund_portfolio_hold_em interface
0.8.11: add: add futures_to_spot_dce interface
0.8.12: add: add futures_delivery_shfe interface
0.8.13: fix: fix stock_us_daily interface
0.8.14: fix: fix fund_em_open_fund_rank interface
0.8.15: fix: fix chinese_to_english interface
0.8.16: fix: fix stock_a_pe interface
0.8.17: add: add stock_financial_report_sina interface
0.8.18: fix: fix futures_spot_price_daily interface
0.8.19: add: add stock_margin_sse interface
0.8.20: add: add stock_margin_detail_sse interface
0.8.21: fix: fix stock_szse_summary interface
0.8.22: fix: fix stock_zh_a_daily interface
0.8.23: fix: fix covid_19_dxy interface
0.8.24: fix: fix fund_em_value_estimation interface
0.8.25: fix: fix stock_zh_index_daily_tx interface
0.8.26: fix: fix stock_hk_daily interface
0.8.27: fix: fix get_dce_rank_table interface
0.8.28: fix: fix stock_em_analyst_rank interface
0.8.29: add: add fund_rating interface
0.8.30: add: add fund_manager interface
0.8.31: fix: fix stock_zh_a_minute interface
0.8.32: fix: fix get_dce_rank_table interface
0.8.33: add: add stock_profit_forecast interface
0.8.34: fix: fix index_investing_global interface
0.8.35: add: add bond_zh_us_rate interface
0.8.36: add: add stock_em_fhps interface
0.8.37: add: add stock_em_yjkb interface
0.8.38: fix: fix get_czce_daily interface
0.8.39: add: add stock_board_concept_cons_ths interface
0.8.40: fix: fix stock_board_concept_cons_ths interface
0.8.41: fix: fix energy_carbon_bj interface
0.8.42: fix: fix stock_zh_a_daily interface
0.8.43: fix: fix stock_em_yjyg interface
0.8.44: fix: fix stock_em_comment interface
0.8.45: add: add stock_sse_deal_daily interface
0.8.46: fix: fix stock_board_concept_cons_ths interface
0.8.47: add: add stock_board_concept_info_ths interface
0.8.48: fix: fix fund_rating_sh fund_rating_zs fund_rating_ja interface
0.8.49: add: add stock_em_yjbb interface
0.8.50: fix: fix stock_zh_index_spot interface
0.8.51: fix: fix stock_zh_a_spot interface
0.8.52: add: add stock_em_zcfz, stock_em_lrb, stock_em_xjll interface
0.8.53: fix: fix stock_em_zcfz interface
0.8.54: fix: fix stock_register_kcb interface
0.8.55: add: add stock_ipo_declare interface
0.8.56: fix: fix index_bloomberg_billionaires interface
0.8.57: fix: fix hurun_rank interface
0.8.58: add: add hurun_rank interface
0.8.59: fix: fix get_sector_futures interface
0.8.60: fix: fix currency_hist interface
0.8.61: fix: fix stock_em_hsgt_hold_stock interface
0.8.62: fix: fix stock_zh_a_tick_163 interface
0.8.63: fix: fix futures_zh_daily_sina interface
0.8.64: fix: fix futures_inventory_em interface
0.8.65: fix: fix futures_hq_spot_df interface
0.8.66: fix: fix currency_hist interface
0.8.67: fix: fix requirements.txt interface
0.8.68: fix: fix bond_investing_global interface
0.8.69: fix: fix stock_board_concept_cons_ths interface
0.8.70: add: add stock_board_concept_index_ths interface
0.8.71: fix: fix remove obor folder
0.8.72: fix: fix stock_board_concept_index_ths interface
0.8.73: add: add stock_board_industry_index_ths interface
0.8.74: fix: fix test interface
0.8.75: fix: fix stock_board_industry_index_ths interface
0.8.76: add: add stock_notice_report interface
0.8.77: fix: fix rate_interbank interface
0.8.78: fix: fix stock_board_concept_index_ths interface
0.8.79: add: add stock_lh_yyb_most, stock_lh_yyb_capital, stock_lh_yyb_control interface
0.8.80: fix: fix stock_em_yjkb interface
0.8.81: add: add crypto_bitcoin_hold_report interface
0.8.82: fix: fix energy_carbon_hb interface
0.8.83: fix: fix get_czce_daily interface
0.8.84: fix: fix amac_fund_abs interface
0.8.85: fix: fix rename amac_person_org_list to amac_person_fund_org_list interface
0.8.86: add: add amac_person_bond_org_list interface
0.8.87: add: add stock_fund_flow_concept interface
0.8.88: add: add stock_fund_flow_industry interface
0.8.89: add: add stock_fund_flow_individual interface
0.8.90: add: add stock_fund_flow_big_deal interface
0.8.91: add: add stock_em_ggcg interface
0.8.92: fix: fix stock_zh_a_daily interface
0.8.93: fix: fix bond_spot_deal interface
0.8.94: fix: fix stock_us_daily interface
0.8.95: add: add fund_em_new_found interface
0.8.96: fix: fix get_czce_rank_table interface
0.8.97: add: add stock_wc_hot_top interface
0.8.98: add: add index_kq interface
0.8.99: fix: fix stock_individual_fund_flow_rank interface
0.9.1: fix: fix stock_profit_forecast interface
0.9.2: fix: fix get_futures_daily interface
0.9.3: fix: fix get_futures_daily interface
0.9.4: fix: fix get_shfe_daily interface
0.9.5: add: add stock_wc_hot_rank interface
0.9.6: fix: fix stock_wc_hot_rank interface
0.9.7: fix: fix stock_wc_hot_rank interface
0.9.8: fix: fix forbes_rank interface
0.9.9: fix: fix stock_a_below_net_asset_statistics interface
0.9.10: fix: fix stock_wc_hot_rank interface
0.9.11: add: add drewry_wci_index interface
0.9.12: fix: fix bond_investing_global interface
0.9.13: fix: fix currency_hist interface
0.9.14: fix: fix futures_global_commodity_hist interface
0.9.15: add: add index_kq_fashion interface
0.9.16: add: add index_eri interface
0.9.17: fix: fix futures_global_commodity_hist interface
0.9.18: fix: fix stock_em_dxsyl interface
0.9.19: add: add stock_market_activity_legu interface
0.9.20: fix: fix stock_individual_fund_flow_rank interface
0.9.21: add: add index_cflp_price interface
0.9.22: add: add index_cflp_volume interface
0.9.23: fix: fix index_cflp_volume interface
0.9.24: fix: fix stock_info_sz_name_code interface
0.9.25: add: add car_gasgoo_sale_rank interface
0.9.26: fix: fix stock_hk_daily interface
0.9.27: fix: fix stock_report_fund_hold interface
0.9.28: add: add stock_average_position_legu interface
0.9.29: add: add stock_em_qbzf interface
0.9.30: add: add stock_em_pg interface
0.9.31: fix: fix index_investing_global interface
0.9.32: fix: fix bond_investing_global interface
0.9.33: add: add marco_china_hk interface
0.9.34: fix: fix get_futures_daily interface
0.9.35: fix: fix stock_zh_a_daily interface
0.9.36: fix: fix stock_zh_a_daily hfq and qfq interface
0.9.37: fix: fix stock_wc_hot_rank interface
0.9.38: add: add stock_em_zt_pool interface
0.9.39: fix: fix stock_us_daily interface
0.9.40: fix: fix bond_cov_comparison interface
0.9.41: fix: fix stock_em_zt_pool_previous interface
0.9.42: add: add stock_em_zt_pool_strong interface
0.9.43: fix: fix stock_em_zt_pool_strong interface
0.9.44: fix: fix stock_em_zt_pool_sub_new interface
0.9.45: fix: fix stock_em_zt_pool interface
0.9.46: fix: fix spot_goods interface
0.9.47: fix: fix futures_comex_inventory interface
0.9.48: fix: fix stock_em_zcfz interface
0.9.49: fix: fix stock_hk_daily interface
0.9.50: fix: fix futures_spot_stock interface
0.9.51: fix: fix stock_hk_daily interface
0.9.52: fix: remove internal_flow_history interface
0.9.53: add: add stock_zh_a_alerts_cls interface
0.9.54: fix: fix bond_zh_us_rate interface
0.9.55: fix: fix index_vix interface
0.9.56: fix: fix macro_fx_sentiment interface
0.9.57: fix: fix stock_zh_a_alerts_cls interface
0.9.58: add: add stock_staq_net_stop interface
0.9.59: fix: fix covid_19_baidu interface
0.9.60: fix: fix currency_convert interface
0.9.61: fix: fix stock_info_sz_name_code interface
0.9.62: add: add stock_zh_a_gdhs interface
0.9.63: fix: fix stock_zh_a_gdhs interface
0.9.64: add: add futures_sina_hold_pos interface
0.9.65: fix: fix bond_zh_us_rate interface
0.9.66: fix: fix set urllib3==1.25.11
0.9.67: fix: fix stock_em_hsgt_hold_stock interface
0.9.68: fix: fix stock_zh_a_tick_tx interface
0.9.69: add: add currency_boc_sina interface
0.9.70: add: add stock_zh_a_hist interface
0.9.71: fix: fix stock_zh_a_hist interface
0.9.72: fix: fix stock_zh_a_hist interface
0.9.73: fix: fix stock_zh_a_tick_tx_js interface
0.9.74: add: add stock_changes_em interface
0.9.75: add: add stock_hk_spot_em, stock_hk_hist interface
0.9.76: add: add stock_us_spot_em, stock_us_hist interface
0.9.77: fix: fix stock_us_hist interface
0.9.78: fix: fix rename python file name interface
0.9.79: add: add crypto_bitcoin_cme interface
0.9.80: fix: fix futures_display_main_sina interface
0.9.81: add: add crypto_crix interface
0.9.82: fix: fix crypto_crix interface
0.9.83: fix: fix crypto_crix interface
0.9.84: fix: fix rename futures_hq_spot to futures_foreign_commodity_realtime interface
0.9.85: fix: fix rate_interbank interface
0.9.86: add: add fund_em_aum interface
0.9.87: fix: fix death_company interface
0.9.88: fix: fix stock_financial_analysis_indicator interface
0.9.89: fix: fix fund_manager interface
0.9.90: fix: fix stock_a_below_net_asset_statistics interface
0.9.91: fix: fix stock_em_yjbb interface
0.9.92: fix: fix stock_tfp_em interface
0.9.93: fix: fix stock_zh_a_gdhs interface
0.9.94: add: add macro_china_qyspjg, macro_china_fdi interface
0.9.95: fix: fix stock_board_concept_index_ths interface
0.9.96: fix: fix stock_info_sz_name_code interface
0.9.97: fix: fix urllib3 version at 1.25.8
0.9.98: fix: fix js_news interface
0.9.99: fix: fix news_cctv interface
1.0.1: add: add macro_usa_phs interface
1.0.2: fix: fix macro_usa_phs interface
1.0.3: add: add macro_germany interface
1.0.4: fix: fix macro_china interface
1.0.5: add: add macro_china_gyzjz interface
1.0.6: fix: fix get_receipt interface
1.0.7: fix: fix get_ine_daily interface
1.0.8: fix: fix macro_china_cpi interface
1.0.9: fix: fix stock_zh_a_gdhs interface
1.0.10: fix: fix stock_zh_a_spot_em interface
1.0.11: fix: fix stock_board_industry_name_ths interface
1.0.12: fix: fix macro_china_money_supply interface
1.0.13: fix: fix rename stock_board_concept_index_ths to stock_board_concept_hist_ths interface
1.0.14: add: add stock_board_concept_cons_em and stock_board_concept_hist_em interface
1.0.15: fix: fix stock_hk_hist interface
1.0.16: fix: fix tool_trade_date_hist_sina interface
1.0.17: fix: fix calendar.json interface
1.0.18: fix: fix reformat macro_china_national_tax_receipts, macro_china_hgjck, macro_china_stock_market_cap interface
1.0.19: fix: fix marco_china_hk interface
1.0.20: fix: fix bond_zh_hs_cov_daily interface
1.0.21: fix: fix charity_china interface
1.0.22: fix: fix stock_em_xgsglb interface
1.0.23: fix: fix stock_em_dxsyl interface
1.0.24: fix: fix stock_board_concept_hist_em interface
1.0.25: fix: fix get_receipt interface
1.0.26: add: add energy_carbon_domestic interface
1.0.27: fix: fix get_roll_yield_bar interface
1.0.28: add: add covid_19_baidu interface
1.0.29: fix: fix covid_19_baidu interface
1.0.30: fix: fix option_czce_hist interface
1.0.31: fix: fix futures_foreign_commodity_realtime interface
1.0.32: fix: fix covid_19_baidu interface
1.0.33: fix: fix bond_china_close_return interface
1.0.34: fix: fix bond_china_close_return interface
1.0.35: fix: fix bond_cov_jsl interface
1.0.36: fix: fix stock_em_hsgt_north_net_flow_in interface
1.0.37: add: add macro_swiss interface
1.0.38: add: add macro_japan interface
1.0.39: add: add macro_uk interface
1.0.40: add: add stock_szse_margin interface
1.0.41: add: add macro_australia interface
1.0.42: fix: fix index_stock_hist interface
1.0.43: fix: fix stock_margin_detail_szse interface
1.0.44: fix: fix stock_margin_detail_szse interface
1.0.45: fix: fix option_dce_daily interface and rename interface in option_commodity
1.0.46: add: add futures_pig_info interface
1.0.47: fix: fix futures_pig_info interface
1.0.48: add: add macro_canada interface
1.0.49: fix: fix stock_individual_fund_flow interface
1.0.50: fix: fix stock_em_jgdy_tj interface
1.0.51: add: add sport_olympic_hist interface
1.0.52: add: add stock_financial_hk interface
1.0.53: fix: fix tool_trade_date_hist_sina interface
1.0.54: fix: fix macro_china_gdp_yearly interface
1.0.55: fix: fix macro_china_freight_index interface
1.0.56: add: add stock_a_ttm_lyr interface
1.0.57: add: add stock_a_all_pb interface
1.0.58: add: add futures_pig_rank interface
1.0.59: fix: fix futures_zh_daily_sina interface
1.0.60: fix: fix futures_main_sina interface
1.0.61: fix: fix stock_a_all_pb interface
1.0.62: add: add futures_egg_price interface
1.0.63: fix: fix remove jyfm interface
1.0.64: fix: fix rename zh_stock_kcb_report to stock_zh_kcb_report_em interface
1.0.65: fix: fix stock_em_gpzy_pledge_ratio_detail interface
1.0.66: fix: fix macro_cons_opec_month interface
1.0.67: fix: fix futures_sgx_daily interface
1.0.68: fix: remove agoyal_stock_return interface
1.0.69: fix: remove bank_rank_banker interface
1.0.70: fix: remove watch_jinshi_quotes interface
1.0.71: fix: remove watch_argus interface
1.0.72: fix: fix amac_fund_abs interface
1.0.73: add: add bond_cash_summary_sse interface
1.0.74: fix: fix bond_zh_hs_cov_spot interface
1.0.75: fix: fix bond_futures_deliverable_coupons interface
1.0.76: fix: fix stock_financial_hk_analysis_indicator_em interface
1.0.77: fix: fix macro_china_m2_yearly interface
1.0.78: add: add reits_realtime_em, reits_info_jsl interface
1.0.79: fix: fix news_cctv interface
1.0.80: add: add stock_zh_a_hist_min_em, stock_zh_a_hist_pre_min_em interface
1.0.81: add: add stock_us_hist_min_em, stock_hk_hist_min_em interface
1.0.82: fix: fix bond_zh_cov interface
1.0.83: fix: fix macro_china interface
1.0.84: add: add bond_zh_cov_info interface
1.0.85: fix: fix stock_report_fund_hold interface
1.0.86: fix: fix stock_em_zt_pool_dtgc interface
1.0.87: fix: fix macro_china_swap_rate interface
1.0.88: fix: fix stock_zh_a_hist_min_em interface
1.0.89: fix: fix stock_hk_hist_min_em interface
1.0.90: fix: fix stock_us_hist_min_em interface
1.0.91: fix: fix stock_zh_a_hist_min_em interface
1.0.92: fix: fix stock_zh_a_hist interface
1.0.93: fix: fix stock_hk_hist_min_em interface
1.0.94: fix: fix stock_zh_a_new interface
1.0.95: fix: fix stock_zh_a_daily interface
1.0.96: add: add stock_zh_a_st_em interface
1.0.97: fix: fix futures_spot_stock interface
1.0.98: add: add stock_zh_a_new_em interface
1.0.99: fix: fix stock_wc_hot_rank interface
1.1.1: add: add index_investing_global_from_url interface
1.1.2: add: add stock_us_pink_spot_em interface
1.1.3: add: add stock_us_famous_spot_em interface
1.1.4: fix: fix stock_average_position_legu interface
1.1.5: add: add stock_rank_forecast_cninfo interface
1.1.6: fix: fix futures_zh_minute_sina interface
1.1.7: fix: fix covid_19_trace interface
1.1.8: add: add stock_industry_pe_ratio_cninfo interface
1.1.9: fix: fix stock_js_price interface
1.1.10: fix: fix stock_em_hsgt_hold_stock interface
1.1.11: fix: fix stock_fund_flow_concept interface
1.1.12: fix: fix stock_fund_flow_industry interface
1.1.13: add: add stock_dividents_cninfo interface
1.1.14: fix: fix stock_fund_flow_concept interface
1.1.15: add: add stock_new_gh_cninfo interface
1.1.16: fix: fix stock_em_jgdy_detail interface
1.1.17: fix: fix stock_em_jgdy_tj interface
1.1.18: fix: fix stock_fund_flow_concept and stock_fund_flow_industry interface
1.1.19: add: add stock_new_ipo_cninfo interface
1.1.20: fix: fix stock_a_pe interface
1.1.21 fix: fix setuptools==57.5.0 package
1.1.22 fix: fix remove demjson package
1.1.23 fix: fix update urllib3 package
1.1.24 fix: fix email address
1.1.25 add: add stock_hold_num_cninfo interface
1.1.26 fix: fix stock_fund_flow_concept interface
1.1.27 add: add stock_hold_control_cninfo interface
1.1.28 fix: move project to the AKFamily organization
1.1.29 fix: fix urllib3>=1.25.8 package
1.1.30 fix: fix stock_zh_index_hist_csindex interface
1.1.31 add: add stock_hold_management_detail_cninfo interface
1.1.32 add: add sw_index_representation_spot interface
1.1.33 fix: fix sw_index_xxx interface
1.1.34 fix: fix drewry_wci_index interface
1.1.35 fix: fix fund_etf_category_sina interface
1.1.36 fix: fix sw_index_daily_indicator interface
1.1.37 fix: fix drewry_wci_index interface
1.1.38 add: add futures_comm_info interface
1.1.39 fix: fix futures_comm_info interface
1.1.40 fix: fix remove covid_19_history interface
1.1.41 add: add stock_zh_b_sina interface
1.1.42 fix: fix stock_zh_a_minute interface
1.1.43 add: add stock_cg_guarantee_cninfo interface
1.1.44 fix: fix stock_zh_index_daily interface
1.1.45 fix: fix stock_zh_index_daily_tx interface
1.1.46 fix: fix remove watch_jinshi_fx interface
1.1.47 fix: fix stock_em_jgdy_tj and stock_em_jgdy_detail interface
1.1.48 fix: fix rename fund_em_portfolio_hold to fund_portfolio_hold_em interface
1.1.49 fix: fix stock_em_jgdy_tj and stock_em_jgdy_detail interface
1.1.50 add: add stock_cg_lawsuit_cninfo interface
1.1.51 fix: fix stock_wc_hot_rank interface
1.1.52 add: add stock_cg_equity_mortgage_cninfo interface
1.1.53 fix: fix index_cni_detail_hist_adjust interface
1.1.54 fix: fix stock_board_concept_hist_ths interface
1.1.55 fix: fix stock_sina_lhb_ggtj and stock_sina_lhb_jgzz interface
1.1.56 add: add fund_em_aum_hist interface
1.1.57 fix: fix stock_sina_lhb_ggtj and stock_sina_lhb_jgzz interface
1.1.58 add: add bond_treasure_issue_cninfo interface
1.1.59 add: add bond_local_government_issue_cninfo interface
1.1.60 add: add bond_corporate_issue_cninfo interface
1.1.61 add: add bond_cov_issue_cninfo interface
1.1.62 fix: fix bond_zh_us_rate interface
1.1.63 add: add bond_cov_stock_issue_cninfo interface
1.1.64 add: add fund_report_stock_cninfo interface
1.1.65 fix: fix stock_notice_report interface
1.1.66 add: add fund_report_industry_allocation_cninfo interface
1.1.67 fix: fix stock_zh_index_hist_csindex interface
1.1.68 fix: fix index_stock_cons_csindex interface
1.1.69 add: add fund_scale_open_sina interface
1.1.70 add: add fund_scale_close_sina interface
1.1.71 add: add fund_scale_structured_sina interface
1.1.72 add: add fund_report_asset_allocation_cninfo interface
1.1.73 add: add stock_zh_index_value_csindex interface
1.1.74 fix: fix fund_em_etf_fund_info interface
1.1.75 add: add index_value_hist_funddb interface
1.1.76 fix: fix amac_fund_info interface
1.1.77 fix: fix stock_zh_a_tick_163_now interface
1.1.78 add: add stock_hsgt_individual_em interface
1.1.79 fix: fix stock_em_jgdy_tj interface
1.1.80 add: add support for Python 3.10
1.1.81 add: add stock_hsgt_individual_detail_em interface
1.1.82 fix: fix stock_tfp_em interface
1. rename stock_em_tfp to stock_tfp_em
2. reformat output data type
1.1.83 add: add stock_ipo_benefit_ths interface
1.1.84 fix: fix stock_board_industry_index_ths interface
1. add start_date and end_date parameters
1.1.85 fix: fix stock_em_hsgt_stock_statistics interface
1.1.86 fix: fix stock_em_hsgt_stock_statistics interface
1.1.87 fix: fix stock_em_hsgt_hist interface
1.1.88 fix: fix stock_sector_spot interface
1.1.89 fix: fix stock_sector_detail interface
1.1.90 fix: fix stock_board_concept_name_ths interface
1.1.91 fix: fix stock_hsgt_individual_detail_em interface
1.1.92 add: add stock_rank_cxg_ths interface
1.1.93 add: add stock_rank_cxd_ths interface
1.1.94 fix: fix fund_portfolio_hold_em interface
1.1.95 fix: fix stock_board_concept_hist_ths interface
1.1.96 add: add bond_zh_hs_cov_min interface
1.1.97 add: add stock_rank_lxsz_ths interface
1.1.98 add: add stock_rank_lxxd_ths interface
1.1.99 add: add stock_rank_cxfl_ths interface
1.2.1 add: add stock_rank_cxsl_ths interface
1.2.2 fix: fix zh_subscribe_exchange_symbol interface
1.2.3 add: add stock_rank_xstp_ths interface
1.2.4 fix: fix fund_portfolio_hold_em interface
1.2.5 fix: fix index_stock_hist interface
1.2.6 add: add stock_rank_xxtp_ths interface
1.2.7 add: add stock_rank_ljqd_ths interface
1.2.8 add: add stock_rank_ljqs_ths interface
1.2.9 fix: fix stock_zh_a_gdhs interface
1.2.10 fix: fix bond_zh_hs_daily interface
1.2.11 add: add stock_zh_a_gdhs_detail_em interface
1.2.12 fix: fix stock_zh_a_gdhs interface
1.2.13 add: add stock_rank_xzjp_ths interface
1.2.14 add: add sw_index_second_spot interface
1.2.15 fix: fix stock_board_industry_name_ths interface
1.2.16 add: add stock_board_cons_ths interface
1.2.17 fix: fix amac_fund_info interface
1.2.18 fix: fix amac interface
1.2.19 fix: fix amac cons.py interface
1.2.20 fix: fix stock_zh_a_spot_em interface
1.2.21 fix: fix stock_zh_a_hist interface
1.2.22 fix: fix amac_fund_info interface
1.2.23 add: add video_tv interface
1.2.24 fix: fix car_gasgoo_sale_rank interface
1.2.25 fix: fix amac_manager_classify_info interface
1.2.26 fix: fix amac interface
1.2.27 add: add online_value_artist interface
1.2.28 add: add club_rank_game interface
1.2.29 add: add player_rank_game interface
1.2.30 add: add business_value_artist interface
1.2.31 fix: fix stock_em_zt_pool interface
1.2.32 add: add video_variety_show interface
1.2.33 add: add fund_fh_em interface
"""
__version__ = "1.2.33"
__author__ = "Albert King"
import sys
if sys.version_info < (3, 7):
print(f"AKShare {__version__} requires Python 3.7+ and 64 bit OS")
sys.exit(1)
del sys
"""
天天基金网-基金数据-分红送配
"""
from akshare.fund.fund_fhsp_em import fund_cf_em, fund_fh_rank_em, fund_fh_em
"""
中国电竞价值排行榜
"""
from akshare.other.other_game import club_rank_game, player_rank_game
"""
艺恩-艺人
"""
from akshare.movie.artist_yien import online_value_artist, business_value_artist
"""
艺恩-视频放映
"""
from akshare.movie.video_yien import video_variety_show, video_tv
"""
同花顺-数据中心-技术选股
"""
from akshare.stock_feature.stock_technology_ths import (
stock_rank_cxg_ths,
stock_rank_cxd_ths,
stock_rank_lxsz_ths,
stock_rank_lxxd_ths,
stock_rank_cxfl_ths,
stock_rank_cxsl_ths,
stock_rank_xstp_ths,
stock_rank_xxtp_ths,
stock_rank_ljqd_ths,
stock_rank_ljqs_ths,
stock_rank_xzjp_ths,
)
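# A minimal usage sketch for the 同花顺 technical-screening interfaces imported
# above. The parameter name and example value are assumptions taken from the
# public documentation of this release line and may differ in other versions;
# kept as comments so that importing the package stays side-effect free.
#
#     import akshare as ak
#     # 创月新高: stocks making a new monthly high (hypothetical example value)
#     df = ak.stock_rank_cxg_ths(symbol="创月新高")
#     print(df.head())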
"""
沪深港通持股
"""
from akshare.stock_feature.stock_em_hsgt import (
stock_hsgt_individual_em,
stock_hsgt_individual_detail_em,
)
"""
指数估值
"""
from akshare.index.zh_stock_index_csindex import (
index_value_hist_funddb,
index_value_name_funddb,
)
"""
基金规模
"""
from akshare.fund.fund_scale_sina import (
fund_scale_open_sina,
fund_scale_close_sina,
fund_scale_structured_sina,
)
"""
巨潮资讯-数据中心-专题统计-基金报表
"""
from akshare.fund.fund_report_cninfo import (
fund_report_stock_cninfo,
fund_report_industry_allocation_cninfo,
fund_report_asset_allocation_cninfo,
)
"""
巨潮资讯-数据中心-专题统计-债券报表-债券发行
"""
from akshare.bond.bond_issue_cninfo import (
bond_treasure_issue_cninfo,
bond_local_government_issue_cninfo,
bond_corporate_issue_cninfo,
bond_cov_issue_cninfo,
bond_cov_stock_issue_cninfo,
)
"""
巨潮资讯-数据中心-专题统计-公司治理-股权质押
"""
from akshare.stock.stock_cg_equity_mortgage import stock_cg_equity_mortgage_cninfo
"""
巨潮资讯-数据中心-专题统计-公司治理-公司诉讼
"""
from akshare.stock.stock_cg_lawsuit import stock_cg_lawsuit_cninfo
"""
巨潮资讯-数据中心-专题统计-公司治理-对外担保
"""
from akshare.stock.stock_cg_guarantee import stock_cg_guarantee_cninfo
"""
B 股
"""
from akshare.stock.stock_zh_b_sina import (
stock_zh_b_spot,
stock_zh_b_daily,
stock_zh_b_minute,
)
"""
期货手续费
"""
from akshare.futures.futures_comm_qihuo import futures_comm_info
"""
实际控制人持股变动
"""
from akshare.stock.stock_hold_control_cninfo import (
stock_hold_control_cninfo,
stock_hold_management_detail_cninfo,
)
"""
股东人数及持股集中度
"""
from akshare.stock.stock_hold_num_cninfo import stock_hold_num_cninfo
"""
新股过会
"""
from akshare.stock.stock_new_cninfo import stock_new_gh_cninfo, stock_new_ipo_cninfo
"""
个股分红
"""
from akshare.stock.stock_dividents_cninfo import stock_dividents_cninfo
"""
行业市盈率
"""
from akshare.stock.stock_industry_pe_cninfo import stock_industry_pe_ratio_cninfo
"""
投资评级
"""
from akshare.stock.stock_rank_forecast import stock_rank_forecast_cninfo
"""
美股-知名美股
"""
from akshare.stock.stock_us_famous import stock_us_famous_spot_em
"""
美股-粉单市场
"""
from akshare.stock.stock_us_pink import stock_us_pink_spot_em
"""
REITs
"""
from akshare.reits.reits_basic import reits_info_jsl, reits_realtime_em
"""
鸡蛋价格数据
"""
from akshare.futures_derivative.futures_egg import (
futures_egg_price_yearly,
futures_egg_price_area,
futures_egg_price,
)
"""
全部 A 股-等权重市盈率、中位数市盈率
全部 A 股-等权重、中位数市净率
"""
from akshare.stock_feature.stock_ttm_lyr import stock_a_ttm_lyr
from akshare.stock_feature.stock_all_pb import stock_a_all_pb
"""
奥运奖牌
"""
from akshare.sport.sport_olympic import sport_olympic_hist
"""
宏观-加拿大
"""
from akshare.economic.macro_canada import (
macro_canada_cpi_monthly,
macro_canada_core_cpi_monthly,
macro_canada_bank_rate,
macro_canada_core_cpi_yearly,
macro_canada_cpi_yearly,
macro_canada_gdp_monthly,
macro_canada_new_house_rate,
macro_canada_retail_rate_monthly,
macro_canada_trade,
macro_canada_unemployment_rate,
)
"""
猪肉价格信息
"""
from akshare.futures_derivative.futures_pig import futures_pig_info, futures_pig_rank
"""
宏观-澳大利亚
"""
from akshare.economic.macro_australia import (
macro_australia_bank_rate,
macro_australia_unemployment_rate,
macro_australia_trade,
macro_australia_cpi_quarterly,
macro_australia_cpi_yearly,
macro_australia_ppi_quarterly,
macro_australia_retail_rate_monthly,
)
"""
融资融券-深圳
"""
from akshare.stock_feature.stock_szse_margin import (
stock_margin_underlying_info_szse,
stock_margin_detail_szse,
stock_margin_szse,
)
"""
英国-宏观
"""
from akshare.economic.macro_uk import (
macro_uk_gdp_yearly,
macro_uk_gdp_quarterly,
macro_uk_retail_yearly,
macro_uk_rightmove_monthly,
macro_uk_rightmove_yearly,
macro_uk_unemployment_rate,
macro_uk_halifax_monthly,
macro_uk_bank_rate,
macro_uk_core_cpi_monthly,
macro_uk_core_cpi_yearly,
macro_uk_cpi_monthly,
macro_uk_cpi_yearly,
macro_uk_halifax_yearly,
macro_uk_retail_monthly,
macro_uk_trade,
)
"""
日本-宏观
"""
from akshare.economic.macro_japan import (
macro_japan_bank_rate,
macro_japan_core_cpi_yearly,
macro_japan_cpi_yearly,
macro_japan_head_indicator,
macro_japan_unemployment_rate,
)
"""
瑞士-宏观
"""
from akshare.economic.macro_swiss import (
macro_swiss_trade,
macro_swiss_svme,
macro_swiss_cpi_yearly,
macro_swiss_gbd_yearly,
macro_swiss_gbd_bank_rate,
macro_swiss_gdp_quarterly,
)
"""
东方财富-概念板块
"""
from akshare.stock.stock_board_concept_em import (
stock_board_concept_cons_em,
stock_board_concept_hist_em,
stock_board_concept_name_em,
)
"""
德国-经济指标
"""
from akshare.economic.macro_germany import (
macro_germany_gdp,
macro_germany_ifo,
macro_germany_cpi_monthly,
macro_germany_retail_sale_monthly,
macro_germany_trade_adjusted,
macro_germany_retail_sale_yearly,
macro_germany_cpi_yearly,
macro_germany_zew,
)
"""
基金规模和规模趋势
"""
from akshare.fund.fund_em_aum import fund_em_aum, fund_em_aum_trend, fund_em_aum_hist
"""
CRIX 数据
"""
from akshare.crypto.crypto_crix import crypto_crix
"""
CME 比特币成交量
"""
from akshare.crypto.crypto_bitcoin_cme import crypto_bitcoin_cme
"""
盘口异动
"""
from akshare.stock_feature.stock_pankou import stock_changes_em
"""
A 股东方财富
"""
from akshare.stock_feature.stock_em_hist import (
stock_zh_a_spot_em,
stock_zh_a_hist,
stock_hk_spot_em,
stock_hk_hist,
stock_us_spot_em,
stock_us_hist,
stock_zh_a_hist_min_em,
stock_zh_a_hist_pre_min_em,
stock_hk_hist_min_em,
stock_us_hist_min_em,
stock_zh_b_spot_em,
)
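# A minimal usage sketch for the Eastmoney quote/history interfaces imported
# above. The keyword arguments shown (symbol, period, start_date, end_date,
# adjust) reflect the documented signature of stock_zh_a_hist around this
# release line and are assumptions for other versions; kept as comments so
# that importing the package performs no network calls.
#
#     import akshare as ak
#     spot_df = ak.stock_zh_a_spot_em()                      # full A-share snapshot
#     hist_df = ak.stock_zh_a_hist(symbol="000001",          # example code: Ping An Bank
#                                  period="daily",
#                                  start_date="20200101",
#                                  end_date="20201231",
#                                  adjust="qfq")             # forward-adjusted prices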
"""
中行人民币牌价历史数据查询
"""
from akshare.currency.currency_sina_china_bank import currency_boc_sina
"""
期货持仓
"""
from akshare.futures_derivative.futures_sina_cot import futures_sina_hold_pos
"""
股东户数
"""
from akshare.stock_feature.stock_gdhs import stock_zh_a_gdhs, stock_zh_a_gdhs_detail_em
"""
两网及退市
"""
from akshare.stock.stock_stop import stock_staq_net_stop
"""
每日快讯数据
"""
from akshare.stock_feature.stock_cls_alerts import stock_zh_a_alerts_cls
"""
涨停板行情
"""
from akshare.stock_feature.stock_em_ztb import (
stock_em_zt_pool,
stock_em_zt_pool_previous,
stock_em_zt_pool_dtgc,
stock_em_zt_pool_zbgc,
stock_em_zt_pool_strong,
stock_em_zt_pool_sub_new,
)
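# A minimal usage sketch for the limit-up pool interfaces imported above. The
# "date" parameter is an assumption based on the documentation of this release
# line; kept as comments so the module import stays inert.
#
#     import akshare as ak
#     zt_df = ak.stock_em_zt_pool(date="20210525")   # limit-up pool for one trading day
#     print(zt_df.head())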
"""
中国-香港-宏观
"""
from akshare.economic.macro_china_hk import (
marco_china_hk_cpi,
marco_china_hk_cpi_ratio,
marco_china_hk_trade_diff_ratio,
marco_china_hk_gbp_ratio,
marco_china_hk_building_amount,
marco_china_hk_building_volume,
marco_china_hk_gbp,
marco_china_hk_ppi,
marco_china_hk_rate_of_unemployment,
)
"""
增发和配股
"""
from akshare.stock_feature.stock_zf_pg import stock_em_qbzf, stock_em_pg
"""
平均持仓
"""
from akshare.stock_feature.stock_legu_average_position import (
stock_average_position_legu,
)
"""
汽车销量
"""
from akshare.other.other_car import car_gasgoo_sale_rank, car_cpca_energy_sale
"""
中国公路物流运价、运量指数
"""
from akshare.index.index_cflp import index_cflp_price, index_cflp_volume
"""
赚钱效应分析
"""
from akshare.stock_feature.stock_legu_market import stock_market_activity_legu
"""
浙江省排污权交易指数
"""
from akshare.index.index_eri import index_eri
"""
Drewry 集装箱指数
"""
from akshare.index.drewry_index import drewry_wci_index
"""
柯桥指数
"""
from akshare.index.index_kq_fz import index_kq_fz
from akshare.index.index_kq_ss import index_kq_fashion
"""
问财-热门股票
"""
from akshare.stock_feature.stock_wencai import stock_wc_hot_rank
"""
新发基金
"""
from akshare.fund.fund_em_init import fund_em_new_found
"""
高管持股
"""
from akshare.stock_feature.stock_em_gdzjc import stock_em_ggcg
"""
同花顺-数据中心-资金流向-概念资金流
"""
from akshare.stock_feature.stock_fund_flow import (
stock_fund_flow_concept,
stock_fund_flow_industry,
stock_fund_flow_big_deal,
stock_fund_flow_individual,
)
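# A minimal usage sketch for the 同花顺 fund-flow interfaces imported above. The
# "symbol" argument is an assumption based on the documentation of this release
# line ("即时" = real-time ranking); kept as comments so the import stays inert.
#
#     import akshare as ak
#     flow_df = ak.stock_fund_flow_individual(symbol="即时")
#     print(flow_df.head())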
"""
比特币持仓
"""
from akshare.crypto.crypto_hold import crypto_bitcoin_hold_report
"""
证券交易营业部排行
"""
from akshare.stock_feature.stock_lh_yybpm import (
stock_lh_yyb_capital,
stock_lh_yyb_most,
stock_lh_yyb_control,
)
"""
沪深 A 股公告
"""
from akshare.stock_fundamental.stock_notice import stock_notice_report
"""
首发企业申报
"""
from akshare.stock_fundamental.stock_ipo_declare import stock_ipo_declare
"""
三大报表
"""
from akshare.stock_feature.stock_em_report import (
stock_em_zcfz,
stock_em_lrb,
stock_em_xjll,
)
"""
Earnings reports
"""
from akshare.stock_feature.stock_em_yjbb import stock_em_yjbb
"""
Tonghuashun - industry boards
"""
from akshare.stock_feature.stock_board_industry_ths import (
stock_board_industry_cons_ths,
stock_board_industry_name_ths,
stock_board_industry_info_ths,
stock_board_industry_index_ths,
stock_ipo_benefit_ths,
)
"""
Tonghuashun - concept boards
"""
from akshare.stock_feature.stock_board_concept_ths import (
stock_board_concept_cons_ths,
stock_board_concept_name_ths,
stock_board_concept_info_ths,
stock_board_concept_hist_ths,
stock_board_cons_ths,
)
"""
Dividends and bonus share distributions
"""
from akshare.stock_feature.stock_em_fhps import stock_em_fhps
"""
China and US treasury bond yields
"""
from akshare.bond.bond_em import bond_zh_us_rate
"""
Earnings forecasts
"""
from akshare.stock_fundamental.stock_profit_forecast import stock_profit_forecast
"""
Fund managers
"""
from akshare.fund.fund_manager import fund_manager
"""
Fund ratings
"""
from akshare.fund.fund_rating import (
fund_rating_sh,
fund_rating_zs,
fund_rating_ja,
fund_rating_all,
)
"""
Margin trading and securities lending data
"""
from akshare.stock_feature.stock_sse_margin import (
stock_margin_detail_sse,
stock_margin_sse,
)
"""
Futures delivery and exchange of futures for physicals
"""
from akshare.futures.futures_to_spot import (
futures_to_spot_czce,
futures_to_spot_shfe,
futures_to_spot_dce,
futures_delivery_dce,
futures_delivery_shfe,
futures_delivery_czce,
futures_delivery_match_dce,
futures_delivery_match_czce,
)
"""
Fund portfolio holdings
"""
from akshare.fund.fund_em_portfolio import fund_portfolio_hold_em
"""
Bond market overview
"""
from akshare.bond.bond_summary import bond_deal_summary_sse, bond_cash_summary_sse
"""
News - individual stock news
"""
from akshare.news.news_stock import stock_news_em
"""
Stock data - persons acting in concert
"""
from akshare.stock_feature.stock_em_yzxdr import stock_em_yzxdr
"""
Block trades
"""
from akshare.stock.stock_dzjy import (
stock_dzjy_sctj,
stock_dzjy_mrmx,
stock_dzjy_mrtj,
stock_dzjy_hygtj,
stock_dzjy_yybph,
stock_dzjy_hyyybtj,
)
"""
CNI (Guozheng) indexes
"""
from akshare.index.index_cni import (
index_cni_hist,
index_cni_all,
index_cni_detail,
index_cni_detail_hist,
index_cni_detail_hist_adjust,
)
"""
Jin10 data - news
"""
from akshare.ws.js_ws_news import js_news
"""
Eastmoney - options
"""
from akshare.option.option_em import option_current_em
"""
STAR Market reports
"""
from akshare.stock.stock_zh_kcb_report import stock_zh_kcb_report_em
"""
Futures contract details
"""
from akshare.futures.futures_contract_detail import futures_contract_detail
"""
Hurun rankings
"""
from akshare.fortune.fortune_hurun import hurun_rank
"""
New Fortune rich list
"""
from akshare.fortune.fortune_xincaifu_500 import xincaifu_rank
"""
Forbes China lists
"""
from akshare.fortune.fortune_forbes_500 import forbes_rank
"""
Repo fixing rates
"""
from akshare.rate.repo_rate import repo_rate_hist
"""
Public fund rankings
"""
from akshare.fund.fund_em_rank import (
fund_em_exchange_rank,
fund_em_money_rank,
fund_em_open_fund_rank,
fund_em_hk_rank,
fund_em_lcx_rank,
)
"""
Investing.com - cryptocurrencies
"""
from akshare.crypto.crypto_hist_investing import crypto_hist, crypto_name_map
"""
Movie box office
"""
from akshare.movie.movie_yien import (
movie_boxoffice_cinema_daily,
movie_boxoffice_cinema_weekly,
movie_boxoffice_weekly,
movie_boxoffice_daily,
movie_boxoffice_monthly,
movie_boxoffice_realtime,
movie_boxoffice_yearly,
movie_boxoffice_yearly_first_week,
)
"""
CCTV Xinwen Lianbo transcripts
"""
from akshare.news.news_cctv import news_cctv
"""
Bond closing yield curve historical data
"""
from akshare.bond.bond_china_money import (
bond_china_close_return,
bond_china_close_return_map,
)
"""
COMEX gold and silver inventories
"""
from akshare.futures.futures_comex import futures_comex_inventory
"""
Treasury futures deliverable bond indicators
"""
from akshare.bond.bond_futures import bond_futures_deliverable_coupons
"""
A shares - special categories
"""
from akshare.stock.stock_zh_a_special import (
stock_zh_a_new,
stock_zh_a_st_em,
stock_zh_a_new_em,
stock_zh_a_stop_em,
)
"""
Eastmoney - registration-based IPO review
"""
from akshare.stock_fundamental.stock_register import (
stock_register_kcb,
stock_register_cyb,
stock_register_db,
)
"""
Sina Finance - dragon-tiger list
"""
from akshare.stock_feature.stock_sina_lhb import (
stock_sina_lhb_detail_daily,
stock_sina_lhb_ggtj,
stock_sina_lhb_jgmx,
stock_sina_lhb_jgzz,
stock_sina_lhb_yytj,
)
"""
CSI indexes
"""
from akshare.index.zh_stock_index_csindex import (
stock_zh_index_hist_csindex,
stock_zh_index_value_csindex,
)
"""
Fund stock holdings data
"""
from akshare.stock.stock_fund_hold import (
stock_report_fund_hold,
stock_report_fund_hold_detail,
)
"""
Futures minute data
"""
from akshare.futures.futures_zh_sina import (
futures_zh_minute_sina,
futures_zh_daily_sina,
)
"""
Scheduled disclosure dates of financial reports
"""
from akshare.stock_feature.stock_cninfo_yjyg import stock_report_disclosure
"""
Fund quotes
"""
from akshare.fund.fund_etf import fund_etf_hist_sina, fund_etf_category_sina
"""
Trading calendar
"""
from akshare.tool.trade_date_hist import tool_trade_date_hist_sina
"""
commodity option
"""
from akshare.option.option_commodity_sina import (
option_sina_commodity_contract_list,
option_sina_commodity_dict,
option_sina_commodity_hist,
)
"""
A-share PE and PB
"""
from akshare.stock_feature.stock_a_pb import stock_a_pb
from akshare.stock_feature.stock_a_pe import stock_a_pe
from akshare.stock_feature.stock_a_indicator import (
stock_a_lg_indicator,
stock_hk_eniu_indicator,
)
from akshare.stock_feature.stock_a_high_low import stock_a_high_low_statistics
from akshare.stock_feature.stock_a_below_net_asset_statistics import (
stock_a_below_net_asset_statistics,
)
"""
Bloomberg Billionaires Index
"""
from akshare.fortune.fortune_bloomberg import index_bloomberg_billionaires
"""
stock - brokerage monthly performance reports
"""
from akshare.stock_feature.stock_em_qsjy import stock_em_qsjy
"""
futures-warehouse-receipt
"""
from akshare.futures.futures_warehouse_receipt import (
futures_czce_warehouse_receipt,
futures_dce_warehouse_receipt,
futures_shfe_warehouse_receipt,
)
"""
stock-js
"""
from akshare.stock.stock_js_us import stock_js_price
"""
stock-summary
"""
from akshare.stock.stock_summary import (
stock_sse_summary,
stock_szse_summary,
stock_sse_deal_daily,
)
"""
Stocks - institutional recommendation pool
"""
from akshare.stock_fundamental.stock_recommend import (
stock_institute_recommend,
stock_institute_recommend_detail,
)
"""
Stocks - institutional holdings
"""
from akshare.stock_fundamental.stock_hold import (
stock_institute_hold_detail,
stock_institute_hold,
)
"""
stock-info
"""
from akshare.stock.stock_info import (
stock_info_sh_delist,
stock_info_sz_delist,
stock_info_a_code_name,
stock_info_sh_name_code,
stock_info_sz_name_code,
stock_info_sz_change_name,
stock_info_change_name,
)
"""
stock-sector
"""
from akshare.stock.stock_industry import stock_sector_spot, stock_sector_detail
"""
stock-fundamental
"""
from akshare.stock_fundamental.stock_finance import (
stock_financial_abstract,
stock_financial_report_sina,
stock_financial_analysis_indicator,
stock_add_stock,
stock_ipo_info,
stock_history_dividend_detail,
stock_history_dividend,
stock_circulate_stock_holder,
stock_restricted_shares,
stock_fund_stock_holder,
stock_main_stock_holder,
)
"""
stock-HK-fundamental
"""
from akshare.stock_fundamental.stock_finance_hk import (
stock_financial_hk_analysis_indicator_em,
stock_financial_hk_report_em,
)
"""
stock_fund
"""
from akshare.stock.stock_fund import (
stock_individual_fund_flow,
stock_market_fund_flow,
stock_sector_fund_flow_rank,
stock_individual_fund_flow_rank,
)
"""
air-quality
"""
from akshare.air.air_zhenqi import (
air_quality_hist,
air_quality_rank,
air_quality_watch_point,
air_city_list,
)
"""
hf
"""
from akshare.hf.hf_sp500 import hf_sp_500
"""
stock_em_yjyg
"""
from akshare.stock_feature.stock_em_yjyg import (
stock_em_yjyg,
stock_em_yysj,
stock_em_yjkb,
)
"""
stock
"""
from akshare.stock_feature.stock_em_dxsyl import stock_em_dxsyl, stock_em_xgsglb
"""
article
"""
from akshare.article.fred_md import fred_md, fred_qd
"""
covid_19 CSSE
"""
from akshare.event.covid import (
covid_19_csse_daily,
covid_19_csse_global_confirmed,
covid_19_csse_global_death,
covid_19_csse_global_recovered,
covid_19_csse_us_death,
covid_19_csse_us_confirmed,
)
"""
futures_cfmmc
"""
from akshare.futures.futures_cfmmc import futures_index_cscidx_map, futures_index_cscidx
"""
futures_em_spot_stock
"""
from akshare.futures.futures_em_spot_stock import futures_spot_stock
"""
energy_oil
"""
from akshare.energy.energy_oil import energy_oil_detail, energy_oil_hist
"""
index-vix
"""
from akshare.economic.macro_other import index_vix
"""
futures-foreign
"""
from akshare.futures.futures_foreign import futures_foreign_detail, futures_foreign_hist
"""
stock-em-tfp
"""
from akshare.stock_feature.stock_em_tfp import stock_tfp_em
"""
stock-em-hsgt
"""
from akshare.stock_feature.stock_em_hsgt import (
stock_em_hsgt_north_acc_flow_in,
stock_em_hsgt_north_cash,
stock_em_hsgt_north_net_flow_in,
stock_em_hsgt_south_acc_flow_in,
stock_em_hsgt_south_cash,
stock_em_hsgt_south_net_flow_in,
stock_em_hsgt_hold_stock,
stock_em_hsgt_hist,
stock_em_hsgt_institution_statistics,
stock_em_hsgt_stock_statistics,
stock_em_hsgt_board_rank,
)
"""
stock-em-comment
"""
from akshare.stock_feature.stock_em_comment import stock_em_comment
"""
stock-em-analyst
"""
from akshare.stock_feature.stock_em_analyst import (
stock_em_analyst_detail,
stock_em_analyst_rank,
)
"""
tool-github
"""
from akshare.tool.tool_github import tool_github_star_list, tool_github_email_address
"""
sgx futures data
"""
from akshare.futures.futures_sgx_daily import futures_sgx_daily
"""
currency interface
"""
from akshare.currency.currency import (
currency_convert,
currency_currencies,
currency_history,
currency_latest,
currency_time_series,
)
"""
Knowledge graph
"""
from akshare.nlp.nlp_interface import nlp_ownthink, nlp_answer
"""
Weibo sentiment reports
"""
from akshare.stock.stock_weibo_nlp import stock_js_weibo_nlp_time, stock_js_weibo_report
"""
Financial options - Sina
"""
from akshare.option.option_finance_sina import (
option_sina_cffex_hs300_list,
option_sina_cffex_hs300_spot,
option_sina_cffex_hs300_daily,
option_sina_sse_list,
option_sina_sse_expire_day,
option_sina_sse_codes,
option_sina_sse_spot_price,
option_sina_sse_underlying_spot_price,
option_sina_sse_greeks,
option_sina_sse_minute,
option_sina_sse_daily,
option_sina_finance_minute,
)
"""
China - charity
"""
from akshare.charity.charity_china import (
charity_china_organization,
charity_china_plan,
charity_china_platform,
charity_china_progress,
charity_china_report,
charity_china_trust,
)
"""
China - franchise data
"""
from akshare.event.franchise import franchise_china
"""
Bonds - SSE and SZSE bonds
"""
from akshare.bond.bond_zh_sina import bond_zh_hs_daily, bond_zh_hs_spot
from akshare.bond.bond_zh_cov_sina import (
bond_zh_hs_cov_daily,
bond_zh_hs_cov_spot,
bond_cov_comparison,
bond_zh_cov,
bond_zh_cov_info,
bond_zh_hs_cov_min,
)
from akshare.bond.bond_convert import bond_cov_jsl
"""
for pro api
"""
from akshare.pro.data_pro import pro_api
"""
for pro api token set
"""
from akshare.utils.token_process import set_token
"""
Bond pledged repo transaction details
"""
from akshare.bond.china_repo import bond_repo_zh_tick
"""
COVID-19
"""
from akshare.event.covid import (
covid_19_trip,
covid_19_trace,
)
"""
Fund data interfaces
"""
from akshare.fund.fund_em import (
fund_em_open_fund_daily,
fund_em_open_fund_info,
fund_em_etf_fund_daily,
fund_em_etf_fund_info,
fund_em_financial_fund_daily,
fund_em_financial_fund_info,
fund_em_fund_name,
fund_em_graded_fund_daily,
fund_em_graded_fund_info,
fund_em_money_fund_daily,
fund_em_money_fund_info,
fund_em_value_estimation,
fund_em_hk_fund_hist,
)
"""
Baidu migration map interface
"""
from akshare.event.covid import (
migration_area_baidu,
migration_scale_baidu,
)
"""
New - event interfaces for the novel coronavirus (COVID-19)
"""
from akshare.event.covid import (
covid_19_163,
covid_19_dxy,
covid_19_baidu,
covid_19_hist_city,
covid_19_hist_province,
)
"""
Investing.com - FX - currency pair historical data
"""
from akshare.fx.currency_investing import (
currency_hist,
currency_name_code,
currency_pair_map,
)
"""
Commodity options - Zhengzhou Commodity Exchange - historical data
"""
from akshare.option.option_czce import option_czce_hist
"""
宏观-经济数据-银行间拆借利率
"""
from akshare.interest_rate.interbank_rate_em import rate_interbank
"""
东方财富网-经济数据-银行间拆借利率
"""
from akshare.interest_rate.interbank_rate_em import rate_interbank
"""
Jin10 data center - FX sentiment
"""
from akshare.economic.macro_other import macro_fx_sentiment
"""
Jin10 data center - economic indicators - euro area
"""
from akshare.economic.macro_euro import (
macro_euro_gdp_yoy,
macro_euro_cpi_mom,
macro_euro_cpi_yoy,
macro_euro_current_account_mom,
macro_euro_employment_change_qoq,
macro_euro_industrial_production_mom,
macro_euro_manufacturing_pmi,
macro_euro_ppi_mom,
macro_euro_retail_sales_mom,
macro_euro_sentix_investor_confidence,
macro_euro_services_pmi,
macro_euro_trade_balance,
macro_euro_unemployment_rate_mom,
macro_euro_zew_economic_sentiment,
macro_euro_lme_holding,
macro_euro_lme_stock,
)
"""
Jin10 data center - economic indicators - major central bank interest rates
"""
from akshare.economic.macro_bank import (
macro_bank_australia_interest_rate,
macro_bank_brazil_interest_rate,
macro_bank_china_interest_rate,
macro_bank_english_interest_rate,
macro_bank_euro_interest_rate,
macro_bank_india_interest_rate,
macro_bank_japan_interest_rate,
macro_bank_newzealand_interest_rate,
macro_bank_russia_interest_rate,
macro_bank_switzerland_interest_rate,
macro_bank_usa_interest_rate,
)
"""
Yiwu small commodity index
"""
from akshare.index.index_yw import index_yw
"""
Stock indexes - index constituents
"""
from akshare.index.index_cons import (
index_stock_info,
index_stock_cons,
index_stock_hist,
index_stock_cons_sina,
index_stock_cons_csindex,
stock_a_code_to_symbol,
)
"""
Eastmoney - stock account statistics
"""
from akshare.stock_feature.stock_em_account import stock_em_account
"""
Futures rules
"""
from akshare.futures.futures_rule import futures_rule
"""
Eastmoney - goodwill special topic
"""
from akshare.stock_feature.stock_em_sy import (
stock_em_sy_profile,
stock_em_sy_yq_list,
stock_em_sy_jz_list,
stock_em_sy_list,
stock_em_sy_hy_list,
)
"""
Eastmoney - stock pledges
"""
from akshare.stock_feature.stock_em_gpzy import (
stock_em_gpzy_pledge_ratio,
stock_em_gpzy_profile,
stock_em_gpzy_distribute_statistics_bank,
stock_em_gpzy_distribute_statistics_company,
stock_em_gpzy_industry_data,
stock_em_gpzy_pledge_ratio_detail,
)
"""
Eastmoney - institutional research visits
"""
from akshare.stock_feature.stock_em_jgdy import stock_em_jgdy_tj, stock_em_jgdy_detail
"""
ITjuzi
"""
from akshare.fortune.fortune_it_juzi import (
death_company,
maxima_company,
nicorn_company,
)
"""
Sina dominant continuous futures contract interface
"""
from akshare.futures_derivative.sina_futures_index import (
futures_main_sina,
futures_display_main_sina,
)
"""
China macro leverage ratio data
"""
from akshare.economic.marco_cnbs import macro_cnbs
"""
Commodities - spot price indexes
"""
from akshare.index.index_spot import spot_goods
"""
Cost of living in major world cities
"""
from akshare.cost.cost_living import cost_living
"""
Energy - carbon emission allowances
"""
from akshare.energy.energy_carbon import (
energy_carbon_domestic,
energy_carbon_bj,
energy_carbon_eu,
energy_carbon_gz,
energy_carbon_hb,
energy_carbon_sz,
)
"""
Asset Management Association of China (AMAC) - public disclosures
"""
from akshare.fund.fund_amac import (
amac_manager_info,
amac_member_info,
amac_member_sub_info,
amac_aoin_info,
amac_fund_account_info,
amac_fund_info,
amac_fund_sub_info,
amac_futures_info,
amac_manager_cancelled_info,
amac_securities_info,
amac_fund_abs,
amac_manager_classify_info,
amac_person_fund_org_list,
amac_person_bond_org_list,
)
"""
Fortune Global 500 ranking interface
"""
from akshare.fortune.fortune_500 import fortune_rank, fortune_rank_eng
"""
Shenwan level-1 industry indexes
"""
from akshare.index.index_sw import (
sw_index_representation_spot,
sw_index_spot,
sw_index_second_spot,
sw_index_cons,
sw_index_daily,
sw_index_daily_indicator,
)
"""
Google index
"""
from akshare.index.index_google import google_index
"""
Baidu index
"""
from akshare.index.index_baidu import (
baidu_search_index,
baidu_info_index,
baidu_media_index,
)
"""
Weibo index
"""
from akshare.index.index_weibo import weibo_index
"""
Economic policy uncertainty index
"""
from akshare.article.epu_index import article_epu_index
"""
Nanhua Futures - Nanhua indexes
"""
from akshare.futures_derivative.nh_index_return import (
nh_return_index,
get_nh_list_table,
)
from akshare.futures_derivative.nh_index_price import nh_price_index
from akshare.futures_derivative.nh_index_volatility import nh_volatility_index
"""
Air quality - Hebei
"""
from akshare.air.air_hebei import air_quality_hebei
"""
timeanddate - sunrise and sunset
"""
from akshare.air.time_and_date import sunrise_daily, sunrise_monthly
"""
Sina / Tencent / NetEase - A-share tick data
"""
from akshare.stock.stock_zh_a_tick_tx_163 import (
stock_zh_a_tick_tx,
stock_zh_a_tick_tx_js,
stock_zh_a_tick_163,
stock_zh_a_tick_163_now,
)
"""
Sina - real-time and historical index quotes
"""
from akshare.index.zh_stock_index_sina import (
stock_zh_index_daily,
stock_zh_index_spot,
stock_zh_index_daily_tx,
stock_zh_index_daily_em,
)
"""
Foreign futures real-time quotes
"""
from akshare.futures.futures_hq_sina import (
futures_foreign_commodity_realtime,
futures_foreign_commodity_subscribe_exchange_symbol,
)
"""
Fama-French (FF) multi-factor data interface
"""
from akshare.article.ff_factor import article_ff_crr
"""
Realized Library interface
"""
from akshare.article.risk_rv import (
article_oman_rv,
article_oman_rv_short,
article_rlab_rv,
)
"""
CBIRC local branch administrative penalty data
"""
from akshare.bank.bank_cbirc_2020 import bank_fjcf_table_detail
"""
STAR Market stocks
"""
from akshare.stock.stock_zh_kcb_sina import stock_zh_kcb_spot, stock_zh_kcb_daily
"""
A shares
"""
from akshare.stock.stock_zh_a_sina import (
stock_zh_a_spot,
stock_zh_a_daily,
stock_zh_a_minute,
stock_zh_a_cdr_daily,
)
"""
A+H shares
"""
from akshare.stock.stock_zh_ah_tx import (
stock_zh_ah_spot,
stock_zh_ah_daily,
stock_zh_ah_name,
)
"""
Cryptocurrencies
"""
from akshare.economic.macro_other import crypto_js_spot
"""
Financial options
"""
from akshare.option.option_finance import (
option_finance_board,
option_finance_underlying,
)
"""
Sina - US stock real-time and historical data (forward-adjusted)
"""
from akshare.stock.stock_us_sina import (
stock_us_daily,
stock_us_spot,
get_us_stock_name,
stock_us_fundamental,
)
"""
Sina - HK stock real-time and historical data (forward and backward adjustment factors)
"""
from akshare.stock.stock_hk_sina import stock_hk_daily, stock_hk_spot
"""
Sina - futures real-time data
"""
from akshare.futures.futures_zh_sina import futures_zh_spot, match_main_contract
"""
Xiben Shinkansen - index data
"""
from akshare.futures_derivative.futures_xgx import _get_code_pic, futures_xgx_index
"""
Shengyishe (100ppi) - commodities and futures - spot-futures chart data
"""
from akshare.futures_derivative.sys_spot_futures import (
get_sys_spot_futures,
get_sys_spot_futures_dict,
)
"""
Hexun Finance - quotes and historical data
"""
from akshare.stock.stock_us_zh_hx import stock_us_zh_spot, stock_us_zh_daily
"""
Hexun Finance - corporate social responsibility
"""
from akshare.stock.stock_zh_zrbg_hx import stock_zh_a_scr_report
"""
Global macro - institutional macro
"""
from akshare.economic.macro_constitute import (
macro_cons_gold_amount,
macro_cons_gold_change,
macro_cons_gold_volume,
macro_cons_opec_month,
macro_cons_silver_amount,
macro_cons_silver_change,
macro_cons_silver_volume,
)
"""
Global macro - US macro
"""
from akshare.economic.macro_usa import (
macro_usa_eia_crude_rate,
macro_usa_non_farm,
macro_usa_unemployment_rate,
macro_usa_adp_employment,
macro_usa_core_pce_price,
macro_usa_cpi_monthly,
macro_usa_crude_inner,
macro_usa_gdp_monthly,
macro_usa_initial_jobless,
macro_usa_lmci,
macro_usa_api_crude_stock,
macro_usa_building_permits,
macro_usa_business_inventories,
macro_usa_cb_consumer_confidence,
macro_usa_core_cpi_monthly,
macro_usa_core_ppi,
macro_usa_current_account,
macro_usa_durable_goods_orders,
macro_usa_trade_balance,
macro_usa_spcs20,
macro_usa_services_pmi,
macro_usa_rig_count,
macro_usa_retail_sales,
macro_usa_real_consumer_spending,
macro_usa_ppi,
macro_usa_pmi,
macro_usa_personal_spending,
macro_usa_pending_home_sales,
macro_usa_nfib_small_business,
macro_usa_new_home_sales,
macro_usa_nahb_house_market_index,
macro_usa_michigan_consumer_sentiment,
macro_usa_exist_home_sales,
macro_usa_export_price,
macro_usa_factory_orders,
macro_usa_house_price_index,
macro_usa_house_starts,
macro_usa_import_price,
macro_usa_industrial_production,
macro_usa_ism_non_pmi,
macro_usa_ism_pmi,
macro_usa_job_cuts,
macro_usa_cftc_nc_holding,
macro_usa_cftc_c_holding,
macro_usa_cftc_merchant_currency_holding,
macro_usa_cftc_merchant_goods_holding,
macro_usa_phs,
)
"""
Global macro - China macro
"""
from akshare.economic.macro_china import (
macro_china_cpi_monthly,
macro_china_cpi_yearly,
macro_china_m2_yearly,
macro_china_fx_reserves_yearly,
macro_china_cx_pmi_yearly,
macro_china_pmi_yearly,
macro_china_daily_energy,
macro_china_non_man_pmi,
macro_china_rmb,
macro_china_gdp_yearly,
macro_china_shrzgm,
macro_china_ppi_yearly,
macro_china_cx_services_pmi_yearly,
macro_china_market_margin_sh,
macro_china_market_margin_sz,
macro_china_au_report,
macro_china_ctci_detail,
macro_china_ctci_detail_hist,
macro_china_ctci,
macro_china_exports_yoy,
macro_china_hk_market_info,
macro_china_imports_yoy,
macro_china_trade_balance,
macro_china_shibor_all,
macro_china_industrial_production_yoy,
macro_china_gyzjz,
macro_china_lpr,
macro_china_new_house_price,
macro_china_enterprise_boom_index,
macro_china_national_tax_receipts,
macro_china_new_financial_credit,
macro_china_fx_gold,
macro_china_money_supply,
macro_china_stock_market_cap,
macro_china_cpi,
macro_china_gdp,
macro_china_ppi,
macro_china_pmi,
macro_china_gdzctz,
macro_china_hgjck,
macro_china_czsr,
macro_china_whxd,
macro_china_wbck,
macro_china_bond_public,
macro_china_gksccz,
macro_china_hb,
macro_china_xfzxx,
macro_china_reserve_requirement_ratio,
macro_china_consumer_goods_retail,
macro_china_society_electricity,
macro_china_society_traffic_volume,
macro_china_postal_telecommunicational,
macro_china_international_tourism_fx,
macro_china_passenger_load_factor,
macro_china_freight_index,
macro_china_central_bank_balance,
macro_china_insurance,
macro_china_supply_of_money,
macro_china_swap_rate,
macro_china_foreign_exchange_gold,
macro_china_retail_price_index,
macro_china_real_estate,
macro_china_qyspjg,
macro_china_fdi,
)
"""
Global futures
"""
from akshare.futures.futures_international import (
futures_global_commodity_hist,
futures_global_commodity_name_url_map,
)
"""
Foreign exchange
"""
from akshare.fx.fx_quote import fx_pair_quote, fx_spot_quote, fx_swap_quote
"""
Bond quotes
"""
from akshare.bond.china_bond import bond_spot_quote, bond_spot_deal, bond_china_yield
"""
Commodity options
"""
from akshare.option.option_commodity import (
option_dce_daily,
option_czce_daily,
option_shfe_daily,
)
"""
Investing.com - bonds
"""
from akshare.bond.bond_investing import (
bond_investing_global,
bond_investing_global_country_name_url,
)
"""
Investing.com - indexes
"""
from akshare.index.index_investing import (
index_investing_global,
index_investing_global_country_name_url,
index_investing_global_from_url,
)
"""
99qh - futures inventory data
"""
from akshare.futures.futures_inventory import futures_inventory_99
"""
Eastmoney - futures inventory data
"""
from akshare.futures.futures_inventory_em import futures_inventory_em
"""
NAFMII (China interbank market dealers association)
"""
from akshare.bond.bond_bank import get_bond_bank
"""
Qihuokecha - tools module
"""
from akshare.qhkc_web.qhkc_tool import qhkc_tool_foreign, qhkc_tool_gdp
"""
Qihuokecha - index module
"""
from akshare.qhkc_web.qhkc_index import (
get_qhkc_index,
get_qhkc_index_trend,
get_qhkc_index_profit_loss,
)
"""
Qihuokecha - fund module
"""
from akshare.qhkc_web.qhkc_fund import (
get_qhkc_fund_money_change,
get_qhkc_fund_bs,
get_qhkc_fund_position,
)
"""
Commodity spot prices and basis
"""
from akshare.futures.futures_basis import (
futures_spot_price_daily,
futures_spot_price,
futures_spot_price_previous,
)
"""
Futures position and volume ranking data
"""
from akshare.futures.cot import (
get_rank_sum_daily,
get_rank_sum,
get_shfe_rank_table,
get_czce_rank_table,
get_dce_rank_table,
get_cffex_rank_table,
futures_dce_position_rank,
futures_dce_position_rank_other,
)
"""
Commodity warehouse receipt data
"""
from akshare.futures.receipt import get_receipt
"""
Commodity roll yield data
"""
from akshare.futures.futures_roll_yield import get_roll_yield_bar, get_roll_yield
"""
Exchange daily bar data
"""
from akshare.futures.futures_daily_bar import (
get_cffex_daily,
get_czce_daily,
get_shfe_v_wap,
get_shfe_daily,
get_dce_daily,
get_futures_daily,
)
| avg_line_length: 25.871698 | max_line_length: 118 | alphanum_fraction: 0.79617 |
| is_comment_constant_removed: true | is_sharp_comment_removed: true |
hexsha: 79055eadfcf0cb8d1cb96dc6ff1085b7d3f4d342 | size: 849 | ext: py | lang: Python
max_stars_repo_path: src/AuShadha/demographics/guardian/dijit_fields_constants.py | max_stars_repo_name: GosthMan/AuShadha | max_stars_repo_head_hexsha: 3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e | max_stars_repo_licenses: ["PostgreSQL"] | max_stars_count: 46 | stars_event_min/max_datetime: 2015-03-04T14:19:47.000Z / 2021-12-09T02:58:46.000Z
max_issues_repo_path: src/AuShadha/demographics/guardian/dijit_fields_constants.py | max_issues_repo_name: aytida23/AuShadha | max_issues_repo_head_hexsha: 3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e | max_issues_repo_licenses: ["PostgreSQL"] | max_issues_count: 2 | issues_event_min/max_datetime: 2015-06-05T10:29:04.000Z / 2015-12-06T16:54:10.000Z
max_forks_repo_path: src/AuShadha/demographics/guardian/dijit_fields_constants.py | max_forks_repo_name: aytida23/AuShadha | max_forks_repo_head_hexsha: 3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e | max_forks_repo_licenses: ["PostgreSQL"] | max_forks_count: 24 | forks_event_min/max_datetime: 2015-03-23T01:38:11.000Z / 2022-01-24T16:23:42.000Z
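# Dojo/dijit widget constants for the guardian form: each form field maps to its
# max_length plus the "data-dojo-type" (dijit widget class) and "data-dojo-props"
# (validation options) attributes applied when the field is rendered.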
GUARDIAN_FORM_CONSTANTS = {
    'guardian_name': {
        'max_length': 30,
        "data-dojo-type": "dijit.form.ValidationTextBox",
        "data-dojo-props": r"'required' :'true' ,'regExp':'[\\w]+','invalidMessage':'Invalid Character' ",
    },
    'relation_to_guardian': {
        'max_length': 30,
        "data-dojo-type": "dijit.form.Select",
        "data-dojo-props": r"'required' : 'true' ,'regExp':'[\\w]+','invalidMessage' : 'Invalid Character'",
    },
    'guardian_phone': {
        'max_length': 30,
        "data-dojo-type": "dijit.form.ValidationTextBox",
        "data-dojo-props": r"'required' : 'true' ,'regExp':'[\\w]+','invalidMessage' : 'Invalid Character'",
    },
}
| avg_line_length: 42.45 | max_line_length: 123 | alphanum_fraction: 0.468787 |
| is_comment_constant_removed: true | is_sharp_comment_removed: true |
hexsha: 7905629a5c8eb7bb5d89a3d06a2a42774518bb37 | size: 4,779 | ext: py | lang: Python
max_stars_repo_path: tests/test_validator.py | max_stars_repo_name: finhold72/recaptcha | max_stars_repo_head_hexsha: 474ff67d468e8d3af8a2e58d9c34ff834d52bf2a | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | stars_event_min/max_datetime: null / null
max_issues_repo_path: tests/test_validator.py | max_issues_repo_name: finhold72/recaptcha | max_issues_repo_head_hexsha: 474ff67d468e8d3af8a2e58d9c34ff834d52bf2a | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_event_min/max_datetime: null / null
max_forks_repo_path: tests/test_validator.py | max_forks_repo_name: finhold72/recaptcha | max_forks_repo_head_hexsha: 474ff67d468e8d3af8a2e58d9c34ff834d52bf2a | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks_event_min/max_datetime: null / null
from unittest import mock
import pytest
from rest_framework.serializers import ValidationError
from drf_recaptcha.client import RecaptchaResponse
from drf_recaptcha.validators import ReCaptchaV2Validator, ReCaptchaV3Validator
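# Tests for the drf_recaptcha v2/v3 validators: building RecaptchaResponse objects,
# success and failure paths when the validator is called, and client-IP extraction
# from the incoming request.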
@pytest.mark.parametrize(
("validator_class", "params"),
[
(ReCaptchaV2Validator, {}),
(ReCaptchaV3Validator, {"action": "test_action", "required_score": 0.4}),
],
)
def test_recaptcha_validator_get_response_success(validator_class, params):
validator = validator_class(**params)
assert isinstance(validator.get_response("test_token"), RecaptchaResponse)
@pytest.mark.parametrize(
("validator_class", "params"),
[
(ReCaptchaV2Validator, {}),
(ReCaptchaV3Validator, {"action": "test_action", "required_score": 0.4}),
],
)
def test_recaptcha_validator_get_response_fail(validator_class, params):
validator = validator_class(**params)
assert isinstance(validator.get_response("test_token"), RecaptchaResponse)
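# A mocked successful RecaptchaResponse should let the validator pass without raising
# ValidationError.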
@pytest.mark.parametrize(
("validator_class", "params", "response"),
[
(ReCaptchaV2Validator, {}, RecaptchaResponse(is_valid=True)),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(
is_valid=True, extra_data={"score": 0.6, "action": "test_action"}
),
),
],
)
def test_recaptcha_validator_call_success(validator_class, params, response):
validator = validator_class(**params)
validator.get_response = mock.Mock(return_value=response)
try:
validator("test_token")
except ValidationError:
pytest.fail("Validation is not passed")
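# Each case below pairs a mocked RecaptchaResponse with the exact ValidationError
# detail string the validator is expected to raise for it.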
@pytest.mark.parametrize(
("validator_class", "params", "response", "error"),
[
(
ReCaptchaV2Validator,
{},
RecaptchaResponse(is_valid=False),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
(
ReCaptchaV2Validator,
{},
RecaptchaResponse(
is_valid=True, extra_data={"score": 0.6, "action": "test_action"}
),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_error')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(is_valid=False),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(is_valid=True),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_error')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(is_valid=True, extra_data={"score": 0.3}),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(is_valid=True, extra_data={"score": 0.5}),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(
is_valid=True, extra_data={"score": 0.5, "action": "other_action"}
),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
],
)
def test_recaptcha_validator_call_fail(validator_class, params, response, error):
validator = validator_class(**params)
validator.get_response = mock.Mock(return_value=response)
with pytest.raises(ValidationError) as exc_info:
validator("test_token")
assert str(exc_info.value) == error
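# With DRF_RECAPTCHA_TESTING enabled, calling the validator with a serializer field
# should populate recaptcha_client_ip from the request's X-Forwarded-For header.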
@pytest.mark.parametrize(
("validator_class", "params"),
[
(ReCaptchaV2Validator, {}),
(ReCaptchaV3Validator, {"action": "test_action", "required_score": 0.4}),
],
)
def test_recaptcha_validator_set_context(validator_class, params, settings):
settings.DRF_RECAPTCHA_TESTING = True
validator = validator_class(**params)
assert validator.recaptcha_client_ip == ""
serializer_field = mock.Mock(
context={"request": mock.Mock(META={"HTTP_X_FORWARDED_FOR": "4.3.2.1"})}
)
validator("test_token", serializer_field)
assert validator.recaptcha_client_ip == "4.3.2.1"
| 34.630435
| 107
| 0.634442
|
from unittest import mock
import pytest
from rest_framework.serializers import ValidationError
from drf_recaptcha.client import RecaptchaResponse
from drf_recaptcha.validators import ReCaptchaV2Validator, ReCaptchaV3Validator
@pytest.mark.parametrize(
("validator_class", "params"),
[
(ReCaptchaV2Validator, {}),
(ReCaptchaV3Validator, {"action": "test_action", "required_score": 0.4}),
],
)
def test_recaptcha_validator_get_response_success(validator_class, params):
validator = validator_class(**params)
assert isinstance(validator.get_response("test_token"), RecaptchaResponse)
@pytest.mark.parametrize(
("validator_class", "params"),
[
(ReCaptchaV2Validator, {}),
(ReCaptchaV3Validator, {"action": "test_action", "required_score": 0.4}),
],
)
def test_recaptcha_validator_get_response_fail(validator_class, params):
validator = validator_class(**params)
assert isinstance(validator.get_response("test_token"), RecaptchaResponse)
@pytest.mark.parametrize(
("validator_class", "params", "response"),
[
(ReCaptchaV2Validator, {}, RecaptchaResponse(is_valid=True)),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(
is_valid=True, extra_data={"score": 0.6, "action": "test_action"}
),
),
],
)
def test_recaptcha_validator_call_success(validator_class, params, response):
validator = validator_class(**params)
validator.get_response = mock.Mock(return_value=response)
try:
validator("test_token")
except ValidationError:
pytest.fail("Validation is not passed")
@pytest.mark.parametrize(
("validator_class", "params", "response", "error"),
[
(
ReCaptchaV2Validator,
{},
RecaptchaResponse(is_valid=False),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
(
ReCaptchaV2Validator,
{},
RecaptchaResponse(
is_valid=True, extra_data={"score": 0.6, "action": "test_action"}
),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_error')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(is_valid=False),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(is_valid=True),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_error')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(is_valid=True, extra_data={"score": 0.3}),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(is_valid=True, extra_data={"score": 0.5}),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(
is_valid=True, extra_data={"score": 0.5, "action": "other_action"}
),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
],
)
def test_recaptcha_validator_call_fail(validator_class, params, response, error):
validator = validator_class(**params)
validator.get_response = mock.Mock(return_value=response)
with pytest.raises(ValidationError) as exc_info:
validator("test_token")
assert str(exc_info.value) == error
@pytest.mark.parametrize(
("validator_class", "params"),
[
(ReCaptchaV2Validator, {}),
(ReCaptchaV3Validator, {"action": "test_action", "required_score": 0.4}),
],
)
def test_recaptcha_validator_set_context(validator_class, params, settings):
settings.DRF_RECAPTCHA_TESTING = True
validator = validator_class(**params)
assert validator.recaptcha_client_ip == ""
serializer_field = mock.Mock(
context={"request": mock.Mock(META={"HTTP_X_FORWARDED_FOR": "4.3.2.1"})}
)
validator("test_token", serializer_field)
assert validator.recaptcha_client_ip == "4.3.2.1"
| true
| true
|
790562bc00dcdb90ab02470c69a150b42ec00587
| 2,331
|
py
|
Python
|
src/utils/config.py
|
ttgc/zigotoland
|
0f1910e9853761a0f8187bb20c79a467f19ff3e2
|
[
"MIT"
] | 2
|
2019-06-27T22:43:05.000Z
|
2021-07-08T13:22:52.000Z
|
src/utils/config.py
|
ttgc/zigotoland
|
0f1910e9853761a0f8187bb20c79a467f19ff3e2
|
[
"MIT"
] | 2
|
2019-06-28T08:34:52.000Z
|
2019-06-28T13:46:23.000Z
|
src/utils/config.py
|
ttgc/zigotoland
|
0f1910e9853761a0f8187bb20c79a467f19ff3e2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.7
#-*-coding:utf-8-*-
import json
import discord
PATH = "config.json"
def singleton(class_):
instances = {}
def getinstance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs)
return instances[class_]
return getinstance
@singleton
class Config:
def __init__(self):
with open(PATH,"r") as configfile:
self.config = json.load(configfile)
self.token = self.config["token"]
self.owners = self.config["owner"]
self.guildID = None
if self.config["self-guild"].get("mode","load") == "load":
self.guildID = self.config["self-guild"]["ID"]
self.guildRegion = self.parseRegion(self.config["self-guild"]["region"])
self.guild = None
self.adminrole = None
def __getitem__(self,item):
return self.config[item]
def initGuild(self, guild):
self.guild = guild
self.adminrole = discord.utils.get(self.guild.roles, name="Masakaki")
@classmethod
def parseRegion(cl, regionString):
key = regionString.lower()
if (key == "amsterdam"): return discord.VoiceRegion.amsterdam
elif (key == "brazil"): return discord.VoiceRegion.brazil
elif (key == "eu_central"): return discord.VoiceRegion.eu_central
elif (key == "eu_west"): return discord.VoiceRegion.eu_west
elif (key == "frankfurt"): return discord.VoiceRegion.frankfurt
elif (key == "hongkong"): return discord.VoiceRegion.hongkong
elif (key == "india"): return discord.VoiceRegion.india
elif (key == "japan"): return discord.VoiceRegion.japan
elif (key == "london"): return discord.VoiceRegion.london
elif (key == "russia"): return discord.VoiceRegion.russia
elif (key == "singapore"): return discord.VoiceRegion.singapore
elif (key == "southafrica"): return discord.VoiceRegion.southafrica
elif (key == "sydney"): return discord.VoiceRegion.sydney
elif (key == "us_central"): return discord.VoiceRegion.us_central
elif (key == "us_east"): return discord.VoiceRegion.us_east
elif (key == "us_south"): return discord.VoiceRegion.us_south
elif (key == "us_west"): return discord.VoiceRegion.us_west
return None
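# Illustration (not part of the original module), assuming a valid config.json
# exists at PATH: because of the @singleton decorator above, every call to
# Config() returns the same shared instance.
#
#     cfg_a = Config()
#     cfg_b = Config()
#     assert cfg_a is cfg_b                 # one shared Config object
#     cfg_a.parseRegion("eu_west")          # -> discord.VoiceRegion.eu_west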
| 38.85
| 80
| 0.640927
|
import json
import discord
PATH = "config.json"
def singleton(class_):
instances = {}
def getinstance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs)
return instances[class_]
return getinstance
@singleton
class Config:
def __init__(self):
with open(PATH,"r") as configfile:
self.config = json.load(configfile)
self.token = self.config["token"]
self.owners = self.config["owner"]
self.guildID = None
if self.config["self-guild"].get("mode","load") == "load":
self.guildID = self.config["self-guild"]["ID"]
self.guildRegion = self.parseRegion(self.config["self-guild"]["region"])
self.guild = None
self.adminrole = None
def __getitem__(self,item):
return self.config[item]
def initGuild(self, guild):
self.guild = guild
self.adminrole = discord.utils.get(self.guild.roles, name="Masakaki")
@classmethod
def parseRegion(cl, regionString):
key = regionString.lower()
if (key == "amsterdam"): return discord.VoiceRegion.amsterdam
elif (key == "brazil"): return discord.VoiceRegion.brazil
elif (key == "eu_central"): return discord.VoiceRegion.eu_central
elif (key == "eu_west"): return discord.VoiceRegion.eu_west
elif (key == "frankfurt"): return discord.VoiceRegion.frankfurt
elif (key == "hongkong"): return discord.VoiceRegion.hongkong
elif (key == "india"): return discord.VoiceRegion.india
elif (key == "japan"): return discord.VoiceRegion.japan
elif (key == "london"): return discord.VoiceRegion.london
elif (key == "russia"): return discord.VoiceRegion.russia
elif (key == "singapore"): return discord.VoiceRegion.singapore
elif (key == "southafrica"): return discord.VoiceRegion.southafrica
elif (key == "sydney"): return discord.VoiceRegion.sydney
elif (key == "us_central"): return discord.VoiceRegion.us_central
elif (key == "us_east"): return discord.VoiceRegion.us_east
elif (key == "us_south"): return discord.VoiceRegion.us_south
elif (key == "us_west"): return discord.VoiceRegion.us_west
return None
| true
| true
|
7905636cb6219b7cb1702daadb1550929691dfd7
| 46
|
py
|
Python
|
cryptoquant/api/okex/config.py
|
studyquant/StudyQuant
|
24790634ac320b25361672754558c3797f4fc9e3
|
[
"Apache-2.0"
] | 74
|
2018-08-10T17:05:57.000Z
|
2022-03-26T07:06:02.000Z
|
cryptoquant/api/okex/config.py
|
ezailwoo/studyquant
|
24790634ac320b25361672754558c3797f4fc9e3
|
[
"Apache-2.0"
] | 1
|
2022-03-24T06:42:00.000Z
|
2022-03-24T06:42:00.000Z
|
cryptoquant/api/okex/config.py
|
ezailwoo/studyquant
|
24790634ac320b25361672754558c3797f4fc9e3
|
[
"Apache-2.0"
] | 18
|
2020-09-22T09:03:49.000Z
|
2022-03-31T20:48:54.000Z
|
api_key = ''
seceret_key = ''
passphrase = ''
| 11.5
| 16
| 0.608696
|
api_key = ''
seceret_key = ''
passphrase = ''
| true
| true
|
7905648bce70b580b9648beb73466912b21db9a9
| 4,062
|
py
|
Python
|
instagram/settings.py
|
Brayonski/Instagram-1
|
7135f99d869d1e15310c02e73ca540ff8cacef18
|
[
"MIT"
] | 6
|
2018-10-17T18:09:28.000Z
|
2020-09-25T19:30:47.000Z
|
instagram/settings.py
|
Brayonski/Instagram-1
|
7135f99d869d1e15310c02e73ca540ff8cacef18
|
[
"MIT"
] | 4
|
2020-06-05T18:27:55.000Z
|
2021-09-07T23:53:10.000Z
|
instagram/settings.py
|
Brayonski/Instagram-1
|
7135f99d869d1e15310c02e73ca540ff8cacef18
|
[
"MIT"
] | 11
|
2018-06-21T07:03:55.000Z
|
2019-07-29T06:59:25.000Z
|
import os
import dj_database_url
from decouple import config, Csv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
MODE=config("MODE", default="dev")
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = ['*']
UPLOADCARE = {
'pub_key': config('pub_key'),
'secret': config('secret'),
}
# Application definition
INSTALLED_APPS = [
'pyuploadcare.dj',
'gram.apps.GramConfig',
'tinymce',
'bootstrap4',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'instagram.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'instagram.wsgi.application'
LOGIN_REDIRECT_URL = '/home'
# AUTH_PROFILE_MODULE = 'accounts.UserProfile'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': config('DBNAME'),
'USER': config('DBUSER'),
'PASSWORD': config('DBPASS')
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
db_from_env=dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR,'static')]
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
STATICFILES_STORAGE='whitenoise.django.GzipManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
# Email configurations
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
| 27.821918
| 91
| 0.693747
|
import os
import dj_database_url
from decouple import config, Csv
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = ['*']
UPLOADCARE = {
'pub_key': config('pub_key'),
'secret': config('secret'),
}
# Application definition
INSTALLED_APPS = [
'pyuploadcare.dj',
'gram.apps.GramConfig',
'tinymce',
'bootstrap4',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'instagram.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'instagram.wsgi.application'
LOGIN_REDIRECT_URL = '/home'
# AUTH_PROFILE_MODULE = 'accounts.UserProfile'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': config('DBNAME'),
'USER': config('DBUSER'),
'PASSWORD': config('DBPASS')
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
db_from_env=dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR,'static')]
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
STATICFILES_STORAGE='whitenoise.django.GzipManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
# Email configurations
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
| true
| true
|
7905660d1f710852bc60081a5ce7c97980c9665a
| 2,207
|
py
|
Python
|
dmlab2d/settings_helper.py
|
Robert-Held/lab2d
|
ebf569aeda6c86a9493622b0e33e568686b4a608
|
[
"Apache-2.0"
] | 377
|
2020-11-16T01:30:06.000Z
|
2022-03-24T09:30:00.000Z
|
dmlab2d/settings_helper.py
|
Robert-Held/lab2d
|
ebf569aeda6c86a9493622b0e33e568686b4a608
|
[
"Apache-2.0"
] | 17
|
2020-11-18T13:57:12.000Z
|
2022-03-28T01:20:52.000Z
|
dmlab2d/settings_helper.py
|
Robert-Held/lab2d
|
ebf569aeda6c86a9493622b0e33e568686b4a608
|
[
"Apache-2.0"
] | 47
|
2020-11-16T12:36:10.000Z
|
2022-03-24T17:50:18.000Z
|
# Lint as: python3
# Copyright 2020 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function for flattening dictionary settings."""
import numbers
from typing import Mapping, Sequence
def _flatten_args(pairs_in, args_out, prefix, visited_stack):
"""Helper function for flatten_args. See `flatten_args` below for details."""
for key, v in pairs_in:
if not isinstance(key, str):
raise ValueError('Keys must be strings. %r' % key)
flat_key = prefix + '.' + key if prefix else key
if v is None:
args_out[flat_key] = 'none'
elif isinstance(v, str):
args_out[flat_key] = v
elif isinstance(v, bool):
args_out[flat_key] = 'true' if v else 'false'
elif isinstance(v, numbers.Number):
args_out[flat_key] = str(v)
elif isinstance(v, Mapping):
if not any(v is entry for entry in visited_stack):
_flatten_args(v.items(), args_out, flat_key, visited_stack + [v])
elif isinstance(v, Sequence):
if not any(v is entry for entry in visited_stack):
_flatten_args(((str(i + 1), vv) for i, vv in enumerate(v)), args_out,
flat_key, visited_stack + [v])
else:
raise ValueError('Value for \'{}\' cannot be type: \'{}\''.format(
flat_key, str(type(v))))
def flatten_args(args_in):
"""Converts a dictionary of dictionarys and lists into a flat table.
Args:
args_in: dictionary containing a hierachy of dictionaries and lists. Leaf
values can be strings, bools, numbers..
Returns:
A flat dictionary with keys separated by '.' and string values.
"""
args_out = {}
_flatten_args(args_in.items(), args_out, None, [args_in])
return args_out
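# Illustration (not part of the original module): a small, self-contained check
# of the flattening behaviour described in the docstring above. Nested keys are
# joined with '.', list items are keyed by their 1-based index, and every leaf
# value is rendered as a string.
if __name__ == '__main__':
  example = {'a': {'b': 1, 'c': [True, 'x']}}
  assert flatten_args(example) == {'a.b': '1', 'a.c.1': 'true', 'a.c.2': 'x'}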
| 35.031746
| 79
| 0.686905
|
import numbers
from typing import Mapping, Sequence
def _flatten_args(pairs_in, args_out, prefix, visited_stack):
for key, v in pairs_in:
if not isinstance(key, str):
raise ValueError('Keys must be strings. %r' % key)
flat_key = prefix + '.' + key if prefix else key
if v is None:
args_out[flat_key] = 'none'
elif isinstance(v, str):
args_out[flat_key] = v
elif isinstance(v, bool):
args_out[flat_key] = 'true' if v else 'false'
elif isinstance(v, numbers.Number):
args_out[flat_key] = str(v)
elif isinstance(v, Mapping):
if not any(v is entry for entry in visited_stack):
_flatten_args(v.items(), args_out, flat_key, visited_stack + [v])
elif isinstance(v, Sequence):
if not any(v is entry for entry in visited_stack):
_flatten_args(((str(i + 1), vv) for i, vv in enumerate(v)), args_out,
flat_key, visited_stack + [v])
else:
raise ValueError('Value for \'{}\' cannot be type: \'{}\''.format(
flat_key, str(type(v))))
def flatten_args(args_in):
args_out = {}
_flatten_args(args_in.items(), args_out, None, [args_in])
return args_out
| true
| true
|
790567e9dd7a343e995d4e222f05719a9750ecfe
| 14,359
|
py
|
Python
|
qiskit/circuit/library/grover_operator.py
|
SpinQTech/SpinQKit
|
2e24826688b2b26cf7efa66fd47f0e7ef883a96c
|
[
"Apache-2.0"
] | 2
|
2021-12-20T05:19:44.000Z
|
2021-12-20T05:21:48.000Z
|
qiskit/circuit/library/grover_operator.py
|
SpinQTech/SpinQKit
|
2e24826688b2b26cf7efa66fd47f0e7ef883a96c
|
[
"Apache-2.0"
] | null | null | null |
qiskit/circuit/library/grover_operator.py
|
SpinQTech/SpinQKit
|
2e24826688b2b26cf7efa66fd47f0e7ef883a96c
|
[
"Apache-2.0"
] | 1
|
2021-12-20T05:20:35.000Z
|
2021-12-20T05:20:35.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Grover operator."""
from typing import List, Optional, Union
import numpy
from qiskit.circuit import QuantumCircuit, QuantumRegister, AncillaRegister
# from qiskit.quantum_info import Statevector, Operator, DensityMatrix
from qiskit.quantum_info import Operator
from .standard_gates import MCXGate
class GroverOperator(QuantumCircuit):
r"""The Grover operator.
Grover's search algorithm [1, 2] consists of repeated applications of the so-called
Grover operator used to amplify the amplitudes of the desired output states.
This operator, :math:`\mathcal{Q}`, consists of the phase oracle, :math:`\mathcal{S}_f`,
zero phase-shift or zero reflection, :math:`\mathcal{S}_0`, and an
input state preparation :math:`\mathcal{A}`:
.. math::
\mathcal{Q} = \mathcal{A} \mathcal{S}_0 \mathcal{A}^\dagger \mathcal{S}_f
In the standard Grover search we have :math:`\mathcal{A} = H^{\otimes n}`:
.. math::
\mathcal{Q} = H^{\otimes n} \mathcal{S}_0 H^{\otimes n} \mathcal{S}_f
= D \mathcal{S_f}
    The operation :math:`D = H^{\otimes n} \mathcal{S}_0 H^{\otimes n}` is also referred to as
    the diffusion operator. In this formulation we can see that the Grover operator consists of two
steps: first, the phase oracle multiplies the good states by -1 (with :math:`\mathcal{S}_f`)
and then the whole state is reflected around the mean (with :math:`D`).
    This class allows setting a different state preparation, as in quantum amplitude
    amplification (a generalization of Grover's algorithm), where :math:`\mathcal{A}` might not be
    a layer of Hadamard gates [3].
The action of the phase oracle :math:`\mathcal{S}_f` is defined as
.. math::
\mathcal{S}_f: |x\rangle \mapsto (-1)^{f(x)}|x\rangle
where :math:`f(x) = 1` if :math:`x` is a good state and 0 otherwise. To highlight the fact
that this oracle flips the phase of the good states and does not flip the state of a result
qubit, we call :math:`\mathcal{S}_f` a phase oracle.
Note that you can easily construct a phase oracle from a bitflip oracle by sandwiching the
controlled X gate on the result qubit by a X and H gate. For instance
.. parsed-literal::
Bitflip oracle Phaseflip oracle
q_0: ──■── q_0: ────────────■────────────
┌─┴─┐ ┌───┐┌───┐┌─┴─┐┌───┐┌───┐
out: ┤ X ├ out: ┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├
└───┘ └───┘└───┘└───┘└───┘└───┘
There is some flexibility in defining the oracle and :math:`\mathcal{A}` operator. Before the
Grover operator is applied in Grover's algorithm, the qubits are first prepared with one
application of the :math:`\mathcal{A}` operator (or Hadamard gates in the standard formulation).
    Thus, we always have an operation of the form
    :math:`\mathcal{A} \mathcal{S}_f \mathcal{A}^\dagger`. Therefore it is possible to move the
    bitflip logic into :math:`\mathcal{A}` and leave the oracle to do only phase flips via Z gates
    based on the bitflips. One possible use case for this is oracles that do not uncompute the
    state qubits.
The zero reflection :math:`\mathcal{S}_0` is usually defined as
.. math::
\mathcal{S}_0 = 2 |0\rangle^{\otimes n} \langle 0|^{\otimes n} - \mathbb{I}_n
where :math:`\mathbb{I}_n` is the identity on :math:`n` qubits.
    By default, this class implements the negative version
    :math:`\mathbb{I}_n - 2 |0\rangle^{\otimes n} \langle 0|^{\otimes n}`, since this can simply
be implemented with a multi-controlled Z sandwiched by X gates on the target qubit and the
introduced global phase does not matter for Grover's algorithm.
Examples:
>>> from qiskit.circuit import QuantumCircuit
>>> from qiskit.circuit.library import GroverOperator
>>> oracle = QuantumCircuit(2)
>>> oracle.z(0) # good state = first qubit is |1>
>>> grover_op = GroverOperator(oracle, insert_barriers=True)
>>> grover_op.draw()
┌───┐ ░ ┌───┐ ░ ┌───┐ ┌───┐ ░ ┌───┐
state_0: ┤ Z ├─░─┤ H ├─░─┤ X ├───────■──┤ X ├──────░─┤ H ├
└───┘ ░ ├───┤ ░ ├───┤┌───┐┌─┴─┐├───┤┌───┐ ░ ├───┤
state_1: ──────░─┤ H ├─░─┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├─░─┤ H ├
░ └───┘ ░ └───┘└───┘└───┘└───┘└───┘ ░ └───┘
>>> oracle = QuantumCircuit(1)
>>> oracle.z(0) # the qubit state |1> is the good state
>>> state_preparation = QuantumCircuit(1)
>>> state_preparation.ry(0.2, 0) # non-uniform state preparation
>>> grover_op = GroverOperator(oracle, state_preparation)
>>> grover_op.draw()
┌───┐┌──────────┐┌───┐┌───┐┌───┐┌─────────┐
state_0: ┤ Z ├┤ RY(-0.2) ├┤ X ├┤ Z ├┤ X ├┤ RY(0.2) ├
└───┘└──────────┘└───┘└───┘└───┘└─────────┘
>>> oracle = QuantumCircuit(4)
>>> oracle.z(3)
>>> reflection_qubits = [0, 3]
>>> state_preparation = QuantumCircuit(4)
>>> state_preparation.cry(0.1, 0, 3)
>>> state_preparation.ry(0.5, 3)
>>> grover_op = GroverOperator(oracle, state_preparation,
... reflection_qubits=reflection_qubits)
>>> grover_op.draw()
┌───┐ ┌───┐
state_0: ──────────────────────■──────┤ X ├───────■──┤ X ├──────────■────────────────
│ └───┘ │ └───┘ │
state_1: ──────────────────────┼──────────────────┼─────────────────┼────────────────
│ │ │
state_2: ──────────────────────┼──────────────────┼─────────────────┼────────────────
┌───┐┌──────────┐┌────┴─────┐┌───┐┌───┐┌─┴─┐┌───┐┌───┐┌────┴────┐┌─────────┐
state_3: ┤ Z ├┤ RY(-0.5) ├┤ RY(-0.1) ├┤ X ├┤ H ├┤ X ├┤ H ├┤ X ├┤ RY(0.1) ├┤ RY(0.5) ├
└───┘└──────────┘└──────────┘└───┘└───┘└───┘└───┘└───┘└─────────┘└─────────┘
>>> mark_state = Statevector.from_label('011')
>>> diffuse_operator = 2 * DensityMatrix.from_label('000') - Operator.from_label('III')
>>> grover_op = GroverOperator(oracle=mark_state, zero_reflection=diffuse_operator)
>>> grover_op.draw(fold=70)
┌─────────────────┐ ┌───┐ »
state_0: ┤0 ├──────┤ H ├──────────────────────────»
│ │┌─────┴───┴─────┐ ┌───┐ »
state_1: ┤1 UCRZ(0,pi,0,0) ├┤0 ├─────┤ H ├──────────»
│ ││ UCRZ(pi/2,0) │┌────┴───┴────┐┌───┐»
state_2: ┤2 ├┤1 ├┤ UCRZ(-pi/4) ├┤ H ├»
└─────────────────┘└───────────────┘└─────────────┘└───┘»
« ┌─────────────────┐ ┌───┐
«state_0: ┤0 ├──────┤ H ├─────────────────────────
« │ │┌─────┴───┴─────┐ ┌───┐
«state_1: ┤1 UCRZ(pi,0,0,0) ├┤0 ├────┤ H ├──────────
« │ ││ UCRZ(pi/2,0) │┌───┴───┴────┐┌───┐
«state_2: ┤2 ├┤1 ├┤ UCRZ(pi/4) ├┤ H ├
« └─────────────────┘└───────────────┘└────────────┘└───┘
References:
[1]: L. K. Grover (1996), A fast quantum mechanical algorithm for database search,
`arXiv:quant-ph/9605043 <https://arxiv.org/abs/quant-ph/9605043>`_.
[2]: I. Chuang & M. Nielsen, Quantum Computation and Quantum Information,
Cambridge: Cambridge University Press, 2000. Chapter 6.1.2.
[3]: Brassard, G., Hoyer, P., Mosca, M., & Tapp, A. (2000).
Quantum Amplitude Amplification and Estimation.
`arXiv:quant-ph/0005055 <http://arxiv.org/abs/quant-ph/0005055>`_.
"""
def __init__(
self,
# oracle: Union[QuantumCircuit, Statevector],
oracle: QuantumCircuit,
state_preparation: Optional[QuantumCircuit] = None,
# zero_reflection: Optional[Union[QuantumCircuit, DensityMatrix, Operator]] = None,
zero_reflection: Optional[Union[QuantumCircuit, Operator]] = None,
reflection_qubits: Optional[List[int]] = None,
insert_barriers: bool = False,
mcx_mode: str = "noancilla",
name: str = "Q",
) -> None:
r"""
Args:
oracle: The phase oracle implementing a reflection about the bad state. Note that this
is not a bitflip oracle, see the docstring for more information.
state_preparation: The operator preparing the good and bad state.
                For Grover's algorithm, this is an n-qubit Hadamard gate and for amplitude
amplification or estimation the operator :math:`\mathcal{A}`.
zero_reflection: The reflection about the zero state, :math:`\mathcal{S}_0`.
            reflection_qubits: Qubits on which the zero reflection acts.
insert_barriers: Whether barriers should be inserted between the reflections and A.
mcx_mode: The mode to use for building the default zero reflection.
name: The name of the circuit.
"""
super().__init__(name=name)
# store inputs
# if isinstance(oracle, Statevector):
# from qiskit.circuit.library import Diagonal # pylint: disable=cyclic-import
# oracle = Diagonal((-1) ** oracle.data)
self._oracle = oracle
# if isinstance(zero_reflection, (Operator, DensityMatrix)):
# from qiskit.circuit.library import Diagonal # pylint: disable=cyclic-import
# zero_reflection = Diagonal(zero_reflection.data.diagonal())
self._zero_reflection = zero_reflection
self._reflection_qubits = reflection_qubits
self._state_preparation = state_preparation
self._insert_barriers = insert_barriers
self._mcx_mode = mcx_mode
# build circuit
self._build()
@property
def reflection_qubits(self):
"""Reflection qubits, on which S0 is applied (if S0 is not user-specified)."""
if self._reflection_qubits is not None:
return self._reflection_qubits
num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
return list(range(num_state_qubits))
@property
def zero_reflection(self) -> QuantumCircuit:
"""The subcircuit implementing the reflection about 0."""
if self._zero_reflection is not None:
return self._zero_reflection
num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
return _zero_reflection(num_state_qubits, self.reflection_qubits, self._mcx_mode)
@property
def state_preparation(self) -> QuantumCircuit:
"""The subcircuit implementing the A operator or Hadamards."""
if self._state_preparation is not None:
return self._state_preparation
num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
hadamards = QuantumCircuit(num_state_qubits, name="H")
# apply Hadamards only on reflection qubits, rest will cancel out
hadamards.h(self.reflection_qubits)
return hadamards
@property
def oracle(self):
"""The oracle implementing a reflection about the bad state."""
return self._oracle
def _build(self):
num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
self.add_register(QuantumRegister(num_state_qubits, name="state"))
num_ancillas = numpy.max(
[
self.oracle.num_ancillas,
self.zero_reflection.num_ancillas,
self.state_preparation.num_ancillas,
]
)
if num_ancillas > 0:
self.add_register(AncillaRegister(num_ancillas, name="ancilla"))
self.compose(self.oracle, list(range(self.oracle.num_qubits)), inplace=True)
if self._insert_barriers:
self.barrier()
self.compose(
self.state_preparation.inverse(),
list(range(self.state_preparation.num_qubits)),
inplace=True,
)
if self._insert_barriers:
self.barrier()
self.compose(
self.zero_reflection, list(range(self.zero_reflection.num_qubits)), inplace=True
)
if self._insert_barriers:
self.barrier()
self.compose(
self.state_preparation, list(range(self.state_preparation.num_qubits)), inplace=True
)
# minus sign
self.global_phase = numpy.pi
# TODO use the oracle compiler or the bit string oracle
def _zero_reflection(
num_state_qubits: int, qubits: List[int], mcx_mode: Optional[str] = None
) -> QuantumCircuit:
qr_state = QuantumRegister(num_state_qubits, "state")
reflection = QuantumCircuit(qr_state, name="S_0")
num_ancillas = MCXGate.get_num_ancilla_qubits(len(qubits) - 1, mcx_mode)
if num_ancillas > 0:
qr_ancilla = AncillaRegister(num_ancillas, "ancilla")
reflection.add_register(qr_ancilla)
else:
qr_ancilla = []
reflection.x(qubits)
if len(qubits) == 1:
reflection.z(0) # MCX does not allow 0 control qubits, therefore this is separate
else:
reflection.h(qubits[-1])
reflection.mcx(qubits[:-1], qubits[-1], qr_ancilla[:], mode=mcx_mode)
reflection.h(qubits[-1])
reflection.x(qubits)
return reflection
| 47.233553
| 101
| 0.548088
|
from typing import List, Optional, Union
import numpy
from qiskit.circuit import QuantumCircuit, QuantumRegister, AncillaRegister
from qiskit.quantum_info import Operator
from .standard_gates import MCXGate
class GroverOperator(QuantumCircuit):
def __init__(
self,
oracle: QuantumCircuit,
state_preparation: Optional[QuantumCircuit] = None,
zero_reflection: Optional[Union[QuantumCircuit, Operator]] = None,
reflection_qubits: Optional[List[int]] = None,
insert_barriers: bool = False,
mcx_mode: str = "noancilla",
name: str = "Q",
) -> None:
super().__init__(name=name)
        self._oracle = oracle
        self._zero_reflection = zero_reflection
self._reflection_qubits = reflection_qubits
self._state_preparation = state_preparation
self._insert_barriers = insert_barriers
self._mcx_mode = mcx_mode
self._build()
@property
def reflection_qubits(self):
if self._reflection_qubits is not None:
return self._reflection_qubits
num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
return list(range(num_state_qubits))
@property
def zero_reflection(self) -> QuantumCircuit:
if self._zero_reflection is not None:
return self._zero_reflection
num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
return _zero_reflection(num_state_qubits, self.reflection_qubits, self._mcx_mode)
@property
def state_preparation(self) -> QuantumCircuit:
if self._state_preparation is not None:
return self._state_preparation
num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
hadamards = QuantumCircuit(num_state_qubits, name="H")
hadamards.h(self.reflection_qubits)
return hadamards
@property
def oracle(self):
return self._oracle
def _build(self):
num_state_qubits = self.oracle.num_qubits - self.oracle.num_ancillas
self.add_register(QuantumRegister(num_state_qubits, name="state"))
num_ancillas = numpy.max(
[
self.oracle.num_ancillas,
self.zero_reflection.num_ancillas,
self.state_preparation.num_ancillas,
]
)
if num_ancillas > 0:
self.add_register(AncillaRegister(num_ancillas, name="ancilla"))
self.compose(self.oracle, list(range(self.oracle.num_qubits)), inplace=True)
if self._insert_barriers:
self.barrier()
self.compose(
self.state_preparation.inverse(),
list(range(self.state_preparation.num_qubits)),
inplace=True,
)
if self._insert_barriers:
self.barrier()
self.compose(
self.zero_reflection, list(range(self.zero_reflection.num_qubits)), inplace=True
)
if self._insert_barriers:
self.barrier()
self.compose(
self.state_preparation, list(range(self.state_preparation.num_qubits)), inplace=True
)
self.global_phase = numpy.pi
def _zero_reflection(
num_state_qubits: int, qubits: List[int], mcx_mode: Optional[str] = None
) -> QuantumCircuit:
qr_state = QuantumRegister(num_state_qubits, "state")
reflection = QuantumCircuit(qr_state, name="S_0")
num_ancillas = MCXGate.get_num_ancilla_qubits(len(qubits) - 1, mcx_mode)
if num_ancillas > 0:
qr_ancilla = AncillaRegister(num_ancillas, "ancilla")
reflection.add_register(qr_ancilla)
else:
qr_ancilla = []
reflection.x(qubits)
if len(qubits) == 1:
reflection.z(0)
else:
reflection.h(qubits[-1])
reflection.mcx(qubits[:-1], qubits[-1], qr_ancilla[:], mode=mcx_mode)
reflection.h(qubits[-1])
reflection.x(qubits)
return reflection
| true
| true
|
790568afced767abc6eb9268aa1733b1c3326aa9
| 2,631
|
py
|
Python
|
utest/writer/test_filewriters.py
|
nopparat-mkw/robotframework
|
1c460dd57383f992eb3642a4b0c50fee2dc91581
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
utest/writer/test_filewriters.py
|
nopparat-mkw/robotframework
|
1c460dd57383f992eb3642a4b0c50fee2dc91581
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
utest/writer/test_filewriters.py
|
nopparat-mkw/robotframework
|
1c460dd57383f992eb3642a4b0c50fee2dc91581
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import unittest
from robot.parsing import TestCaseFile
from robot.parsing.model import TestCaseTable
from robot.utils import ET, ETSource, StringIO
from robot.utils.asserts import assert_equal
def create_test_case_file():
data = TestCaseFile(source='foo.txt')
table = TestCaseTable(data)
data.testcase_table = table
table.set_header(['test case', 'some', 'and other'])
test = table.add('A test')
test.add_step(['A kw', 'an arg'])
return data
class _WriterTestCase(unittest.TestCase):
def _test_rows_are_not_split_if_there_are_headers(self, format='txt'):
output = self._add_long_step_and_save(format)
assert_equal(len(output.splitlines()), 3)
def _add_long_step_and_save(self, format):
data = create_test_case_file()
data.testcase_table.tests[0].add_step(['A kw', '1', '2', '3', '4', '6', '7', '8'])
output = StringIO()
data.save(format=format, output=output)
return output.getvalue().strip()
class TestSpaceSeparatedWriter(_WriterTestCase):
def test_end_of_line_whitespace_is_removed(self):
output = StringIO()
create_test_case_file().save(output=output)
expected = '''\
*** test case *** some and other
A test A kw an arg
'''
assert_equal(repr(expected), repr(output.getvalue()))
def test_rows_are_not_split_if_there_are_headers(self):
self._test_rows_are_not_split_if_there_are_headers()
def test_configuring_number_of_separating_spaces(self):
output = StringIO()
create_test_case_file().save(output=output, txt_separating_spaces=8)
expected = '''\
*** test case *** some and other
A test A kw an arg
'''
assert_equal(repr(expected), repr(output.getvalue()))
class TestTsvWriter(_WriterTestCase):
def test_rows_are_not_split_if_there_are_headers(self):
try:
import csv
except ImportError:
pass # csv not available on IronPython 2.7
else:
self._test_rows_are_not_split_if_there_are_headers('tsv')
class TestHtmlWriter(_WriterTestCase):
def test_rows_are_not_split_if_there_are_headers(self):
output = self._add_long_step_and_save('html')
with ETSource('\n'.join(output.splitlines()[1:])) as source:
tree = ET.parse(source)
lines = tree.findall('body/table/tr')
assert_equal(len(lines), 3)
for l in lines:
cols = l.findall('td') or l.findall('th')
assert_equal(len(cols), 9)
if __name__ == '__main__':
unittest.main()
| 31.698795
| 90
| 0.659065
|
import unittest
from robot.parsing import TestCaseFile
from robot.parsing.model import TestCaseTable
from robot.utils import ET, ETSource, StringIO
from robot.utils.asserts import assert_equal
def create_test_case_file():
data = TestCaseFile(source='foo.txt')
table = TestCaseTable(data)
data.testcase_table = table
table.set_header(['test case', 'some', 'and other'])
test = table.add('A test')
test.add_step(['A kw', 'an arg'])
return data
class _WriterTestCase(unittest.TestCase):
def _test_rows_are_not_split_if_there_are_headers(self, format='txt'):
output = self._add_long_step_and_save(format)
assert_equal(len(output.splitlines()), 3)
def _add_long_step_and_save(self, format):
data = create_test_case_file()
data.testcase_table.tests[0].add_step(['A kw', '1', '2', '3', '4', '6', '7', '8'])
output = StringIO()
data.save(format=format, output=output)
return output.getvalue().strip()
class TestSpaceSeparatedWriter(_WriterTestCase):
def test_end_of_line_whitespace_is_removed(self):
output = StringIO()
create_test_case_file().save(output=output)
expected = '''\
*** test case *** some and other
A test A kw an arg
'''
assert_equal(repr(expected), repr(output.getvalue()))
def test_rows_are_not_split_if_there_are_headers(self):
self._test_rows_are_not_split_if_there_are_headers()
def test_configuring_number_of_separating_spaces(self):
output = StringIO()
create_test_case_file().save(output=output, txt_separating_spaces=8)
expected = '''\
*** test case *** some and other
A test A kw an arg
'''
assert_equal(repr(expected), repr(output.getvalue()))
class TestTsvWriter(_WriterTestCase):
def test_rows_are_not_split_if_there_are_headers(self):
try:
import csv
except ImportError:
pass
else:
self._test_rows_are_not_split_if_there_are_headers('tsv')
class TestHtmlWriter(_WriterTestCase):
def test_rows_are_not_split_if_there_are_headers(self):
output = self._add_long_step_and_save('html')
with ETSource('\n'.join(output.splitlines()[1:])) as source:
tree = ET.parse(source)
lines = tree.findall('body/table/tr')
assert_equal(len(lines), 3)
for l in lines:
cols = l.findall('td') or l.findall('th')
assert_equal(len(cols), 9)
if __name__ == '__main__':
unittest.main()
| true
| true
|
790568b8149eb575b8c3d09f5df162c3ec0fbfec
| 4,179
|
py
|
Python
|
yandeley/models/files.py
|
shuichiro-makigaki/yandeley-python-sdk
|
2c15145d11ddfdf33a94da6c846afdd13f310b54
|
[
"Apache-2.0"
] | null | null | null |
yandeley/models/files.py
|
shuichiro-makigaki/yandeley-python-sdk
|
2c15145d11ddfdf33a94da6c846afdd13f310b54
|
[
"Apache-2.0"
] | null | null | null |
yandeley/models/files.py
|
shuichiro-makigaki/yandeley-python-sdk
|
2c15145d11ddfdf33a94da6c846afdd13f310b54
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
import re
from yandeley.models.annotations import Annotation
from yandeley.response import SessionResponseObject
class File(SessionResponseObject):
"""
A file attached to a document.
.. attribute:: id
.. attribute:: size
.. attribute:: file_name
.. attribute:: mime_type
.. attribute:: filehash
.. attribute:: download_url
"""
content_type = 'application/vnd.mendeley-file.1+json'
    filename_regex = re.compile(r'filename="(\S+)"')
@property
def download_url(self):
"""
the URL at which the file can be downloaded. This is only valid for a short time, so should not be cached.
"""
file_url = '/files/%s' % self.id
rsp = self.session.get(file_url, allow_redirects=False)
return rsp.headers['location']
def document(self, view=None):
"""
:param view: document view to return.
:return: a :class:`UserDocument <yandeley.models.documents.UserDocument>` or
:class:`CatalogDocument <yandeley.models.catalog.CatalogDocument>`, depending on which the document is
attached to.
"""
if 'document_id' in self.json:
return self.session.documents.get_lazy(self.json['document_id'], view=view)
elif 'catalog_id' in self.json:
return self.session.catalog.get_lazy(self.json['catalog_id'], view=view)
else:
return None
def download(self, directory):
"""
Downloads the file.
:param directory: the directory to download the file to. This must exist.
:return: the path to the downloaded file.
"""
rsp = self.session.get('/files/%s' % self.id, stream=True)
filename = self.filename_regex.search(rsp.headers['content-disposition']).group(1)
path = os.path.join(directory, filename)
with open(path, 'wb') as f:
for block in rsp.iter_content(1024):
if not block:
break
f.write(block)
return path
def delete(self):
"""
Deletes the file.
"""
self.session.delete('/files/%s' % self.id)
def add_sticky_note(self, text, x_position, y_position, page_number):
"""
Adds a sticky note to this file.
:param text: the text of the sticky_note.
:param x_position: the x position on the file of the sticky_note.
        :param y_position: the y position on the file of the sticky_note.
:param page_number: the page_number on the file of the sticky_note.
:return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
position = {'x': x_position, 'y': y_position}
bounding_box = {'top_left': position, 'bottom_right': position, 'page': page_number}
annotation = {
'document_id': self.document().id,
'text': text,
'filehash': self.filehash,
'positions': [bounding_box]
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
def add_highlight(self, bounding_boxes, color):
"""
Adds a highlight to this file.
:param bounding_boxes: the area the highlight covers on the file.
:param color: the color of the highlight.
:return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
annotation = {
'document_id': self.document().id,
'filehash': self.filehash,
'positions': [box.json for box in bounding_boxes],
'color': color.json
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
@classmethod
def fields(cls):
return ['id', 'size', 'file_name', 'mime_type', 'filehash']
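# Illustration (not part of the original module): typical use of the methods
# above, given a File instance (here called `attached`, hypothetical) obtained
# from an authenticated session. `boxes` and `color` stand for objects exposing
# the .json attributes that add_highlight expects.
#
#     path = attached.download('/tmp')       # saves the file, returns its path
#     attached.add_sticky_note('check this figure', 10, 20, page_number=1)
#     attached.add_highlight(boxes, color)
#     attached.delete()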
| 33.97561
| 119
| 0.603494
|
import json
import os
import re
from yandeley.models.annotations import Annotation
from yandeley.response import SessionResponseObject
class File(SessionResponseObject):
content_type = 'application/vnd.mendeley-file.1+json'
    filename_regex = re.compile(r'filename="(\S+)"')
@property
def download_url(self):
file_url = '/files/%s' % self.id
rsp = self.session.get(file_url, allow_redirects=False)
return rsp.headers['location']
def document(self, view=None):
if 'document_id' in self.json:
return self.session.documents.get_lazy(self.json['document_id'], view=view)
elif 'catalog_id' in self.json:
return self.session.catalog.get_lazy(self.json['catalog_id'], view=view)
else:
return None
def download(self, directory):
rsp = self.session.get('/files/%s' % self.id, stream=True)
filename = self.filename_regex.search(rsp.headers['content-disposition']).group(1)
path = os.path.join(directory, filename)
with open(path, 'wb') as f:
for block in rsp.iter_content(1024):
if not block:
break
f.write(block)
return path
def delete(self):
self.session.delete('/files/%s' % self.id)
def add_sticky_note(self, text, x_position, y_position, page_number):
position = {'x': x_position, 'y': y_position}
bounding_box = {'top_left': position, 'bottom_right': position, 'page': page_number}
annotation = {
'document_id': self.document().id,
'text': text,
'filehash': self.filehash,
'positions': [bounding_box]
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
def add_highlight(self, bounding_boxes, color):
annotation = {
'document_id': self.document().id,
'filehash': self.filehash,
'positions': [box.json for box in bounding_boxes],
'color': color.json
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
@classmethod
def fields(cls):
return ['id', 'size', 'file_name', 'mime_type', 'filehash']
| true
| true
|
7905695831cb68228214abcd4e9cbe043ee10984
| 532
|
py
|
Python
|
Day_55/sandbox.py
|
ecanro/100DaysOfCode_Python
|
a86ebe5a793fd4743e0de87454ba76925efdd23d
|
[
"MIT"
] | null | null | null |
Day_55/sandbox.py
|
ecanro/100DaysOfCode_Python
|
a86ebe5a793fd4743e0de87454ba76925efdd23d
|
[
"MIT"
] | null | null | null |
Day_55/sandbox.py
|
ecanro/100DaysOfCode_Python
|
a86ebe5a793fd4743e0de87454ba76925efdd23d
|
[
"MIT"
] | null | null | null |
## ********Day 55 Start**********
## Advanced Python Decorator Functions
class User:
def __init__(self, name):
self.name = name
self.is_logged_in = False
def is_authenticated_decorator(function):
def wrapper(*args, **kwargs):
if args[0].is_logged_in == True:
function(args[0])
return wrapper
@is_authenticated_decorator
def create_blog_post(user):
print(f"This is {user.name}'s new blog post.")
new_user = User("Edgar")
new_user.is_logged_in = True
create_blog_post(new_user)
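# Illustration (not part of the original snippet): because new_user.is_logged_in
# is True, the wrapper calls the decorated function and the script prints
# "This is Edgar's new blog post." If is_logged_in were False, the wrapper would
# fall through and nothing would be printed.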
| 24.181818
| 50
| 0.667293
|
class User:
    def __init__(self, name):
        self.name = name
        self.is_logged_in = False
def is_authenticated_decorator(function):
def wrapper(*args, **kwargs):
if args[0].is_logged_in == True:
function(args[0])
return wrapper
@is_authenticated_decorator
def create_blog_post(user):
print(f"This is {user.name}'s new blog post.")
new_user = User("Edgar")
new_user.is_logged_in = True
create_blog_post(new_user)
| true
| true
|
790569d6482d7e5566b735e8104a8a049aa90f87
| 585
|
py
|
Python
|
elasticsearch/elasticsearch.py
|
webvul/Allscanner
|
a1a4dc9369e28f5be2dffdb6a789147da9e44dc6
|
[
"MIT"
] | 1
|
2020-01-08T22:43:27.000Z
|
2020-01-08T22:43:27.000Z
|
elasticsearch/elasticsearch.py
|
webvul/Allscanner
|
a1a4dc9369e28f5be2dffdb6a789147da9e44dc6
|
[
"MIT"
] | null | null | null |
elasticsearch/elasticsearch.py
|
webvul/Allscanner
|
a1a4dc9369e28f5be2dffdb6a789147da9e44dc6
|
[
"MIT"
] | 1
|
2020-09-15T01:07:07.000Z
|
2020-09-15T01:07:07.000Z
|
#coding:utf-8
import urllib2
import sys,socket
def elasticburp(ip,port):
addr = (ip,int(port))
url = "http://" + ip + ":" + str(port) + "/_cat"
sock_9200 = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
sock_9200.settimeout(1)
sock_9200.connect(addr)
        print '%s 9200 open!' % ip
try:
data = urllib2.urlopen(url).read()
if '/_cat/master' in data:
sys.stdout.write('%s:%d is ElasticSearch Unauthorized\n' % (ip, port))
except:
pass
except:
sock_9200.close()
| 20.892857
| 86
| 0.555556
|
import urllib2
import sys,socket
def elasticburp(ip,port):
addr = (ip,int(port))
url = "http://" + ip + ":" + str(port) + "/_cat"
sock_9200 = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
sock_9200.settimeout(1)
sock_9200.connect(addr)
        print '%s 9200 open!' % ip
try:
data = urllib2.urlopen(url).read()
if '/_cat/master' in data:
sys.stdout.write('%s:%d is ElasticSearch Unauthorized\n' % (ip, port))
except:
pass
except:
sock_9200.close()
| false
| true
|
79056a0d4c4e25f66e8adcf62667faf578d40c78
| 12,288
|
py
|
Python
|
reporting/base.py
|
flagshipenterprise/django-prickly-reports
|
14375d2e24c2257c631c013432a92c5aa19f5aa9
|
[
"MIT"
] | 1
|
2015-02-03T19:42:23.000Z
|
2015-02-03T19:42:23.000Z
|
reporting/base.py
|
flagshipenterprise/django-prickly-reports
|
14375d2e24c2257c631c013432a92c5aa19f5aa9
|
[
"MIT"
] | null | null | null |
reporting/base.py
|
flagshipenterprise/django-prickly-reports
|
14375d2e24c2257c631c013432a92c5aa19f5aa9
|
[
"MIT"
] | null | null | null |
from django import forms
from django.http import QueryDict
from django.forms.formsets import formset_factory
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from datetime import date
import itertools
import re
from fields import SubmitButtonField, SubmitButtonWidget
class Filter(object):
__metaclass__ = ABCMeta
_order = itertools.count()
form_field_class = None
form_field_widget = None
filter_state_names = ['%s', ]
filter_field = ''
def __init__(self,
default=None,
required=False,
label=None,
form_field_class=None,
form_field_widget=None,
filter_set=False,
filter_field=None):
self.default = default
self.required = required
self.label = label
self.form_field_class = form_field_class or self.form_field_class
self.form_field_widget = form_field_widget or self.form_field_widget
self.order = Filter._order.next()
self.filter_set = filter_set
self.filter_field = filter_field or self.filter_field
def get_form_field(self):
"""
Returns an instance of the form field class, used for constructing the
filter form for a report.
"""
return self.form_field_class(required=(self.required and not self.filter_set),
widget=self.form_field_widget,
label=self.label)
def get_form_class(self, name, index=0, postfix="Form"):
form_class_name = "%s%s" % (type(self).__name__, postfix)
form_class_dict = {name: self.get_form_field()}
return type(form_class_name, (forms.Form,), form_class_dict)
def clean_data(self, name, raw_data):
form = self.get_form_class(name)(data=raw_data)
return form.cleaned_data[name] if form.is_valid() else None
def get_data(self, name, data):
"""
        To get the data for this filter given the filter states, we instantiate
the form with the data, validate it, and return the cleaned data.
"""
cleaned_data = self.clean_data(name, data)
return cleaned_data if cleaned_data else self.default
def get_data_set(self, name, data):
"""
This horribly ugly little function is in charge of returning a list of
data entries, given filter states, for a filter set. It does the same
thing as get_data, but for every item in a filter set, returning the
results in a list.
"""
# If we're not really a set, just return a 1-element list with the data
if not self.filter_set:
return [self.get_data(name, data)]
# Get the deletion field name and index
delete = data.get('delete', None)
delete_index = None
if delete:
n, i = delete.split('.')
if n == name:
delete_index = int(i) + 1
# Zip together all the lists of filter state values. This gives us a
# list of tuples of filter state fields. Ugly but necessary in case we
# have a filter which generates a MultiValueField (aka,
# NumericComparisonFilter). Exclude elements which have been deleted.
filter_state_names = self.filter_state_names[:]
filter_state_list = [data.getlist(state_name % name, []) for state_name in filter_state_names]
filter_states = zip(*filter_state_list)
# Loop over every filter state tuple, converting it to a mini filter-
# -state dict. Clean it, and store the cleaned data in a list
data_set = []
for i in range(len(filter_states)):
# If this index is getting deleted, don't add it
if i == delete_index:
continue
# Get the dict of states for this filter set element
state = filter_states[i]
filter_dict = {}
for i in range(0, len(filter_state_names)):
filter_dict.update({filter_state_names[i] % name: state[i]})
# Clean and validate the set instance data. If it validates, store
# it in the state list.
cleaned_data = self.clean_data(name, filter_dict)
if cleaned_data:
data_elem = cleaned_data
data_set.append(data_elem)
# Return the list of states
return data_set
def get_filter_state_from_data(self, name, data):
"""
Another nasty little bit. This one (if not overridden) takes some
data and encodes it, using the filter state names, to be a valid
filter_state which would return the original data if passed to get_data
TODO: Make sure this actually works for stuff other than
NumericComparisonFilter
TODO: Add good comments :P
"""
if len(self.filter_state_names) > 1:
if not (hasattr(data, '__iter__') and len(self.filter_state_names) == len(data)):
raise Exception()
state = {}
for i in range(0, len(data)):
state.update({self.filter_state_names[i] % name: data[i]})
return state
else:
return {self.filter_state_names[0] % name: data}
def apply_filter(self, queryset, data):
filterspec = {self.filter_field: data}
return queryset.filter(**filterspec)
def apply_filter_set(self, queryset, data_set):
# Apply the filter to the queryset based on each entry in the data set
for data in data_set:
queryset = self.apply_filter(queryset, data)
return queryset
class Report(object):
__metaclass__ = ABCMeta
headers = None
footers = None
title = None
def __init__(self, filter_states={}):
"""
filter_state will be a querydict with keys corresponding to the names
of the filter members on this report object.
"""
if isinstance(filter_states, QueryDict):
self.filter_states = filter_states
else:
self.filter_states = QueryDict('', mutable=True)
self.filter_states.update(filter_states)
self.title = self.title or self.get_title_from_class_name()
def __getattribute__(self, name):
"""
When getting a filter attribute, looks for the corresponding filter
state and returns that instead of the filter object. If none is found,
looks for the default value on the filter object. If that's not found
either, then returns none.
"""
# Perform the normal __getattribute__ call
attr = object.__getattribute__(self, name)
# If it's a filter attribute...
if issubclass(type(attr), Filter):
# If we have a filter state for this filter, convert it to the type
# of data for this filter.
if not attr.filter_set:
return attr.get_data(name, self.filter_states)
else:
return attr.get_data_set(name, self.filter_states)
# This isn't a filter, just return the attribute
return attr
def get_title_from_class_name(self):
"""
Split the class name into words, delimited by capitals.
"""
words = re.split(r'([A-Z])', self.__class__.__name__)[1:]
words = [words[i] + words[i+1] for i in range(0, len(words) - 1, 2)]
return ' '.join(words)
def get_filter(self, name):
"""
Perform the normal __getattribute__ call,
and return it if it's a filter
"""
attr = object.__getattribute__(self, name)
return attr if issubclass(type(attr), Filter) else None
def get_filters(self):
"""
Return a list of all the names and attributes on this report instance
which have a base class of Filter.
"""
filters = []
for name in dir(self):
attr = object.__getattribute__(self, name)
if issubclass(type(attr), Filter):
filters.append((name, attr))
return sorted(filters, key=lambda attr: attr[1].order)
def get_filter_forms(self):
for name, attr in self.get_filters():
# If it is a filter set, loop through the existing list of data
# in the filter states, if there are any. For each of these, make a
# sub-form which includes a "delete" checkbox
if attr.filter_set:
# Get the new-set element form
form = attr.get_form_class(name)()
form.name = name
yield form
# Yield all the existing form elements
data_set = attr.get_data_set(name, self.filter_states)
for i in range(len(data_set)):
data = data_set[i]
state = attr.get_filter_state_from_data(name, data)
# Generate and yield a form containing the filter's field,
# as well as a deleting submit field to mark deletions
form = attr.get_form_class(
name=name,
postfix="FormSetElem"
)(data=state)
form.delete = {
'filter': name,
'index': i}
form.name = name
yield form
# If it isn't a filter set, just get its form class and render it
# with the filter state data
else:
form = attr.get_form_class(name)(data=self.filter_states)
form.name = name
yield form
def get_title(self):
return self.title
def get_headers(self):
return self.headers
def get_footers(self):
return self.footers
def apply_filter(self, queryset, name):
f = self.get_filter(name)
# If it's not a filterset, just get the regular data and apply it
if not f.filter_set:
data = f.get_data(name, self.filter_states)
if data:
return f.apply_filter(queryset, data)
# Otherwise, get the full data set and apply it
else:
data_set = f.get_data_set(name, self.filter_states)
if len(data_set) > 0:
return f.apply_filter_set(queryset, data_set)
# If we weren't able to apply the filter, return the raw queryset
return queryset
def apply_filters(self, queryset, names=None, excludes=[]):
for name, f in self.get_filters():
# Only apply this filter if it's selected
if name in excludes or (names and name not in names):
continue
# Apply this filter
queryset = self.apply_filter(queryset, name)
# Return the filtered queryset
return queryset
def get_queryset(self):
return []
def get_row(self, item):
"""
This can return a list for simple data that doesn't need special
template rendering, or a dict for more complex data where individual
fields will need to be rendered specially.
"""
return []
def get_rows(self):
rows = []
for item in self.get_queryset():
row = self.get_row(item)
if row:
rows.append(row)
return rows
def get_count(self):
return self.get_queryset().count()
def get_table(self):
return [[cell for cell in row] for row in self.get_rows()]
@staticmethod
def encode_filter_states(data):
"""
Converts a normal POST querydict to the filterstate data,
to be stored in the url
"""
#data = QueryDict(data.urlencode(), mutable=True)
return data
@staticmethod
def decode_filter_states(data):
"""
Opposite of encode_filter_states
"""
return data
class Row(object):
def __init__(self, list, attrs=None):
self.list = list
if attrs:
for name, value in attrs.iteritems():
setattr(self, name, value)
def __iter__(self):
return self.list.__iter__()
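# Minimal usage sketch (illustrative only, not part of the original module).
# Order, DateFilter and the request object are assumptions invented for the
# example; the Report/Filter API calls themselves mirror the classes above.
#
# class OrdersReport(Report):
#     headers = ['Order', 'Total']
#     placed_after = DateFilter(label='Placed after', filter_field='placed__gte')
#
#     def get_queryset(self):
#         return self.apply_filters(Order.objects.all())
#
#     def get_row(self, item):
#         return [item.pk, item.total]
#
# report = OrdersReport(filter_states=request.GET)
# forms = list(report.get_filter_forms())
# rows = report.get_rows()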
| 35.008547
| 102
| 0.595296
|
from django import forms
from django.http import QueryDict
from django.forms.formsets import formset_factory
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from datetime import date
import itertools
import re
from fields import SubmitButtonField, SubmitButtonWidget
class Filter(object):
__metaclass__ = ABCMeta
_order = itertools.count()
form_field_class = None
form_field_widget = None
filter_state_names = ['%s', ]
filter_field = ''
def __init__(self,
default=None,
required=False,
label=None,
form_field_class=None,
form_field_widget=None,
filter_set=False,
filter_field=None):
self.default = default
self.required = required
self.label = label
self.form_field_class = form_field_class or self.form_field_class
self.form_field_widget = form_field_widget or self.form_field_widget
self.order = Filter._order.next()
self.filter_set = filter_set
self.filter_field = filter_field or self.filter_field
def get_form_field(self):
return self.form_field_class(required=(self.required and not self.filter_set),
widget=self.form_field_widget,
label=self.label)
def get_form_class(self, name, index=0, postfix="Form"):
form_class_name = "%s%s" % (type(self).__name__, postfix)
form_class_dict = {name: self.get_form_field()}
return type(form_class_name, (forms.Form,), form_class_dict)
def clean_data(self, name, raw_data):
form = self.get_form_class(name)(data=raw_data)
return form.cleaned_data[name] if form.is_valid() else None
def get_data(self, name, data):
cleaned_data = self.clean_data(name, data)
return cleaned_data if cleaned_data else self.default
def get_data_set(self, name, data):
if not self.filter_set:
return [self.get_data(name, data)]
# Get the deletion field name and index
delete = data.get('delete', None)
delete_index = None
if delete:
n, i = delete.split('.')
if n == name:
delete_index = int(i) + 1
# Zip together all the lists of filter state values. This gives us a
# list of tuples of filter state fields. Ugly but necessary in case we
# have a filter which generates a MultiValueField (aka,
# NumericComparisonFilter). Exclude elements which have been deleted.
filter_state_names = self.filter_state_names[:]
filter_state_list = [data.getlist(state_name % name, []) for state_name in filter_state_names]
filter_states = zip(*filter_state_list)
# Loop over every filter state tuple, converting it to a mini filter-
# -state dict. Clean it, and store the cleaned data in a list
data_set = []
for i in range(len(filter_states)):
# If this index is getting deleted, don't add it
if i == delete_index:
continue
state = filter_states[i]
filter_dict = {}
for j in range(len(filter_state_names)):
filter_dict.update({filter_state_names[j] % name: state[j]})
cleaned_data = self.clean_data(name, filter_dict)
if cleaned_data:
data_elem = cleaned_data
data_set.append(data_elem)
return data_set
def get_filter_state_from_data(self, name, data):
if len(self.filter_state_names) > 1:
if not (hasattr(data, '__iter__') and len(self.filter_state_names) == len(data)):
raise Exception("Data for a filter with multiple state names must be an iterable of the same length as filter_state_names")
state = {}
for i in range(0, len(data)):
state.update({self.filter_state_names[i] % name: data[i]})
return state
else:
return {self.filter_state_names[0] % name: data}
def apply_filter(self, queryset, data):
filterspec = {self.filter_field: data}
return queryset.filter(**filterspec)
def apply_filter_set(self, queryset, data_set):
for data in data_set:
queryset = self.apply_filter(queryset, data)
return queryset
class Report(object):
__metaclass__ = ABCMeta
headers = None
footers = None
title = None
def __init__(self, filter_states={}):
if isinstance(filter_states, QueryDict):
self.filter_states = filter_states
else:
self.filter_states = QueryDict('', mutable=True)
self.filter_states.update(filter_states)
self.title = self.title or self.get_title_from_class_name()
def __getattribute__(self, name):
attr = object.__getattribute__(self, name)
if issubclass(type(attr), Filter):
# If we have a filter state for this filter, convert it to the type
# of data for this filter.
if not attr.filter_set:
return attr.get_data(name, self.filter_states)
else:
return attr.get_data_set(name, self.filter_states)
# This isn't a filter, just return the attribute
return attr
def get_title_from_class_name(self):
words = re.split(r'([A-Z])', self.__class__.__name__)[1:]
words = [words[i] + words[i+1] for i in range(0, len(words) - 1, 2)]
return ' '.join(words)
def get_filter(self, name):
attr = object.__getattribute__(self, name)
return attr if issubclass(type(attr), Filter) else None
def get_filters(self):
filters = []
for name in dir(self):
attr = object.__getattribute__(self, name)
if issubclass(type(attr), Filter):
filters.append((name, attr))
return sorted(filters, key=lambda attr: attr[1].order)
def get_filter_forms(self):
for name, attr in self.get_filters():
if attr.filter_set:
form = attr.get_form_class(name)()
form.name = name
yield form
data_set = attr.get_data_set(name, self.filter_states)
for i in range(len(data_set)):
data = data_set[i]
state = attr.get_filter_state_from_data(name, data)
# as well as a deleting submit field to mark deletions
form = attr.get_form_class(
name=name,
postfix="FormSetElem"
)(data=state)
form.delete = {
'filter': name,
'index': i}
form.name = name
yield form
# If it isn't a filter set, just get its form class and render it
# with the filter state data
else:
form = attr.get_form_class(name)(data=self.filter_states)
form.name = name
yield form
def get_title(self):
return self.title
def get_headers(self):
return self.headers
def get_footers(self):
return self.footers
def apply_filter(self, queryset, name):
f = self.get_filter(name)
# If it's not a filterset, just get the regular data and apply it
if not f.filter_set:
data = f.get_data(name, self.filter_states)
if data:
return f.apply_filter(queryset, data)
else:
data_set = f.get_data_set(name, self.filter_states)
if len(data_set) > 0:
return f.apply_filter_set(queryset, data_set)
return queryset
def apply_filters(self, queryset, names=None, excludes=[]):
for name, f in self.get_filters():
# Only apply this filter if it's selected
if name in excludes or (names and name not in names):
continue
queryset = self.apply_filter(queryset, name)
return queryset
def get_queryset(self):
return []
def get_row(self, item):
return []
def get_rows(self):
rows = []
for item in self.get_queryset():
row = self.get_row(item)
if row:
rows.append(row)
return rows
def get_count(self):
return self.get_queryset().count()
def get_table(self):
return [[cell for cell in row] for row in self.get_rows()]
@staticmethod
def encode_filter_states(data):
return data
@staticmethod
def decode_filter_states(data):
return data
class Row(object):
def __init__(self, list, attrs=None):
self.list = list
if attrs:
for name, value in attrs.iteritems():
setattr(self, name, value)
def __iter__(self):
return self.list.__iter__()
| true
| true
|
79056a95587e00fccae95091e487e9684f3db15e
| 10,292
|
py
|
Python
|
grr/lib/rdfvalues/paths.py
|
panhania/grr
|
fe16a7311a528e31fe0e315a880e98273b8df960
|
[
"Apache-2.0"
] | null | null | null |
grr/lib/rdfvalues/paths.py
|
panhania/grr
|
fe16a7311a528e31fe0e315a880e98273b8df960
|
[
"Apache-2.0"
] | null | null | null |
grr/lib/rdfvalues/paths.py
|
panhania/grr
|
fe16a7311a528e31fe0e315a880e98273b8df960
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Pathspecs are methods of specifying the path on the client.
The GRR client has a number of drivers to virtualize access to different objects
to create a Virtual File System (VFS) abstraction. These are called 'VFS
Handlers' and they provide typical file-like operations (e.g. read, seek, tell
and stat). It is possible to recursively apply different drivers in the correct
order to arrive at a certain file like object. In order to specify how drivers
should be applied we use 'Path Specifications' or pathspec.
Each VFS handler is constructed from a previous handler and a pathspec. The
pathspec is just a collection of arguments which make sense to the specific VFS
handler. The type of the handler is carried by the pathtype parameter.
On the server the PathSpec is represented as a PathSpec object, and stored
as an attribute of the AFF4 object. This module defines this abstraction.
"""
import itertools
import posixpath
import re
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import standard as rdf_standard
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import jobs_pb2
from grr.server import artifact_utils
INTERPOLATED_REGEX = re.compile(r"%%([^%]+?)%%")
# Grouping pattern: e.g. {test.exe,foo.doc,bar.txt}
GROUPING_PATTERN = re.compile("{([^}]+,[^}]+)}")
class PathSpec(rdf_structs.RDFProtoStruct):
"""A path specification.
The pathspec protobuf is a recursive protobuf which contains components. This
class makes it easier to manipulate these structures by providing useful
helpers.
"""
protobuf = jobs_pb2.PathSpec
rdf_deps = [
rdfvalue.ByteSize,
"PathSpec", # TODO(user): recursive definition.
]
def CopyConstructor(self, other):
# pylint: disable=protected-access
self.SetRawData(other._CopyRawData())
# pylint: enable=protected-access
self.age = other.age
def __len__(self):
"""Return the total number of path components."""
i = -1
for i, _ in enumerate(self):
pass
return i + 1
def __getitem__(self, item):
for i, element in enumerate(self):
if i == item:
return element
raise IndexError("Pathspec index (%s) out of range" % item)
def __iter__(self):
"""Only iterate over all components from the current pointer."""
element = self
while element.HasField("pathtype"):
yield element
if element.HasField("nested_path"):
element = element.nested_path
else:
break
def Insert(self, index, rdfpathspec=None, **kwarg):
"""Insert a single component at index."""
if rdfpathspec is None:
rdfpathspec = self.__class__(**kwarg)
if index == 0:
# Copy ourselves to a temp copy.
nested_proto = self.__class__()
nested_proto.SetRawData(self.GetRawData())
# Replace ourselves with the new object.
self.SetRawData(rdfpathspec.GetRawData())
# Append the temp copy to the end.
self.last.nested_path = nested_proto
else:
previous = self[index - 1]
rdfpathspec.last.nested_path = previous.nested_path
previous.nested_path = rdfpathspec
def Append(self, component=None, **kwarg):
"""Append a new pathspec component to this pathspec."""
if component is None:
component = self.__class__(**kwarg)
if self.HasField("pathtype"):
self.last.nested_path = component
else:
for k, v in kwarg.items():
setattr(self, k, v)
self.SetRawData(component.GetRawData())
return self
def CollapsePath(self):
return utils.JoinPath(*[x.path for x in self])
def Pop(self, index=0):
"""Removes and returns the pathspec at the specified index."""
if index < 0:
index += len(self)
if index == 0:
result = self.__class__()
result.SetRawData(self.GetRawData())
self.SetRawData(self.nested_path.GetRawData())
else:
# Get the raw protobufs for the previous member.
previous = self[index - 1]
result = previous.nested_path
# Manipulate the previous members protobuf to patch the next component in.
previous.nested_path = result.nested_path
result.nested_path = None
return result
@property
def first(self):
return self
@property
def last(self):
if self.HasField("pathtype") and self.pathtype != self.PathType.UNSET:
return list(self)[-1]
return self
def Dirname(self):
"""Get a new copied object with only the directory path."""
result = self.Copy()
while 1:
last_directory = posixpath.dirname(result.last.path)
if last_directory != "/" or len(result) <= 1:
result.last.path = last_directory
# Make sure to clear the inode information.
result.last.inode = None
break
result.Pop(-1)
return result
def Basename(self):
for component in reversed(self):
basename = posixpath.basename(component.path)
if basename:
return basename
return ""
def Validate(self):
if not self.HasField("pathtype") or self.pathtype == self.PathType.UNSET:
raise ValueError("No path type set in PathSpec.")
AFF4_PREFIXES = {
0: "/fs/os", # PathSpec.PathType.OS
1: "/fs/tsk", # PathSpec.PathType.TSK
2: "/registry", # PathSpec.PathType.REGISTRY
3: "/devices/memory", # PathSpec.PathType.MEMORY
4: "/temp", # PathSpec.PathType.TMPFILE
}
def AFF4Path(self, client_urn):
"""Returns the AFF4 URN this pathspec will be stored under.
Args:
client_urn: A ClientURN.
Returns:
A urn that corresponds to this pathspec.
Raises:
ValueError: If pathspec is not of the correct type.
"""
# If the first level is OS and the second level is TSK its probably a mount
# point resolution. We map it into the tsk branch. For example if we get:
# path: \\\\.\\Volume{1234}\\
# pathtype: OS
# mount_point: /c:/
# nested_path {
# path: /windows/
# pathtype: TSK
# }
# We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/
if not self.HasField("pathtype"):
raise ValueError("Can't determine AFF4 path without a valid pathtype.")
first_component = self[0]
dev = first_component.path
if first_component.HasField("offset"):
# We divide here just to get prettier numbers in the GUI
dev += ":" + str(first_component.offset / 512)
if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS and
self[1].pathtype == PathSpec.PathType.TSK):
result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev]
# Skip the top level pathspec.
start = 1
else:
# For now just map the top level prefix based on the first pathtype
result = [self.AFF4_PREFIXES[first_component.pathtype]]
start = 0
for p in self[start]:
component = p.path
# The following encode different pathspec properties into the AFF4 path in
# such a way that unique files on the client are mapped to unique URNs in
# the AFF4 space. Note that this transformation does not need to be
# reversible since we always use the PathSpec when accessing files on the
# client.
if p.HasField("offset"):
component += ":" + str(p.offset / 512)
# Support ADS names.
if p.HasField("stream_name"):
component += ":" + p.stream_name
result.append(component)
return client_urn.Add("/".join(result))
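# Illustrative sketch, not part of the original module: composing a nested
# pathspec for an NTFS volume accessed raw and then parsed with TSK. The
# concrete paths and client_urn (a ClientURN) are placeholders; the Append,
# CollapsePath and AFF4Path calls mirror the methods defined above.
#
# ps = PathSpec(path=r"\\.\Volume{1234}", pathtype=PathSpec.PathType.OS)
# ps.Append(path="/windows/notepad.exe", pathtype=PathSpec.PathType.TSK)
# len(ps)            # -> 2 components
# ps.CollapsePath()  # joins the component paths into one string
# ps.AFF4Path(client_urn)
# # -> roughly aff4://<client_id>/fs/tsk/\\.\Volume{1234}/windows/notepad.exe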
class GlobExpression(rdfvalue.RDFString):
"""A glob expression for a client path.
A glob expression represents a set of regular expressions which match files on
the client. The Glob expression supports the following expansions:
1) Client attribute expansions are surrounded with %% characters. They will be
expanded from the client AFF4 object.
2) Groupings are collections of alternates. e.g. {foo.exe,bar.sys}
3) Wild cards like * and ?
"""
context_help_url = "investigating-with-grr/flows/specifying-file-paths.html"
RECURSION_REGEX = re.compile(r"\*\*(\d*)")
def Validate(self):
"""GlobExpression is valid."""
if len(self.RECURSION_REGEX.findall(self._value)) > 1:
raise ValueError("Only one ** is permitted per path: %s." % self._value)
def Interpolate(self, client=None):
kb = client.Get(client.Schema.KNOWLEDGE_BASE)
patterns = artifact_utils.InterpolateKbAttributes(self._value, kb)
for pattern in patterns:
# Normalize the component path (this allows us to resolve ../
# sequences).
pattern = utils.NormalizePath(pattern.replace("\\", "/"))
for pattern in self.InterpolateGrouping(pattern):
yield pattern
def InterpolateGrouping(self, pattern):
"""Interpolate inline globbing groups."""
components = []
offset = 0
for match in GROUPING_PATTERN.finditer(pattern):
components.append([pattern[offset:match.start()]])
# Expand the attribute into the set of possibilities:
alternatives = match.group(1).split(",")
components.append(set(alternatives))
offset = match.end()
components.append([pattern[offset:]])
# Now calculate the cartesian products of all these sets to form all
# strings.
for vector in itertools.product(*components):
yield u"".join(vector)
def _ReplaceRegExGrouping(self, grouping):
alternatives = grouping.group(1).split(",")
return "(" + "|".join(re.escape(s) for s in alternatives) + ")"
def _ReplaceRegExPart(self, part):
if part == "**/":
return "(?:.*\\/)?"
elif part == "*":
return "[^\\/]*"
elif part == "?":
return "[^\\/]"
elif GROUPING_PATTERN.match(part):
return GROUPING_PATTERN.sub(self._ReplaceRegExGrouping, part)
else:
return re.escape(part)
REGEX_SPLIT_PATTERN = re.compile(
"(" + "|".join(["{[^}]+,[^}]+}", "\\?", "\\*\\*\\/?", "\\*"]) + ")")
def AsRegEx(self):
"""Return the current glob as a simple regex.
Note: No interpolation is performed.
Returns:
A RegularExpression() object.
"""
parts = self.__class__.REGEX_SPLIT_PATTERN.split(self._value)
result = "".join(self._ReplaceRegExPart(p) for p in parts)
return rdf_standard.RegularExpression("(?i)\\A%s\\Z" % result)
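# Illustrative sketch, not part of the original module: grouping expansion and
# the generated regex. The example pattern is invented.
#
# g = GlobExpression("/tmp/{foo,bar}/**/*.exe")
# sorted(g.InterpolateGrouping("/tmp/{foo,bar}/**/*.exe"))
# # -> ['/tmp/bar/**/*.exe', '/tmp/foo/**/*.exe']
# g.AsRegEx()  # a case-insensitive regex anchored with \A ... \Z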
| 30.449704
| 80
| 0.667314
|
import itertools
import posixpath
import re
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import standard as rdf_standard
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import jobs_pb2
from grr.server import artifact_utils
INTERPOLATED_REGEX = re.compile(r"%%([^%]+?)%%")
GROUPING_PATTERN = re.compile("{([^}]+,[^}]+)}")
class PathSpec(rdf_structs.RDFProtoStruct):
protobuf = jobs_pb2.PathSpec
rdf_deps = [
rdfvalue.ByteSize,
"PathSpec",
]
def CopyConstructor(self, other):
self.SetRawData(other._CopyRawData())
self.age = other.age
def __len__(self):
i = -1
for i, _ in enumerate(self):
pass
return i + 1
def __getitem__(self, item):
for i, element in enumerate(self):
if i == item:
return element
raise IndexError("Pathspec index (%s) out of range" % item)
def __iter__(self):
element = self
while element.HasField("pathtype"):
yield element
if element.HasField("nested_path"):
element = element.nested_path
else:
break
def Insert(self, index, rdfpathspec=None, **kwarg):
if rdfpathspec is None:
rdfpathspec = self.__class__(**kwarg)
if index == 0:
nested_proto = self.__class__()
nested_proto.SetRawData(self.GetRawData())
self.SetRawData(rdfpathspec.GetRawData())
self.last.nested_path = nested_proto
else:
previous = self[index - 1]
rdfpathspec.last.nested_path = previous.nested_path
previous.nested_path = rdfpathspec
def Append(self, component=None, **kwarg):
if component is None:
component = self.__class__(**kwarg)
if self.HasField("pathtype"):
self.last.nested_path = component
else:
for k, v in kwarg.items():
setattr(self, k, v)
self.SetRawData(component.GetRawData())
return self
def CollapsePath(self):
return utils.JoinPath(*[x.path for x in self])
def Pop(self, index=0):
if index < 0:
index += len(self)
if index == 0:
result = self.__class__()
result.SetRawData(self.GetRawData())
self.SetRawData(self.nested_path.GetRawData())
else:
previous = self[index - 1]
result = previous.nested_path
previous.nested_path = result.nested_path
result.nested_path = None
return result
@property
def first(self):
return self
@property
def last(self):
if self.HasField("pathtype") and self.pathtype != self.PathType.UNSET:
return list(self)[-1]
return self
def Dirname(self):
result = self.Copy()
while 1:
last_directory = posixpath.dirname(result.last.path)
if last_directory != "/" or len(result) <= 1:
result.last.path = last_directory
result.last.inode = None
break
result.Pop(-1)
return result
def Basename(self):
for component in reversed(self):
basename = posixpath.basename(component.path)
if basename:
return basename
return ""
def Validate(self):
if not self.HasField("pathtype") or self.pathtype == self.PathType.UNSET:
raise ValueError("No path type set in PathSpec.")
AFF4_PREFIXES = {
0: "/fs/os",
1: "/fs/tsk",
2: "/registry",
3: "/devices/memory",
4: "/temp",
}
def AFF4Path(self, client_urn):
if not self.HasField("pathtype"):
raise ValueError("Can't determine AFF4 path without a valid pathtype.")
first_component = self[0]
dev = first_component.path
if first_component.HasField("offset"):
# We divide here just to get prettier numbers in the GUI
dev += ":" + str(first_component.offset / 512)
if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS and
self[1].pathtype == PathSpec.PathType.TSK):
result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev]
# Skip the top level pathspec.
start = 1
else:
# For now just map the top level prefix based on the first pathtype
result = [self.AFF4_PREFIXES[first_component.pathtype]]
start = 0
for p in self[start]:
component = p.path
# The following encode different pathspec properties into the AFF4 path in
# such a way that unique files on the client are mapped to unique URNs in
# the AFF4 space. Note that this transformation does not need to be
# reversible since we always use the PathSpec when accessing files on the
# client.
if p.HasField("offset"):
component += ":" + str(p.offset / 512)
# Support ADS names.
if p.HasField("stream_name"):
component += ":" + p.stream_name
result.append(component)
return client_urn.Add("/".join(result))
class GlobExpression(rdfvalue.RDFString):
context_help_url = "investigating-with-grr/flows/specifying-file-paths.html"
RECURSION_REGEX = re.compile(r"\*\*(\d*)")
def Validate(self):
if len(self.RECURSION_REGEX.findall(self._value)) > 1:
raise ValueError("Only one ** is permitted per path: %s." % self._value)
def Interpolate(self, client=None):
kb = client.Get(client.Schema.KNOWLEDGE_BASE)
patterns = artifact_utils.InterpolateKbAttributes(self._value, kb)
for pattern in patterns:
# Normalize the component path (this allows us to resolve ../
# sequences).
pattern = utils.NormalizePath(pattern.replace("\\", "/"))
for pattern in self.InterpolateGrouping(pattern):
yield pattern
def InterpolateGrouping(self, pattern):
components = []
offset = 0
for match in GROUPING_PATTERN.finditer(pattern):
components.append([pattern[offset:match.start()]])
# Expand the attribute into the set of possibilities:
alternatives = match.group(1).split(",")
components.append(set(alternatives))
offset = match.end()
components.append([pattern[offset:]])
# Now calculate the cartesian products of all these sets to form all
# strings.
for vector in itertools.product(*components):
yield u"".join(vector)
def _ReplaceRegExGrouping(self, grouping):
alternatives = grouping.group(1).split(",")
return "(" + "|".join(re.escape(s) for s in alternatives) + ")"
def _ReplaceRegExPart(self, part):
if part == "**/":
return "(?:.*\\/)?"
elif part == "*":
return "[^\\/]*"
elif part == "?":
return "[^\\/]"
elif GROUPING_PATTERN.match(part):
return GROUPING_PATTERN.sub(self._ReplaceRegExGrouping, part)
else:
return re.escape(part)
REGEX_SPLIT_PATTERN = re.compile(
"(" + "|".join(["{[^}]+,[^}]+}", "\\?", "\\*\\*\\/?", "\\*"]) + ")")
def AsRegEx(self):
parts = self.__class__.REGEX_SPLIT_PATTERN.split(self._value)
result = "".join(self._ReplaceRegExPart(p) for p in parts)
return rdf_standard.RegularExpression("(?i)\\A%s\\Z" % result)
| true
| true
|
79056c4d6dbb00640cc2ebf158ebf31c20a234ed
| 8,530
|
py
|
Python
|
python/cuml/dask/cluster/kmeans.py
|
codereport/cuml
|
7225fadb72ef5408af58ab16ce062762b64f2c79
|
[
"Apache-2.0"
] | null | null | null |
python/cuml/dask/cluster/kmeans.py
|
codereport/cuml
|
7225fadb72ef5408af58ab16ce062762b64f2c79
|
[
"Apache-2.0"
] | null | null | null |
python/cuml/dask/cluster/kmeans.py
|
codereport/cuml
|
7225fadb72ef5408af58ab16ce062762b64f2c79
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy as cp
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.base import DelayedPredictionMixin
from cuml.dask.common.base import DelayedTransformMixin
from cuml.dask.common.base import mnmg_import
from cuml.dask.common.input_utils import concatenate
from cuml.dask.common.input_utils import DistributedDataHandler
from cuml.dask.common.comms import CommsContext
from cuml.dask.common.comms import worker_state
from cuml.dask.common.utils import raise_exception_from_futures
from dask.distributed import wait
from cuml.common.memory_utils import with_cupy_rmm
class KMeans(BaseEstimator, DelayedPredictionMixin, DelayedTransformMixin):
"""
Multi-Node Multi-GPU implementation of KMeans.
This version minimizes data transfer by sharing only
the centroids between workers in each iteration.
Predictions are done embarrassingly parallel, using cuML's
single-GPU version.
For more information on this implementation, refer to the
documentation for single-GPU K-Means.
Parameters
----------
handle : cuml.Handle
If it is None, a new one is created just for this class.
n_clusters : int (default = 8)
The number of centroids or clusters you want.
max_iter : int (default = 300)
The more iterations of EM, the more accurate, but slower.
tol : float (default = 1e-4)
Stopping criterion when centroid means do not change much.
verbose : int or boolean (default = False)
Logging level for printing diagnostic information
random_state : int (default = 1)
If you want results to be the same when you restart Python,
select a state.
init : {'scalable-kmeans++', 'k-means||' , 'random' or an ndarray}
(default = 'scalable-k-means++')
'scalable-k-means++' or 'k-means||': Uses fast and stable scalable
kmeans++ initialization.
'random': Choose 'n_cluster' observations (rows) at random
from data for the initial centroids. If an ndarray is passed,
it should be of shape (n_clusters, n_features) and gives the
initial centers.
oversampling_factor : int (default = 2) The amount of points to sample
in scalable k-means++ initialization for potential centroids.
Increasing this value can lead to better initial centroids at the
cost of memory. The total number of centroids sampled in scalable
k-means++ is oversampling_factor * n_clusters * 8.
max_samples_per_batch : int (default = 32768) The number of data
samples to use for batches of the pairwise distance computation.
This computation is done throughout both fit predict. The default
should suit most cases. The total number of elements in the
batched pairwise distance computation is max_samples_per_batch
* n_clusters. It might become necessary to lower this number when
n_clusters becomes prohibitively large.
Attributes
----------
cluster_centers_ : cuDF DataFrame or CuPy ndarray
The coordinates of the final clusters. This represents the "mean" of
each data cluster.
"""
def __init__(self, client=None, verbose=False, **kwargs):
super(KMeans, self).__init__(client=client,
verbose=verbose,
**kwargs)
@staticmethod
@mnmg_import
def _func_fit(sessionId, objs, datatype, **kwargs):
from cuml.cluster.kmeans_mg import KMeansMG as cumlKMeans
handle = worker_state(sessionId)["handle"]
inp_data = concatenate(objs)
return cumlKMeans(handle=handle, output_type=datatype,
**kwargs).fit(inp_data)
@staticmethod
def _score(model, data):
ret = model.score(data)
return ret
@with_cupy_rmm
def fit(self, X):
"""
Fit a multi-node multi-GPU KMeans model
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Training data to cluster.
"""
data = DistributedDataHandler.create(X, client=self.client)
self.datatype = data.datatype
comms = CommsContext(comms_p2p=False)
comms.init(workers=data.workers)
kmeans_fit = [self.client.submit(KMeans._func_fit,
comms.sessionId,
wf[1],
self.datatype,
**self.kwargs,
workers=[wf[0]],
pure=False)
for idx, wf in enumerate(data.worker_to_parts.items())]
wait(kmeans_fit)
raise_exception_from_futures(kmeans_fit)
comms.destroy()
self.local_model = kmeans_fit[0].result()
self.cluster_centers_ = self.local_model.cluster_centers_
return self
def fit_predict(self, X, delayed=True):
"""
Compute cluster centers and predict cluster index for each sample.
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing predictions
"""
return self.fit(X).predict(X, delayed=delayed)
def predict(self, X, delayed=True):
"""
Predict labels for the input
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
delayed : bool (default = True)
Whether to do a lazy prediction (and return Delayed objects) or an
eagerly executed one.
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing predictions
"""
return self._predict(X, delayed=delayed)
def fit_transform(self, X, delayed=True):
"""
Calls fit followed by transform using a distributed KMeans model
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
delayed : bool (default = True)
Whether to execute as a delayed task or eager.
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing the transformed data
"""
return self.fit(X).transform(X, delayed=delayed)
def transform(self, X, delayed=True):
"""
Transforms the input into the learned centroid space
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
delayed : bool (default = True)
Whether to execute as a delayed task or eager.
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing the transformed data
"""
return self._transform(X, n_dims=2, delayed=delayed)
@with_cupy_rmm
def score(self, X):
"""
Computes the inertia score for the trained KMeans centroids.
Parameters
----------
X : dask_cudf.Dataframe
Dataframe to compute score
Returns
-------
Inertia score
"""
scores = self._run_parallel_func(KMeans._score,
X,
n_dims=1,
delayed=False,
output_futures=True)
return -1 * cp.sum(cp.asarray(
self.client.compute(scores, sync=True))*-1.0)
def get_param_names(self):
return list(self.kwargs.keys())
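# Illustrative usage sketch, not part of the original file. It assumes a running
# Dask cluster plus cudf/dask_cudf for the distributed input; the scheduler
# address and the toy data are placeholders.
#
# from dask.distributed import Client
# import cudf, dask_cudf
#
# client = Client("scheduler-address:8786")
# df = dask_cudf.from_cudf(cudf.DataFrame({"x": [1.0, 2.0, 8.0, 9.0],
#                                          "y": [1.0, 2.0, 8.0, 9.0]}),
#                          npartitions=2)
# model = KMeans(n_clusters=2, client=client).fit(df)
# labels = model.predict(df, delayed=False)
# inertia = model.score(df)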
| 33.582677
| 78
| 0.617468
|
import cupy as cp
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.base import DelayedPredictionMixin
from cuml.dask.common.base import DelayedTransformMixin
from cuml.dask.common.base import mnmg_import
from cuml.dask.common.input_utils import concatenate
from cuml.dask.common.input_utils import DistributedDataHandler
from cuml.dask.common.comms import CommsContext
from cuml.dask.common.comms import worker_state
from cuml.dask.common.utils import raise_exception_from_futures
from dask.distributed import wait
from cuml.common.memory_utils import with_cupy_rmm
class KMeans(BaseEstimator, DelayedPredictionMixin, DelayedTransformMixin):
def __init__(self, client=None, verbose=False, **kwargs):
super(KMeans, self).__init__(client=client,
verbose=verbose,
**kwargs)
@staticmethod
@mnmg_import
def _func_fit(sessionId, objs, datatype, **kwargs):
from cuml.cluster.kmeans_mg import KMeansMG as cumlKMeans
handle = worker_state(sessionId)["handle"]
inp_data = concatenate(objs)
return cumlKMeans(handle=handle, output_type=datatype,
**kwargs).fit(inp_data)
@staticmethod
def _score(model, data):
ret = model.score(data)
return ret
@with_cupy_rmm
def fit(self, X):
data = DistributedDataHandler.create(X, client=self.client)
self.datatype = data.datatype
comms = CommsContext(comms_p2p=False)
comms.init(workers=data.workers)
kmeans_fit = [self.client.submit(KMeans._func_fit,
comms.sessionId,
wf[1],
self.datatype,
**self.kwargs,
workers=[wf[0]],
pure=False)
for idx, wf in enumerate(data.worker_to_parts.items())]
wait(kmeans_fit)
raise_exception_from_futures(kmeans_fit)
comms.destroy()
self.local_model = kmeans_fit[0].result()
self.cluster_centers_ = self.local_model.cluster_centers_
return self
def fit_predict(self, X, delayed=True):
return self.fit(X).predict(X, delayed=delayed)
def predict(self, X, delayed=True):
return self._predict(X, delayed=delayed)
def fit_transform(self, X, delayed=True):
return self.fit(X).transform(X, delayed=delayed)
def transform(self, X, delayed=True):
return self._transform(X, n_dims=2, delayed=delayed)
@with_cupy_rmm
def score(self, X):
scores = self._run_parallel_func(KMeans._score,
X,
n_dims=1,
delayed=False,
output_futures=True)
return -1 * cp.sum(cp.asarray(
self.client.compute(scores, sync=True))*-1.0)
def get_param_names(self):
return list(self.kwargs.keys())
| true
| true
|
79056d2216512f0e0029ae4ed759c3d6388e83c9
| 2,651
|
py
|
Python
|
src/sqlfluff/core/rules/std/L042.py
|
Jophish/sqlfluff
|
c579ca3ec7c0a83a04e40aa94fe9478486198b04
|
[
"MIT"
] | null | null | null |
src/sqlfluff/core/rules/std/L042.py
|
Jophish/sqlfluff
|
c579ca3ec7c0a83a04e40aa94fe9478486198b04
|
[
"MIT"
] | 1
|
2020-04-02T09:05:39.000Z
|
2020-12-10T14:42:59.000Z
|
src/sqlfluff/core/rules/std/L042.py
|
Jophish/sqlfluff
|
c579ca3ec7c0a83a04e40aa94fe9478486198b04
|
[
"MIT"
] | null | null | null |
"""Implementation of Rule L042."""
from sqlfluff.core.rules.base import BaseCrawler, LintResult
from sqlfluff.core.rules.doc_decorators import document_configuration
@document_configuration
class Rule_L042(BaseCrawler):
"""Join/From clauses should not contain subqueries. Use CTEs instead.
By default this rule is configured to allow subqueries within `FROM`
clauses but not within `JOIN` clauses. If you prefer a stricter lint
then this is configurable.
NB: Some dialects don't allow CTEs, and for those dialects
this rule makes no sense and should be disabled.
| **Anti-pattern**
.. code-block:: sql
select
a.x, a.y, b.z
from a
join (
select x, z from b
) using(x)
| **Best practice**
.. code-block:: sql
with c as (
select x, z from b
)
select
a.x, a.y, c.z
from a
join c using(x)
"""
config_keywords = ["forbid_subquery_in"]
_config_mapping = {
"join": ["join_clause"],
"from": ["from_clause"],
"both": ["join_clause", "from_clause"],
}
def _eval(self, segment, **kwargs):
"""Join/From clauses should not contain subqueries. Use CTEs instead.
NB: No fix for this routine because it would be very complex to
implement reliably.
"""
parent_types = self._config_mapping[self.forbid_subquery_in]
for parent_type in parent_types:
if segment.is_type(parent_type):
# Get the referenced table segment
table_expression = segment.get_child("table_expression")
if not table_expression:
return None # There isn't one. We're done.
# Get the main bit
table_expression = table_expression.get_child("main_table_expression")
if not table_expression:
return None # There isn't one. We're done.
# If any of the following are found, raise an issue.
# If not, we're fine.
problem_children = [
"with_compound_statement",
"set_expression",
"select_statement",
]
for seg_type in problem_children:
seg = table_expression.get_child(seg_type)
if seg:
return LintResult(
anchor=seg,
description=f"{parent_type} clauses should not contain subqueries. Use CTEs instead",
)
| 31.559524
| 113
| 0.562429
|
from sqlfluff.core.rules.base import BaseCrawler, LintResult
from sqlfluff.core.rules.doc_decorators import document_configuration
@document_configuration
class Rule_L042(BaseCrawler):
config_keywords = ["forbid_subquery_in"]
_config_mapping = {
"join": ["join_clause"],
"from": ["from_clause"],
"both": ["join_clause", "from_clause"],
}
def _eval(self, segment, **kwargs):
parent_types = self._config_mapping[self.forbid_subquery_in]
for parent_type in parent_types:
if segment.is_type(parent_type):
table_expression = segment.get_child("table_expression")
if not table_expression:
return None
table_expression = table_expression.get_child("main_table_expression")
if not table_expression:
return None
problem_children = [
"with_compound_statement",
"set_expression",
"select_statement",
]
for seg_type in problem_children:
seg = table_expression.get_child(seg_type)
if seg:
return LintResult(
anchor=seg,
description=f"{parent_type} clauses should not contain subqueries. Use CTEs instead",
)
| true
| true
|
79056d3c213cf9c3c5b51f02b3618516f5ebaf18
| 179
|
py
|
Python
|
api/__init__.py
|
zhaojiejoe/fastapi-friendly-response-demo
|
7628e4af481a4df4661c16af1d7e0164ecf64952
|
[
"MIT"
] | 1
|
2020-05-12T18:49:43.000Z
|
2020-05-12T18:49:43.000Z
|
api/__init__.py
|
zhaojiejoe/fastapi-friendly-response-demo
|
7628e4af481a4df4661c16af1d7e0164ecf64952
|
[
"MIT"
] | null | null | null |
api/__init__.py
|
zhaojiejoe/fastapi-friendly-response-demo
|
7628e4af481a4df4661c16af1d7e0164ecf64952
|
[
"MIT"
] | null | null | null |
from fastapi_utils.inferring_router import InferringRouter
from . import views
router = InferringRouter()
router.include_router(views.router, prefix='/api', tags=['api'])
| 25.571429
| 65
| 0.765363
|
from fastapi_utils.inferring_router import InferringRouter
from . import views
router = InferringRouter()
router.include_router(views.router, prefix='/api', tags=['api'])
| true
| true
|
79056e0540f5aa0eabc1dae02e853b45b7c8665c
| 6,164
|
py
|
Python
|
bsddb3/bsddb3-6.2.6/build/lib.freebsd-12.1-RELEASE-amd64-3.7/bsddb3/tests/test_compat.py
|
mpwillson/spambayes3
|
b51d7bb9016066234ce88dad65faabed85f63d78
|
[
"PSF-2.0"
] | 1
|
2020-03-21T15:17:22.000Z
|
2020-03-21T15:17:22.000Z
|
bsddb3/bsddb3-6.2.6/Lib3/bsddb/test/test_compat.py
|
mpwillson/spambayes3
|
b51d7bb9016066234ce88dad65faabed85f63d78
|
[
"PSF-2.0"
] | 1
|
2022-02-22T22:23:55.000Z
|
2022-02-22T22:23:55.000Z
|
bsddb3/bsddb3-6.2.6/build/lib.freebsd-12.1-RELEASE-amd64-3.7/bsddb3/tests/test_compat.py
|
mpwillson/spambayes3
|
b51d7bb9016066234ce88dad65faabed85f63d78
|
[
"PSF-2.0"
] | null | null | null |
"""
Copyright (c) 2008-2018, Jesus Cea Avion <jcea@jcea.es>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Jesus Cea Avion nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
"""
Test cases adapted from the test_bsddb.py module in Python's
regression test suite.
"""
import os, string
import unittest
from .test_all import db, hashopen, btopen, rnopen, verbose, \
get_new_database_path
class CompatibilityTestCase(unittest.TestCase):
def setUp(self):
self.filename = get_new_database_path()
def tearDown(self):
try:
os.remove(self.filename)
except os.error:
pass
def test01_btopen(self):
self.do_bthash_test(btopen, 'btopen')
def test02_hashopen(self):
self.do_bthash_test(hashopen, 'hashopen')
def test03_rnopen(self):
data = "The quick brown fox jumped over the lazy dog.".split()
if verbose:
print("\nTesting: rnopen")
f = rnopen(self.filename, 'c')
for x in range(len(data)):
f[x+1] = data[x]
getTest = (f[1], f[2], f[3])
if verbose:
print('%s %s %s' % getTest)
self.assertEqual(getTest[1], 'quick', 'data mismatch!')
rv = f.set_location(3)
if rv != (3, 'brown'):
self.fail('recno database set_location failed: '+repr(rv))
f[25] = 'twenty-five'
f.close()
del f
f = rnopen(self.filename, 'w')
f[20] = 'twenty'
def noRec(f):
rec = f[15]
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f['a string']
self.assertRaises(TypeError, badKey, f)
del f[3]
rec = f.first()
while rec:
if verbose:
print(rec)
try:
rec = next(f)
except KeyError:
break
f.close()
def test04_n_flag(self):
f = hashopen(self.filename, 'n')
f.close()
def do_bthash_test(self, factory, what):
if verbose:
print('\nTesting: ', what)
f = factory(self.filename, 'c')
if verbose:
print('creation...')
# truth test
if f:
if verbose: print("truth test: true")
else:
if verbose: print("truth test: false")
f['0'] = ''
f['a'] = 'Guido'
f['b'] = 'van'
f['c'] = 'Rossum'
f['d'] = 'invented'
# 'e' intentionally left out
f['f'] = 'Python'
if verbose:
print('%s %s %s' % (f['a'], f['b'], f['c']))
if verbose:
print('key ordering...')
start = f.set_location(f.first()[0])
if start != ('0', ''):
self.fail("incorrect first() result: "+repr(start))
while 1:
try:
rec = next(f)
except KeyError:
self.assertEqual(rec, f.last(), 'Error, last <> last!')
f.previous()
break
if verbose:
print(rec)
self.assertTrue('f' in f, 'Error, missing key!')
# test that set_location() returns the next nearest key, value
# on btree databases and raises KeyError on others.
if factory == btopen:
e = f.set_location('e')
if e != ('f', 'Python'):
self.fail('wrong key,value returned: '+repr(e))
else:
try:
e = f.set_location('e')
except KeyError:
pass
else:
self.fail("set_location on non-existent key did not raise KeyError")
f.sync()
f.close()
# truth test
try:
if f:
if verbose: print("truth test: true")
else:
if verbose: print("truth test: false")
except db.DBError:
pass
else:
self.fail("Exception expected")
del f
if verbose:
print('modification...')
f = factory(self.filename, 'w')
f['d'] = 'discovered'
if verbose:
print('access...')
for key in list(f.keys()):
word = f[key]
if verbose:
print(word)
def noRec(f):
rec = f['no such key']
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f[15]
self.assertRaises(TypeError, badKey, f)
f.close()
#----------------------------------------------------------------------
def test_suite():
return unittest.makeSuite(CompatibilityTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| 28.018182
| 84
| 0.553861
|
import os, string
import unittest
from .test_all import db, hashopen, btopen, rnopen, verbose, \
get_new_database_path
class CompatibilityTestCase(unittest.TestCase):
def setUp(self):
self.filename = get_new_database_path()
def tearDown(self):
try:
os.remove(self.filename)
except os.error:
pass
def test01_btopen(self):
self.do_bthash_test(btopen, 'btopen')
def test02_hashopen(self):
self.do_bthash_test(hashopen, 'hashopen')
def test03_rnopen(self):
data = "The quick brown fox jumped over the lazy dog.".split()
if verbose:
print("\nTesting: rnopen")
f = rnopen(self.filename, 'c')
for x in range(len(data)):
f[x+1] = data[x]
getTest = (f[1], f[2], f[3])
if verbose:
print('%s %s %s' % getTest)
self.assertEqual(getTest[1], 'quick', 'data mismatch!')
rv = f.set_location(3)
if rv != (3, 'brown'):
self.fail('recno database set_location failed: '+repr(rv))
f[25] = 'twenty-five'
f.close()
del f
f = rnopen(self.filename, 'w')
f[20] = 'twenty'
def noRec(f):
rec = f[15]
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f['a string']
self.assertRaises(TypeError, badKey, f)
del f[3]
rec = f.first()
while rec:
if verbose:
print(rec)
try:
rec = next(f)
except KeyError:
break
f.close()
def test04_n_flag(self):
f = hashopen(self.filename, 'n')
f.close()
def do_bthash_test(self, factory, what):
if verbose:
print('\nTesting: ', what)
f = factory(self.filename, 'c')
if verbose:
print('creation...')
if f:
if verbose: print("truth test: true")
else:
if verbose: print("truth test: false")
f['0'] = ''
f['a'] = 'Guido'
f['b'] = 'van'
f['c'] = 'Rossum'
f['d'] = 'invented'
f['f'] = 'Python'
if verbose:
print('%s %s %s' % (f['a'], f['b'], f['c']))
if verbose:
print('key ordering...')
start = f.set_location(f.first()[0])
if start != ('0', ''):
self.fail("incorrect first() result: "+repr(start))
while 1:
try:
rec = next(f)
except KeyError:
self.assertEqual(rec, f.last(), 'Error, last <> last!')
f.previous()
break
if verbose:
print(rec)
self.assertTrue('f' in f, 'Error, missing key!')
if factory == btopen:
e = f.set_location('e')
if e != ('f', 'Python'):
self.fail('wrong key,value returned: '+repr(e))
else:
try:
e = f.set_location('e')
except KeyError:
pass
else:
self.fail("set_location on non-existent key did not raise KeyError")
f.sync()
f.close()
try:
if f:
if verbose: print("truth test: true")
else:
if verbose: print("truth test: false")
except db.DBError:
pass
else:
self.fail("Exception expected")
del f
if verbose:
print('modification...')
f = factory(self.filename, 'w')
f['d'] = 'discovered'
if verbose:
print('access...')
for key in list(f.keys()):
word = f[key]
if verbose:
print(word)
def noRec(f):
rec = f['no such key']
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f[15]
self.assertRaises(TypeError, badKey, f)
f.close()
def test_suite():
return unittest.makeSuite(CompatibilityTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| true
| true
|
790571628ddd93fc8d5c7a10847001b8a363f6a8
| 7,116
|
py
|
Python
|
Spike generation/spike_recorder_focal.py
|
XiaoquinNUDT/Three-SNN-learning-algorithms-in-Brian2
|
b7a5b0aba03172cdc04e738f02a949c373c1aac2
|
[
"BSD-2-Clause"
] | 8
|
2019-12-18T09:36:34.000Z
|
2021-06-22T15:47:49.000Z
|
Spike generation/spike_recorder_focal.py
|
Mary-Shi/Three-SNN-learning-algorithms-in-Brian2
|
b7a5b0aba03172cdc04e738f02a949c373c1aac2
|
[
"BSD-2-Clause"
] | null | null | null |
Spike generation/spike_recorder_focal.py
|
Mary-Shi/Three-SNN-learning-algorithms-in-Brian2
|
b7a5b0aba03172cdc04e738f02a949c373c1aac2
|
[
"BSD-2-Clause"
] | 6
|
2020-03-31T11:40:29.000Z
|
2022-03-14T01:26:40.000Z
|
"""
load the dataset example and return the maximum image size, which is used to define the spike generation network;
images with different sizes are focused onto the center of the spike generation network;
the generated poisson spikes are recorded and saved for further use.
"""
"""
on 12th November
by xiaoquinNUDT
version 0.0
"""
"""
test: no
"""
"""
optimization record:
"""
##-----------------------------------------------------------------------------------------
## module import
##-----------------------------------------------------------------------------------------
import brian2 as b2
from brian2 import *
import numpy as np
import cPickle as pickle
import os
import sys
from struct import unpack
np.set_printoptions(threshold = np.inf)
##-----------------------------------------------------------------------------------------
## code generation device setup
##-----------------------------------------------------------------------------------------
b2.defaultclock.dt = 0.2*b2.ms
b2.core.default_float_dtype = float64 ### reconsider
b2.core.default_integer_dtype = int16 ### retest
codegen.target = 'cython' # default 'auto', other setting include numpy, weave, cython
#clear_cache('cython') #clear the disk cache manually, or use the clear_cache function
codegen.cpp_compiler = 'gcc'
codegen.cpp_extra_compile_args_gcc = ['-ffast-math -march=native']
## Cython runtime codegen preferences
'''
Location of the cache directory for Cython files. By default,
will be stored in a brian_extensions subdirectory
where Cython inline stores its temporary files (the result of get_cython_cache_dir()).
'''
codegen.runtime_cython_cache_dir = None
codegen.runtime_cython_delete_source_files = True
codegen.runtime_cython_multiprocess_safe = True
##-----------------------------------------------------------------------------------------
## self-definition method
##-----------------------------------------------------------------------------------------
def get_dataset_example_mnist(path_dataset, name_dataset, using_test_dataset):
"""
read input images (vectors), dump them into
'.pickle' format for faster reloading, and return them as a numpy array.
"""
flag_dataloaded = 0
if name_dataset != 'mnist_test_example' and name_dataset != 'mnist_train_example':
raise Exception('You have provided the wrong dataset name or path, please check carefully')
else:
dataset_path_name = path_dataset + name_dataset
if os.path.isfile('%s.pickle' % dataset_path_name):
example = pickle.load(open('%s.pickle' % dataset_path_name))
flag_dataloaded = 1
else:
flag_datasetsource = os.path.isfile(path_dataset+'train-images.idx3-ubyte') & \
os.path.isfile(path_dataset+'train-labels.idx1-ubyte') & \
os.path.isfile(path_dataset+'t10k-images.idx3-ubyte') & \
os.path.isfile(path_dataset+'t10k-labels.idx1-ubyte')
if flag_datasetsource == False:
raise Exception("You haven't downloaded the dataset into the %s!" % path_dataset)
else:
if using_test_dataset:
image = open(path_dataset+'t10k-images.idx3-ubyte', 'rb')
else:
image = open(path_dataset+'train-images.idx3-ubyte', 'rb')
# get metadata for images
image.read(4) # skip the magic number
num_image = unpack('>I', image.read(4))[0]
height_image = unpack('>I', image.read(4))[0]
length_image = unpack('>I', image.read(4))[0]
example = np.zeros((num_image, height_image, length_image), dtype = np.uint8)
for i in xrange(num_image):
example[i] = [[unpack('>B', image.read(1))[0] for m in xrange(length_image)] for n in xrange(height_image)]
pickle.dump(example, open('%s.pickle' % dataset_path_name, 'wb'))
# the dataset has been read and processed
flag_dataloaded = 1
if flag_dataloaded == 0:
raise Exception('Failed to load the required dataset, please check the name_dataset and other printed information!')
else:
return example
## file system
path_dataset = '../dataset_mnist/'
spike_record_path = './'
## input parameter
using_test_dataset = bool(int(sys.argv[1]))
print(using_test_dataset)
num_example = int(sys.argv[2])
print(num_example)
num_iteration = int(sys.argv[3])
print(num_iteration)
height_receptive_field = 28
length_receptive_field = 28
if using_test_dataset:
num_per_dataset = 10000
name_dataset = 'mnist_test_example'
name_spike_record = 'mnist_spike_record_test'
else:
num_per_dataset = 60000
name_dataset = 'mnist_train_example'
name_spike_record = 'mnist_spike_record_train'
## network setting parameters
input_intensity = 2.0
population_IN = height_receptive_field * length_receptive_field
working_time = 350 * b2.ms
resting_time = 150 * b2.ms
neuron_group_record = {}
spike_monitor_record = {}
name_neuron_group = 'Poisson_spike'
## create input poisson spike train
neuron_group_record[name_neuron_group] = b2.PoissonGroup(population_IN, 0*Hz)
spike_monitor_record[name_neuron_group] = b2.SpikeMonitor(neuron_group_record[name_neuron_group])
network_record = b2.Network()
for obj_sim in [neuron_group_record, spike_monitor_record]:
for key in obj_sim:
network_record.add(obj_sim[key])
## dataset loading and record the input poisson spike
input_example = get_dataset_example_mnist(path_dataset, name_dataset, using_test_dataset)
number_example = 0
while number_example < num_example:
input_image = input_example[(number_example + num_iteration * num_example) % num_per_dataset]
height_example, length_example = input_image.shape
length_margin = int((length_receptive_field - length_example)/2)
height_margin = int((height_receptive_field - height_example)/2)
input_rate = np.zeros((height_receptive_field, length_receptive_field), dtype = np.float32)
for i in xrange(height_example):
for j in xrange(length_example):
input_rate[i + height_margin, j + length_margin] = input_image[i, j]
neuron_group_record[name_neuron_group].rates = input_rate.flatten() / 8.0 * input_intensity * Hz
network_record.run(working_time, report = 'text')
neuron_group_record[name_neuron_group].rates = 0*Hz
network_record.run(resting_time)
number_example += 1
spike_index = np.asarray(spike_monitor_record[name_neuron_group].i, dtype = np.int16)
spike_time = np.asarray(spike_monitor_record[name_neuron_group].t, dtype = np.float64)
if using_test_dataset:
spike_record_path_name = spike_record_path + name_spike_record + '_' + str(num_example)
else:
spike_record_path_name = spike_record_path + name_spike_record + '_' + str(num_example) + '_' + str(num_iteration)
file_spike_record = open('%s.pickle' % spike_record_path_name, 'wb')
pickle.dump(spike_index, file_spike_record)
pickle.dump(spike_time, file_spike_record)
file_spike_record.close()
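# Worked example (illustrative): focusing a hypothetical 20x20 example onto the
# 28x28 receptive field gives margins of (28 - 20) / 2 = 4 pixels per side, so
# input pixel (i, j) drives Poisson unit (i + 4, j + 4). With input_intensity
# = 2.0, a pixel value of 200 maps to a firing rate of 200 / 8.0 * 2.0 = 50 Hz.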
| 45.909677
| 127
| 0.657532
|
codegen.runtime_cython_multiprocess_safe = True
def get_dataset_example_mnist(path_dataset, name_dataset, using_test_dataset):
flag_dataloaded = 0
if name_dataset != 'mnist_test_example' and name_dataset != 'mnist_train_example':
raise Exception('You have provided the wrong dataset name or path, please check carefully')
else:
dataset_path_name = path_dataset + name_dataset
if os.path.isfile('%s.pickle' % dataset_path_name):
example = pickle.load(open('%s.pickle' % dataset_path_name))
flag_dataloaded = 1
else:
flag_datasetsource = os.path.isfile(path_dataset+'train-images.idx3-ubyte') & \
os.path.isfile(path_dataset+'train-labels.idx1-ubyte') & \
os.path.isfile(path_dataset+'t10k-images.idx3-ubyte') & \
os.path.isfile(path_dataset+'t10k-labels.idx1-ubyte')
if flag_datasetsource == False:
raise Exception("You haven't downloaded the dataset into the %s!" % path_dataset)
else:
if using_test_dataset:
image = open(path_dataset+'t10k-images.idx3-ubyte', 'rb')
else:
image = open(path_dataset+'train-images.idx3-ubyte', 'rb')
# get metadata for images
image.read(4) # skip the magic number
num_image = unpack('>I', image.read(4))[0]
height_image = unpack('>I', image.read(4))[0]
length_image = unpack('>I', image.read(4))[0]
example = np.zeros((num_image, height_image, length_image), dtype = np.uint8)
for i in xrange(num_image):
example[i] = [[unpack('>B', image.read(1))[0] for m in xrange(length_image)] for n in xrange(height_image)]
pickle.dump(example, open('%s.pickle' % dataset_path_name, 'wb'))
# the dataset has been read and processed
flag_dataloaded = 1
if flag_dataloaded == 0:
raise Exception('Failed to load the required dataset, please check the name_dataset and other printed information!')
else:
return example
## file system
path_dataset = '../dataset_mnist/'
spike_record_path = './'
## input parameter
using_test_dataset = bool(int(sys.argv[1]))
print(using_test_dataset)
num_example = int(sys.argv[2])
print(num_example)
num_iteration = int(sys.argv[3])
print(num_iteration)
height_receptive_field = 28
length_receptive_field = 28
if using_test_dataset:
num_per_dataset = 10000
name_dataset = 'mnist_test_example'
name_spike_record = 'mnist_spike_record_test'
else:
num_per_dataset = 60000
name_dataset = 'mnist_train_example'
name_spike_record = 'mnist_spike_record_train'
## network setting parameters
input_intensity = 2.0
population_IN = height_receptive_field * length_receptive_field
working_time = 350 * b2.ms
resting_time = 150 * b2.ms
neuron_group_record = {}
spike_monitor_record = {}
name_neuron_group = 'Poisson_spike'
## create input poisson spike train
neuron_group_record[name_neuron_group] = b2.PoissonGroup(population_IN, 0*Hz)
| true
| true
|
7905737374f205b4e3afb2b45da9c7c6b192352c
| 1,320
|
py
|
Python
|
codql-report/generator.py
|
Heersin/codeql_packer
|
5d1258ce2419a67161ac3b844219ebdbe5310e59
|
[
"MIT"
] | null | null | null |
codql-report/generator.py
|
Heersin/codeql_packer
|
5d1258ce2419a67161ac3b844219ebdbe5310e59
|
[
"MIT"
] | null | null | null |
codql-report/generator.py
|
Heersin/codeql_packer
|
5d1258ce2419a67161ac3b844219ebdbe5310e59
|
[
"MIT"
] | null | null | null |
import os
os.chdir("./export")
from reader.csv_mod import CsvReader
from reader.sarif_mod import SarifReader
from reader.server_mod import RestfulReader
from export.export import Exporter
def generate(args):
    # Collect findings from every supported input (SARIF files, CSV files, REST result ids)
    # and hand them to the exporter as one list.
    project_name = args.name
    sarif_list = args.sarif
    if sarif_list is None:
        sarif_list = []
    json_list = args.json
    if json_list is None:
        json_list = []
    csv_list = args.csv
    if csv_list is None:
        csv_list = []
proj_data = []
sarif_reader = SarifReader()
for f in sarif_list:
sarif_reader.read(f)
sarif_data = sarif_reader.get_data()
proj_data.extend(sarif_data['data'])
csv_reader = CsvReader()
for f in csv_list:
csv_reader.read(f)
csv_data = csv_reader.get_data()
proj_data.extend(csv_data['data'])
restful_reader = RestfulReader()
for rid in json_list:
restful_reader.read(rid)
restful_data = restful_reader.get_data()
proj_data.extend(restful_data['data'])
reporter = Exporter()
reporter.setData(proj_data)
return reporter.build(project_name)
#r = SarifReader()
#r.read('/home/heersin/blackhole/codeql/result.sarif')
#print(os.getcwd())
#project_name = "socat"
#pdf_factory = Exporter()
#pdf_factory.setData(r.get_data())
#pdf_factory.build(project_name)
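# generate() only relies on args exposing .name, .sarif, .json and .csv. A minimal argparse
# wiring consistent with that (the flag names here are assumptions for illustration, not taken
# from the original CLI):
import argparse
def build_parser():
    parser = argparse.ArgumentParser(description='Build a CodeQL findings report')
    parser.add_argument('--name', required=True, help='project name used in the report')
    parser.add_argument('--sarif', nargs='*', help='SARIF result files')
    parser.add_argument('--csv', nargs='*', help='CSV result files')
    parser.add_argument('--json', nargs='*', help='result ids fetched through the REST reader')
    return parser
if __name__ == '__main__':
    generate(build_parser().parse_args())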
| 23.157895
| 54
| 0.681818
|
| true
| true
|
79057419b0cf6e46329fd2c2aad41db629000e02
| 85,313
|
py
|
Python
|
tests/providers/google/cloud/hooks/test_bigquery.py
|
khilawar4/airflow
|
5f3f65b82517f615f31f0c8a7f8ac0facb325235
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3
|
2021-01-29T20:33:56.000Z
|
2021-08-06T17:35:16.000Z
|
tests/providers/google/cloud/hooks/test_bigquery.py
|
khilawar4/airflow
|
5f3f65b82517f615f31f0c8a7f8ac0facb325235
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 210
|
2021-07-17T00:25:52.000Z
|
2021-12-29T00:44:48.000Z
|
tests/providers/google/cloud/hooks/test_bigquery.py
|
khilawar4/airflow
|
5f3f65b82517f615f31f0c8a7f8ac0facb325235
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2021-04-14T11:15:17.000Z
|
2021-12-15T16:58:24.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=not-callable
import re
import unittest
from unittest import mock
import pytest
from google.cloud.bigquery import DEFAULT_RETRY, DatasetReference, Table, TableReference
from google.cloud.bigquery.dataset import AccessEntry, Dataset, DatasetListItem
from google.cloud.exceptions import NotFound
from parameterized import parameterized
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.bigquery import (
BigQueryCursor,
BigQueryHook,
_api_resource_configs_duplication_check,
_cleanse_time_partitioning,
_split_tablename,
_validate_src_fmt_configs,
_validate_value,
)
PROJECT_ID = "bq-project"
CREDENTIALS = "bq-credentials"
DATASET_ID = "bq_dataset"
TABLE_ID = "bq_table"
PARTITION_ID = "20200101"
VIEW_ID = 'bq_view'
JOB_ID = "1234"
LOCATION = 'europe-north1'
TABLE_REFERENCE_REPR = {
'tableId': TABLE_ID,
'datasetId': DATASET_ID,
'projectId': PROJECT_ID,
}
TABLE_REFERENCE = TableReference.from_api_repr(TABLE_REFERENCE_REPR)
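# A small sanity sketch of what the fixtures above encode: the reference object exposes its parts
# as attributes and round-trips back to the same API dict (plain google-cloud-bigquery behaviour,
# nothing Airflow-specific is assumed here):
def _check_table_reference_fixture():
    assert TABLE_REFERENCE.project == PROJECT_ID
    assert TABLE_REFERENCE.dataset_id == DATASET_ID
    assert TABLE_REFERENCE.table_id == TABLE_ID
    assert TABLE_REFERENCE.to_api_repr() == TABLE_REFERENCE_REPR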
class _BigQueryBaseTestClass(unittest.TestCase):
def setUp(self) -> None:
class MockedBigQueryHook(BigQueryHook):
def _get_credentials_and_project_id(self):
return CREDENTIALS, PROJECT_ID
self.hook = MockedBigQueryHook()
class TestBigQueryHookMethods(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryConnection")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.build")
def test_bigquery_client_creation(self, mock_build, mock_authorize, mock_bigquery_connection):
result = self.hook.get_conn()
mock_build.assert_called_once_with(
'bigquery', 'v2', http=mock_authorize.return_value, cache_discovery=False
)
mock_bigquery_connection.assert_called_once_with(
service=mock_build.return_value,
project_id=PROJECT_ID,
hook=self.hook,
use_legacy_sql=self.hook.use_legacy_sql,
location=self.hook.location,
num_retries=self.hook.num_retries,
)
assert mock_bigquery_connection.return_value == result
@mock.patch("airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__")
def test_bigquery_bigquery_conn_id_deprecation_warning(
self,
mock_base_hook_init,
):
bigquery_conn_id = "bigquery conn id"
warning_message = (
"The bigquery_conn_id parameter has been deprecated. "
"You should pass the gcp_conn_id parameter."
)
with pytest.warns(DeprecationWarning) as warnings:
BigQueryHook(bigquery_conn_id=bigquery_conn_id)
mock_base_hook_init.assert_called_once_with(
delegate_to=None,
gcp_conn_id='bigquery conn id',
impersonation_chain=None,
)
assert warning_message == str(warnings[0].message)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_location_propagates_properly(self, run_with_config, _):
# TODO: this creates side effect
assert self.hook.location is None
self.hook.run_query(sql='select 1', location='US')
assert run_with_config.call_count == 1
assert self.hook.location == 'US'
def test_bigquery_insert_rows_not_implemented(self):
with pytest.raises(NotImplementedError):
self.hook.insert_rows(table="table", rows=[1, 2])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_exists_true(self, mock_client):
result = self.hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_exists_false(self, mock_client):
mock_client.return_value.get_table.side_effect = NotFound("Dataset not found")
result = self.hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_true(self, mock_client):
mock_client.return_value.list_partitions.return_value = [PARTITION_ID]
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_false_no_table(self, mock_client):
mock_client.return_value.get_table.side_effect = NotFound("Dataset not found")
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_false_no_partition(self, mock_client):
mock_client.return_value.list_partitions.return_value = []
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.read_gbq')
def test_get_pandas_df(self, mock_read_gbq):
self.hook.get_pandas_df('select 1')
mock_read_gbq.assert_called_once_with(
'select 1', credentials=CREDENTIALS, dialect='legacy', project_id=PROJECT_ID, verbose=False
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_schema_update_options(self, mock_get_service):
with pytest.raises(
Exception,
match=(
r"\['THIS IS NOT VALID'\] contains invalid schema update options."
r"Please only use one or more of the following options: "
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]"
),
):
self.hook.run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=["THIS IS NOT VALID"],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_schema_update_and_write_disposition(self, mock_get_service):
with pytest.raises(
Exception,
match="schema_update_options is only allowed if"
" write_disposition is 'WRITE_APPEND' or 'WRITE_TRUNCATE'.",
):
self.hook.run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=['ALLOW_FIELD_ADDITION'],
write_disposition='WRITE_EMPTY',
)
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete",
side_effect=[False, True],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_cancel_queries(self, mock_client, mock_poll_job_complete):
running_job_id = 3
self.hook.running_job_id = running_job_id
self.hook.cancel_query()
mock_poll_job_complete.has_calls(mock.call(running_job_id), mock.call(running_job_id))
mock_client.assert_called_once_with(project_id=PROJECT_ID, location=None)
mock_client.return_value.cancel_job.assert_called_once_with(job_id=running_job_id)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect_default(
self,
mock_insert,
_,
):
self.hook.run_query('query')
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect(self, mock_insert, _):
self.hook.run_query('query', use_legacy_sql=False)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect_legacy_with_query_params(self, mock_insert, _):
params = [
{
'name': "param_name",
'parameterType': {'type': "STRING"},
'parameterValue': {'value': "param_value"},
}
]
self.hook.run_query('query', use_legacy_sql=False, query_params=params)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_sql_dialect_legacy_with_query_params_fails(self, _):
params = [
{
'name': "param_name",
'parameterType': {'type': "STRING"},
'parameterValue': {'value': "param_value"},
}
]
with pytest.raises(ValueError, match="Query parameters are not allowed when using legacy SQL"):
self.hook.run_query('query', use_legacy_sql=True, query_params=params)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_without_sql_fails(self, _):
with pytest.raises(
TypeError, match=r"`BigQueryBaseCursor.run_query` missing 1 required positional argument: `sql`"
):
self.hook.run_query(sql=None)
@parameterized.expand(
[
(['ALLOW_FIELD_ADDITION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_RELAXATION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_ADDITION'], 'WRITE_TRUNCATE'),
(['ALLOW_FIELD_RELAXATION'], 'WRITE_TRUNCATE'),
(['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'], 'WRITE_TRUNCATE'),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_schema_update_options(
self,
schema_update_options,
write_disposition,
mock_insert,
mock_get_service,
):
self.hook.run_query(
sql='query',
destination_dataset_table='my_dataset.my_table',
schema_update_options=schema_update_options,
write_disposition=write_disposition,
)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['schemaUpdateOptions'] == schema_update_options
assert kwargs['configuration']['query']['writeDisposition'] == write_disposition
@parameterized.expand(
[
(
['INCORRECT_OPTION'],
None,
r"\['INCORRECT_OPTION'\] contains invalid schema update options\. "
r"Please only use one or more of the following options: "
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]",
),
(
['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION', 'INCORRECT_OPTION'],
None,
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION', 'INCORRECT_OPTION'\] contains invalid "
r"schema update options\. Please only use one or more of the following options: "
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]",
),
(
['ALLOW_FIELD_ADDITION'],
None,
r"schema_update_options is only allowed if write_disposition is "
r"'WRITE_APPEND' or 'WRITE_TRUNCATE'",
),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_schema_update_options_incorrect(
self,
schema_update_options,
write_disposition,
expected_regex,
mock_get_service,
):
with pytest.raises(ValueError, match=expected_regex):
self.hook.run_query(
sql='query',
destination_dataset_table='my_dataset.my_table',
schema_update_options=schema_update_options,
write_disposition=write_disposition,
)
@parameterized.expand([(True,), (False,)])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_api_resource_configs(
self,
bool_val,
mock_insert,
_,
):
self.hook.run_query('query', api_resource_configs={'query': {'useQueryCache': bool_val}})
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useQueryCache'] is bool_val
assert kwargs["configuration"]['query']['useLegacySql'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_api_resource_configs_duplication_warning(self, mock_get_service):
with pytest.raises(
ValueError,
match=(
r"Values of useLegacySql param are duplicated\. api_resource_configs "
r"contained useLegacySql param in `query` config and useLegacySql was "
r"also provided with arg to run_query\(\) method\. Please remove duplicates\."
),
):
self.hook.run_query(
'query', use_legacy_sql=True, api_resource_configs={'query': {'useLegacySql': False}}
)
def test_validate_value(self):
with pytest.raises(
TypeError, match="case_1 argument must have a type <class 'dict'> not <class 'str'>"
):
_validate_value("case_1", "a", dict)
assert _validate_value("case_2", 0, int) is None
def test_duplication_check(self):
with pytest.raises(
ValueError,
match=r"Values of key_one param are duplicated. api_resource_configs contained key_one param in"
r" `query` config and key_one was also provided with arg to run_query\(\) method. "
r"Please remove duplicates.",
):
key_one = True
_api_resource_configs_duplication_check("key_one", key_one, {"key_one": False})
assert _api_resource_configs_duplication_check("key_one", key_one, {"key_one": True}) is None
def test_validate_src_fmt_configs(self):
source_format = "test_format"
valid_configs = ["test_config_known", "compatibility_val"]
backward_compatibility_configs = {"compatibility_val": "val"}
with pytest.raises(
ValueError, match="test_config_unknown is not a valid src_fmt_configs for type test_format."
):
# This config should raise a value error.
src_fmt_configs = {"test_config_unknown": "val"}
_validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
src_fmt_configs = {"test_config_known": "val"}
src_fmt_configs = _validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
assert (
"test_config_known" in src_fmt_configs
), "src_fmt_configs should contain al known src_fmt_configs"
assert (
"compatibility_val" in src_fmt_configs
), "_validate_src_fmt_configs should add backward_compatibility config"
@parameterized.expand([("AVRO",), ("PARQUET",), ("NEWLINE_DELIMITED_JSON",), ("DATASTORE_BACKUP",)])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_non_csv_as_src_fmt(self, fmt, _):
try:
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
source_uris=[],
source_format=fmt,
autodetect=True,
)
except ValueError:
self.fail("run_load() raised ValueError unexpectedly!")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_extract(self, mock_insert):
source_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
destination_cloud_storage_uris = ["gs://bucket/file.csv"]
expected_configuration = {
"extract": {
"sourceTable": {
"projectId": PROJECT_ID,
"datasetId": DATASET_ID,
"tableId": TABLE_ID,
},
"compression": "NONE",
"destinationUris": destination_cloud_storage_uris,
"destinationFormat": "CSV",
"fieldDelimiter": ",",
"printHeader": True,
}
}
self.hook.run_extract(
source_project_dataset_table=source_project_dataset_table,
destination_cloud_storage_uris=destination_cloud_storage_uris,
)
mock_insert.assert_called_once_with(configuration=expected_configuration, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.SchemaField")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_list_rows(self, mock_client, mock_schema, mock_table):
self.hook.list_rows(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
max_results=10,
selected_fields=["field_1", "field_2"],
page_token="page123",
start_index=5,
location=LOCATION,
)
mock_table.from_api_repr.assert_called_once_with({"tableReference": TABLE_REFERENCE_REPR})
mock_schema.has_calls([mock.call(x, "") for x in ["field_1", "field_2"]])
mock_client.return_value.list_rows.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
max_results=10,
selected_fields=mock.ANY,
page_token='page123',
start_index=5,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_list_rows_with_empty_selected_fields(self, mock_client, mock_table):
self.hook.list_rows(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
max_results=10,
page_token="page123",
selected_fields=[],
start_index=5,
location=LOCATION,
)
mock_table.from_api_repr.assert_called_once_with({"tableReference": TABLE_REFERENCE_REPR})
mock_client.return_value.list_rows.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
max_results=10,
page_token='page123',
selected_fields=None,
start_index=5,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_run_table_delete(self, mock_client, mock_table):
source_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
self.hook.run_table_delete(source_project_dataset_table, ignore_if_missing=False)
mock_table.from_string.assert_called_once_with(source_project_dataset_table)
mock_client.return_value.delete_table.assert_called_once_with(
table=mock_table.from_string.return_value, not_found_ok=False
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables")
def test_table_upsert_create_new_table(self, mock_get, mock_create):
table_resource = {"tableReference": {"tableId": TABLE_ID}}
mock_get.return_value = []
self.hook.run_table_upsert(dataset_id=DATASET_ID, table_resource=table_resource)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
mock_create.assert_called_once_with(table_resource=table_resource, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables")
def test_table_upsert_already_exists(self, mock_get, mock_update):
table_resource = {"tableReference": {"tableId": TABLE_ID}}
mock_get.return_value = [{"tableId": TABLE_ID}]
self.hook.run_table_upsert(dataset_id=DATASET_ID, table_resource=table_resource)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
mock_update.assert_called_once_with(table_resource=table_resource)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset")
def test_run_grant_dataset_view_access_granting(self, mock_update, mock_get):
view_table = f"{TABLE_ID}_view"
view_dataset = f"{DATASET_ID}_view"
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={'projectId': PROJECT_ID, 'datasetId': view_dataset, 'tableId': view_table},
)
dataset = Dataset(DatasetReference.from_string(DATASET_ID, PROJECT_ID))
dataset.access_entries = []
mock_get.return_value = dataset
self.hook.run_grant_dataset_view_access(
source_dataset=DATASET_ID, view_dataset=view_dataset, view_table=view_table
)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
assert view_access in dataset.access_entries
mock_update.assert_called_once_with(
fields=["access"],
dataset_resource=dataset.to_api_repr(),
project_id=PROJECT_ID,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset")
def test_run_grant_dataset_view_access_already_granted(self, mock_update, mock_get):
view_table = f"{TABLE_ID}_view"
view_dataset = f"{DATASET_ID}_view"
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={'projectId': PROJECT_ID, 'datasetId': view_dataset, 'tableId': view_table},
)
dataset = Dataset(DatasetReference.from_string(DATASET_ID, PROJECT_ID))
dataset.access_entries = [view_access]
mock_get.return_value = dataset
self.hook.run_grant_dataset_view_access(
source_dataset=DATASET_ID, view_dataset=view_dataset, view_table=view_table
)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
assert len(mock_update.calls) == 0
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_dataset_tables_list(self, mock_client):
table_list = [
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "a-1"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "b-1"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "a-2"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "b-2"},
]
table_list_response = [Table.from_api_repr({"tableReference": t}) for t in table_list]
mock_client.return_value.list_tables.return_value = table_list_response
dataset_reference = DatasetReference(PROJECT_ID, DATASET_ID)
result = self.hook.get_dataset_tables_list(dataset_id=DATASET_ID, project_id=PROJECT_ID)
mock_client.return_value.list_tables.assert_called_once_with(
dataset=dataset_reference, max_results=None
)
assert table_list == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_poll_job_complete(self, mock_client):
self.hook.poll_job_complete(job_id=JOB_ID, location=LOCATION, project_id=PROJECT_ID)
mock_client.assert_called_once_with(location=LOCATION, project_id=PROJECT_ID)
mock_client.return_value.get_job.assert_called_once_with(job_id=JOB_ID)
mock_client.return_value.get_job.return_value.done.assert_called_once_with(retry=DEFAULT_RETRY)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("logging.Logger.info")
def test_cancel_query_jobs_to_cancel(
self,
mock_logger_info,
poll_job_complete,
):
poll_job_complete.return_value = True
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
poll_job_complete.assert_called_once_with(job_id=JOB_ID)
mock_logger_info.has_call(mock.call("No running BigQuery jobs to cancel."))
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("time.sleep")
@mock.patch("logging.Logger.info")
def test_cancel_query_cancel_timeout(
self,
mock_logger_info,
mock_sleep,
poll_job_complete,
mock_client,
):
poll_job_complete.side_effect = [False] * 13
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
mock_client.return_value.cancel_job.assert_called_once_with(job_id=JOB_ID)
assert poll_job_complete.call_count == 13
assert mock_sleep.call_count == 11
mock_logger_info.has_call(
mock.call(
f"Stopping polling due to timeout. Job with id {JOB_ID} "
"has not completed cancel and may or may not finish."
)
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("time.sleep")
@mock.patch("logging.Logger.info")
def test_cancel_query_cancel_completed(
self,
mock_logger_info,
mock_sleep,
poll_job_complete,
mock_client,
):
poll_job_complete.side_effect = [False] * 12 + [True]
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
mock_client.return_value.cancel_job.assert_called_once_with(job_id=JOB_ID)
assert poll_job_complete.call_count == 13
assert mock_sleep.call_count == 11
mock_logger_info.has_call(mock.call(f"Job successfully canceled: {PROJECT_ID}, {PROJECT_ID}"))
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_schema(self, mock_client):
table = {
"tableReference": TABLE_REFERENCE_REPR,
"schema": {
"fields": [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
]
},
}
mock_client.return_value.get_table.return_value = Table.from_api_repr(table)
result = self.hook.get_schema(dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
assert "fields" in result
assert len(result["fields"]) == 2
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema')
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table')
def test_update_table_schema_with_policy_tags(self, mock_update, mock_get_schema):
mock_get_schema.return_value = {
"fields": [
{'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED'},
{
'name': 'salary',
'type': 'INTEGER',
'mode': 'REQUIRED',
'policyTags': {'names': ['sensitive']},
},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'fields': [
{
'name': 'field_1',
'type': 'STRING',
'mode': 'REQUIRED',
'policyTags': {'names': ['sensitive']},
},
],
},
]
}
schema_fields_updates = [
{'name': 'emp_name', 'description': 'Name of employee', 'policyTags': {'names': ['sensitive']}},
{
'name': 'salary',
'description': 'Monthly salary in USD',
'policyTags': {},
},
{
'name': 'subrecord',
'description': 'Some Desc',
'fields': [
{'name': 'field_1', 'description': 'Some nested desc'},
],
},
]
expected_result_schema = {
'fields': [
{
'name': 'emp_name',
'type': 'STRING',
'mode': 'REQUIRED',
'description': 'Name of employee',
'policyTags': {'names': ['sensitive']},
},
{
'name': 'salary',
'type': 'INTEGER',
'mode': 'REQUIRED',
'description': 'Monthly salary in USD',
'policyTags': {},
},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'description': 'Some Desc',
'fields': [
{
'name': 'field_1',
'type': 'STRING',
'mode': 'REQUIRED',
'description': 'Some nested desc',
'policyTags': {'names': ['sensitive']},
}
],
},
]
}
self.hook.update_table_schema(
schema_fields_updates=schema_fields_updates,
include_policy_tags=True,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
)
mock_update.assert_called_once_with(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
table_resource={'schema': expected_result_schema},
fields=['schema'],
)
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema')
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table')
def test_update_table_schema_without_policy_tags(self, mock_update, mock_get_schema):
mock_get_schema.return_value = {
"fields": [
{'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'salary', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'fields': [
{'name': 'field_1', 'type': 'STRING', 'mode': 'REQUIRED'},
],
},
]
}
schema_fields_updates = [
{'name': 'emp_name', 'description': 'Name of employee'},
{
'name': 'salary',
'description': 'Monthly salary in USD',
'policyTags': {'names': ['sensitive']},
},
{
'name': 'subrecord',
'description': 'Some Desc',
'fields': [
{'name': 'field_1', 'description': 'Some nested desc'},
],
},
]
expected_result_schema = {
'fields': [
{'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED', 'description': 'Name of employee'},
{
'name': 'salary',
'type': 'INTEGER',
'mode': 'REQUIRED',
'description': 'Monthly salary in USD',
},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'description': 'Some Desc',
'fields': [
{
'name': 'field_1',
'type': 'STRING',
'mode': 'REQUIRED',
'description': 'Some nested desc',
}
],
},
]
}
self.hook.update_table_schema(
schema_fields_updates=schema_fields_updates,
include_policy_tags=False,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
)
mock_update.assert_called_once_with(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
table_resource={'schema': expected_result_schema},
fields=['schema'],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_source_format(self, mock_get_service):
with pytest.raises(
Exception,
match=r"JSON is not a valid source format. Please use one of the following types: \['CSV', "
r"'NEWLINE_DELIMITED_JSON', 'AVRO', 'GOOGLE_SHEETS', 'DATASTORE_BACKUP', 'PARQUET'\]",
):
self.hook.run_load("test.test", "test_schema.json", ["test_data.json"], source_format="json")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_insert_all_succeed(self, mock_client):
rows = [{"json": {"a_key": "a_value_0"}}]
self.hook.insert_all(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
rows=rows,
ignore_unknown_values=True,
skip_invalid_rows=True,
)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.return_value.insert_rows.assert_called_once_with(
table=mock_client.return_value.get_table.return_value,
rows=rows,
ignore_unknown_values=True,
skip_invalid_rows=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_insert_all_fail(self, mock_client):
rows = [{"json": {"a_key": "a_value_0"}}]
mock_client.return_value.insert_rows.return_value = ["some", "errors"]
with pytest.raises(AirflowException, match="insert error"):
self.hook.insert_all(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, rows=rows, fail_on_error=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
labels={'label1': 'test1', 'label2': 'test2'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['labels'] == {'label1': 'test1', 'label2': 'test2'}
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.QueryJob")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_insert_job(self, mock_client, mock_query_job):
job_conf = {
"query": {
"query": "SELECT * FROM test",
"useLegacySql": "False",
}
}
mock_query_job._JOB_TYPE = "query"
self.hook.insert_job(
configuration=job_conf,
job_id=JOB_ID,
project_id=PROJECT_ID,
location=LOCATION,
)
mock_client.assert_called_once_with(
project_id=PROJECT_ID,
location=LOCATION,
)
mock_query_job.from_api_repr.assert_called_once_with(
{
'configuration': job_conf,
'jobReference': {'jobId': JOB_ID, 'projectId': PROJECT_ID, 'location': LOCATION},
},
mock_client.return_value,
)
mock_query_job.from_api_repr.return_value.result.assert_called_once_with()
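# A rough sketch of how the hook surface exercised by the class above composes in client code;
# the connection id and the SQL string are illustrative placeholders, not values from the tests:
def _example_hook_usage():
    hook = BigQueryHook(gcp_conn_id='google_cloud_default', use_legacy_sql=False)
    if hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID):
        return hook.insert_job(
            configuration={"query": {"query": "SELECT 1", "useLegacySql": False}},
            project_id=PROJECT_ID,
            location=LOCATION,
        )
    return None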
class TestBigQueryTableSplitter(unittest.TestCase):
def test_internal_need_default_project(self):
with pytest.raises(Exception, match="INTERNAL: No default project is specified"):
_split_tablename("dataset.table", None)
@parameterized.expand(
[
("project", "dataset", "table", "dataset.table"),
("alternative", "dataset", "table", "alternative:dataset.table"),
("alternative", "dataset", "table", "alternative.dataset.table"),
("alt1:alt", "dataset", "table", "alt1:alt.dataset.table"),
("alt1:alt", "dataset", "table", "alt1:alt:dataset.table"),
]
)
def test_split_tablename(self, project_expected, dataset_expected, table_expected, table_input):
default_project_id = "project"
project, dataset, table = _split_tablename(table_input, default_project_id)
assert project_expected == project
assert dataset_expected == dataset
assert table_expected == table
@parameterized.expand(
[
("alt1:alt2:alt3:dataset.table", None, "Use either : or . to specify project got {}"),
(
"alt1.alt.dataset.table",
None,
r"Expect format of \(<project\.\|<project\:\)<dataset>\.<table>, got {}",
),
(
"alt1:alt2:alt.dataset.table",
"var_x",
"Format exception for var_x: Use either : or . to specify project got {}",
),
(
"alt1:alt2:alt:dataset.table",
"var_x",
"Format exception for var_x: Use either : or . to specify project got {}",
),
(
"alt1.alt.dataset.table",
"var_x",
r"Format exception for var_x: Expect format of "
r"\(<project\.\|<project:\)<dataset>.<table>, got {}",
),
]
)
def test_invalid_syntax(self, table_input, var_name, exception_message):
default_project_id = "project"
with pytest.raises(Exception, match=exception_message.format(table_input)):
_split_tablename(table_input, default_project_id, var_name)
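# Spelled out, the accepted formats from the parameterized cases above (a quick self-check built
# only from inputs and expected tuples that already appear in this class):
def _split_tablename_examples():
    assert _split_tablename("dataset.table", "project") == ("project", "dataset", "table")
    assert _split_tablename("alternative:dataset.table", "project") == ("alternative", "dataset", "table")
    assert _split_tablename("alt1:alt.dataset.table", "project") == ("alt1:alt", "dataset", "table")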
class TestTableOperations(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_view(self, mock_bq_client, mock_table):
view = {
'query': 'SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*`',
"useLegacySql": False,
}
self.hook.create_empty_table(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, view=view, retry=DEFAULT_RETRY
)
body = {'tableReference': TABLE_REFERENCE_REPR, 'view': view}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
exists_ok=True,
retry=DEFAULT_RETRY,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_patch_table(self, mock_client, mock_table):
description_patched = 'Test description.'
expiration_time_patched = 2524608000000
friendly_name_patched = 'Test friendly name.'
labels_patched = {'label1': 'test1', 'label2': 'test2'}
schema_patched = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'balance', 'type': 'FLOAT', 'mode': 'NULLABLE'},
{'name': 'new_field', 'type': 'STRING', 'mode': 'NULLABLE'},
]
time_partitioning_patched = {'expirationMs': 10000000}
require_partition_filter_patched = True
view_patched = {
'query': "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
'useLegacySql': False,
}
self.hook.patch_table(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
description=description_patched,
expiration_time=expiration_time_patched,
friendly_name=friendly_name_patched,
labels=labels_patched,
schema=schema_patched,
time_partitioning=time_partitioning_patched,
require_partition_filter=require_partition_filter_patched,
view=view_patched,
)
body = {
"description": description_patched,
"expirationTime": expiration_time_patched,
"friendlyName": friendly_name_patched,
"labels": labels_patched,
"schema": {"fields": schema_patched},
"timePartitioning": time_partitioning_patched,
"view": view_patched,
"requirePartitionFilter": require_partition_filter_patched,
}
fields = list(body.keys())
body["tableReference"] = TABLE_REFERENCE_REPR
mock_table.from_api_repr.assert_called_once_with(body)
mock_client.return_value.update_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, fields=fields
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_table_succeed(self, mock_bq_client, mock_table):
self.hook.create_empty_table(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
body = {
'tableReference': {
'tableId': TABLE_ID,
'projectId': PROJECT_ID,
'datasetId': DATASET_ID,
}
}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, exists_ok=True, retry=DEFAULT_RETRY
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_table_with_extras_succeed(self, mock_bq_client, mock_table):
schema_fields = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'created', 'type': 'DATE', 'mode': 'REQUIRED'},
]
time_partitioning = {"field": "created", "type": "DAY"}
cluster_fields = ['name']
self.hook.create_empty_table(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
schema_fields=schema_fields,
time_partitioning=time_partitioning,
cluster_fields=cluster_fields,
)
body = {
'tableReference': {
'tableId': TABLE_ID,
'projectId': PROJECT_ID,
'datasetId': DATASET_ID,
},
'schema': {'fields': schema_fields},
'timePartitioning': time_partitioning,
'clustering': {'fields': cluster_fields},
}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, exists_ok=True, retry=DEFAULT_RETRY
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_tables_list(self, mock_client):
table_list = [
{
"kind": "bigquery#table",
"id": "your-project:your_dataset.table1",
"tableReference": {
"projectId": "your-project",
"datasetId": "your_dataset",
"tableId": "table1",
},
"type": "TABLE",
"creationTime": "1565781859261",
},
{
"kind": "bigquery#table",
"id": "your-project:your_dataset.table2",
"tableReference": {
"projectId": "your-project",
"datasetId": "your_dataset",
"tableId": "table2",
},
"type": "TABLE",
"creationTime": "1565782713480",
},
]
table_list_response = [Table.from_api_repr(t) for t in table_list]
mock_client.return_value.list_tables.return_value = table_list_response
dataset_reference = DatasetReference(PROJECT_ID, DATASET_ID)
result = self.hook.get_dataset_tables(dataset_id=DATASET_ID, project_id=PROJECT_ID)
mock_client.return_value.list_tables.assert_called_once_with(
dataset=dataset_reference,
max_results=None,
retry=DEFAULT_RETRY,
)
for res, exp in zip(result, table_list):
assert res["tableId"] == exp["tableReference"]["tableId"]
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_materialized_view(self, mock_bq_client, mock_table):
query = """
SELECT product, SUM(amount)
FROM `test-project-id.test_dataset_id.test_table_prefix*`
GROUP BY product
"""
materialized_view = {
'query': query,
'enableRefresh': True,
'refreshIntervalMs': 2000000,
}
self.hook.create_empty_table(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
materialized_view=materialized_view,
retry=DEFAULT_RETRY,
)
body = {'tableReference': TABLE_REFERENCE_REPR, 'materializedView': materialized_view}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
exists_ok=True,
retry=DEFAULT_RETRY,
)
class TestBigQueryCursor(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_execute_with_parameters(self, mock_insert, _):
bq_cursor = self.hook.get_cursor()
bq_cursor.execute("SELECT %(foo)s", {"foo": "bar"})
conf = {
'query': {
'query': "SELECT 'bar'",
'priority': 'INTERACTIVE',
'useLegacySql': True,
'schemaUpdateOptions': [],
}
}
mock_insert.assert_called_once_with(configuration=conf, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_execute_many(self, mock_insert, _):
bq_cursor = self.hook.get_cursor()
bq_cursor.executemany("SELECT %(foo)s", [{"foo": "bar"}, {"foo": "baz"}])
assert mock_insert.call_count == 2
        mock_insert.assert_has_calls([
mock.call(
configuration={
'query': {
'query': "SELECT 'bar'",
'priority': 'INTERACTIVE',
'useLegacySql': True,
'schemaUpdateOptions': [],
}
},
project_id=PROJECT_ID,
),
mock.call(
configuration={
'query': {
'query': "SELECT 'baz'",
'priority': 'INTERACTIVE',
'useLegacySql': True,
'schemaUpdateOptions': [],
}
},
project_id=PROJECT_ID,
),
        ])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_description(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
with pytest.raises(NotImplementedError):
bq_cursor.description
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_close(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.close() # pylint: disable=assignment-from-no-return
assert result is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_rowcount(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.rowcount
assert -1 == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.next")
def test_fetchone(self, mock_next, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.fetchone()
        assert mock_next.call_count == 1
assert mock_next.return_value == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.fetchone", side_effect=[1, 2, 3, None]
)
def test_fetchall(self, mock_fetchone, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.fetchall()
assert [1, 2, 3] == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.fetchone")
def test_fetchmany(self, mock_fetchone, mock_get_service):
side_effect_values = [1, 2, 3, None]
bq_cursor = self.hook.get_cursor()
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany()
assert [1] == result
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany(2)
assert [1, 2] == result
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany(5)
assert [1, 2, 3] == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_next_no_jobid(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = None
result = bq_cursor.next()
assert result is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_next_buffer(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = JOB_ID
bq_cursor.buffer = [1, 2]
result = bq_cursor.next()
assert 1 == result
result = bq_cursor.next()
assert 2 == result
bq_cursor.all_pages_loaded = True
result = bq_cursor.next()
assert result is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_next(self, mock_get_service):
mock_get_query_results = mock_get_service.return_value.jobs.return_value.getQueryResults
mock_execute = mock_get_query_results.return_value.execute
mock_execute.return_value = {
"rows": [
{"f": [{"v": "one"}, {"v": 1}]},
{"f": [{"v": "two"}, {"v": 2}]},
],
"pageToken": None,
"schema": {
"fields": [
{"name": "field_1", "type": "STRING"},
{"name": "field_2", "type": "INTEGER"},
]
},
}
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = JOB_ID
bq_cursor.location = LOCATION
result = bq_cursor.next()
assert ['one', 1] == result
result = bq_cursor.next()
assert ['two', 2] == result
mock_get_query_results.assert_called_once_with(
jobId=JOB_ID, location=LOCATION, pageToken=None, projectId='bq-project'
)
mock_execute.assert_called_once_with(num_retries=bq_cursor.num_retries)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.flush_results")
def test_next_no_rows(self, mock_flush_results, mock_get_service):
mock_get_query_results = mock_get_service.return_value.jobs.return_value.getQueryResults
mock_execute = mock_get_query_results.return_value.execute
mock_execute.return_value = {}
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = JOB_ID
result = bq_cursor.next()
assert result is None
mock_get_query_results.assert_called_once_with(
jobId=JOB_ID, location=None, pageToken=None, projectId='bq-project'
)
mock_execute.assert_called_once_with(num_retries=bq_cursor.num_retries)
assert mock_flush_results.call_count == 1
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.flush_results")
def test_flush_cursor_in_execute(self, _, mock_insert, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.execute("SELECT %(foo)s", {"foo": "bar"})
assert mock_insert.call_count == 1
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_flush_cursor(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.page_token = '456dcea9-fcbf-4f02-b570-83f5297c685e'
bq_cursor.job_id = 'c0a79ae4-0e72-4593-a0d0-7dbbf726f193'
bq_cursor.all_pages_loaded = True
bq_cursor.buffer = [('a', 100, 200), ('b', 200, 300)]
bq_cursor.flush_results()
assert bq_cursor.page_token is None
assert bq_cursor.job_id is None
assert not bq_cursor.all_pages_loaded
assert bq_cursor.buffer == []
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_arraysize(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
assert bq_cursor.buffersize is None
assert bq_cursor.arraysize == 1
bq_cursor.set_arraysize(10)
assert bq_cursor.buffersize == 10
assert bq_cursor.arraysize == 10
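# The cursor implements enough of the DB-API to be driven like any other cursor; a short usage
# sketch built only from calls covered above (the SQL is a placeholder):
def _example_cursor_usage(hook: BigQueryHook):
    cursor = hook.get_cursor()
    cursor.execute("SELECT %(foo)s", {"foo": "bar"})
    first = cursor.fetchone()      # a single row, or None when exhausted
    rest = cursor.fetchmany(100)   # up to 100 further rows
    cursor.flush_results()         # drop the buffer and page token before reuse
    return first, rest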
class TestDatasetsOperations(_BigQueryBaseTestClass):
def test_create_empty_dataset_no_dataset_id_err(self):
with pytest.raises(ValueError, match=r"Please specify `datasetId`"):
self.hook.create_empty_dataset(dataset_id=None, project_id=None)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_with_params(self, mock_client, mock_dataset):
self.hook.create_empty_dataset(project_id=PROJECT_ID, dataset_id=DATASET_ID, location=LOCATION)
expected_body = {
"location": LOCATION,
"datasetReference": {"datasetId": DATASET_ID, "projectId": PROJECT_ID},
}
api_repr = mock_dataset.from_api_repr
api_repr.assert_called_once_with(expected_body)
mock_client.return_value.create_dataset.assert_called_once_with(
dataset=api_repr.return_value, exists_ok=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_with_object(self, mock_client, mock_dataset):
dataset = {
"location": "LOCATION",
"datasetReference": {"datasetId": "DATASET_ID", "projectId": "PROJECT_ID"},
}
self.hook.create_empty_dataset(dataset_reference=dataset)
api_repr = mock_dataset.from_api_repr
api_repr.assert_called_once_with(dataset)
mock_client.return_value.create_dataset.assert_called_once_with(
dataset=api_repr.return_value, exists_ok=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_use_values_from_object(self, mock_client, mock_dataset):
dataset = {
"location": "LOCATION",
"datasetReference": {"datasetId": "DATASET_ID", "projectId": "PROJECT_ID"},
}
self.hook.create_empty_dataset(
dataset_reference=dataset,
location="Unknown location",
dataset_id="Fashionable Dataset",
project_id="Amazing Project",
)
api_repr = mock_dataset.from_api_repr
api_repr.assert_called_once_with(dataset)
mock_client.return_value.create_dataset.assert_called_once_with(
dataset=api_repr.return_value, exists_ok=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_dataset(self, mock_client):
_expected_result = {
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
}
expected_result = Dataset.from_api_repr(_expected_result)
mock_client.return_value.get_dataset.return_value = expected_result
result = self.hook.get_dataset(dataset_id=DATASET_ID, project_id=PROJECT_ID)
mock_client.return_value.get_dataset.assert_called_once_with(
dataset_ref=DatasetReference(PROJECT_ID, DATASET_ID)
)
assert result == expected_result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_datasets_list(self, mock_client):
datasets = [
{
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
},
{
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_1_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_1_test"},
},
]
return_value = [DatasetListItem(d) for d in datasets]
mock_client.return_value.list_datasets.return_value = return_value
result = self.hook.get_datasets_list(project_id=PROJECT_ID)
mock_client.return_value.list_datasets.assert_called_once_with(
project=PROJECT_ID,
include_all=False,
filter=None,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
)
for exp, res in zip(datasets, result):
assert res.full_dataset_id == exp["id"]
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_delete_dataset(self, mock_client):
delete_contents = True
self.hook.delete_dataset(
project_id=PROJECT_ID, dataset_id=DATASET_ID, delete_contents=delete_contents
)
mock_client.return_value.delete_dataset.assert_called_once_with(
dataset=DatasetReference(PROJECT_ID, DATASET_ID),
delete_contents=delete_contents,
retry=DEFAULT_RETRY,
not_found_ok=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_patch_dataset(self, mock_get_service):
dataset_resource = {"access": [{"role": "WRITER", "groupByEmail": "cloud-logs@google.com"}]}
method = mock_get_service.return_value.datasets.return_value.patch
self.hook.patch_dataset(
dataset_id=DATASET_ID, project_id=PROJECT_ID, dataset_resource=dataset_resource
)
method.assert_called_once_with(projectId=PROJECT_ID, datasetId=DATASET_ID, body=dataset_resource)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_update_dataset(self, mock_client, mock_dataset):
dataset_resource = {
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
}
method = mock_client.return_value.update_dataset
dataset = Dataset.from_api_repr(dataset_resource)
mock_dataset.from_api_repr.return_value = dataset
method.return_value = dataset
result = self.hook.update_dataset(
dataset_id=DATASET_ID,
project_id=PROJECT_ID,
dataset_resource=dataset_resource,
fields=["location"],
)
mock_dataset.from_api_repr.assert_called_once_with(dataset_resource)
method.assert_called_once_with(
dataset=dataset,
fields=["location"],
retry=DEFAULT_RETRY,
)
assert result == dataset
class TestTimePartitioningInRunJob(_BigQueryBaseTestClass):
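    """Check that time partitioning options are propagated into load and query job configurations."""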
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_default(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load'].get('timePartitioning') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_with_auto_detect(self, mock_insert):
destination_project_dataset_table = "autodetect.table"
self.hook.run_load(destination_project_dataset_table, [], [], autodetect=True)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['autodetect'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_arg(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table=f"{DATASET_ID}.{TABLE_ID}",
schema_fields=[],
source_uris=[],
time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
)
configuration = {
'load': {
'autodetect': False,
'createDisposition': 'CREATE_IF_NEEDED',
'destinationTable': {'projectId': PROJECT_ID, 'datasetId': DATASET_ID, 'tableId': TABLE_ID},
'sourceFormat': 'CSV',
'sourceUris': [],
'writeDisposition': 'WRITE_EMPTY',
'ignoreUnknownValues': False,
'timePartitioning': {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
'skipLeadingRows': 0,
'fieldDelimiter': ',',
'quote': None,
'allowQuotedNewlines': False,
'encoding': 'UTF-8',
}
}
mock_insert.assert_called_once_with(configuration=configuration, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table=f"{DATASET_ID}.{TABLE_ID}",
time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
)
configuration = {
'query': {
'query': 'select 1',
'priority': 'INTERACTIVE',
'useLegacySql': True,
'timePartitioning': {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
'schemaUpdateOptions': [],
'destinationTable': {'projectId': PROJECT_ID, 'datasetId': DATASET_ID, 'tableId': TABLE_ID},
'allowLargeResults': False,
'flattenResults': None,
'writeDisposition': 'WRITE_EMPTY',
'createDisposition': 'CREATE_IF_NEEDED',
}
}
mock_insert.assert_called_once_with(configuration=configuration, project_id=PROJECT_ID)
def test_dollar_makes_partition(self):
tp_out = _cleanse_time_partitioning('test.teast$20170101', {})
expect = {'type': 'DAY'}
assert tp_out == expect
def test_extra_time_partitioning_options(self):
tp_out = _cleanse_time_partitioning(
'test.teast', {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
)
expect = {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
assert tp_out == expect
class TestClusteringInRunJob(_BigQueryBaseTestClass):
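    """Check that cluster_fields end up in the clustering section of load and query configurations."""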
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_default(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load'].get('clustering') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_arg(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
cluster_fields=['field1', 'field2'],
time_partitioning={'type': 'DAY'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['clustering'] == {'fields': ['field1', 'field2']}
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_default(self, mock_insert):
self.hook.run_query(sql='select 1')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query'].get('clustering') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
cluster_fields=['field1', 'field2'],
time_partitioning={'type': 'DAY'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['clustering'] == {'fields': ['field1', 'field2']}
class TestBigQueryHookLegacySql(_BigQueryBaseTestClass):
"""Ensure `use_legacy_sql` param in `BigQueryHook` propagates properly."""
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_hook_uses_legacy_sql_by_default(self, mock_insert, _):
self.hook.get_first('query')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useLegacySql'] is True
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_legacy_sql_override_propagates_properly(
self, mock_insert, mock_get_service, mock_get_creds_and_proj_id
):
bq_hook = BigQueryHook(use_legacy_sql=False)
bq_hook.get_first('query')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useLegacySql'] is False
class TestBigQueryHookRunWithConfiguration(_BigQueryBaseTestClass):
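    """Check that run_with_configuration uses the hook's location when building the job."""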
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.LoadJob")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_run_with_configuration_location(self, mock_client, mock_job):
running_job_id = 'job_vjdi28vskdui2onru23'
location = 'asia-east1'
mock_job._JOB_TYPE = "load"
conf = {"load": {}}
self.hook.running_job_id = running_job_id
self.hook.location = location
self.hook.run_with_configuration(conf)
mock_client.assert_called_once_with(project_id=PROJECT_ID, location=location)
mock_job.from_api_repr.assert_called_once_with(
{
"configuration": conf,
"jobReference": {"jobId": mock.ANY, "projectId": PROJECT_ID, "location": location},
},
mock_client.return_value,
)
class TestBigQueryWithKMS(_BigQueryBaseTestClass):
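    """Check that encryption_configuration (KMS) settings are passed through to table and job calls."""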
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_table_with_kms(self, mock_bq_client, mock_table):
schema_fields = [{"name": "id", "type": "STRING", "mode": "REQUIRED"}]
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.create_empty_table(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
schema_fields=schema_fields,
encryption_configuration=encryption_configuration,
)
body = {
"tableReference": {"tableId": TABLE_ID, 'projectId': PROJECT_ID, 'datasetId': DATASET_ID},
"schema": {"fields": schema_fields},
"encryptionConfiguration": encryption_configuration,
}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
exists_ok=True,
retry=DEFAULT_RETRY,
)
# pylint: disable=too-many-locals
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_with_kms(self, mock_create):
external_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
source_uris = ['test_data.csv']
source_format = 'CSV'
autodetect = False
compression = 'NONE'
ignore_unknown_values = False
max_bad_records = 10
skip_leading_rows = 1
field_delimiter = ','
quote_character = None
allow_quoted_newlines = False
allow_jagged_rows = False
encoding = "UTF-8"
labels = {'label1': 'test1', 'label2': 'test2'}
schema_fields = [{'mode': 'REQUIRED', 'name': 'id', 'type': 'STRING', 'description': None}]
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.create_external_table(
external_project_dataset_table=external_project_dataset_table,
source_uris=source_uris,
source_format=source_format,
autodetect=autodetect,
compression=compression,
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records,
skip_leading_rows=skip_leading_rows,
field_delimiter=field_delimiter,
quote_character=quote_character,
allow_jagged_rows=allow_jagged_rows,
encoding=encoding,
allow_quoted_newlines=allow_quoted_newlines,
labels=labels,
schema_fields=schema_fields,
encryption_configuration=encryption_configuration,
)
body = {
'externalDataConfiguration': {
'autodetect': autodetect,
'sourceFormat': source_format,
'sourceUris': source_uris,
'compression': compression,
'ignoreUnknownValues': ignore_unknown_values,
'schema': {'fields': schema_fields},
'maxBadRecords': max_bad_records,
'csvOptions': {
'skipLeadingRows': skip_leading_rows,
'fieldDelimiter': field_delimiter,
'quote': quote_character,
'allowQuotedNewlines': allow_quoted_newlines,
'allowJaggedRows': allow_jagged_rows,
'encoding': encoding,
},
},
'tableReference': {
'projectId': PROJECT_ID,
'datasetId': DATASET_ID,
'tableId': TABLE_ID,
},
'labels': labels,
"encryptionConfiguration": encryption_configuration,
}
mock_create.assert_called_once_with(
table_resource=body,
project_id=PROJECT_ID,
location=None,
exists_ok=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_update_table(self, mock_client, mock_table):
description_patched = 'Test description.'
expiration_time_patched = 2524608000000
friendly_name_patched = 'Test friendly name.'
labels_patched = {'label1': 'test1', 'label2': 'test2'}
schema_patched = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'balance', 'type': 'FLOAT', 'mode': 'NULLABLE'},
{'name': 'new_field', 'type': 'STRING', 'mode': 'NULLABLE'},
]
time_partitioning_patched = {'expirationMs': 10000000}
require_partition_filter_patched = True
view_patched = {
'query': "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
'useLegacySql': False,
}
body = {
"tableReference": {
"projectId": PROJECT_ID,
"datasetId": DATASET_ID,
"tableId": TABLE_ID,
},
"description": description_patched,
"expirationTime": expiration_time_patched,
"friendlyName": friendly_name_patched,
"labels": labels_patched,
"schema": {"fields": schema_patched},
"timePartitioning": time_partitioning_patched,
"view": view_patched,
"requirePartitionFilter": require_partition_filter_patched,
}
fields = list(body.keys())
self.hook.update_table(
table_resource=body,
fields=fields,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
)
mock_table.from_api_repr.assert_called_once_with(body)
mock_client.return_value.update_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, fields=fields
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_query(sql='query', encryption_configuration=encryption_configuration)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['query']['destinationEncryptionConfiguration'] is encryption_configuration
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_copy_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_copy(
source_project_dataset_tables='p.d.st',
destination_project_dataset_table='p.d.dt',
encryption_configuration=encryption_configuration,
)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['copy']['destinationEncryptionConfiguration'] is encryption_configuration
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_load(
destination_project_dataset_table='p.d.dt',
source_uris=['abc.csv'],
autodetect=True,
encryption_configuration=encryption_configuration,
)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['load']['destinationEncryptionConfiguration'] is encryption_configuration
)
class TestBigQueryBaseCursorMethodsDeprecationWarning(unittest.TestCase):
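    """Ensure deprecated cursor methods raise DeprecationWarning and delegate to BigQueryHook."""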
@parameterized.expand(
[
("create_empty_table",),
("create_empty_dataset",),
("get_dataset_tables",),
("delete_dataset",),
("create_external_table",),
("patch_table",),
("insert_all",),
("update_dataset",),
("patch_dataset",),
("get_dataset_tables_list",),
("get_datasets_list",),
("get_dataset",),
("run_grant_dataset_view_access",),
("run_table_upsert",),
("run_table_delete",),
("get_tabledata",),
("get_schema",),
("poll_job_complete",),
("cancel_query",),
("run_with_configuration",),
("run_load",),
("run_copy",),
("run_extract",),
("run_query",),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook")
def test_deprecation_warning(self, func_name, mock_bq_hook):
args, kwargs = [1], {"param1": "val1"}
new_path = re.escape(f"`airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.{func_name}`")
message_pattern = fr"This method is deprecated\.\s+Please use {new_path}"
message_regex = re.compile(message_pattern, re.MULTILINE)
mocked_func = getattr(mock_bq_hook, func_name)
bq_cursor = BigQueryCursor(mock.MagicMock(), PROJECT_ID, mock_bq_hook)
func = getattr(bq_cursor, func_name)
with pytest.warns(DeprecationWarning, match=message_regex):
_ = func(*args, **kwargs)
mocked_func.assert_called_once_with(*args, **kwargs)
assert re.search(f".*{new_path}.*", func.__doc__)
class TestBigQueryWithLabelsAndDescription(_BigQueryBaseTestClass):
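    """Check that labels and descriptions are forwarded to load jobs and external table creation."""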
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_labels(self, mock_insert):
labels = {'label1': 'test1', 'label2': 'test2'}
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
labels=labels,
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['destinationTableProperties']['labels'] is labels
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_description(self, mock_insert):
description = "Test Description"
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
description=description,
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['destinationTableProperties']['description'] is description
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_labels(self, mock_create):
labels = {'label1': 'test1', 'label2': 'test2'}
self.hook.create_external_table(
external_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
labels=labels,
)
_, kwargs = mock_create.call_args
self.assertDictEqual(kwargs['table_resource']['labels'], labels)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_description(self, mock_create):
description = "Test Description"
self.hook.create_external_table(
external_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
description=description,
)
_, kwargs = mock_create.call_args
assert kwargs['table_resource']['description'] is description
| avg_line_length: 42.402087
| max_line_length: 110
| alphanum_fraction: 0.631674
|
import re
import unittest
from unittest import mock
import pytest
from google.cloud.bigquery import DEFAULT_RETRY, DatasetReference, Table, TableReference
from google.cloud.bigquery.dataset import AccessEntry, Dataset, DatasetListItem
from google.cloud.exceptions import NotFound
from parameterized import parameterized
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.bigquery import (
BigQueryCursor,
BigQueryHook,
_api_resource_configs_duplication_check,
_cleanse_time_partitioning,
_split_tablename,
_validate_src_fmt_configs,
_validate_value,
)
PROJECT_ID = "bq-project"
CREDENTIALS = "bq-credentials"
DATASET_ID = "bq_dataset"
TABLE_ID = "bq_table"
PARTITION_ID = "20200101"
VIEW_ID = 'bq_view'
JOB_ID = "1234"
LOCATION = 'europe-north1'
TABLE_REFERENCE_REPR = {
'tableId': TABLE_ID,
'datasetId': DATASET_ID,
'projectId': PROJECT_ID,
}
TABLE_REFERENCE = TableReference.from_api_repr(TABLE_REFERENCE_REPR)
class _BigQueryBaseTestClass(unittest.TestCase):
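    """Base test class that provides a BigQueryHook with mocked credentials and project id."""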
def setUp(self) -> None:
class MockedBigQueryHook(BigQueryHook):
def _get_credentials_and_project_id(self):
return CREDENTIALS, PROJECT_ID
self.hook = MockedBigQueryHook()
class TestBigQueryHookMethods(_BigQueryBaseTestClass):
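    """Tests for connection creation, existence checks, query validation and job control helpers."""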
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryConnection")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.build")
def test_bigquery_client_creation(self, mock_build, mock_authorize, mock_bigquery_connection):
result = self.hook.get_conn()
mock_build.assert_called_once_with(
'bigquery', 'v2', http=mock_authorize.return_value, cache_discovery=False
)
mock_bigquery_connection.assert_called_once_with(
service=mock_build.return_value,
project_id=PROJECT_ID,
hook=self.hook,
use_legacy_sql=self.hook.use_legacy_sql,
location=self.hook.location,
num_retries=self.hook.num_retries,
)
assert mock_bigquery_connection.return_value == result
@mock.patch("airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__")
def test_bigquery_bigquery_conn_id_deprecation_warning(
self,
mock_base_hook_init,
):
bigquery_conn_id = "bigquery conn id"
warning_message = (
"The bigquery_conn_id parameter has been deprecated. "
"You should pass the gcp_conn_id parameter."
)
with pytest.warns(DeprecationWarning) as warnings:
BigQueryHook(bigquery_conn_id=bigquery_conn_id)
mock_base_hook_init.assert_called_once_with(
delegate_to=None,
gcp_conn_id='bigquery conn id',
impersonation_chain=None,
)
assert warning_message == str(warnings[0].message)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_location_propagates_properly(self, run_with_config, _):
assert self.hook.location is None
self.hook.run_query(sql='select 1', location='US')
assert run_with_config.call_count == 1
assert self.hook.location == 'US'
def test_bigquery_insert_rows_not_implemented(self):
with pytest.raises(NotImplementedError):
self.hook.insert_rows(table="table", rows=[1, 2])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_exists_true(self, mock_client):
result = self.hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_exists_false(self, mock_client):
mock_client.return_value.get_table.side_effect = NotFound("Dataset not found")
result = self.hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_true(self, mock_client):
mock_client.return_value.list_partitions.return_value = [PARTITION_ID]
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_false_no_table(self, mock_client):
mock_client.return_value.get_table.side_effect = NotFound("Dataset not found")
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_false_no_partition(self, mock_client):
mock_client.return_value.list_partitions.return_value = []
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.read_gbq')
def test_get_pandas_df(self, mock_read_gbq):
self.hook.get_pandas_df('select 1')
mock_read_gbq.assert_called_once_with(
'select 1', credentials=CREDENTIALS, dialect='legacy', project_id=PROJECT_ID, verbose=False
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_schema_update_options(self, mock_get_service):
with pytest.raises(
Exception,
match=(
r"\['THIS IS NOT VALID'\] contains invalid schema update options."
r"Please only use one or more of the following options: "
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]"
),
):
self.hook.run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=["THIS IS NOT VALID"],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_schema_update_and_write_disposition(self, mock_get_service):
with pytest.raises(
Exception,
match="schema_update_options is only allowed if"
" write_disposition is 'WRITE_APPEND' or 'WRITE_TRUNCATE'.",
):
self.hook.run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=['ALLOW_FIELD_ADDITION'],
write_disposition='WRITE_EMPTY',
)
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete",
side_effect=[False, True],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_cancel_queries(self, mock_client, mock_poll_job_complete):
running_job_id = 3
self.hook.running_job_id = running_job_id
self.hook.cancel_query()
        mock_poll_job_complete.assert_has_calls(
            [mock.call(job_id=running_job_id), mock.call(job_id=running_job_id)]
        )
mock_client.assert_called_once_with(project_id=PROJECT_ID, location=None)
mock_client.return_value.cancel_job.assert_called_once_with(job_id=running_job_id)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect_default(
self,
mock_insert,
_,
):
self.hook.run_query('query')
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect(self, mock_insert, _):
self.hook.run_query('query', use_legacy_sql=False)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect_legacy_with_query_params(self, mock_insert, _):
params = [
{
'name': "param_name",
'parameterType': {'type': "STRING"},
'parameterValue': {'value': "param_value"},
}
]
self.hook.run_query('query', use_legacy_sql=False, query_params=params)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_sql_dialect_legacy_with_query_params_fails(self, _):
params = [
{
'name': "param_name",
'parameterType': {'type': "STRING"},
'parameterValue': {'value': "param_value"},
}
]
with pytest.raises(ValueError, match="Query parameters are not allowed when using legacy SQL"):
self.hook.run_query('query', use_legacy_sql=True, query_params=params)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_without_sql_fails(self, _):
with pytest.raises(
TypeError, match=r"`BigQueryBaseCursor.run_query` missing 1 required positional argument: `sql`"
):
self.hook.run_query(sql=None)
@parameterized.expand(
[
(['ALLOW_FIELD_ADDITION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_RELAXATION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_ADDITION'], 'WRITE_TRUNCATE'),
(['ALLOW_FIELD_RELAXATION'], 'WRITE_TRUNCATE'),
(['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'], 'WRITE_TRUNCATE'),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_schema_update_options(
self,
schema_update_options,
write_disposition,
mock_insert,
mock_get_service,
):
self.hook.run_query(
sql='query',
destination_dataset_table='my_dataset.my_table',
schema_update_options=schema_update_options,
write_disposition=write_disposition,
)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['schemaUpdateOptions'] == schema_update_options
assert kwargs['configuration']['query']['writeDisposition'] == write_disposition
@parameterized.expand(
[
(
['INCORRECT_OPTION'],
None,
r"\['INCORRECT_OPTION'\] contains invalid schema update options\. "
r"Please only use one or more of the following options: "
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]",
),
(
['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION', 'INCORRECT_OPTION'],
None,
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION', 'INCORRECT_OPTION'\] contains invalid "
r"schema update options\. Please only use one or more of the following options: "
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]",
),
(
['ALLOW_FIELD_ADDITION'],
None,
r"schema_update_options is only allowed if write_disposition is "
r"'WRITE_APPEND' or 'WRITE_TRUNCATE'",
),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_schema_update_options_incorrect(
self,
schema_update_options,
write_disposition,
expected_regex,
mock_get_service,
):
with pytest.raises(ValueError, match=expected_regex):
self.hook.run_query(
sql='query',
destination_dataset_table='my_dataset.my_table',
schema_update_options=schema_update_options,
write_disposition=write_disposition,
)
@parameterized.expand([(True,), (False,)])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_api_resource_configs(
self,
bool_val,
mock_insert,
_,
):
self.hook.run_query('query', api_resource_configs={'query': {'useQueryCache': bool_val}})
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useQueryCache'] is bool_val
assert kwargs["configuration"]['query']['useLegacySql'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_api_resource_configs_duplication_warning(self, mock_get_service):
with pytest.raises(
ValueError,
match=(
r"Values of useLegacySql param are duplicated\. api_resource_configs "
r"contained useLegacySql param in `query` config and useLegacySql was "
r"also provided with arg to run_query\(\) method\. Please remove duplicates\."
),
):
self.hook.run_query(
'query', use_legacy_sql=True, api_resource_configs={'query': {'useLegacySql': False}}
)
def test_validate_value(self):
with pytest.raises(
TypeError, match="case_1 argument must have a type <class 'dict'> not <class 'str'>"
):
_validate_value("case_1", "a", dict)
assert _validate_value("case_2", 0, int) is None
def test_duplication_check(self):
with pytest.raises(
ValueError,
match=r"Values of key_one param are duplicated. api_resource_configs contained key_one param in"
r" `query` config and key_one was also provided with arg to run_query\(\) method. "
r"Please remove duplicates.",
):
key_one = True
_api_resource_configs_duplication_check("key_one", key_one, {"key_one": False})
assert _api_resource_configs_duplication_check("key_one", key_one, {"key_one": True}) is None
def test_validate_src_fmt_configs(self):
source_format = "test_format"
valid_configs = ["test_config_known", "compatibility_val"]
backward_compatibility_configs = {"compatibility_val": "val"}
with pytest.raises(
ValueError, match="test_config_unknown is not a valid src_fmt_configs for type test_format."
):
src_fmt_configs = {"test_config_unknown": "val"}
_validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
src_fmt_configs = {"test_config_known": "val"}
src_fmt_configs = _validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
assert (
"test_config_known" in src_fmt_configs
), "src_fmt_configs should contain al known src_fmt_configs"
assert (
"compatibility_val" in src_fmt_configs
), "_validate_src_fmt_configs should add backward_compatibility config"
@parameterized.expand([("AVRO",), ("PARQUET",), ("NEWLINE_DELIMITED_JSON",), ("DATASTORE_BACKUP",)])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_non_csv_as_src_fmt(self, fmt, _):
try:
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
source_uris=[],
source_format=fmt,
autodetect=True,
)
except ValueError:
self.fail("run_load() raised ValueError unexpectedly!")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_extract(self, mock_insert):
source_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
destination_cloud_storage_uris = ["gs://bucket/file.csv"]
expected_configuration = {
"extract": {
"sourceTable": {
"projectId": PROJECT_ID,
"datasetId": DATASET_ID,
"tableId": TABLE_ID,
},
"compression": "NONE",
"destinationUris": destination_cloud_storage_uris,
"destinationFormat": "CSV",
"fieldDelimiter": ",",
"printHeader": True,
}
}
self.hook.run_extract(
source_project_dataset_table=source_project_dataset_table,
destination_cloud_storage_uris=destination_cloud_storage_uris,
)
mock_insert.assert_called_once_with(configuration=expected_configuration, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.SchemaField")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_list_rows(self, mock_client, mock_schema, mock_table):
self.hook.list_rows(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
max_results=10,
selected_fields=["field_1", "field_2"],
page_token="page123",
start_index=5,
location=LOCATION,
)
mock_table.from_api_repr.assert_called_once_with({"tableReference": TABLE_REFERENCE_REPR})
        mock_schema.assert_has_calls([mock.call(x, "") for x in ["field_1", "field_2"]])
mock_client.return_value.list_rows.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
max_results=10,
selected_fields=mock.ANY,
page_token='page123',
start_index=5,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_list_rows_with_empty_selected_fields(self, mock_client, mock_table):
self.hook.list_rows(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
max_results=10,
page_token="page123",
selected_fields=[],
start_index=5,
location=LOCATION,
)
mock_table.from_api_repr.assert_called_once_with({"tableReference": TABLE_REFERENCE_REPR})
mock_client.return_value.list_rows.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
max_results=10,
page_token='page123',
selected_fields=None,
start_index=5,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_run_table_delete(self, mock_client, mock_table):
source_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
self.hook.run_table_delete(source_project_dataset_table, ignore_if_missing=False)
mock_table.from_string.assert_called_once_with(source_project_dataset_table)
mock_client.return_value.delete_table.assert_called_once_with(
table=mock_table.from_string.return_value, not_found_ok=False
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables")
def test_table_upsert_create_new_table(self, mock_get, mock_create):
table_resource = {"tableReference": {"tableId": TABLE_ID}}
mock_get.return_value = []
self.hook.run_table_upsert(dataset_id=DATASET_ID, table_resource=table_resource)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
mock_create.assert_called_once_with(table_resource=table_resource, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables")
def test_table_upsert_already_exists(self, mock_get, mock_update):
table_resource = {"tableReference": {"tableId": TABLE_ID}}
mock_get.return_value = [{"tableId": TABLE_ID}]
self.hook.run_table_upsert(dataset_id=DATASET_ID, table_resource=table_resource)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
mock_update.assert_called_once_with(table_resource=table_resource)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset")
def test_run_grant_dataset_view_access_granting(self, mock_update, mock_get):
view_table = f"{TABLE_ID}_view"
view_dataset = f"{DATASET_ID}_view"
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={'projectId': PROJECT_ID, 'datasetId': view_dataset, 'tableId': view_table},
)
dataset = Dataset(DatasetReference.from_string(DATASET_ID, PROJECT_ID))
dataset.access_entries = []
mock_get.return_value = dataset
self.hook.run_grant_dataset_view_access(
source_dataset=DATASET_ID, view_dataset=view_dataset, view_table=view_table
)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
assert view_access in dataset.access_entries
mock_update.assert_called_once_with(
fields=["access"],
dataset_resource=dataset.to_api_repr(),
project_id=PROJECT_ID,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset")
def test_run_grant_dataset_view_access_already_granted(self, mock_update, mock_get):
view_table = f"{TABLE_ID}_view"
view_dataset = f"{DATASET_ID}_view"
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={'projectId': PROJECT_ID, 'datasetId': view_dataset, 'tableId': view_table},
)
dataset = Dataset(DatasetReference.from_string(DATASET_ID, PROJECT_ID))
dataset.access_entries = [view_access]
mock_get.return_value = dataset
self.hook.run_grant_dataset_view_access(
source_dataset=DATASET_ID, view_dataset=view_dataset, view_table=view_table
)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
assert len(mock_update.calls) == 0
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_dataset_tables_list(self, mock_client):
table_list = [
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "a-1"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "b-1"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "a-2"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "b-2"},
]
table_list_response = [Table.from_api_repr({"tableReference": t}) for t in table_list]
mock_client.return_value.list_tables.return_value = table_list_response
dataset_reference = DatasetReference(PROJECT_ID, DATASET_ID)
result = self.hook.get_dataset_tables_list(dataset_id=DATASET_ID, project_id=PROJECT_ID)
mock_client.return_value.list_tables.assert_called_once_with(
dataset=dataset_reference, max_results=None
)
assert table_list == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_poll_job_complete(self, mock_client):
self.hook.poll_job_complete(job_id=JOB_ID, location=LOCATION, project_id=PROJECT_ID)
mock_client.assert_called_once_with(location=LOCATION, project_id=PROJECT_ID)
mock_client.return_value.get_job.assert_called_once_with(job_id=JOB_ID)
mock_client.return_value.get_job.return_value.done.assert_called_once_with(retry=DEFAULT_RETRY)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("logging.Logger.info")
def test_cancel_query_jobs_to_cancel(
self,
mock_logger_info,
poll_job_complete,
):
poll_job_complete.return_value = True
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
poll_job_complete.assert_called_once_with(job_id=JOB_ID)
        mock_logger_info.assert_any_call("No running BigQuery jobs to cancel.")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("time.sleep")
@mock.patch("logging.Logger.info")
def test_cancel_query_cancel_timeout(
self,
mock_logger_info,
mock_sleep,
poll_job_complete,
mock_client,
):
poll_job_complete.side_effect = [False] * 13
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
mock_client.return_value.cancel_job.assert_called_once_with(job_id=JOB_ID)
assert poll_job_complete.call_count == 13
assert mock_sleep.call_count == 11
mock_logger_info.has_call(
mock.call(
f"Stopping polling due to timeout. Job with id {JOB_ID} "
"has not completed cancel and may or may not finish."
)
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("time.sleep")
@mock.patch("logging.Logger.info")
def test_cancel_query_cancel_completed(
self,
mock_logger_info,
mock_sleep,
poll_job_complete,
mock_client,
):
poll_job_complete.side_effect = [False] * 12 + [True]
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
mock_client.return_value.cancel_job.assert_called_once_with(job_id=JOB_ID)
assert poll_job_complete.call_count == 13
assert mock_sleep.call_count == 11
mock_logger_info.has_call(mock.call(f"Job successfully canceled: {PROJECT_ID}, {PROJECT_ID}"))
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_schema(self, mock_client):
table = {
"tableReference": TABLE_REFERENCE_REPR,
"schema": {
"fields": [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
]
},
}
mock_client.return_value.get_table.return_value = Table.from_api_repr(table)
result = self.hook.get_schema(dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
assert "fields" in result
assert len(result["fields"]) == 2
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema')
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table')
def test_update_table_schema_with_policy_tags(self, mock_update, mock_get_schema):
mock_get_schema.return_value = {
"fields": [
{'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED'},
{
'name': 'salary',
'type': 'INTEGER',
'mode': 'REQUIRED',
'policyTags': {'names': ['sensitive']},
},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'fields': [
{
'name': 'field_1',
'type': 'STRING',
'mode': 'REQUIRED',
'policyTags': {'names': ['sensitive']},
},
],
},
]
}
schema_fields_updates = [
{'name': 'emp_name', 'description': 'Name of employee', 'policyTags': {'names': ['sensitive']}},
{
'name': 'salary',
'description': 'Monthly salary in USD',
'policyTags': {},
},
{
'name': 'subrecord',
'description': 'Some Desc',
'fields': [
{'name': 'field_1', 'description': 'Some nested desc'},
],
},
]
expected_result_schema = {
'fields': [
{
'name': 'emp_name',
'type': 'STRING',
'mode': 'REQUIRED',
'description': 'Name of employee',
'policyTags': {'names': ['sensitive']},
},
{
'name': 'salary',
'type': 'INTEGER',
'mode': 'REQUIRED',
'description': 'Monthly salary in USD',
'policyTags': {},
},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'description': 'Some Desc',
'fields': [
{
'name': 'field_1',
'type': 'STRING',
'mode': 'REQUIRED',
'description': 'Some nested desc',
'policyTags': {'names': ['sensitive']},
}
],
},
]
}
self.hook.update_table_schema(
schema_fields_updates=schema_fields_updates,
include_policy_tags=True,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
)
mock_update.assert_called_once_with(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
table_resource={'schema': expected_result_schema},
fields=['schema'],
)
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema')
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table')
def test_update_table_schema_without_policy_tags(self, mock_update, mock_get_schema):
mock_get_schema.return_value = {
"fields": [
{'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'salary', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'fields': [
{'name': 'field_1', 'type': 'STRING', 'mode': 'REQUIRED'},
],
},
]
}
schema_fields_updates = [
{'name': 'emp_name', 'description': 'Name of employee'},
{
'name': 'salary',
'description': 'Monthly salary in USD',
'policyTags': {'names': ['sensitive']},
},
{
'name': 'subrecord',
'description': 'Some Desc',
'fields': [
{'name': 'field_1', 'description': 'Some nested desc'},
],
},
]
expected_result_schema = {
'fields': [
{'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED', 'description': 'Name of employee'},
{
'name': 'salary',
'type': 'INTEGER',
'mode': 'REQUIRED',
'description': 'Monthly salary in USD',
},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'description': 'Some Desc',
'fields': [
{
'name': 'field_1',
'type': 'STRING',
'mode': 'REQUIRED',
'description': 'Some nested desc',
}
],
},
]
}
self.hook.update_table_schema(
schema_fields_updates=schema_fields_updates,
include_policy_tags=False,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
)
mock_update.assert_called_once_with(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
table_resource={'schema': expected_result_schema},
fields=['schema'],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_source_format(self, mock_get_service):
with pytest.raises(
Exception,
match=r"JSON is not a valid source format. Please use one of the following types: \['CSV', "
r"'NEWLINE_DELIMITED_JSON', 'AVRO', 'GOOGLE_SHEETS', 'DATASTORE_BACKUP', 'PARQUET'\]",
):
self.hook.run_load("test.test", "test_schema.json", ["test_data.json"], source_format="json")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_insert_all_succeed(self, mock_client):
rows = [{"json": {"a_key": "a_value_0"}}]
self.hook.insert_all(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
rows=rows,
ignore_unknown_values=True,
skip_invalid_rows=True,
)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.return_value.insert_rows.assert_called_once_with(
table=mock_client.return_value.get_table.return_value,
rows=rows,
ignore_unknown_values=True,
skip_invalid_rows=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_insert_all_fail(self, mock_client):
rows = [{"json": {"a_key": "a_value_0"}}]
mock_client.return_value.insert_rows.return_value = ["some", "errors"]
with pytest.raises(AirflowException, match="insert error"):
self.hook.insert_all(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, rows=rows, fail_on_error=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
labels={'label1': 'test1', 'label2': 'test2'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['labels'] == {'label1': 'test1', 'label2': 'test2'}
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.QueryJob")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_insert_job(self, mock_client, mock_query_job):
job_conf = {
"query": {
"query": "SELECT * FROM test",
"useLegacySql": "False",
}
}
mock_query_job._JOB_TYPE = "query"
self.hook.insert_job(
configuration=job_conf,
job_id=JOB_ID,
project_id=PROJECT_ID,
location=LOCATION,
)
mock_client.assert_called_once_with(
project_id=PROJECT_ID,
location=LOCATION,
)
mock_query_job.from_api_repr.assert_called_once_with(
{
'configuration': job_conf,
'jobReference': {'jobId': JOB_ID, 'projectId': PROJECT_ID, 'location': LOCATION},
},
mock_client.return_value,
)
mock_query_job.from_api_repr.return_value.result.assert_called_once_with()
class TestBigQueryTableSplitter(unittest.TestCase):
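    """Tests for _split_tablename parsing of project, dataset and table names."""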
def test_internal_need_default_project(self):
with pytest.raises(Exception, match="INTERNAL: No default project is specified"):
_split_tablename("dataset.table", None)
@parameterized.expand(
[
("project", "dataset", "table", "dataset.table"),
("alternative", "dataset", "table", "alternative:dataset.table"),
("alternative", "dataset", "table", "alternative.dataset.table"),
("alt1:alt", "dataset", "table", "alt1:alt.dataset.table"),
("alt1:alt", "dataset", "table", "alt1:alt:dataset.table"),
]
)
def test_split_tablename(self, project_expected, dataset_expected, table_expected, table_input):
default_project_id = "project"
project, dataset, table = _split_tablename(table_input, default_project_id)
assert project_expected == project
assert dataset_expected == dataset
assert table_expected == table
@parameterized.expand(
[
("alt1:alt2:alt3:dataset.table", None, "Use either : or . to specify project got {}"),
(
"alt1.alt.dataset.table",
None,
r"Expect format of \(<project\.\|<project\:\)<dataset>\.<table>, got {}",
),
(
"alt1:alt2:alt.dataset.table",
"var_x",
"Format exception for var_x: Use either : or . to specify project got {}",
),
(
"alt1:alt2:alt:dataset.table",
"var_x",
"Format exception for var_x: Use either : or . to specify project got {}",
),
(
"alt1.alt.dataset.table",
"var_x",
r"Format exception for var_x: Expect format of "
r"\(<project\.\|<project:\)<dataset>.<table>, got {}",
),
]
)
def test_invalid_syntax(self, table_input, var_name, exception_message):
default_project_id = "project"
with pytest.raises(Exception, match=exception_message.format(table_input)):
_split_tablename(table_input, default_project_id, var_name)
class TestTableOperations(_BigQueryBaseTestClass):
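    """Tests for creating, patching and listing tables and views."""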
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_view(self, mock_bq_client, mock_table):
view = {
'query': 'SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*`',
"useLegacySql": False,
}
self.hook.create_empty_table(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, view=view, retry=DEFAULT_RETRY
)
body = {'tableReference': TABLE_REFERENCE_REPR, 'view': view}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
exists_ok=True,
retry=DEFAULT_RETRY,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_patch_table(self, mock_client, mock_table):
description_patched = 'Test description.'
expiration_time_patched = 2524608000000
friendly_name_patched = 'Test friendly name.'
labels_patched = {'label1': 'test1', 'label2': 'test2'}
schema_patched = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'balance', 'type': 'FLOAT', 'mode': 'NULLABLE'},
{'name': 'new_field', 'type': 'STRING', 'mode': 'NULLABLE'},
]
time_partitioning_patched = {'expirationMs': 10000000}
require_partition_filter_patched = True
view_patched = {
'query': "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
'useLegacySql': False,
}
self.hook.patch_table(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
description=description_patched,
expiration_time=expiration_time_patched,
friendly_name=friendly_name_patched,
labels=labels_patched,
schema=schema_patched,
time_partitioning=time_partitioning_patched,
require_partition_filter=require_partition_filter_patched,
view=view_patched,
)
body = {
"description": description_patched,
"expirationTime": expiration_time_patched,
"friendlyName": friendly_name_patched,
"labels": labels_patched,
"schema": {"fields": schema_patched},
"timePartitioning": time_partitioning_patched,
"view": view_patched,
"requirePartitionFilter": require_partition_filter_patched,
}
fields = list(body.keys())
body["tableReference"] = TABLE_REFERENCE_REPR
mock_table.from_api_repr.assert_called_once_with(body)
mock_client.return_value.update_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, fields=fields
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_table_succeed(self, mock_bq_client, mock_table):
self.hook.create_empty_table(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
body = {
'tableReference': {
'tableId': TABLE_ID,
'projectId': PROJECT_ID,
'datasetId': DATASET_ID,
}
}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, exists_ok=True, retry=DEFAULT_RETRY
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_table_with_extras_succeed(self, mock_bq_client, mock_table):
schema_fields = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'created', 'type': 'DATE', 'mode': 'REQUIRED'},
]
time_partitioning = {"field": "created", "type": "DAY"}
cluster_fields = ['name']
self.hook.create_empty_table(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
schema_fields=schema_fields,
time_partitioning=time_partitioning,
cluster_fields=cluster_fields,
)
body = {
'tableReference': {
'tableId': TABLE_ID,
'projectId': PROJECT_ID,
'datasetId': DATASET_ID,
},
'schema': {'fields': schema_fields},
'timePartitioning': time_partitioning,
'clustering': {'fields': cluster_fields},
}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, exists_ok=True, retry=DEFAULT_RETRY
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_tables_list(self, mock_client):
table_list = [
{
"kind": "bigquery#table",
"id": "your-project:your_dataset.table1",
"tableReference": {
"projectId": "your-project",
"datasetId": "your_dataset",
"tableId": "table1",
},
"type": "TABLE",
"creationTime": "1565781859261",
},
{
"kind": "bigquery#table",
"id": "your-project:your_dataset.table2",
"tableReference": {
"projectId": "your-project",
"datasetId": "your_dataset",
"tableId": "table2",
},
"type": "TABLE",
"creationTime": "1565782713480",
},
]
table_list_response = [Table.from_api_repr(t) for t in table_list]
mock_client.return_value.list_tables.return_value = table_list_response
dataset_reference = DatasetReference(PROJECT_ID, DATASET_ID)
result = self.hook.get_dataset_tables(dataset_id=DATASET_ID, project_id=PROJECT_ID)
mock_client.return_value.list_tables.assert_called_once_with(
dataset=dataset_reference,
max_results=None,
retry=DEFAULT_RETRY,
)
for res, exp in zip(result, table_list):
assert res["tableId"] == exp["tableReference"]["tableId"]
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_materialized_view(self, mock_bq_client, mock_table):
query = """
SELECT product, SUM(amount)
FROM `test-project-id.test_dataset_id.test_table_prefix*`
GROUP BY product
"""
materialized_view = {
'query': query,
'enableRefresh': True,
'refreshIntervalMs': 2000000,
}
self.hook.create_empty_table(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
materialized_view=materialized_view,
retry=DEFAULT_RETRY,
)
body = {'tableReference': TABLE_REFERENCE_REPR, 'materializedView': materialized_view}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
exists_ok=True,
retry=DEFAULT_RETRY,
)
class TestBigQueryCursor(_BigQueryBaseTestClass):
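    """Tests for the DB-API style BigQueryCursor (execute, fetch and pagination)."""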
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_execute_with_parameters(self, mock_insert, _):
bq_cursor = self.hook.get_cursor()
bq_cursor.execute("SELECT %(foo)s", {"foo": "bar"})
conf = {
'query': {
'query': "SELECT 'bar'",
'priority': 'INTERACTIVE',
'useLegacySql': True,
'schemaUpdateOptions': [],
}
}
mock_insert.assert_called_once_with(configuration=conf, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_execute_many(self, mock_insert, _):
bq_cursor = self.hook.get_cursor()
bq_cursor.executemany("SELECT %(foo)s", [{"foo": "bar"}, {"foo": "baz"}])
assert mock_insert.call_count == 2
        mock_insert.assert_has_calls([
mock.call(
configuration={
'query': {
'query': "SELECT 'bar'",
'priority': 'INTERACTIVE',
'useLegacySql': True,
'schemaUpdateOptions': [],
}
},
project_id=PROJECT_ID,
),
mock.call(
configuration={
'query': {
'query': "SELECT 'baz'",
'priority': 'INTERACTIVE',
'useLegacySql': True,
'schemaUpdateOptions': [],
}
},
project_id=PROJECT_ID,
),
        ])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_description(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
with pytest.raises(NotImplementedError):
bq_cursor.description
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_close(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.close()
assert result is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_rowcount(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.rowcount
assert -1 == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.next")
def test_fetchone(self, mock_next, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.fetchone()
        assert mock_next.call_count == 1
assert mock_next.return_value == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.fetchone", side_effect=[1, 2, 3, None]
)
def test_fetchall(self, mock_fetchone, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.fetchall()
assert [1, 2, 3] == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.fetchone")
def test_fetchmany(self, mock_fetchone, mock_get_service):
side_effect_values = [1, 2, 3, None]
bq_cursor = self.hook.get_cursor()
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany()
assert [1] == result
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany(2)
assert [1, 2] == result
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany(5)
assert [1, 2, 3] == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_next_no_jobid(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = None
result = bq_cursor.next()
assert result is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_next_buffer(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = JOB_ID
bq_cursor.buffer = [1, 2]
result = bq_cursor.next()
assert 1 == result
result = bq_cursor.next()
assert 2 == result
bq_cursor.all_pages_loaded = True
result = bq_cursor.next()
assert result is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_next(self, mock_get_service):
mock_get_query_results = mock_get_service.return_value.jobs.return_value.getQueryResults
mock_execute = mock_get_query_results.return_value.execute
mock_execute.return_value = {
"rows": [
{"f": [{"v": "one"}, {"v": 1}]},
{"f": [{"v": "two"}, {"v": 2}]},
],
"pageToken": None,
"schema": {
"fields": [
{"name": "field_1", "type": "STRING"},
{"name": "field_2", "type": "INTEGER"},
]
},
}
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = JOB_ID
bq_cursor.location = LOCATION
result = bq_cursor.next()
assert ['one', 1] == result
result = bq_cursor.next()
assert ['two', 2] == result
mock_get_query_results.assert_called_once_with(
jobId=JOB_ID, location=LOCATION, pageToken=None, projectId='bq-project'
)
mock_execute.assert_called_once_with(num_retries=bq_cursor.num_retries)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.flush_results")
def test_next_no_rows(self, mock_flush_results, mock_get_service):
mock_get_query_results = mock_get_service.return_value.jobs.return_value.getQueryResults
mock_execute = mock_get_query_results.return_value.execute
mock_execute.return_value = {}
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = JOB_ID
result = bq_cursor.next()
assert result is None
mock_get_query_results.assert_called_once_with(
jobId=JOB_ID, location=None, pageToken=None, projectId='bq-project'
)
mock_execute.assert_called_once_with(num_retries=bq_cursor.num_retries)
assert mock_flush_results.call_count == 1
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.flush_results")
def test_flush_cursor_in_execute(self, _, mock_insert, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.execute("SELECT %(foo)s", {"foo": "bar"})
assert mock_insert.call_count == 1
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_flush_cursor(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.page_token = '456dcea9-fcbf-4f02-b570-83f5297c685e'
bq_cursor.job_id = 'c0a79ae4-0e72-4593-a0d0-7dbbf726f193'
bq_cursor.all_pages_loaded = True
bq_cursor.buffer = [('a', 100, 200), ('b', 200, 300)]
bq_cursor.flush_results()
assert bq_cursor.page_token is None
assert bq_cursor.job_id is None
assert not bq_cursor.all_pages_loaded
assert bq_cursor.buffer == []
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_arraysize(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
assert bq_cursor.buffersize is None
assert bq_cursor.arraysize == 1
bq_cursor.set_arraysize(10)
assert bq_cursor.buffersize == 10
assert bq_cursor.arraysize == 10
class TestDatasetsOperations(_BigQueryBaseTestClass):
def test_create_empty_dataset_no_dataset_id_err(self):
with pytest.raises(ValueError, match=r"Please specify `datasetId`"):
self.hook.create_empty_dataset(dataset_id=None, project_id=None)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_with_params(self, mock_client, mock_dataset):
self.hook.create_empty_dataset(project_id=PROJECT_ID, dataset_id=DATASET_ID, location=LOCATION)
expected_body = {
"location": LOCATION,
"datasetReference": {"datasetId": DATASET_ID, "projectId": PROJECT_ID},
}
api_repr = mock_dataset.from_api_repr
api_repr.assert_called_once_with(expected_body)
mock_client.return_value.create_dataset.assert_called_once_with(
dataset=api_repr.return_value, exists_ok=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_with_object(self, mock_client, mock_dataset):
dataset = {
"location": "LOCATION",
"datasetReference": {"datasetId": "DATASET_ID", "projectId": "PROJECT_ID"},
}
self.hook.create_empty_dataset(dataset_reference=dataset)
api_repr = mock_dataset.from_api_repr
api_repr.assert_called_once_with(dataset)
mock_client.return_value.create_dataset.assert_called_once_with(
dataset=api_repr.return_value, exists_ok=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_use_values_from_object(self, mock_client, mock_dataset):
dataset = {
"location": "LOCATION",
"datasetReference": {"datasetId": "DATASET_ID", "projectId": "PROJECT_ID"},
}
self.hook.create_empty_dataset(
dataset_reference=dataset,
location="Unknown location",
dataset_id="Fashionable Dataset",
project_id="Amazing Project",
)
api_repr = mock_dataset.from_api_repr
api_repr.assert_called_once_with(dataset)
mock_client.return_value.create_dataset.assert_called_once_with(
dataset=api_repr.return_value, exists_ok=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_dataset(self, mock_client):
_expected_result = {
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
}
expected_result = Dataset.from_api_repr(_expected_result)
mock_client.return_value.get_dataset.return_value = expected_result
result = self.hook.get_dataset(dataset_id=DATASET_ID, project_id=PROJECT_ID)
mock_client.return_value.get_dataset.assert_called_once_with(
dataset_ref=DatasetReference(PROJECT_ID, DATASET_ID)
)
assert result == expected_result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_datasets_list(self, mock_client):
datasets = [
{
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
},
{
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_1_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_1_test"},
},
]
return_value = [DatasetListItem(d) for d in datasets]
mock_client.return_value.list_datasets.return_value = return_value
result = self.hook.get_datasets_list(project_id=PROJECT_ID)
mock_client.return_value.list_datasets.assert_called_once_with(
project=PROJECT_ID,
include_all=False,
filter=None,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
)
for exp, res in zip(datasets, result):
assert res.full_dataset_id == exp["id"]
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_delete_dataset(self, mock_client):
delete_contents = True
self.hook.delete_dataset(
project_id=PROJECT_ID, dataset_id=DATASET_ID, delete_contents=delete_contents
)
mock_client.return_value.delete_dataset.assert_called_once_with(
dataset=DatasetReference(PROJECT_ID, DATASET_ID),
delete_contents=delete_contents,
retry=DEFAULT_RETRY,
not_found_ok=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_patch_dataset(self, mock_get_service):
dataset_resource = {"access": [{"role": "WRITER", "groupByEmail": "cloud-logs@google.com"}]}
method = mock_get_service.return_value.datasets.return_value.patch
self.hook.patch_dataset(
dataset_id=DATASET_ID, project_id=PROJECT_ID, dataset_resource=dataset_resource
)
method.assert_called_once_with(projectId=PROJECT_ID, datasetId=DATASET_ID, body=dataset_resource)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_update_dataset(self, mock_client, mock_dataset):
dataset_resource = {
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
}
method = mock_client.return_value.update_dataset
dataset = Dataset.from_api_repr(dataset_resource)
mock_dataset.from_api_repr.return_value = dataset
method.return_value = dataset
result = self.hook.update_dataset(
dataset_id=DATASET_ID,
project_id=PROJECT_ID,
dataset_resource=dataset_resource,
fields=["location"],
)
mock_dataset.from_api_repr.assert_called_once_with(dataset_resource)
method.assert_called_once_with(
dataset=dataset,
fields=["location"],
retry=DEFAULT_RETRY,
)
assert result == dataset
class TestTimePartitioningInRunJob(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_default(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load'].get('timePartitioning') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_with_auto_detect(self, mock_insert):
destination_project_dataset_table = "autodetect.table"
self.hook.run_load(destination_project_dataset_table, [], [], autodetect=True)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['autodetect'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_arg(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table=f"{DATASET_ID}.{TABLE_ID}",
schema_fields=[],
source_uris=[],
time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
)
configuration = {
'load': {
'autodetect': False,
'createDisposition': 'CREATE_IF_NEEDED',
'destinationTable': {'projectId': PROJECT_ID, 'datasetId': DATASET_ID, 'tableId': TABLE_ID},
'sourceFormat': 'CSV',
'sourceUris': [],
'writeDisposition': 'WRITE_EMPTY',
'ignoreUnknownValues': False,
'timePartitioning': {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
'skipLeadingRows': 0,
'fieldDelimiter': ',',
'quote': None,
'allowQuotedNewlines': False,
'encoding': 'UTF-8',
}
}
mock_insert.assert_called_once_with(configuration=configuration, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table=f"{DATASET_ID}.{TABLE_ID}",
time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
)
configuration = {
'query': {
'query': 'select 1',
'priority': 'INTERACTIVE',
'useLegacySql': True,
'timePartitioning': {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
'schemaUpdateOptions': [],
'destinationTable': {'projectId': PROJECT_ID, 'datasetId': DATASET_ID, 'tableId': TABLE_ID},
'allowLargeResults': False,
'flattenResults': None,
'writeDisposition': 'WRITE_EMPTY',
'createDisposition': 'CREATE_IF_NEEDED',
}
}
mock_insert.assert_called_once_with(configuration=configuration, project_id=PROJECT_ID)
def test_dollar_makes_partition(self):
tp_out = _cleanse_time_partitioning('test.teast$20170101', {})
expect = {'type': 'DAY'}
assert tp_out == expect
def test_extra_time_partitioning_options(self):
tp_out = _cleanse_time_partitioning(
'test.teast', {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
)
expect = {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
assert tp_out == expect
class TestClusteringInRunJob(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_default(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load'].get('clustering') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_arg(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
cluster_fields=['field1', 'field2'],
time_partitioning={'type': 'DAY'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['clustering'] == {'fields': ['field1', 'field2']}
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_default(self, mock_insert):
self.hook.run_query(sql='select 1')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query'].get('clustering') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
cluster_fields=['field1', 'field2'],
time_partitioning={'type': 'DAY'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['clustering'] == {'fields': ['field1', 'field2']}
class TestBigQueryHookLegacySql(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_hook_uses_legacy_sql_by_default(self, mock_insert, _):
self.hook.get_first('query')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useLegacySql'] is True
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_legacy_sql_override_propagates_properly(
self, mock_insert, mock_get_service, mock_get_creds_and_proj_id
):
bq_hook = BigQueryHook(use_legacy_sql=False)
bq_hook.get_first('query')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useLegacySql'] is False
class TestBigQueryHookRunWithConfiguration(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.LoadJob")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_run_with_configuration_location(self, mock_client, mock_job):
running_job_id = 'job_vjdi28vskdui2onru23'
location = 'asia-east1'
mock_job._JOB_TYPE = "load"
conf = {"load": {}}
self.hook.running_job_id = running_job_id
self.hook.location = location
self.hook.run_with_configuration(conf)
mock_client.assert_called_once_with(project_id=PROJECT_ID, location=location)
mock_job.from_api_repr.assert_called_once_with(
{
"configuration": conf,
"jobReference": {"jobId": mock.ANY, "projectId": PROJECT_ID, "location": location},
},
mock_client.return_value,
)
class TestBigQueryWithKMS(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_table_with_kms(self, mock_bq_client, mock_table):
schema_fields = [{"name": "id", "type": "STRING", "mode": "REQUIRED"}]
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.create_empty_table(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
schema_fields=schema_fields,
encryption_configuration=encryption_configuration,
)
body = {
"tableReference": {"tableId": TABLE_ID, 'projectId': PROJECT_ID, 'datasetId': DATASET_ID},
"schema": {"fields": schema_fields},
"encryptionConfiguration": encryption_configuration,
}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
exists_ok=True,
retry=DEFAULT_RETRY,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_with_kms(self, mock_create):
external_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
source_uris = ['test_data.csv']
source_format = 'CSV'
autodetect = False
compression = 'NONE'
ignore_unknown_values = False
max_bad_records = 10
skip_leading_rows = 1
field_delimiter = ','
quote_character = None
allow_quoted_newlines = False
allow_jagged_rows = False
encoding = "UTF-8"
labels = {'label1': 'test1', 'label2': 'test2'}
schema_fields = [{'mode': 'REQUIRED', 'name': 'id', 'type': 'STRING', 'description': None}]
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.create_external_table(
external_project_dataset_table=external_project_dataset_table,
source_uris=source_uris,
source_format=source_format,
autodetect=autodetect,
compression=compression,
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records,
skip_leading_rows=skip_leading_rows,
field_delimiter=field_delimiter,
quote_character=quote_character,
allow_jagged_rows=allow_jagged_rows,
encoding=encoding,
allow_quoted_newlines=allow_quoted_newlines,
labels=labels,
schema_fields=schema_fields,
encryption_configuration=encryption_configuration,
)
body = {
'externalDataConfiguration': {
'autodetect': autodetect,
'sourceFormat': source_format,
'sourceUris': source_uris,
'compression': compression,
'ignoreUnknownValues': ignore_unknown_values,
'schema': {'fields': schema_fields},
'maxBadRecords': max_bad_records,
'csvOptions': {
'skipLeadingRows': skip_leading_rows,
'fieldDelimiter': field_delimiter,
'quote': quote_character,
'allowQuotedNewlines': allow_quoted_newlines,
'allowJaggedRows': allow_jagged_rows,
'encoding': encoding,
},
},
'tableReference': {
'projectId': PROJECT_ID,
'datasetId': DATASET_ID,
'tableId': TABLE_ID,
},
'labels': labels,
"encryptionConfiguration": encryption_configuration,
}
mock_create.assert_called_once_with(
table_resource=body,
project_id=PROJECT_ID,
location=None,
exists_ok=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_update_table(self, mock_client, mock_table):
description_patched = 'Test description.'
expiration_time_patched = 2524608000000
friendly_name_patched = 'Test friendly name.'
labels_patched = {'label1': 'test1', 'label2': 'test2'}
schema_patched = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'balance', 'type': 'FLOAT', 'mode': 'NULLABLE'},
{'name': 'new_field', 'type': 'STRING', 'mode': 'NULLABLE'},
]
time_partitioning_patched = {'expirationMs': 10000000}
require_partition_filter_patched = True
view_patched = {
'query': "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
'useLegacySql': False,
}
body = {
"tableReference": {
"projectId": PROJECT_ID,
"datasetId": DATASET_ID,
"tableId": TABLE_ID,
},
"description": description_patched,
"expirationTime": expiration_time_patched,
"friendlyName": friendly_name_patched,
"labels": labels_patched,
"schema": {"fields": schema_patched},
"timePartitioning": time_partitioning_patched,
"view": view_patched,
"requirePartitionFilter": require_partition_filter_patched,
}
fields = list(body.keys())
self.hook.update_table(
table_resource=body,
fields=fields,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
)
mock_table.from_api_repr.assert_called_once_with(body)
mock_client.return_value.update_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, fields=fields
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_query(sql='query', encryption_configuration=encryption_configuration)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['query']['destinationEncryptionConfiguration'] is encryption_configuration
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_copy_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_copy(
source_project_dataset_tables='p.d.st',
destination_project_dataset_table='p.d.dt',
encryption_configuration=encryption_configuration,
)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['copy']['destinationEncryptionConfiguration'] is encryption_configuration
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_load(
destination_project_dataset_table='p.d.dt',
source_uris=['abc.csv'],
autodetect=True,
encryption_configuration=encryption_configuration,
)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['load']['destinationEncryptionConfiguration'] is encryption_configuration
)
class TestBigQueryBaseCursorMethodsDeprecationWarning(unittest.TestCase):
@parameterized.expand(
[
("create_empty_table",),
("create_empty_dataset",),
("get_dataset_tables",),
("delete_dataset",),
("create_external_table",),
("patch_table",),
("insert_all",),
("update_dataset",),
("patch_dataset",),
("get_dataset_tables_list",),
("get_datasets_list",),
("get_dataset",),
("run_grant_dataset_view_access",),
("run_table_upsert",),
("run_table_delete",),
("get_tabledata",),
("get_schema",),
("poll_job_complete",),
("cancel_query",),
("run_with_configuration",),
("run_load",),
("run_copy",),
("run_extract",),
("run_query",),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook")
def test_deprecation_warning(self, func_name, mock_bq_hook):
args, kwargs = [1], {"param1": "val1"}
new_path = re.escape(f"`airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.{func_name}`")
message_pattern = fr"This method is deprecated\.\s+Please use {new_path}"
message_regex = re.compile(message_pattern, re.MULTILINE)
mocked_func = getattr(mock_bq_hook, func_name)
bq_cursor = BigQueryCursor(mock.MagicMock(), PROJECT_ID, mock_bq_hook)
func = getattr(bq_cursor, func_name)
with pytest.warns(DeprecationWarning, match=message_regex):
_ = func(*args, **kwargs)
mocked_func.assert_called_once_with(*args, **kwargs)
assert re.search(f".*{new_path}.*", func.__doc__)
class TestBigQueryWithLabelsAndDescription(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_labels(self, mock_insert):
labels = {'label1': 'test1', 'label2': 'test2'}
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
labels=labels,
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['destinationTableProperties']['labels'] is labels
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_description(self, mock_insert):
description = "Test Description"
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
description=description,
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['destinationTableProperties']['description'] is description
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_labels(self, mock_create):
labels = {'label1': 'test1', 'label2': 'test2'}
self.hook.create_external_table(
external_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
labels=labels,
)
_, kwargs = mock_create.call_args
self.assertDictEqual(kwargs['table_resource']['labels'], labels)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_description(self, mock_create):
description = "Test Description"
self.hook.create_external_table(
external_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
description=description,
)
_, kwargs = mock_create.call_args
assert kwargs['table_resource']['description'] is description
| true
| true
|
7905747535ec9cb9dbd9f0843e083b2ae9fb07f1
| 20,354
|
py
|
Python
|
yolo3_video.py
|
BG4WCE/keras-yolo3
|
be5afc9a8ac7c353941072960e1c099009946895
|
[
"MIT"
] | null | null | null |
yolo3_video.py
|
BG4WCE/keras-yolo3
|
be5afc9a8ac7c353941072960e1c099009946895
|
[
"MIT"
] | null | null | null |
yolo3_video.py
|
BG4WCE/keras-yolo3
|
be5afc9a8ac7c353941072960e1c099009946895
|
[
"MIT"
] | null | null | null |
import argparse
import os
import numpy as np
from keras.layers import Conv2D, Input, BatchNormalization, LeakyReLU, ZeroPadding2D, UpSampling2D
from keras.layers.merge import add, concatenate
from keras.models import Model
import struct
import cv2
import time
from pathlib import Path
#np.set_printoptions(threshold=np.nan)
np.set_printoptions(threshold=30)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
argparser = argparse.ArgumentParser(
description='test yolov3 network with coco weights')
argparser.add_argument(
'-w',
'--weights',
help='path to weights file')
argparser.add_argument(
'-v',
'--video',
help='path to video file')
class WeightReader:
def __init__(self, weight_file):
with open(weight_file, 'rb') as w_f:
major, = struct.unpack('i', w_f.read(4))
minor, = struct.unpack('i', w_f.read(4))
revision, = struct.unpack('i', w_f.read(4))
if (major*10 + minor) >= 2 and major < 1000 and minor < 1000:
w_f.read(8)
else:
w_f.read(4)
transpose = (major > 1000) or (minor > 1000)
binary = w_f.read()
self.offset = 0
self.all_weights = np.frombuffer(binary, dtype='float32')
def read_bytes(self, size):
self.offset = self.offset + size
return self.all_weights[self.offset-size:self.offset]
def load_weights(self, model):
for i in range(106):
try:
conv_layer = model.get_layer('conv_' + str(i))
print("loading weights of convolution #" + str(i))
if i not in [81, 93, 105]:
norm_layer = model.get_layer('bnorm_' + str(i))
size = np.prod(norm_layer.get_weights()[0].shape)
beta = self.read_bytes(size) # bias
gamma = self.read_bytes(size) # scale
mean = self.read_bytes(size) # mean
var = self.read_bytes(size) # variance
weights = norm_layer.set_weights([gamma, beta, mean, var])
if len(conv_layer.get_weights()) > 1:
bias = self.read_bytes(np.prod(conv_layer.get_weights()[1].shape))
kernel = self.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2,3,1,0])
conv_layer.set_weights([kernel, bias])
else:
kernel = self.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2,3,1,0])
conv_layer.set_weights([kernel])
except ValueError:
print("no convolution #" + str(i))
def reset(self):
self.offset = 0
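# Note on the layout assumed by WeightReader above: the darknet weight file opens
# with a small int32 header (major/minor/revision plus a seen-images counter whose
# width depends on the version). Each batch-normalised conv layer then stores its
# beta, gamma, mean and variance vectors (in that file order) followed by the conv
# kernel in darknet's (out, in, h, w) order, which load_weights transposes into the
# (h, w, in, out) layout Keras expects before calling set_weights.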
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, objness = None, classes = None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.objness = objness
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def _conv_block(inp, convs, skip=True):
x = inp
count = 0
for conv in convs:
if count == (len(convs) - 2) and skip:
skip_connection = x
count += 1
if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding as darknet prefer left and top
x = Conv2D(conv['filter'],
conv['kernel'],
strides=conv['stride'],
padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding as darknet prefer left and top
name='conv_' + str(conv['layer_idx']),
use_bias=False if conv['bnorm'] else True)(x)
if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
return add([skip_connection, x]) if skip else x
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = intersect_w * intersect_h
w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin
w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
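# Quick sanity check of the IoU above with made-up boxes: two 1x1 boxes offset by
# half a width share an area of 0.5, so IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
#   bbox_iou(BoundBox(0, 0, 1, 1), BoundBox(0.5, 0, 1.5, 1))  # ~0.333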
def make_yolov3_model():
input_image = Input(shape=(None, None, 3))
# Layer 0 => 4
x = _conv_block(input_image, [{'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
{'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
{'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
{'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3}])
# Layer 5 => 8
x = _conv_block(x, [{'filter': 128, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 5},
{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 6},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 7}])
# Layer 9 => 11
x = _conv_block(x, [{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 9},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 10}])
# Layer 12 => 15
x = _conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 12},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 13},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}])
# Layer 16 => 36
for i in range(7):
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 16+i*3},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 17+i*3}])
skip_36 = x
# Layer 37 => 40
x = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 37},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 38},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 39}])
# Layer 41 => 61
for i in range(7):
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 41+i*3},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 42+i*3}])
skip_61 = x
# Layer 62 => 65
x = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 62},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 63},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 64}])
# Layer 66 => 74
for i in range(3):
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 66+i*3},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 67+i*3}])
# Layer 75 => 79
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 75},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 76},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 77},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 78},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 79}], skip=False)
# Layer 80 => 82
yolo_82 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 80},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 81}], skip=False)
# Layer 83 => 86
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 84}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_61])
# Layer 87 => 91
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 87},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 88},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 89},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 90},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 91}], skip=False)
# Layer 92 => 94
yolo_94 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 92},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 93}], skip=False)
# Layer 95 => 98
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 96}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_36])
# Layer 99 => 106
yolo_106 = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 99},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 100},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 101},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 102},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 103},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 104},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 105}], skip=False)
model = Model(input_image, [yolo_82, yolo_94, yolo_106])
return model
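# The three outputs returned above are the usual YOLOv3 detection heads: yolo_82
# (stride 32, coarse grid for large objects), yolo_94 (stride 16) and yolo_106
# (stride 8, fine grid for small objects). Each has 255 output channels, i.e.
# 3 anchors * (4 box coordinates + 1 objectness score + 80 COCO classes).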
def preprocess_input(image, net_h, net_w):
#new_h, new_w, _ = image.shape
new_h = 480
new_w = 640
# determine the new size of the image
if (float(net_w)/new_w) < (float(net_h)/new_h):
new_h = (new_h * net_w)/new_w
new_w = net_w
else:
new_w = (new_w * net_h)/new_h
new_h = net_h
# resize the image to the new size
resized = cv2.resize(image[:,:,::-1]/255., (int(new_w), int(new_h)))
# embed the image into the standard letter box
new_image = np.ones((net_h, net_w, 3)) * 0.5
new_image[int((net_h-new_h)//2):int((net_h+new_h)//2), int((net_w-new_w)//2):int((net_w+new_w)//2), :] = resized
new_image = np.expand_dims(new_image, 0)
return new_image
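# Example of the letterboxing above with the hard-coded 640x480 frame size and a
# 416x416 network input: the frame is resized to 416x312 and pasted into the middle
# of a grey (0.5) 416x416 canvas, leaving 52-pixel bands at the top and bottom.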
def decode_netout(netout, anchors, obj_thresh, nms_thresh, net_h, net_w):
grid_h, grid_w = netout.shape[:2]
nb_box = 3
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
nb_class = netout.shape[-1] - 5
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2])
netout[..., 4:] = _sigmoid(netout[..., 4:])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * netout[..., 5:]
netout[..., 5:] *= netout[..., 5:] > obj_thresh
for i in range(grid_h*grid_w):
row = i / grid_w
col = i % grid_w
for b in range(nb_box):
# 4th element is objectness score
objectness = netout[int(row)][int(col)][b][4]
#objectness = netout[..., :4]
if(objectness.all() <= obj_thresh): continue
# first 4 elements are x, y, w, and h
x, y, w, h = netout[int(row)][int(col)][b][:4]
x = (col + x) / grid_w # center position, unit: image width
y = (row + y) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height
# last elements are class probabilities
classes = netout[int(row)][col][b][5:]
box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
#box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, None, classes)
boxes.append(box)
return boxes
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
if (float(net_w)/image_w) < (float(net_h)/image_h):
new_w = net_w
new_h = (image_h*net_w)/image_w
else:
        new_h = net_h
new_w = (image_w*net_h)/image_h
for i in range(len(boxes)):
x_offset, x_scale = (net_w - new_w)/2./net_w, float(new_w)/net_w
y_offset, y_scale = (net_h - new_h)/2./net_h, float(new_h)/net_h
boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0: continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
boxes[index_j].classes[c] = 0
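# Note: do_nms above suppresses overlapping detections by zeroing the per-class
# scores of the lower-scoring boxes rather than removing them from the list;
# draw_boxes below then simply skips any box whose scores all stay below the
# threshold.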
def draw_boxes(image, boxes, labels, obj_thresh):
#highest_conf_label = ''
#highest_conf = 0
for box in boxes:
label_str = ''
label = -1
for i in range(len(labels)):
if box.classes[i] > obj_thresh:
label_str += labels[i]
label = i
print(labels[i] + ': ' + str(box.classes[i]*100) + '%')
#if box.classes[i] > highest_conf:
# highest_conf = box.classes[i]
# highest_conf_label = labels[i]
if label >= 0:
cv2.rectangle(image, (box.xmin,box.ymin), (box.xmax,box.ymax), (0,255,0), 3)
#print(type(box.get_score()))
#print(np.format_float_positional(box.get_score(), precision=2))
cv2.putText(image,
label_str + ' ' + str(np.format_float_positional(box.get_score(), precision=2)),
(box.xmin, box.ymin - 13),
cv2.FONT_HERSHEY_SIMPLEX,
1e-3 * image.shape[0],
(0,255,0), 2)
return image
def _main_(args):
weights_path = args.weights
video_path = args.video
# set some parameters
net_h, net_w = 416, 416
obj_thresh, nms_thresh = 0.65, 0.45
anchors = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", \
"boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", \
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", \
"backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", \
"sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", \
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", \
"apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", \
"chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", \
"remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", \
"book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
# make the yolov3 model to predict 80 classes on COCO
yolov3 = make_yolov3_model()
# load the weights trained on COCO into the model
weight_reader = WeightReader(weights_path)
weight_reader.load_weights(yolov3)
'''
# set webcam
cap = cv2.VideoCapture(1)
while(True):
ret, image = cap.read()
#image_h, image_w, _ = image.shape
image_w = cap.get(3)
image_h = cap.get(4)
if cv2.waitKey(1) & 0xFF == ord(' '):
new_image = preprocess_input(image, net_h, net_w)
yolos = yolov3.predict(new_image)
boxes = []
for i in range(len(yolos)):
boxes += decode_netout(yolos[i][0], anchors[i], obj_thresh, nms_thresh, net_h, net_w)
correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
do_nms(boxes, nms_thresh)
draw_boxes_play_music(image, boxes, labels, obj_thresh)
cv2.imshow('frame',image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
'''
# preprocess the video
cap = cv2.VideoCapture(video_path)
print("open video file from", video_path)
if Path(video_path).is_file():
print("Video file exists")
else:
print("cannot find video file")
print(cap.isOpened())
while(cap.isOpened()):
        ret, image = cap.read()
        if not ret:
            break
image_w = cap.get(3)
image_h = cap.get(4)
image = cv2.flip(image, 0)
new_image = preprocess_input(image, net_h, net_w)
yolos = yolov3.predict(new_image)
boxes = []
for i in range(len(yolos)):
# decode the output of the network
boxes += decode_netout(yolos[i][0], anchors[i], obj_thresh, nms_thresh, net_h, net_w)
# correct the sizes of the bounding boxes
correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
# suppress non-maximal boxes
do_nms(boxes, nms_thresh)
# draw bounding boxes on the image using labels
draw_boxes(image, boxes, labels, obj_thresh)
# write the image with bounding boxes to video
cv2.imshow('frame',image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
args = argparser.parse_args()
_main_(args)
| 42.228216
| 136
| 0.532131
|
import argparse
import os
import numpy as np
from keras.layers import Conv2D, Input, BatchNormalization, LeakyReLU, ZeroPadding2D, UpSampling2D
from keras.layers.merge import add, concatenate
from keras.models import Model
import struct
import cv2
import time
from pathlib import Path
np.set_printoptions(threshold=30)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
argparser = argparse.ArgumentParser(
description='test yolov3 network with coco weights')
argparser.add_argument(
'-w',
'--weights',
help='path to weights file')
argparser.add_argument(
'-v',
'--video',
help='path to video file')
class WeightReader:
def __init__(self, weight_file):
with open(weight_file, 'rb') as w_f:
major, = struct.unpack('i', w_f.read(4))
minor, = struct.unpack('i', w_f.read(4))
revision, = struct.unpack('i', w_f.read(4))
if (major*10 + minor) >= 2 and major < 1000 and minor < 1000:
w_f.read(8)
else:
w_f.read(4)
transpose = (major > 1000) or (minor > 1000)
binary = w_f.read()
self.offset = 0
self.all_weights = np.frombuffer(binary, dtype='float32')
def read_bytes(self, size):
self.offset = self.offset + size
return self.all_weights[self.offset-size:self.offset]
def load_weights(self, model):
for i in range(106):
try:
conv_layer = model.get_layer('conv_' + str(i))
print("loading weights of convolution #" + str(i))
if i not in [81, 93, 105]:
norm_layer = model.get_layer('bnorm_' + str(i))
size = np.prod(norm_layer.get_weights()[0].shape)
beta = self.read_bytes(size)
gamma = self.read_bytes(size)
mean = self.read_bytes(size)
var = self.read_bytes(size)
weights = norm_layer.set_weights([gamma, beta, mean, var])
if len(conv_layer.get_weights()) > 1:
bias = self.read_bytes(np.prod(conv_layer.get_weights()[1].shape))
kernel = self.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2,3,1,0])
conv_layer.set_weights([kernel, bias])
else:
kernel = self.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2,3,1,0])
conv_layer.set_weights([kernel])
except ValueError:
print("no convolution #" + str(i))
def reset(self):
self.offset = 0
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, objness = None, classes = None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.objness = objness
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def _conv_block(inp, convs, skip=True):
x = inp
count = 0
for conv in convs:
if count == (len(convs) - 2) and skip:
skip_connection = x
count += 1
if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x)
x = Conv2D(conv['filter'],
conv['kernel'],
strides=conv['stride'],
padding='valid' if conv['stride'] > 1 else 'same',
name='conv_' + str(conv['layer_idx']),
use_bias=False if conv['bnorm'] else True)(x)
if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
return add([skip_connection, x]) if skip else x
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = intersect_w * intersect_h
w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin
w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
def make_yolov3_model():
input_image = Input(shape=(None, None, 3))
x = _conv_block(input_image, [{'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
{'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
{'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
{'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3}])
x = _conv_block(x, [{'filter': 128, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 5},
{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 6},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 7}])
x = _conv_block(x, [{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 9},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 10}])
x = _conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 12},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 13},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}])
for i in range(7):
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 16+i*3},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 17+i*3}])
skip_36 = x
x = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 37},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 38},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 39}])
for i in range(7):
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 41+i*3},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 42+i*3}])
skip_61 = x
x = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 62},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 63},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 64}])
for i in range(3):
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 66+i*3},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 67+i*3}])
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 75},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 76},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 77},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 78},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 79}], skip=False)
yolo_82 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 80},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 81}], skip=False)
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 84}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_61])
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 87},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 88},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 89},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 90},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 91}], skip=False)
yolo_94 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 92},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 93}], skip=False)
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 96}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_36])
yolo_106 = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 99},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 100},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 101},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 102},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 103},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 104},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 105}], skip=False)
model = Model(input_image, [yolo_82, yolo_94, yolo_106])
return model
def preprocess_input(image, net_h, net_w):
new_h = 480
new_w = 640
if (float(net_w)/new_w) < (float(net_h)/new_h):
new_h = (new_h * net_w)/new_w
new_w = net_w
else:
new_w = (new_w * net_h)/new_h
new_h = net_h
resized = cv2.resize(image[:,:,::-1]/255., (int(new_w), int(new_h)))
new_image = np.ones((net_h, net_w, 3)) * 0.5
new_image[int((net_h-new_h)//2):int((net_h+new_h)//2), int((net_w-new_w)//2):int((net_w+new_w)//2), :] = resized
new_image = np.expand_dims(new_image, 0)
return new_image
def decode_netout(netout, anchors, obj_thresh, nms_thresh, net_h, net_w):
grid_h, grid_w = netout.shape[:2]
nb_box = 3
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
nb_class = netout.shape[-1] - 5
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2])
netout[..., 4:] = _sigmoid(netout[..., 4:])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * netout[..., 5:]
netout[..., 5:] *= netout[..., 5:] > obj_thresh
for i in range(grid_h*grid_w):
row = i / grid_w
col = i % grid_w
for b in range(nb_box):
objectness = netout[int(row)][int(col)][b][4]
if(objectness.all() <= obj_thresh): continue
x, y, w, h = netout[int(row)][int(col)][b][:4]
x = (col + x) / grid_w
y = (row + y) / grid_h
w = anchors[2 * b + 0] * np.exp(w) / net_w
h = anchors[2 * b + 1] * np.exp(h) / net_h
classes = netout[int(row)][col][b][5:]
box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
boxes.append(box)
return boxes
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
if (float(net_w)/image_w) < (float(net_h)/image_h):
new_w = net_w
new_h = (image_h*net_w)/image_w
else:
        new_h = net_h
new_w = (image_w*net_h)/image_h
for i in range(len(boxes)):
x_offset, x_scale = (net_w - new_w)/2./net_w, float(new_w)/net_w
y_offset, y_scale = (net_h - new_h)/2./net_h, float(new_h)/net_h
boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0: continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
boxes[index_j].classes[c] = 0
def draw_boxes(image, boxes, labels, obj_thresh):
for box in boxes:
label_str = ''
label = -1
for i in range(len(labels)):
if box.classes[i] > obj_thresh:
label_str += labels[i]
label = i
print(labels[i] + ': ' + str(box.classes[i]*100) + '%')
if label >= 0:
cv2.rectangle(image, (box.xmin,box.ymin), (box.xmax,box.ymax), (0,255,0), 3)
cv2.putText(image,
label_str + ' ' + str(np.format_float_positional(box.get_score(), precision=2)),
(box.xmin, box.ymin - 13),
cv2.FONT_HERSHEY_SIMPLEX,
1e-3 * image.shape[0],
(0,255,0), 2)
return image
def _main_(args):
weights_path = args.weights
video_path = args.video
net_h, net_w = 416, 416
obj_thresh, nms_thresh = 0.65, 0.45
anchors = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", \
"boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", \
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", \
"backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", \
"sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", \
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", \
"apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", \
"chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", \
"remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", \
"book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
yolov3 = make_yolov3_model()
weight_reader = WeightReader(weights_path)
weight_reader.load_weights(yolov3)
cap = cv2.VideoCapture(video_path)
print("open video file from", video_path)
if Path(video_path).is_file():
print("Video file exists")
else:
print("cannot find video file")
print(cap.isOpened())
while(cap.isOpened()):
        ret, image = cap.read()
        if not ret:
            break
image_w = cap.get(3)
image_h = cap.get(4)
image = cv2.flip(image, 0)
new_image = preprocess_input(image, net_h, net_w)
yolos = yolov3.predict(new_image)
boxes = []
for i in range(len(yolos)):
boxes += decode_netout(yolos[i][0], anchors[i], obj_thresh, nms_thresh, net_h, net_w)
correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
do_nms(boxes, nms_thresh)
draw_boxes(image, boxes, labels, obj_thresh)
cv2.imshow('frame',image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
args = argparser.parse_args()
_main_(args)
| true
| true
|
790575afb1e0a7c80b439a0b93665148b0e79233
| 1,500
|
py
|
Python
|
src/data/process_functions.py
|
acwooding/docmap_playground
|
388c0f357cadb9b6e4b4b6e25fb131713111dc48
|
[
"MIT"
] | null | null | null |
src/data/process_functions.py
|
acwooding/docmap_playground
|
388c0f357cadb9b6e4b4b6e25fb131713111dc48
|
[
"MIT"
] | null | null | null |
src/data/process_functions.py
|
acwooding/docmap_playground
|
388c0f357cadb9b6e4b4b6e25fb131713111dc48
|
[
"MIT"
] | null | null | null |
"""
Custom dataset processing/generation functions should be added to this file
"""
import pathlib
from sklearn.datasets import fetch_20newsgroups
from functools import partial
from src import workflow, paths
from src.log import logger
import src.log.debug
from tqdm.auto import tqdm
from .. import paths
from ..log import logger
__all__ = [
'process_20_newsgroups'
]
def process_20_newsgroups(*, extract_dir='20_newsgroups',
metadata=None, unpack_dir=None,
opts={"subset":"all", "remove":"('headers', 'footers', 'quotes')"}):
"""
Process 20 newsgroups into (data, target, metadata) format.
Parameters
----------
unpack_dir: path
The interim parent directory the dataset files have been unpacked into.
extract_dir: str
        Name of the directory of the unpacked files relative to the unpack_dir.
    opts: dict, default {"subset": "all", "remove": "('headers', 'footers', 'quotes')"}
Options to pass to sklearn.datasets.fetch_20newsgroups.
Returns
-------
A tuple:
(data, target, additional_metadata)
"""
if metadata is None:
metadata = {}
if unpack_dir is None:
unpack_dir = paths['interim_data_path']
else:
unpack_dir = pathlib.Path(unpack_dir)
data_dir = unpack_dir / f"{extract_dir}"
news = fetch_20newsgroups(**opts)
metadata['target_names'] = news.target_names
return news.data, news.target, metadata
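# Hypothetical usage sketch (not part of the original module): fetch the corpus and
# unpack the (data, target, metadata) tuple; 'target_names' is added to the metadata.
if __name__ == '__main__':
    data, target, meta = process_20_newsgroups(
        opts={"subset": "train", "remove": ("headers", "footers", "quotes")})
    print(len(data), "documents across", len(meta["target_names"]), "newsgroups")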
| 25.423729
| 94
| 0.662667
|
import pathlib
from sklearn.datasets import fetch_20newsgroups
from functools import partial
from src import workflow, paths
from src.log import logger
import src.log.debug
from tqdm.auto import tqdm
from .. import paths
from ..log import logger
__all__ = [
'process_20_newsgroups'
]
def process_20_newsgroups(*, extract_dir='20_newsgroups',
metadata=None, unpack_dir=None,
opts={"subset":"all", "remove":"('headers', 'footers', 'quotes')"}):
if metadata is None:
metadata = {}
if unpack_dir is None:
unpack_dir = paths['interim_data_path']
else:
unpack_dir = pathlib.Path(unpack_dir)
data_dir = unpack_dir / f"{extract_dir}"
news = fetch_20newsgroups(**opts)
metadata['target_names'] = news.target_names
return news.data, news.target, metadata
| true
| true
|
790577160bc25eb556d764dd1eb42760f709d08b
| 2,425
|
py
|
Python
|
blogs/beamadvent/day2a.py
|
laurenzberger/training-data-analyst
|
3e2ef4668c5088ab50ad50a4f29673c88fb1bcd3
|
[
"Apache-2.0"
] | 6,140
|
2016-05-23T16:09:35.000Z
|
2022-03-30T19:00:46.000Z
|
blogs/beamadvent/day2a.py
|
laurenzberger/training-data-analyst
|
3e2ef4668c5088ab50ad50a4f29673c88fb1bcd3
|
[
"Apache-2.0"
] | 1,384
|
2016-07-08T22:26:41.000Z
|
2022-03-24T16:39:43.000Z
|
blogs/beamadvent/day2a.py
|
laurenzberger/training-data-analyst
|
3e2ef4668c5088ab50ad50a4f29673c88fb1bcd3
|
[
"Apache-2.0"
] | 5,110
|
2016-05-27T13:45:18.000Z
|
2022-03-31T18:40:42.000Z
|
#!/usr/bin/env python3
"""
Copyright Google Inc. 2019
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import apache_beam as beam
import numpy as np
import argparse, logging
def handle_ints(ints, startpos=0):
if ints[startpos] == 99:
return ints
x1 = ints[startpos+1]
x2 = ints[startpos+2]
outpos = ints[startpos+3]
if ints[startpos] == 1:
ints[outpos] = ints[x1] + ints[x2]
elif ints[startpos] == 2:
ints[outpos] = ints[x1] * ints[x2]
return handle_ints(ints, startpos+4)
def handle_intcode(intcode):
input = [int(x) for x in intcode.split(',')]
output = handle_ints(input)
return ','.join([str(x) for x in output])
def run_1202(intcode):
input = [int(x) for x in intcode.split(',')]
input[1] = 12
input[2] = 2
output = handle_ints(input)
return output[0]
def try_working():
assert handle_intcode('1,0,0,0,99') == '2,0,0,0,99'
assert handle_intcode('2,3,0,3,99') == '2,3,0,6,99'
assert handle_intcode('2,4,4,5,99,0') == '2,4,4,5,99,9801'
assert handle_intcode('1,1,1,4,99,5,6,0,99') == '30,1,1,4,2,5,6,0,99'
print('Assertions passed')
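def demo_intcode():
    # Illustrative helper (not part of the original script): runs handle_intcode on the
    # worked example from the puzzle text; the expected output was computed by hand.
    program = '1,9,10,3,2,3,11,0,99,30,40,50'
    result = handle_intcode(program)
    assert result == '3500,9,10,70,2,3,11,0,99,30,40,50'
    return result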
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Solutions to https://adventofcode.com/2019/ using Apache Beam')
parser.add_argument('--input', required=True, help='Specify input file')
parser.add_argument('--output', required=True, help='Specify output file')
options = parser.parse_args()
runner = 'DirectRunner' # run Beam on local machine, but write outputs to cloud
logging.basicConfig(level=getattr(logging, 'INFO', None))
opts = beam.pipeline.PipelineOptions(flags=[])
p = beam.Pipeline(runner, options=opts)
(p
| 'read' >> beam.io.textio.ReadFromText(options.input)
| 'run_1202' >> beam.Map(run_1202)
| 'output' >> beam.io.textio.WriteToText(options.output)
)
job = p.run()
if runner == 'DirectRunner':
job.wait_until_finish()
| 34.15493
| 112
| 0.684536
|
import apache_beam as beam
import numpy as np
import argparse, logging
def handle_ints(ints, startpos=0):
if ints[startpos] == 99:
return ints
x1 = ints[startpos+1]
x2 = ints[startpos+2]
outpos = ints[startpos+3]
if ints[startpos] == 1:
ints[outpos] = ints[x1] + ints[x2]
elif ints[startpos] == 2:
ints[outpos] = ints[x1] * ints[x2]
return handle_ints(ints, startpos+4)
def handle_intcode(intcode):
input = [int(x) for x in intcode.split(',')]
output = handle_ints(input)
return ','.join([str(x) for x in output])
def run_1202(intcode):
input = [int(x) for x in intcode.split(',')]
input[1] = 12
input[2] = 2
output = handle_ints(input)
return output[0]
def try_working():
assert handle_intcode('1,0,0,0,99') == '2,0,0,0,99'
assert handle_intcode('2,3,0,3,99') == '2,3,0,6,99'
assert handle_intcode('2,4,4,5,99,0') == '2,4,4,5,99,9801'
assert handle_intcode('1,1,1,4,99,5,6,0,99') == '30,1,1,4,2,5,6,0,99'
print('Assertions passed')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Solutions to https://adventofcode.com/2019/ using Apache Beam')
parser.add_argument('--input', required=True, help='Specify input file')
parser.add_argument('--output', required=True, help='Specify output file')
options = parser.parse_args()
runner = 'DirectRunner'
logging.basicConfig(level=getattr(logging, 'INFO', None))
opts = beam.pipeline.PipelineOptions(flags=[])
p = beam.Pipeline(runner, options=opts)
(p
| 'read' >> beam.io.textio.ReadFromText(options.input)
| 'run_1202' >> beam.Map(run_1202)
| 'output' >> beam.io.textio.WriteToText(options.output)
)
job = p.run()
if runner == 'DirectRunner':
job.wait_until_finish()
| true
| true
|
79057734c28313400a8b07ebf7f0004fe4fa55c7
| 2,453
|
py
|
Python
|
manimlib/utils/rate_functions.py
|
sunkisser/manim
|
39673a80d7bbbea258c35ce5a1d37a0911aae4f1
|
[
"MIT"
] | 1
|
2022-03-23T06:27:22.000Z
|
2022-03-23T06:27:22.000Z
|
manimlib/utils/rate_functions.py
|
sunkisser/manim
|
39673a80d7bbbea258c35ce5a1d37a0911aae4f1
|
[
"MIT"
] | null | null | null |
manimlib/utils/rate_functions.py
|
sunkisser/manim
|
39673a80d7bbbea258c35ce5a1d37a0911aae4f1
|
[
"MIT"
] | null | null | null |
from typing import Callable
import numpy as np
from manimlib.utils.bezier import bezier
def linear(t: float) -> float:
return t
def smooth(t: float) -> float:
# Zero first and second derivatives at t=0 and t=1.
# Equivalent to bezier([0, 0, 0, 1, 1, 1])
s = 1 - t
return (t**3) * (10 * s * s + 5 * s * t + t * t)
def rush_into(t: float) -> float:
return 2 * smooth(0.5 * t)
def rush_from(t: float) -> float:
return 2 * smooth(0.5 * (t + 1)) - 1
def slow_into(t: float) -> float:
return np.sqrt(1 - (1 - t) * (1 - t))
def double_smooth(t: float) -> float:
if t < 0.5:
return 0.5 * smooth(2 * t)
else:
return 0.5 * (1 + smooth(2 * t - 1))
def there_and_back(t: float) -> float:
new_t = 2 * t if t < 0.5 else 2 * (1 - t)
return smooth(new_t)
def there_and_back_with_pause(t: float, pause_ratio: float = 1. / 3) -> float:
a = 1. / pause_ratio
if t < 0.5 - pause_ratio / 2:
return smooth(a * t)
elif t < 0.5 + pause_ratio / 2:
return 1
else:
return smooth(a - a * t)
def running_start(t: float, pull_factor: float = -0.5) -> float:
return bezier([0, 0, pull_factor, pull_factor, 1, 1, 1])(t)
def not_quite_there(
func: Callable[[float], float] = smooth,
proportion: float = 0.7
) -> Callable[[float], float]:
def result(t):
return proportion * func(t)
return result
def wiggle(t: float, wiggles: float = 2) -> float:
return there_and_back(t) * np.sin(wiggles * np.pi * t)
def squish_rate_func(
func: Callable[[float], float],
a: float = 0.4,
b: float = 0.6
) -> Callable[[float], float]:
def result(t):
if a == b:
return a
elif t < a:
return func(0)
elif t > b:
return func(1)
else:
return func((t - a) / (b - a))
return result
# Stylistically, should this take parameters (with default values)?
# Ultimately, the functionality is entirely subsumed by squish_rate_func,
# but it may be useful to have a nice name for with nice default params for
# "lingering", different from squish_rate_func's default params
def lingering(t: float) -> float:
return squish_rate_func(lambda t: t, 0, 0.8)(t)
def exponential_decay(t: float, half_life: float = 0.1) -> float:
# The half-life should be rather small to minimize
# the cut-off error at the end
return 1 - np.exp(-t / half_life)
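# Quick sanity checks of the boundary behaviour the animations rely on
# (an illustrative sketch, not part of the library itself):
if __name__ == "__main__":
    assert smooth(0) == 0.0 and smooth(1) == 1.0
    assert there_and_back(0.5) == 1.0  # peaks at the midpoint
    squished = squish_rate_func(smooth, 0.25, 0.75)
    assert squished(0.1) == 0.0 and squished(0.9) == 1.0  # clamped outside [a, b]
    print("rate function sanity checks passed")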
| 24.287129
| 78
| 0.593967
|
from typing import Callable
import numpy as np
from manimlib.utils.bezier import bezier
def linear(t: float) -> float:
return t
def smooth(t: float) -> float:
s = 1 - t
return (t**3) * (10 * s * s + 5 * s * t + t * t)
def rush_into(t: float) -> float:
return 2 * smooth(0.5 * t)
def rush_from(t: float) -> float:
return 2 * smooth(0.5 * (t + 1)) - 1
def slow_into(t: float) -> float:
return np.sqrt(1 - (1 - t) * (1 - t))
def double_smooth(t: float) -> float:
if t < 0.5:
return 0.5 * smooth(2 * t)
else:
return 0.5 * (1 + smooth(2 * t - 1))
def there_and_back(t: float) -> float:
new_t = 2 * t if t < 0.5 else 2 * (1 - t)
return smooth(new_t)
def there_and_back_with_pause(t: float, pause_ratio: float = 1. / 3) -> float:
a = 1. / pause_ratio
if t < 0.5 - pause_ratio / 2:
return smooth(a * t)
elif t < 0.5 + pause_ratio / 2:
return 1
else:
return smooth(a - a * t)
def running_start(t: float, pull_factor: float = -0.5) -> float:
return bezier([0, 0, pull_factor, pull_factor, 1, 1, 1])(t)
def not_quite_there(
func: Callable[[float], float] = smooth,
proportion: float = 0.7
) -> Callable[[float], float]:
def result(t):
return proportion * func(t)
return result
def wiggle(t: float, wiggles: float = 2) -> float:
return there_and_back(t) * np.sin(wiggles * np.pi * t)
def squish_rate_func(
func: Callable[[float], float],
a: float = 0.4,
b: float = 0.6
) -> Callable[[float], float]:
def result(t):
if a == b:
return a
elif t < a:
return func(0)
elif t > b:
return func(1)
else:
return func((t - a) / (b - a))
return result
def lingering(t: float) -> float:
return squish_rate_func(lambda t: t, 0, 0.8)(t)
def exponential_decay(t: float, half_life: float = 0.1) -> float:
# The half-life should be rather small to minimize
# the cut-off error at the end
return 1 - np.exp(-t / half_life)
| true
| true
|
7905785aeb01e7ad23f5075ce8852726143b76d7
| 405
|
py
|
Python
|
BlogComment/BlogComment/urls.py
|
collins-hue/Django-Blog-Comment
|
3af6a624367b01abee296b13c46dce11c7ee7cec
|
[
"MIT"
] | 1
|
2022-03-18T15:51:43.000Z
|
2022-03-18T15:51:43.000Z
|
BlogComment/BlogComment/urls.py
|
collins-hue/Django-Blog-Comment
|
3af6a624367b01abee296b13c46dce11c7ee7cec
|
[
"MIT"
] | null | null | null |
BlogComment/BlogComment/urls.py
|
collins-hue/Django-Blog-Comment
|
3af6a624367b01abee296b13c46dce11c7ee7cec
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.conf.urls import include
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('Blog.urls')),
path('tinymce/', include('tinymce.urls')),
]
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 28.928571
| 89
| 0.750617
|
from django.conf import settings
from django.conf.urls import include
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('Blog.urls')),
path('tinymce/', include('tinymce.urls')),
]
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true
| true
|
790579adf83f040af3e85de91119edb0e3623608
| 143
|
py
|
Python
|
Systemcode/imental-Flask/init.py
|
vemodalen-x/IRS_imental
|
050fd6a4694e4e7dfc396c1c7f13fd1ad97fbae6
|
[
"MIT"
] | 3
|
2021-11-23T04:36:04.000Z
|
2022-01-18T08:05:10.000Z
|
Systemcode/imental-Flask/init.py
|
vemodalen-x/IRS_imental
|
050fd6a4694e4e7dfc396c1c7f13fd1ad97fbae6
|
[
"MIT"
] | null | null | null |
Systemcode/imental-Flask/init.py
|
vemodalen-x/IRS_imental
|
050fd6a4694e4e7dfc396c1c7f13fd1ad97fbae6
|
[
"MIT"
] | 2
|
2021-10-17T08:16:18.000Z
|
2021-11-23T04:36:10.000Z
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object('setting')
db = SQLAlchemy(app)
| 17.875
| 39
| 0.797203
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object('setting')
db = SQLAlchemy(app)
| true
| true
|
79057a1b9c6eeab7bb40ba4fcfb7fda297b2b665
| 176
|
py
|
Python
|
src/comments/urls.py
|
samrika25/TRAVIS_HEROKU_GIT
|
bcae6d0422d9a0369810944a91dd03db7df0d058
|
[
"MIT"
] | null | null | null |
src/comments/urls.py
|
samrika25/TRAVIS_HEROKU_GIT
|
bcae6d0422d9a0369810944a91dd03db7df0d058
|
[
"MIT"
] | 4
|
2021-03-30T12:35:36.000Z
|
2021-06-10T18:11:24.000Z
|
src/comments/urls.py
|
samrika25/TRAVIS_HEROKU_GIT
|
bcae6d0422d9a0369810944a91dd03db7df0d058
|
[
"MIT"
] | 2
|
2021-02-07T16:16:36.000Z
|
2021-07-13T05:26:51.000Z
|
from django.urls import path
from .views import DetailView
app_name = 'comments'
urlpatterns = [
path('<slug:model>/<slug:slug>', DetailView.as_view(), name='detail')
]
| 17.6
| 73
| 0.704545
|
from django.urls import path
from .views import DetailView
app_name = 'comments'
urlpatterns = [
path('<slug:model>/<slug:slug>', DetailView.as_view(), name='detail')
]
| true
| true
|
79057a8012c36ee0c48205c123c30860a54cd615
| 2,930
|
py
|
Python
|
src/models/layers/subpixel.py
|
TECHENGINESSRL/audio-super-res
|
2f90a288e86ddca50c98c17b0513e73ab49087d3
|
[
"MIT"
] | 712
|
2017-03-15T14:36:24.000Z
|
2022-03-27T08:51:43.000Z
|
src/models/layers/subpixel.py
|
YA07/audio-super-res
|
2f90a288e86ddca50c98c17b0513e73ab49087d3
|
[
"MIT"
] | 43
|
2017-05-05T19:51:23.000Z
|
2022-02-17T05:57:47.000Z
|
src/models/layers/subpixel.py
|
YA07/audio-super-res
|
2f90a288e86ddca50c98c17b0513e73ab49087d3
|
[
"MIT"
] | 173
|
2017-03-18T22:36:16.000Z
|
2022-03-19T07:06:43.000Z
|
import numpy as np
import tensorflow as tf
# ----------------------------------------------------------------------------
def SubPixel1D_v2(I, r):
"""One-dimensional subpixel upsampling layer
Based on https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py
"""
with tf.compat.v1.name_scope('subpixel'):
bsize, a, r = I.get_shape().as_list()
bsize = tf.shape(input=I)[0] # Handling Dimension(None) type for undefined batch dim
    X = tf.split(I, a, axis=1) # a, [bsize, 1, r]
    if 'axis' in tf.squeeze.__code__.co_varnames:
      X = tf.concat([tf.squeeze(x, axis=1) for x in X], axis=1)  # bsize, a*r
    elif 'squeeze_dims' in tf.squeeze.__code__.co_varnames:
      X = tf.concat([tf.squeeze(x, axis=[1]) for x in X], axis=1)  # bsize, a*r
else:
raise Exception('Unsupported version of tensorflow')
return tf.reshape(X, (bsize, a*r, 1))
def SubPixel1D(I, r):
"""One-dimensional subpixel upsampling layer
Calls a tensorflow function that directly implements this functionality.
We assume input has dim (batch, width, r)
"""
with tf.compat.v1.name_scope('subpixel'):
X = tf.transpose(a=I, perm=[2,1,0]) # (r, w, b)
X = tf.batch_to_space(X, [r], [[0,0]]) # (1, r*w, b)
X = tf.transpose(a=X, perm=[2,1,0])
return X
def SubPixel1D_multichan(I, r):
"""One-dimensional subpixel upsampling layer
Calls a tensorflow function that directly implements this functionality.
We assume input has dim (batch, width, r).
Works with multiple channels: (B,L,rC) -> (B,rL,C)
"""
with tf.compat.v1.name_scope('subpixel'):
_, w, rc = I.get_shape()
assert rc % r == 0
c = rc / r
X = tf.transpose(a=I, perm=[2,1,0]) # (rc, w, b)
X = tf.batch_to_space(X, [r], [[0,0]]) # (c, r*w, b)
X = tf.transpose(a=X, perm=[2,1,0])
return X
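# For reference: the (batch, width, r) -> (batch, r*width, 1) shuffle performed by
# SubPixel1D is the same interleaving as a plain C-order reshape. A NumPy-only sketch
# (illustration only, not used by the model code in this file):
def subpixel1d_numpy(x, r):
  b, w, c = x.shape
  assert c == r, "last dimension must equal the upsampling factor"
  # output[n, i*r + j, 0] == x[n, i, j]
  return x.reshape(b, w * r, 1)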
# ----------------------------------------------------------------------------
# demonstration
if __name__ == "__main__":
with tf.compat.v1.Session() as sess:
x = np.arange(2*4*2).reshape(2, 4, 2)
X = tf.compat.v1.placeholder("float32", shape=(2, 4, 2), name="X")
Y = SubPixel1D(X, 2)
y = sess.run(Y, feed_dict={X: x})
print('single-channel:')
print('original, element 0 (2 channels):', x[0,:,0], x[0,:,1])
    print('rescaled, element 0:', y[0,:,0])
    print()
    print('original, element 1 (2 channels):', x[1,:,0], x[1,:,1])
    print('rescaled, element 1:', y[1,:,0])
print()
x = np.arange(2*4*4).reshape(2, 4, 4)
X = tf.compat.v1.placeholder("float32", shape=(2, 4, 4), name="X")
Y = SubPixel1D(X, 2)
y = sess.run(Y, feed_dict={X: x})
print('multichannel:')
print('original, element 0 (4 channels):', x[0,:,0], x[0,:,1], x[0,:,2], x[0,:,3])
    print('rescaled, element 0:', y[0,:,0], y[0,:,1])
    print()
    print('original, element 1 (4 channels):', x[1,:,0], x[1,:,1], x[1,:,2], x[1,:,3])
    print('rescaled, element 1:', y[1,:,0], y[1,:,1], end=' ')
| 36.17284
| 88
| 0.567918
|
import numpy as np
import tensorflow as tf
def SubPixel1D_v2(I, r):
with tf.compat.v1.name_scope('subpixel'):
bsize, a, r = I.get_shape().as_list()
bsize = tf.shape(input=I)[0]
X = tf.split(1, a, I)
if 'axis' in tf.squeeze.__code__.co_varnames:
X = tf.concat(1, [tf.squeeze(x, axis=1) for x in X])
elif 'squeeze_dims' in tf.squeeze.__code__.co_varnames:
X = tf.concat(1, [tf.squeeze(x, axis=[1]) for x in X])
else:
raise Exception('Unsupported version of tensorflow')
return tf.reshape(X, (bsize, a*r, 1))
def SubPixel1D(I, r):
with tf.compat.v1.name_scope('subpixel'):
X = tf.transpose(a=I, perm=[2,1,0])
X = tf.batch_to_space(X, [r], [[0,0]])
X = tf.transpose(a=X, perm=[2,1,0])
return X
def SubPixel1D_multichan(I, r):
with tf.compat.v1.name_scope('subpixel'):
_, w, rc = I.get_shape()
assert rc % r == 0
c = rc / r
X = tf.transpose(a=I, perm=[2,1,0])
X = tf.batch_to_space(X, [r], [[0,0]])
X = tf.transpose(a=X, perm=[2,1,0])
return X
if __name__ == "__main__":
with tf.compat.v1.Session() as sess:
x = np.arange(2*4*2).reshape(2, 4, 2)
X = tf.compat.v1.placeholder("float32", shape=(2, 4, 2), name="X")
Y = SubPixel1D(X, 2)
y = sess.run(Y, feed_dict={X: x})
print('single-channel:')
print('original, element 0 (2 channels):', x[0,:,0], x[0,:,1])
print('rescaled, element 1:', y[0,:,0])
print()
print('original, element 0 (2 channels) :', x[1,:,0], x[1,:,1])
print('rescaled, element 1:', y[1,:,0])
print()
x = np.arange(2*4*4).reshape(2, 4, 4)
X = tf.compat.v1.placeholder("float32", shape=(2, 4, 4), name="X")
Y = SubPixel1D(X, 2)
y = sess.run(Y, feed_dict={X: x})
print('multichannel:')
print('original, element 0 (4 channels):', x[0,:,0], x[0,:,1], x[0,:,2], x[0,:,3])
print('rescaled, element 1:', y[0,:,0], y[0,:,1])
print()
print('original, element 0 (2 channels) :', x[1,:,0], x[1,:,1], x[1,:,2], x[1,:,3])
print('rescaled, element 1:', y[1,:,0], y[1,:,1], end=' ')
| true
| true
|
79057aad6749bbaf366bec0fafa663b6742e5216
| 224
|
py
|
Python
|
Exam-Prep/Exam_16-Aug-20/project/hardware/heavy_hardware.py
|
geodimitrov/PythonOOP_SoftUni
|
f1c6718c878b618b3ab3f174cd4d187bd178940b
|
[
"MIT"
] | 1
|
2021-06-30T11:53:44.000Z
|
2021-06-30T11:53:44.000Z
|
Exam-Prep/Exam_16-Aug-20/project/hardware/heavy_hardware.py
|
geodimitrov/PythonOOP_SoftUni
|
f1c6718c878b618b3ab3f174cd4d187bd178940b
|
[
"MIT"
] | null | null | null |
Exam-Prep/Exam_16-Aug-20/project/hardware/heavy_hardware.py
|
geodimitrov/PythonOOP_SoftUni
|
f1c6718c878b618b3ab3f174cd4d187bd178940b
|
[
"MIT"
] | null | null | null |
from project.hardware.hardware import Hardware
class HeavyHardware(Hardware):
TYPE = "Heavy"
def __init__(self, name, capacity, memory):
super().__init__(name, self.TYPE, capacity * 2, int(memory * 0.75))
| 24.888889
| 75
| 0.691964
|
from project.hardware.hardware import Hardware
class HeavyHardware(Hardware):
TYPE = "Heavy"
def __init__(self, name, capacity, memory):
super().__init__(name, self.TYPE, capacity * 2, int(memory * 0.75))
| true
| true
|
79057b33de621f99661374c933fe56c46dfde3d0
| 567
|
py
|
Python
|
plots/stereoisomer_gen.py
|
Reaction-Space-Explorer/reac-space-exp
|
02c91247d9ee5107cbf9fa113e87edaf4bd392b0
|
[
"BSD-3-Clause"
] | 4
|
2020-06-27T23:08:41.000Z
|
2022-01-09T16:20:48.000Z
|
plots/stereoisomer_gen.py
|
sahilrajiv/reac-space-exp
|
52f4b4eab755bd4a6830d838828c958149567396
|
[
"BSD-3-Clause"
] | 15
|
2020-07-27T23:14:32.000Z
|
2022-03-12T00:59:20.000Z
|
plots/stereoisomer_gen.py
|
sahilrajiv/reac-space-exp
|
52f4b4eab755bd4a6830d838828c958149567396
|
[
"BSD-3-Clause"
] | 3
|
2020-06-27T23:08:46.000Z
|
2021-04-20T09:29:33.000Z
|
from rdkit import Chem
from rdkit.Chem.EnumerateStereoisomers import EnumerateStereoisomers, StereoEnumerationOptions
molecules = open('glucose_degradation_output.csv','r')
lines = molecules.readlines()
counter = 0
with open('Glucose_Desc.csv', 'w') as the_file:
the_file.write("Generation,Id,NumStereoIsomers"+'\n')
for line in lines:
counter +=1
line=line.rstrip('\n')
line=line.split('\t')
m = Chem.MolFromSmiles(line[1])
isomers = tuple(EnumerateStereoisomers(m))
numste = str(len(isomers))
the_file.write(line[0]+","+line[1]+","+numste+'\n')
| 33.352941
| 94
| 0.730159
|
from rdkit import Chem
from rdkit.Chem.EnumerateStereoisomers import EnumerateStereoisomers, StereoEnumerationOptions
molecules = open('glucose_degradation_output.csv','r')
lines = molecules.readlines()
counter = 0
with open('Glucose_Desc.csv', 'w') as the_file:
the_file.write("Generation,Id,NumStereoIsomers"+'\n')
for line in lines:
counter +=1
line=line.rstrip('\n')
line=line.split('\t')
m = Chem.MolFromSmiles(line[1])
isomers = tuple(EnumerateStereoisomers(m))
numste = str(len(isomers))
the_file.write(line[0]+","+line[1]+","+numste+'\n')
| true
| true
|
79057c05cf4261cfbc8bca3de4c15b352a44373a
| 1,478
|
py
|
Python
|
tests/integration/location/test_location_logout.py
|
Joeyt1008/dash-core-components
|
c806ea66eb5b674ef84fd9efae01cfa5292f143e
|
[
"MIT"
] | null | null | null |
tests/integration/location/test_location_logout.py
|
Joeyt1008/dash-core-components
|
c806ea66eb5b674ef84fd9efae01cfa5292f143e
|
[
"MIT"
] | null | null | null |
tests/integration/location/test_location_logout.py
|
Joeyt1008/dash-core-components
|
c806ea66eb5b674ef84fd9efae01cfa5292f143e
|
[
"MIT"
] | null | null | null |
import dash
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
import dash_core_components as dcc
import dash_html_components as html
import flask
import time
def test_llgo001_location_logout(dash_dcc):
app = dash.Dash(__name__)
@app.server.route("/_logout", methods=["POST"])
def on_logout():
rep = flask.redirect("/logged-out")
rep.set_cookie("logout-cookie", "", 0)
return rep
app.layout = html.Div(
[html.H2("Logout test"), dcc.Location(id="location"), html.Div(id="content")]
)
@app.callback(Output("content", "children"), [Input("location", "pathname")])
def on_location(location_path):
if location_path is None:
raise PreventUpdate
if "logged-out" in location_path:
return "Logged out"
else:
@flask.after_this_request
def _insert_cookie(rep):
rep.set_cookie("logout-cookie", "logged-in")
return rep
return dcc.LogoutButton(id="logout-btn", logout_url="/_logout")
dash_dcc.start_server(app)
time.sleep(1)
dash_dcc.percy_snapshot("Logout button")
assert dash_dcc.driver.get_cookie("logout-cookie")["value"] == "logged-in"
dash_dcc.wait_for_element("#logout-btn").click()
dash_dcc.wait_for_text_to_equal("#content", "Logged out")
assert not dash_dcc.driver.get_cookie("logout-cookie")
assert dash_dcc.get_logs() == []
| 28.980392
| 85
| 0.656292
|
import dash
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
import dash_core_components as dcc
import dash_html_components as html
import flask
import time
def test_llgo001_location_logout(dash_dcc):
app = dash.Dash(__name__)
@app.server.route("/_logout", methods=["POST"])
def on_logout():
rep = flask.redirect("/logged-out")
rep.set_cookie("logout-cookie", "", 0)
return rep
app.layout = html.Div(
[html.H2("Logout test"), dcc.Location(id="location"), html.Div(id="content")]
)
@app.callback(Output("content", "children"), [Input("location", "pathname")])
def on_location(location_path):
if location_path is None:
raise PreventUpdate
if "logged-out" in location_path:
return "Logged out"
else:
@flask.after_this_request
def _insert_cookie(rep):
rep.set_cookie("logout-cookie", "logged-in")
return rep
return dcc.LogoutButton(id="logout-btn", logout_url="/_logout")
dash_dcc.start_server(app)
time.sleep(1)
dash_dcc.percy_snapshot("Logout button")
assert dash_dcc.driver.get_cookie("logout-cookie")["value"] == "logged-in"
dash_dcc.wait_for_element("#logout-btn").click()
dash_dcc.wait_for_text_to_equal("#content", "Logged out")
assert not dash_dcc.driver.get_cookie("logout-cookie")
assert dash_dcc.get_logs() == []
| true
| true
|
79057c2b7bd9133f76d3760604909e6f651db56c
| 2,461
|
py
|
Python
|
cog/plugins/esgf/objects.py
|
William-Hill/COG
|
4f87fa7cb19d67ee27bae3b991be73427ee449bf
|
[
"BSD-3-Clause"
] | 6
|
2016-03-10T19:38:17.000Z
|
2021-02-23T09:34:59.000Z
|
cog/plugins/esgf/objects.py
|
William-Hill/COG
|
4f87fa7cb19d67ee27bae3b991be73427ee449bf
|
[
"BSD-3-Clause"
] | 602
|
2015-01-05T16:30:08.000Z
|
2021-02-02T21:44:38.000Z
|
cog/plugins/esgf/objects.py
|
cedadev/COG
|
6167f9114c7cf0422b34fb9f5f3f07f9657a7dbe
|
[
"BSD-3-Clause"
] | 18
|
2015-02-12T15:50:17.000Z
|
2021-04-27T16:40:36.000Z
|
'''
Module containing python objects matching the ESGF database tables.
'''
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey
from sqlalchemy.orm import relationship
Base = declarative_base()
ROLE_USER = 'user'
ROLE_PUBLISHER = 'publisher'
ROLE_ADMIN = 'admin'
ROLE_SUPERUSER = 'super'
class ESGFUser(Base):
""" Class that represents the 'esgf_security.user' table in the ESGF database."""
__tablename__ = 'user'
#__table_args__ = { 'autoload':True, 'schema':'esgf_security' }
__table_args__ = { 'schema':'esgf_security' }
id = Column(Integer, primary_key=True)
firstname = Column(String)
middlename = Column(String)
lastname = Column(String)
email = Column(String)
username = Column(String)
password = Column(String)
dn = Column(String)
openid = Column(String)
organization = Column(String)
organization_type = Column(String)
city = Column(String)
state = Column(String)
country = Column(String)
status_code = Column(Integer)
verification_token = Column(String)
notification_code = Column(Integer)
class ESGFGroup(Base):
""" Class that represents the 'esgf_secitity.group' table in the ESGF database."""
__tablename__ = 'group'
__table_args__ = { 'schema':'esgf_security' }
id = Column(Integer, primary_key=True)
name = Column(String)
description = Column(String)
visible = Column(Boolean)
automatic_approval = Column(Boolean)
class ESGFRole(Base):
""" Class that represents the 'esgf_security.role' table in the ESGF database."""
__tablename__ = 'role'
__table_args__ = { 'schema':'esgf_security' }
id = Column(Integer, primary_key=True)
name = Column(String)
description = Column(String)
class ESGFPermission(Base):
""" Class that represents the 'esgf_security.permission' table in the ESGF database."""
__tablename__ = 'permission'
__table_args__ = { 'schema':'esgf_security' }
user_id = Column(Integer, ForeignKey('esgf_security.user.id'), primary_key=True)
group_id = Column(Integer, ForeignKey('esgf_security.group.id'), primary_key=True)
role_id = Column(Integer, ForeignKey('esgf_security.role.id'), primary_key=True)
approved = Column(Boolean)
user = relationship("ESGFUser")
group = relationship("ESGFGroup")
role = relationship("ESGFRole")
| 30.7625
| 91
| 0.697278
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey
from sqlalchemy.orm import relationship
Base = declarative_base()
ROLE_USER = 'user'
ROLE_PUBLISHER = 'publisher'
ROLE_ADMIN = 'admin'
ROLE_SUPERUSER = 'super'
class ESGFUser(Base):
__tablename__ = 'user'
__table_args__ = { 'schema':'esgf_security' }
id = Column(Integer, primary_key=True)
firstname = Column(String)
middlename = Column(String)
lastname = Column(String)
email = Column(String)
username = Column(String)
password = Column(String)
dn = Column(String)
openid = Column(String)
organization = Column(String)
organization_type = Column(String)
city = Column(String)
state = Column(String)
country = Column(String)
status_code = Column(Integer)
verification_token = Column(String)
notification_code = Column(Integer)
class ESGFGroup(Base):
__tablename__ = 'group'
__table_args__ = { 'schema':'esgf_security' }
id = Column(Integer, primary_key=True)
name = Column(String)
description = Column(String)
visible = Column(Boolean)
automatic_approval = Column(Boolean)
class ESGFRole(Base):
__tablename__ = 'role'
__table_args__ = { 'schema':'esgf_security' }
id = Column(Integer, primary_key=True)
name = Column(String)
description = Column(String)
class ESGFPermission(Base):
__tablename__ = 'permission'
__table_args__ = { 'schema':'esgf_security' }
user_id = Column(Integer, ForeignKey('esgf_security.user.id'), primary_key=True)
group_id = Column(Integer, ForeignKey('esgf_security.group.id'), primary_key=True)
role_id = Column(Integer, ForeignKey('esgf_security.role.id'), primary_key=True)
approved = Column(Boolean)
user = relationship("ESGFUser")
group = relationship("ESGFGroup")
role = relationship("ESGFRole")
| true
| true
|
79057c456b6e09ef603aef955b42d08da09abfd3
| 7,570
|
py
|
Python
|
main_algo.py
|
ikekilinc/Columbus
|
aa6ff64ecf04d384184998227a2d16003aa2fe60
|
[
"MIT"
] | null | null | null |
main_algo.py
|
ikekilinc/Columbus
|
aa6ff64ecf04d384184998227a2d16003aa2fe60
|
[
"MIT"
] | null | null | null |
main_algo.py
|
ikekilinc/Columbus
|
aa6ff64ecf04d384184998227a2d16003aa2fe60
|
[
"MIT"
] | null | null | null |
# Columbus - A Smart Navigation System for the Visually-Impaired
# Ike Kilinc
# This file integrates Columbus' primary start location and destination input
# features with its core pathfinding algorithm. This file also facilitates
# Columbus' speech recognition and audio functionalities.
from speech_to_text import *
from node_mapper import *
from path_finder import *
#####################################################################
#####################################################################
def run(data):
    # 'data' carries the app state (e.g. data.savedLocations) used by the prompts below.
# Columbus asks what the user would like to do (with help option). directions, popular dests, directions
pathMode = startupModeSelection()
if pathMode == "specificDestination":
# User inputs destination.
destination = destinationInput()
startLocation = startLocationInput()
elif pathMode == "nearestRestroom":
# Columbus asks where user is (TEMP).
startLocation = startLocationInput()
# Columbus finds nearest Restroom and sets as destination
destination = None
elif pathMode == "nearestPrinter":
# Columbus asks where user is (TEMP).
startLocation = startLocationInput()
# Columbus finds nearest Printer and sets as destination
destination = None
elif pathMode == "popularDestinations":
# Columbus gives user choice options of popular destinations.
# Sets user input as the destination.
destination = popularLocationsInput(data)
startLocation = startLocationInput()
elif pathMode == "savedDestinations":
# Columbus gives user choice of previously saved destinations and sets
# user input as the destination.
destination = savedLocationsInput(data)
startLocation = startLocationInput()
elif pathMode == "findGod":
pass
# Columbus searches for and determines path to destination.
nodesPath = pathFinder(startLocation, destination, pathMode)
#####################################################################
#####################################################################
class Segment(object):
def __init__(self, startCoords, endCoords, segNumber, isActive, isFloorChange):
self.segmentBounds = (startCoords[0], startCoords[1], endCoords[0], endCoords[1])
self.floor = startCoords[2]
self.segNumber = segNumber
self.isActive = isActive
self.isFloorChange = isFloorChange
# self.direction = direction
def __repr__(self):
return str(self.segNumber)
def __hash__(self):
return hash(self.segNumber)
def getSegBounds(self):
return self.segmentBounds
def getSegNum(self):
return self.segNumber
def getSegFloor(self):
return self.floor
def getIsActive(self):
return self.isActive
def getIsFloorChange(self):
return self.isFloorChange
def getCenter(self):
centerX = (self.segmentBounds[0] + self.segmentBounds[2])/2
centerY = (self.segmentBounds[1] + self.segmentBounds[3])/2
return (centerX, centerY)
def getSegmentDirection(self):
(x0,y0,x1,y1) = self.segmentBounds
if (x1-x0) > 0: return "E"
elif (x1-x0) < 0: return "W"
elif (y1-y0) > 0: return "S"
elif (y1-y0) < 0: return "N"
else: return None
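def _segment_example():
    # Illustrative only (not used elsewhere): node coordinates are hypothetical
    # (x, y, floor) tuples in screen space, so +x points East and +y points South.
    seg = Segment((0, 0, 2), (10, 0, 2), segNumber=0, isActive=False, isFloorChange=False)
    assert seg.getCenter() == (5.0, 0.0)
    assert seg.getSegmentDirection() == "E"
    return seg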
def createAllSegments(nodesPath):
allSegments = []
isFloorChange = False
intNodesPath = []
for i in range(len(nodesPath)):
node = nodesPath[i]
if (isinstance(node, Intersection) or isinstance(node, Elevator) or
i==0 or i==(len(nodesPath)-1)):
intNodesPath.append(node)
for i in range(len(intNodesPath)-1):
(node, nextNode) = (intNodesPath[i], intNodesPath[i+1])
if (isinstance(node, Elevator) and isinstance(nextNode, Elevator)):
isFloorChange = True
segment = Segment(node.getCoords(), nextNode.getCoords(), i, False, isFloorChange)
isFloorChange = False
allSegments.append(segment)
allSegments.append(Segment(intNodesPath[-1].getCoords(), intNodesPath[-1].getCoords(), i, False, False))
return allSegments
#####################################################################
#####################################################################
def startupModeSelection(repeat=False):
# Used to select mode for operating Columbus. Mode options include:
# Finding directions to a specific destination, directions to the nearest
# restroom, directions to popular destinations, and directions to previously
# saved destinations.
if repeat == True:
play("voiceCommands/sorryPleaseRepeat.wav")
else:
play("voiceCommands/modeSelectionInputPrompt.wav")
userInput = recognizeSpeech("mode")
if userInput == "help":
play("voiceCommands/modeSelectionHelp.wav")
userInput = recognizeSpeech("mode")
if userInput in ["nearestRestroom", "popularDestinations",
"savedDestinations", "nearestPrinter",
"specificDestination", "findGod", "help"]:
return userInput
else:
return startupModeSelection(True)
def destinationInput(repeat=False):
if repeat==True:
play("voiceCommands/sorryPleaseRepeat.wav")
else:
# Columbus asks where user would like to go.
play("voiceCommands/destinationInputPrompt.wav")
# User inputs destination
destination = recognizeSpeech("location")
if isLegalNode(destination):
return destination
else:
return destinationInput(True)
def startLocationInput(repeat=False):
if repeat==True:
play("voiceCommands/sorryPleaseRepeat.wav")
else:
# Columbus asks where user is now.
play("voiceCommands/startLocationInputPrompt.wav")
# User inputs start location.
startLocation = recognizeSpeech("location")
if isLegalNode(startLocation):
return startLocation
else:
return startLocationInput(True)
def popularLocationsInput(data, repeat=False):
print("popLocsInput")
if repeat==True:
play("voiceCommands/sorryPleaseRepeat.wav")
else:
# Columbus asks where user would like to go.
play("voiceCommands/destinationInputPromptWithHelp.wav")
userInput = recognizeSpeech("popularDest")
if userInput == "help":
play("voiceCommands/popularLocationSelectionHelp.wav")
userInput = recognizeSpeech("popularDest")
if userInput in ["5Prima", "4Sorrells"]:
return userInput
else:
return popularLocationsInput(data, True)
def savedLocationsInput(data, repeat=False):
if len(data.savedLocations) == 0:
play("voiceCommands/noSavedDestinations.wav")
else:
if repeat==True:
play("voiceCommands/sorryPleaseRepeat.wav")
else:
# Columbus asks where user would like to go.
play("voiceCommands/destinationInputPromptWithHelp.wav")
userInput = recognizeSpeech("savedDest")
if userInput == "help":
play("voiceCommands/modeSelectionHelp.wav")
userInput = recognizeSpeech("savedDest")
if userInput in data.savedLocations:
return userInput
else:
return savedLocationsInput(data, True)
def isLegalNode(string):
allNodesMap = mapAllNodes()
for floor in allNodesMap:
for roomStr in allNodesMap[floor]:
if string == roomStr:
return True
return False
| 32.212766
| 108
| 0.628666
|
# features with its core pathfinding algorithm. This file also facilitates
# Columbus' speech recognition and audio functionalities.
from speech_to_text import *
from node_mapper import *
from path_finder import *
| true
| true
|
79057cdd75a786f5e425e5ce9db0527dc62f7973
| 12,840
|
py
|
Python
|
irrd/server/graphql/schema_generator.py
|
morrowc/irrd
|
8a2af9a6648a73fc3c31d21cf07ef80a49031a14
|
[
"BSD-2-Clause"
] | null | null | null |
irrd/server/graphql/schema_generator.py
|
morrowc/irrd
|
8a2af9a6648a73fc3c31d21cf07ef80a49031a14
|
[
"BSD-2-Clause"
] | 1
|
2021-04-20T14:57:52.000Z
|
2021-04-20T14:57:52.000Z
|
irrd/server/graphql/schema_generator.py
|
morrowc/irrd
|
8a2af9a6648a73fc3c31d21cf07ef80a49031a14
|
[
"BSD-2-Clause"
] | null | null | null |
from collections import OrderedDict, defaultdict
from typing import Optional, Dict, Tuple, List
import ariadne
from irrd.rpki.status import RPKIStatus
from irrd.rpsl.fields import RPSLFieldListMixin, RPSLTextField, RPSLReferenceField
from irrd.rpsl.rpsl_objects import (lookup_field_names, OBJECT_CLASS_MAPPING, RPSLAutNum,
RPSLInetRtr, RPSLPerson, RPSLRole)
from irrd.scopefilter.status import ScopeFilterStatus
from irrd.utils.text import snake_to_camel_case
class SchemaGenerator:
def __init__(self):
"""
The schema generator generates a GraphQL schema.
The purpose is to provide a schema to which resolvers are then
attached, which is then given to Ariadne, and for resolvers to
have information about expected types.
For RPSL queries and types, this is dynamically generated based on
the RPSL objects from irrd.rpsl. Other parts are fixed.
This means that the schema is always the same for a given IRRd
codebase - there are no runtime or user configurable parts.
Along with generating the schema, some metadata is saved, e.g.
self.graphql_types which allows resolvers to learn the GraphQL
type for a certain field.
This generator also creates Ariadne object types on self, which
are used to attach resolvers to them.
"""
self._set_rpsl_query_fields()
self._set_rpsl_object_interface_schema()
self._set_rpsl_contact_schema()
self._set_rpsl_object_schemas()
self._set_enums()
schema = self.enums
schema += """
scalar ASN
scalar IP
schema {
query: Query
}
type Query {
rpslObjects(""" + self.rpsl_query_fields + """): [RPSLObject!]
databaseStatus(sources: [String!]): [DatabaseStatus]
asnPrefixes(asns: [ASN!]!, ipVersion: Int, sources: [String!]): [ASNPrefixes!]
asSetPrefixes(setNames: [String!]!, ipVersion: Int, sources: [String!], excludeSets: [String!], sqlTrace: Boolean): [AsSetPrefixes!]
recursiveSetMembers(setNames: [String!]!, depth: Int, sources: [String!], excludeSets: [String!], sqlTrace: Boolean): [SetMembers!]
}
type DatabaseStatus {
source: String!
authoritative: Boolean!
objectClassFilter: [String!]
rpkiRovFilter: Boolean!
scopefilterEnabled: Boolean!
localJournalKept: Boolean!
serialOldestJournal: Int
serialNewestJournal: Int
serialLastExport: Int
serialNewestMirror: Int
lastUpdate: String
synchronisedSerials: Boolean!
}
type RPSLJournalEntry {
rpslPk: String!
source: String!
serialNrtm: Int!
operation: String!
origin: String
objectClass: String!
objectText: String!
timestamp: String!
}
type ASNPrefixes {
asn: ASN!
prefixes: [IP!]
}
type AsSetPrefixes {
rpslPk: String!
prefixes: [IP!]
}
type SetMembers {
rpslPk: String!
members: [String!]
}
"""
schema += self.rpsl_object_interface_schema
schema += self.rpsl_contact_schema
schema += ''.join(self.rpsl_object_schemas.values())
schema += 'union RPSLContactUnion = RPSLPerson | RPSLRole'
self.type_defs = ariadne.gql(schema)
self.query_type = ariadne.QueryType()
self.rpsl_object_type = ariadne.InterfaceType("RPSLObject")
self.rpsl_contact_union_type = ariadne.UnionType("RPSLContactUnion")
self.asn_scalar_type = ariadne.ScalarType("ASN")
self.ip_scalar_type = ariadne.ScalarType("IP")
self.object_types = [self.query_type, self.rpsl_object_type, self.rpsl_contact_union_type,
self.asn_scalar_type, self.ip_scalar_type]
for name in self.rpsl_object_schemas.keys():
self.object_types.append(ariadne.ObjectType(name))
self.object_types.append(ariadne.ObjectType("ASNPrefixes"))
self.object_types.append(ariadne.ObjectType("AsSetPrefixes"))
self.object_types.append(ariadne.ObjectType("SetMembers"))
self.object_types.append(ariadne.EnumType("RPKIStatus", RPKIStatus))
self.object_types.append(ariadne.EnumType("ScopeFilterStatus", ScopeFilterStatus))
def _set_rpsl_query_fields(self):
"""
Create a sub-schema for the fields that can be queried for RPSL objects.
This includes all fields from all objects, along with a few
special fields.
"""
string_list_fields = {'rpsl_pk', 'sources', 'object_class'}.union(lookup_field_names())
params = [snake_to_camel_case(p) + ': [String!]' for p in sorted(string_list_fields)]
params += [
'ipExact: IP',
'ipLessSpecific: IP',
'ipLessSpecificOneLevel: IP',
'ipMoreSpecific: IP',
'ipAny: IP',
'asn: [ASN!]',
'rpkiStatus: [RPKIStatus!]',
'scopeFilterStatus: [ScopeFilterStatus!]',
'textSearch: String',
'recordLimit: Int',
'sqlTrace: Boolean',
]
self.rpsl_query_fields = ', '.join(params)
def _set_enums(self):
"""
Create the schema for enums, current RPKI and scope filter status.
"""
self.enums = ''
for enum in [RPKIStatus, ScopeFilterStatus]:
self.enums += f'enum {enum.__name__} {{\n'
for value in enum:
self.enums += f' {value.name}\n'
self.enums += '}\n\n'
def _set_rpsl_object_interface_schema(self):
"""
Create the schema for RPSLObject, which contains only fields that
        are common to every known RPSL object, along with a few metadata fields.
"""
common_fields = None
for rpsl_object_class in OBJECT_CLASS_MAPPING.values():
if common_fields is None:
common_fields = set(rpsl_object_class.fields.keys())
else:
common_fields = common_fields.intersection(set(rpsl_object_class.fields.keys()))
common_fields = list(common_fields)
common_fields = ['rpslPk', 'objectClass', 'objectText', 'updated'] + common_fields
common_field_dict = self._dict_for_common_fields(common_fields)
common_field_dict['journal'] = '[RPSLJournalEntry]'
schema = self._generate_schema_str('RPSLObject', 'interface', common_field_dict)
self.rpsl_object_interface_schema = schema
def _set_rpsl_contact_schema(self):
"""
Create the schema for RPSLContact. This contains shared fields between
RPSLPerson and RPSLRole, as they are so similar.
"""
common_fields = set(RPSLPerson.fields.keys()).intersection(set(RPSLRole.fields.keys()))
common_fields = common_fields.union({'rpslPk', 'objectClass', 'objectText', 'updated'})
common_field_dict = self._dict_for_common_fields(list(common_fields))
schema = self._generate_schema_str('RPSLContact', 'interface', common_field_dict)
self.rpsl_contact_schema = schema
def _dict_for_common_fields(self, common_fields: List[str]):
common_field_dict = OrderedDict()
for field_name in sorted(common_fields):
try:
                # These fields are present in all relevant objects, so this is a safe check
rpsl_field = RPSLPerson.fields[field_name]
graphql_type = self._graphql_type_for_rpsl_field(rpsl_field)
reference_name, reference_type = self._grapql_type_for_reference_field(
field_name, rpsl_field)
if reference_name and reference_type:
common_field_dict[reference_name] = reference_type
except KeyError:
graphql_type = 'String'
common_field_dict[snake_to_camel_case(field_name)] = graphql_type
return common_field_dict
def _set_rpsl_object_schemas(self):
"""
Create the schemas for each specific RPSL object class.
Each of these implements RPSLObject, and RPSLPerson/RPSLRole
implement RPSLContact as well.
"""
self.graphql_types = defaultdict(dict)
schemas = OrderedDict()
for object_class, klass in OBJECT_CLASS_MAPPING.items():
object_name = klass.__name__
graphql_fields = OrderedDict()
graphql_fields['rpslPk'] = 'String'
graphql_fields['objectClass'] = 'String'
graphql_fields['objectText'] = 'String'
graphql_fields['updated'] = 'String'
graphql_fields['journal'] = '[RPSLJournalEntry]'
for field_name, field in klass.fields.items():
graphql_type = self._graphql_type_for_rpsl_field(field)
graphql_fields[snake_to_camel_case(field_name)] = graphql_type
self.graphql_types[snake_to_camel_case(object_name)][field_name] = graphql_type
reference_name, reference_type = self._grapql_type_for_reference_field(field_name, field)
if reference_name and reference_type:
graphql_fields[reference_name] = reference_type
self.graphql_types[object_name][reference_name] = reference_type
for field_name in klass.field_extracts:
if field_name.startswith('asn'):
graphql_type = 'ASN'
elif field_name == 'prefix':
graphql_type = 'IP'
elif field_name == 'prefix_length':
graphql_type = 'Int'
else:
graphql_type = 'String'
graphql_fields[snake_to_camel_case(field_name)] = graphql_type
if klass.rpki_relevant:
graphql_fields['rpkiStatus'] = 'RPKIStatus'
graphql_fields['rpkiMaxLength'] = 'Int'
self.graphql_types[object_name]['rpki_max_length'] = 'Int'
implements = 'RPSLContact & RPSLObject' if klass in [RPSLPerson, RPSLRole] else 'RPSLObject'
schema = self._generate_schema_str(object_name, 'type', graphql_fields, implements)
schemas[object_name] = schema
self.rpsl_object_schemas = schemas
def _graphql_type_for_rpsl_field(self, field: RPSLTextField) -> str:
"""
Return the GraphQL type for a regular RPSL field.
This is always a list of strings if the field is a list and/or
can occur multiple times.
"""
if RPSLFieldListMixin in field.__class__.__bases__ or field.multiple:
return '[String!]'
return 'String'
def _grapql_type_for_reference_field(self, field_name: str, rpsl_field: RPSLTextField) -> Tuple[Optional[str], Optional[str]]:
"""
Return the GraphQL name and type for a reference field.
For example, for a field "admin-c" that refers to person/role,
returns ('adminC', '[RPSLContactUnion!]').
Some fields are excluded because they are syntactical references,
not real references.
"""
if isinstance(rpsl_field, RPSLReferenceField) and getattr(rpsl_field, 'referring', None):
rpsl_field.resolve_references()
graphql_name = snake_to_camel_case(field_name) + 'Objs'
grapql_referring = set(rpsl_field.referring_object_classes)
if RPSLAutNum in grapql_referring:
grapql_referring.remove(RPSLAutNum)
if RPSLInetRtr in grapql_referring:
grapql_referring.remove(RPSLInetRtr)
if grapql_referring == {RPSLPerson, RPSLRole}:
graphql_type = '[RPSLContactUnion!]'
else:
graphql_type = '[' + grapql_referring.pop().__name__ + '!]'
return graphql_name, graphql_type
return None, None
def _generate_schema_str(self, name: str, graphql_type: str, fields: Dict[str, str], implements: Optional[str]=None) -> str:
"""
Generate a schema string for a given name, object type and dict of fields.
"""
schema = f'{graphql_type} {name} '
if implements:
schema += f'implements {implements} '
schema += '{\n'
for field, field_type in fields.items():
schema += f' {field}: {field_type}\n'
schema += '}\n\n'
return schema
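def _build_executable_schema():
    # Hypothetical wiring sketch (not part of this module): resolvers would normally be
    # attached to generator.query_type, generator.rpsl_object_type, etc. before this call.
    generator = SchemaGenerator()
    return ariadne.make_executable_schema(generator.type_defs, *generator.object_types)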
| 43.822526
| 146
| 0.616745
|
from collections import OrderedDict, defaultdict
from typing import Optional, Dict, Tuple, List
import ariadne
from irrd.rpki.status import RPKIStatus
from irrd.rpsl.fields import RPSLFieldListMixin, RPSLTextField, RPSLReferenceField
from irrd.rpsl.rpsl_objects import (lookup_field_names, OBJECT_CLASS_MAPPING, RPSLAutNum,
RPSLInetRtr, RPSLPerson, RPSLRole)
from irrd.scopefilter.status import ScopeFilterStatus
from irrd.utils.text import snake_to_camel_case
class SchemaGenerator:
def __init__(self):
self._set_rpsl_query_fields()
self._set_rpsl_object_interface_schema()
self._set_rpsl_contact_schema()
self._set_rpsl_object_schemas()
self._set_enums()
schema = self.enums
schema += """
scalar ASN
scalar IP
schema {
query: Query
}
type Query {
rpslObjects(""" + self.rpsl_query_fields + """): [RPSLObject!]
databaseStatus(sources: [String!]): [DatabaseStatus]
asnPrefixes(asns: [ASN!]!, ipVersion: Int, sources: [String!]): [ASNPrefixes!]
asSetPrefixes(setNames: [String!]!, ipVersion: Int, sources: [String!], excludeSets: [String!], sqlTrace: Boolean): [AsSetPrefixes!]
recursiveSetMembers(setNames: [String!]!, depth: Int, sources: [String!], excludeSets: [String!], sqlTrace: Boolean): [SetMembers!]
}
type DatabaseStatus {
source: String!
authoritative: Boolean!
objectClassFilter: [String!]
rpkiRovFilter: Boolean!
scopefilterEnabled: Boolean!
localJournalKept: Boolean!
serialOldestJournal: Int
serialNewestJournal: Int
serialLastExport: Int
serialNewestMirror: Int
lastUpdate: String
synchronisedSerials: Boolean!
}
type RPSLJournalEntry {
rpslPk: String!
source: String!
serialNrtm: Int!
operation: String!
origin: String
objectClass: String!
objectText: String!
timestamp: String!
}
type ASNPrefixes {
asn: ASN!
prefixes: [IP!]
}
type AsSetPrefixes {
rpslPk: String!
prefixes: [IP!]
}
type SetMembers {
rpslPk: String!
members: [String!]
}
"""
schema += self.rpsl_object_interface_schema
schema += self.rpsl_contact_schema
schema += ''.join(self.rpsl_object_schemas.values())
schema += 'union RPSLContactUnion = RPSLPerson | RPSLRole'
self.type_defs = ariadne.gql(schema)
self.query_type = ariadne.QueryType()
self.rpsl_object_type = ariadne.InterfaceType("RPSLObject")
self.rpsl_contact_union_type = ariadne.UnionType("RPSLContactUnion")
self.asn_scalar_type = ariadne.ScalarType("ASN")
self.ip_scalar_type = ariadne.ScalarType("IP")
self.object_types = [self.query_type, self.rpsl_object_type, self.rpsl_contact_union_type,
self.asn_scalar_type, self.ip_scalar_type]
for name in self.rpsl_object_schemas.keys():
self.object_types.append(ariadne.ObjectType(name))
self.object_types.append(ariadne.ObjectType("ASNPrefixes"))
self.object_types.append(ariadne.ObjectType("AsSetPrefixes"))
self.object_types.append(ariadne.ObjectType("SetMembers"))
self.object_types.append(ariadne.EnumType("RPKIStatus", RPKIStatus))
self.object_types.append(ariadne.EnumType("ScopeFilterStatus", ScopeFilterStatus))
def _set_rpsl_query_fields(self):
string_list_fields = {'rpsl_pk', 'sources', 'object_class'}.union(lookup_field_names())
params = [snake_to_camel_case(p) + ': [String!]' for p in sorted(string_list_fields)]
params += [
'ipExact: IP',
'ipLessSpecific: IP',
'ipLessSpecificOneLevel: IP',
'ipMoreSpecific: IP',
'ipAny: IP',
'asn: [ASN!]',
'rpkiStatus: [RPKIStatus!]',
'scopeFilterStatus: [ScopeFilterStatus!]',
'textSearch: String',
'recordLimit: Int',
'sqlTrace: Boolean',
]
self.rpsl_query_fields = ', '.join(params)
def _set_enums(self):
self.enums = ''
for enum in [RPKIStatus, ScopeFilterStatus]:
self.enums += f'enum {enum.__name__} {{\n'
for value in enum:
self.enums += f' {value.name}\n'
self.enums += '}\n\n'
def _set_rpsl_object_interface_schema(self):
common_fields = None
for rpsl_object_class in OBJECT_CLASS_MAPPING.values():
if common_fields is None:
common_fields = set(rpsl_object_class.fields.keys())
else:
common_fields = common_fields.intersection(set(rpsl_object_class.fields.keys()))
common_fields = list(common_fields)
common_fields = ['rpslPk', 'objectClass', 'objectText', 'updated'] + common_fields
common_field_dict = self._dict_for_common_fields(common_fields)
common_field_dict['journal'] = '[RPSLJournalEntry]'
schema = self._generate_schema_str('RPSLObject', 'interface', common_field_dict)
self.rpsl_object_interface_schema = schema
def _set_rpsl_contact_schema(self):
common_fields = set(RPSLPerson.fields.keys()).intersection(set(RPSLRole.fields.keys()))
common_fields = common_fields.union({'rpslPk', 'objectClass', 'objectText', 'updated'})
common_field_dict = self._dict_for_common_fields(list(common_fields))
schema = self._generate_schema_str('RPSLContact', 'interface', common_field_dict)
self.rpsl_contact_schema = schema
def _dict_for_common_fields(self, common_fields: List[str]):
common_field_dict = OrderedDict()
for field_name in sorted(common_fields):
try:
rpsl_field = RPSLPerson.fields[field_name]
graphql_type = self._graphql_type_for_rpsl_field(rpsl_field)
reference_name, reference_type = self._grapql_type_for_reference_field(
field_name, rpsl_field)
if reference_name and reference_type:
common_field_dict[reference_name] = reference_type
except KeyError:
graphql_type = 'String'
common_field_dict[snake_to_camel_case(field_name)] = graphql_type
return common_field_dict
def _set_rpsl_object_schemas(self):
self.graphql_types = defaultdict(dict)
schemas = OrderedDict()
for object_class, klass in OBJECT_CLASS_MAPPING.items():
object_name = klass.__name__
graphql_fields = OrderedDict()
graphql_fields['rpslPk'] = 'String'
graphql_fields['objectClass'] = 'String'
graphql_fields['objectText'] = 'String'
graphql_fields['updated'] = 'String'
graphql_fields['journal'] = '[RPSLJournalEntry]'
for field_name, field in klass.fields.items():
graphql_type = self._graphql_type_for_rpsl_field(field)
graphql_fields[snake_to_camel_case(field_name)] = graphql_type
self.graphql_types[snake_to_camel_case(object_name)][field_name] = graphql_type
reference_name, reference_type = self._grapql_type_for_reference_field(field_name, field)
if reference_name and reference_type:
graphql_fields[reference_name] = reference_type
self.graphql_types[object_name][reference_name] = reference_type
for field_name in klass.field_extracts:
if field_name.startswith('asn'):
graphql_type = 'ASN'
elif field_name == 'prefix':
graphql_type = 'IP'
elif field_name == 'prefix_length':
graphql_type = 'Int'
else:
graphql_type = 'String'
graphql_fields[snake_to_camel_case(field_name)] = graphql_type
if klass.rpki_relevant:
graphql_fields['rpkiStatus'] = 'RPKIStatus'
graphql_fields['rpkiMaxLength'] = 'Int'
self.graphql_types[object_name]['rpki_max_length'] = 'Int'
implements = 'RPSLContact & RPSLObject' if klass in [RPSLPerson, RPSLRole] else 'RPSLObject'
schema = self._generate_schema_str(object_name, 'type', graphql_fields, implements)
schemas[object_name] = schema
self.rpsl_object_schemas = schemas
def _graphql_type_for_rpsl_field(self, field: RPSLTextField) -> str:
if RPSLFieldListMixin in field.__class__.__bases__ or field.multiple:
return '[String!]'
return 'String'
def _grapql_type_for_reference_field(self, field_name: str, rpsl_field: RPSLTextField) -> Tuple[Optional[str], Optional[str]]:
if isinstance(rpsl_field, RPSLReferenceField) and getattr(rpsl_field, 'referring', None):
rpsl_field.resolve_references()
graphql_name = snake_to_camel_case(field_name) + 'Objs'
grapql_referring = set(rpsl_field.referring_object_classes)
if RPSLAutNum in grapql_referring:
grapql_referring.remove(RPSLAutNum)
if RPSLInetRtr in grapql_referring:
grapql_referring.remove(RPSLInetRtr)
if grapql_referring == {RPSLPerson, RPSLRole}:
graphql_type = '[RPSLContactUnion!]'
else:
graphql_type = '[' + grapql_referring.pop().__name__ + '!]'
return graphql_name, graphql_type
return None, None
def _generate_schema_str(self, name: str, graphql_type: str, fields: Dict[str, str], implements: Optional[str]=None) -> str:
schema = f'{graphql_type} {name} '
if implements:
schema += f'implements {implements} '
schema += '{\n'
for field, field_type in fields.items():
schema += f' {field}: {field_type}\n'
schema += '}\n\n'
return schema
| true
| true
|
79057d644bd6f6676a3e83031a983e2a2886b351
| 1,030
|
py
|
Python
|
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/projecteuler/euler041_pandigital_prime.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | 5
|
2021-06-02T23:44:25.000Z
|
2021-12-27T16:21:57.000Z
|
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/projecteuler/euler041_pandigital_prime.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | 22
|
2021-05-31T01:33:25.000Z
|
2021-10-18T18:32:39.000Z
|
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/projecteuler/euler041_pandigital_prime.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | 3
|
2021-06-19T03:37:47.000Z
|
2021-08-31T00:49:51.000Z
|
#!/usr/bin/env python
"""
Solution to Project Euler Problem 41
http://projecteuler.net/
by Apalala <apalala@gmail.com>
(cc) Attribution-ShareAlike
http://creativecommons.org/licenses/by-sa/3.0/
We shall say that an n-digit number is pandigital if it makes use of all
the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital
and is also prime.
What is the largest n-digit pandigital prime that exists?
"""
from digits import is_pandigital
from primality import primes_upto, is_prime
def pandigital_primes(digits=7):
for p in primes_upto(int("9" * digits)):
if is_pandigital(p):
yield p
def test():
assert not is_prime(123)
assert not is_prime(132)
assert not is_prime(213)
assert not is_prime(231)
assert not is_prime(312)
assert not is_prime(321)
assert is_prime(2143)
assert is_pandigital(2143)
assert 2143 in set(pandigital_primes(digits=4))
def run():
print(list(pandigital_primes())[-1])
if __name__ == "__main__":
test()
run()
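# --- Illustrative sketch (not part of the original file) ---
# The imported `digits` and `primality` modules are not included in this
# record; the definitions below are an assumption of the minimal behaviour
# the script relies on.
def _is_pandigital_sketch(n):
    # An n-digit number is pandigital if it uses each digit 1..n exactly once.
    s = str(n)
    return sorted(s) == [str(d) for d in range(1, len(s) + 1)]

def _is_prime_sketch(n):
    # Simple trial division; adequate for the 7-digit bound used above.
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True

def _primes_upto_sketch(limit):
    # Yields primes up to `limit` in increasing order.
    return (k for k in range(2, limit + 1) if _is_prime_sketch(k))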
| 22.391304
| 73
| 0.707767
|
from digits import is_pandigital
from primality import primes_upto, is_prime
def pandigital_primes(digits=7):
for p in primes_upto(int("9" * digits)):
if is_pandigital(p):
yield p
def test():
assert not is_prime(123)
assert not is_prime(132)
assert not is_prime(213)
assert not is_prime(231)
assert not is_prime(312)
assert not is_prime(321)
assert is_prime(2143)
assert is_pandigital(2143)
assert 2143 in set(pandigital_primes(digits=4))
def run():
print(list(pandigital_primes())[-1])
if __name__ == "__main__":
test()
run()
| true
| true
|
79057e3faa199906fcf81398d881cfbc3f238795
| 2,160
|
py
|
Python
|
tools/doxygen_utils.py
|
MicrohexHQ/src
|
c079873c182067002b6a7a5564094ea0a4fe0aef
|
[
"BSD-3-Clause"
] | 2
|
2019-07-08T11:58:27.000Z
|
2019-07-08T13:23:57.000Z
|
tools/doxygen_utils.py
|
Bia10/src
|
15b9ab2535222e492cd21b8528c27f763fb799d6
|
[
"BSD-3-Clause"
] | null | null | null |
tools/doxygen_utils.py
|
Bia10/src
|
15b9ab2535222e492cd21b8528c27f763fb799d6
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
import os
import xml.etree.ElementTree as ET
def load_xml_for_module(xml_dir_path, module_name, or_dummy=True):
xml_tree = ET.Element("dummy") if or_dummy else None
for sfx in ["_8hpp", "_8h"]:
xml_path = os.path.join(xml_dir_path, "%s%s.xml" % (module_name, sfx))
if os.path.isfile(xml_path):
with open(xml_path, "rb") as fin:
xml_tree = ET.fromstring(fin.read())
return xml_tree
def get_toplevel_functions(xml_tree, name=None):
path = "./compounddef/sectiondef[@kind='%s']/memberdef[@kind='function']"
if name:
path = "%s/[name='%s']" % (path, name)
all_nodes = []
for section_kind in ["func", "user-defined"]:
nodes = xml_tree.findall(path % section_kind)
        all_nodes.extend(nodes)
return all_nodes
def get_single_child_element_text_contents(el, child_element_tag):
nodes = el.findall("./%s" % child_element_tag)
nnodes = len(nodes)
if nnodes == 0:
return None
text = nodes[0].text
if nnodes > 1:
print("Warning: more than 1 child element with tag '%s' found; picking first" % (child_element_tag,))
return text
def for_each_param(node, callback):
assert(node.tag == "memberdef" and node.attrib.get("kind") == "function")
plist = node.find("./detaileddescription/para/parameterlist[@kind='param']")
def get_direct_text(n, tag):
c = n.find("./%s" % tag)
if c is not None:
return " ".join(c.itertext()).strip()
for param in node.findall("./param"):
name, ptyp, desc = None, None, None
name = get_direct_text(param, "declname")
ptyp = get_direct_text(param, "type")
if name and plist is not None:
for plist_item in plist.findall("parameteritem"):
if plist_item.find("./parameternamelist/[parametername='%s']" % name) is not None:
pdesc_node = plist_item.find("./parameterdescription")
if pdesc_node is not None:
desc = " ".join(pdesc_node.itertext()).strip()
callback(name, ptyp, desc)
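# --- Illustrative usage sketch (not part of the original file) ---
# "build/doxygen/xml" and "mymodule" are placeholder values.
def _print_module_params(xml_dir_path="build/doxygen/xml", module_name="mymodule"):
    xml_tree = load_xml_for_module(xml_dir_path, module_name)
    for fn_node in get_toplevel_functions(xml_tree):
        fn_name = get_single_child_element_text_contents(fn_node, "name")
        # Print one line per parameter: (function, name, type, description).
        for_each_param(fn_node, lambda pname, ptyp, desc: print(fn_name, pname, ptyp, desc))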
| 40.754717
| 109
| 0.622685
|
from __future__ import print_function
import os
import xml.etree.ElementTree as ET
def load_xml_for_module(xml_dir_path, module_name, or_dummy=True):
xml_tree = ET.Element("dummy") if or_dummy else None
for sfx in ["_8hpp", "_8h"]:
xml_path = os.path.join(xml_dir_path, "%s%s.xml" % (module_name, sfx))
if os.path.isfile(xml_path):
with open(xml_path, "rb") as fin:
xml_tree = ET.fromstring(fin.read())
return xml_tree
def get_toplevel_functions(xml_tree, name=None):
path = "./compounddef/sectiondef[@kind='%s']/memberdef[@kind='function']"
if name:
path = "%s/[name='%s']" % (path, name)
all_nodes = []
for section_kind in ["func", "user-defined"]:
nodes = xml_tree.findall(path % section_kind)
        all_nodes.extend(nodes)
return all_nodes
def get_single_child_element_text_contents(el, child_element_tag):
nodes = el.findall("./%s" % child_element_tag)
nnodes = len(nodes)
if nnodes == 0:
return None
text = nodes[0].text
if nnodes > 1:
print("Warning: more than 1 child element with tag '%s' found; picking first" % (child_element_tag,))
return text
def for_each_param(node, callback):
assert(node.tag == "memberdef" and node.attrib.get("kind") == "function")
plist = node.find("./detaileddescription/para/parameterlist[@kind='param']")
def get_direct_text(n, tag):
c = n.find("./%s" % tag)
if c is not None:
return " ".join(c.itertext()).strip()
for param in node.findall("./param"):
name, ptyp, desc = None, None, None
name = get_direct_text(param, "declname")
ptyp = get_direct_text(param, "type")
if name and plist is not None:
for plist_item in plist.findall("parameteritem"):
if plist_item.find("./parameternamelist/[parametername='%s']" % name) is not None:
pdesc_node = plist_item.find("./parameterdescription")
if pdesc_node is not None:
desc = " ".join(pdesc_node.itertext()).strip()
callback(name, ptyp, desc)
| true
| true
|
79057f7f2c81cfb2c6a87ad7a662320755b5d019
| 1,449
|
py
|
Python
|
benchmarks/bnb.py
|
alexchamberlain/mutant
|
3f4ec0df8b83b2de18766e2c9e1808cff4fd52a9
|
[
"MIT"
] | 3
|
2019-06-15T13:13:39.000Z
|
2020-02-07T19:54:12.000Z
|
benchmarks/bnb.py
|
alexchamberlain/mutant
|
3f4ec0df8b83b2de18766e2c9e1808cff4fd52a9
|
[
"MIT"
] | 276
|
2019-07-03T06:18:37.000Z
|
2021-07-28T05:24:59.000Z
|
benchmarks/bnb.py
|
alexchamberlain/mutant
|
3f4ec0df8b83b2de18766e2c9e1808cff4fd52a9
|
[
"MIT"
] | null | null | null |
import logging
import sys
import time
from rdflib.graph import Graph
from hexastore import turtle
from hexastore.memory import InMemoryHexastore
logger = logging.getLogger(__name__)
root = logging.getLogger()
root.setLevel(logging.DEBUG)
class Timer:
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *args):
self.end = time.perf_counter()
self.interval = self.end - self.start
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
root.addHandler(handler)
try:
with Timer() as t:
store = InMemoryHexastore()
with Timer() as t1:
triples = []
with open("/Users/alex/Downloads/BNBLODBooks_sample_nt/BNBLODB_sample.nt") as fo:
turtle.parse(fo.read(), lambda s, p, o: triples.append((s, p, o)))
logger.info(f"library=mutant-parse time={t1.interval}")
with Timer() as t2:
store.bulk_insert(triples)
logger.info(f"library=mutant-bulk-insert time={t2.interval}")
finally:
logger.info(f"library=mutant time={t.interval}")
try:
with Timer() as t:
g = Graph()
g.parse("/Users/alex/Downloads/BNBLODBooks_sample_nt/BNBLODB_sample.nt", format="nt")
finally:
logger.info(f"library=rdflib time={t.interval}")
| 25.421053
| 93
| 0.668737
|
import logging
import sys
import time
from rdflib.graph import Graph
from hexastore import turtle
from hexastore.memory import InMemoryHexastore
logger = logging.getLogger(__name__)
root = logging.getLogger()
root.setLevel(logging.DEBUG)
class Timer:
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *args):
self.end = time.perf_counter()
self.interval = self.end - self.start
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
root.addHandler(handler)
try:
with Timer() as t:
store = InMemoryHexastore()
with Timer() as t1:
triples = []
with open("/Users/alex/Downloads/BNBLODBooks_sample_nt/BNBLODB_sample.nt") as fo:
turtle.parse(fo.read(), lambda s, p, o: triples.append((s, p, o)))
logger.info(f"library=mutant-parse time={t1.interval}")
with Timer() as t2:
store.bulk_insert(triples)
logger.info(f"library=mutant-bulk-insert time={t2.interval}")
finally:
logger.info(f"library=mutant time={t.interval}")
try:
with Timer() as t:
g = Graph()
g.parse("/Users/alex/Downloads/BNBLODBooks_sample_nt/BNBLODB_sample.nt", format="nt")
finally:
logger.info(f"library=rdflib time={t.interval}")
| true
| true
|
79057fbb3e6cc4b94d57c855cce54d732abfd431
| 459
|
py
|
Python
|
ServerScript/recievejson(legacy).py
|
wmizzi/tn2capstone
|
e9855ba6b49e2d05293df74846c64fa0c220a25d
|
[
"BSD-2-Clause"
] | null | null | null |
ServerScript/recievejson(legacy).py
|
wmizzi/tn2capstone
|
e9855ba6b49e2d05293df74846c64fa0c220a25d
|
[
"BSD-2-Clause"
] | null | null | null |
ServerScript/recievejson(legacy).py
|
wmizzi/tn2capstone
|
e9855ba6b49e2d05293df74846c64fa0c220a25d
|
[
"BSD-2-Clause"
] | null | null | null |
#created by Angus Clark on 8/01/2017
# toDo incoperate the saving program into this_dir
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '130.56.253.43'
print host # remove when done debugging
port = 5201 # edit when port for comm is decided
s.bind((host,port))
s.listen(5)
while True:
    c, addr = s.accept()
    f = open('temp.json', 'wb')
    l = c.recv(1024)
    while l:
        f.write(l)
        l = c.recv(1024)
    f.close()
    c.close()
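# --- Illustrative client sketch (not part of the original file) ---
# Sends a local JSON file to the server above; the server simply writes the
# raw byte stream to temp.json until the client closes the connection.
# Host and port mirror the values hard-coded above.
def send_json(path, host='130.56.253.43', port=5201):
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect((host, port))
    f = open(path, 'rb')
    c.sendall(f.read())
    f.close()
    c.close()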
| 19.956522
| 53
| 0.651416
|
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '130.56.253.43'
print host
port = 5201
s.bind((host,port))
s.listen(5)
while True:
    c, addr = s.accept()
    f = open('temp.json', 'wb')
    l = c.recv(1024)
    while l:
        f.write(l)
        l = c.recv(1024)
    f.close()
    c.close()
| false
| true
|
79057ff8f54339970083c98c62e0ab3da30f4036
| 11,982
|
py
|
Python
|
example/case_example.py
|
casework/CASE-API-Python
|
389a6eb7f0b248aa976b37228923106163e743ae
|
[
"Apache-2.0"
] | 1
|
2019-11-09T03:45:32.000Z
|
2019-11-09T03:45:32.000Z
|
example/case_example.py
|
casework/CASE-API-Python
|
389a6eb7f0b248aa976b37228923106163e743ae
|
[
"Apache-2.0"
] | 1
|
2019-06-28T18:44:46.000Z
|
2019-06-28T18:44:47.000Z
|
example/case_example.py
|
casework/CASE-API-Python
|
389a6eb7f0b248aa976b37228923106163e743ae
|
[
"Apache-2.0"
] | null | null | null |
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
    # Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
uri: Optional string to set th URI to. (If not provided a UUID will be generated.)
bnode: Whether to create a blank node or a uri reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
        # Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
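# --- Illustrative usage sketch (not part of the original file) ---
# The 'Trace' / 'File' types and the property names below are invented for
# illustration only; real CASE vocabularies define their own terms.
def _example_document():
    doc = Document()
    trace = doc.create_CoreObject('Trace')
    trace.create_PropertyBundle('File', fileName='report.pdf', sizeInBytes=2048)
    # Serialization falls back to the document's JSON-LD default from serialize().
    return doc.serialize(format='json-ld')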
| 33.657303
| 95
| 0.608746
|
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
class Document(object):
def __init__(self, graph=None):
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
return iter(self.graph)
def __contains__(self, triple):
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
def serialize(self, format='json-ld', **kwargs):
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
return DuckObject(self.graph, rdf_type=_type, **kwargs)
class Node(object):
RDF_TYPE = None
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
if value is None:
return
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
self.type = rdf_type
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
| true
| true
|
790580d80b8ef5203e3d00531be7c705e2e0a7bc
| 813
|
py
|
Python
|
pDeep/config/element.py
|
zhouxiexuan/pDeep3
|
3a95dc8d1479df96e491ef68accd775dac46af62
|
[
"Apache-2.0"
] | 10
|
2020-05-28T17:04:19.000Z
|
2021-05-13T12:11:22.000Z
|
pDeep/config/element.py
|
zhouxiexuan/pDeep3
|
3a95dc8d1479df96e491ef68accd775dac46af62
|
[
"Apache-2.0"
] | 7
|
2020-05-21T02:13:05.000Z
|
2021-02-21T15:29:15.000Z
|
pDeep/config/element.py
|
zhouxiexuan/pDeep3
|
3a95dc8d1479df96e491ef68accd775dac46af62
|
[
"Apache-2.0"
] | 6
|
2020-02-25T15:53:39.000Z
|
2021-12-10T03:54:09.000Z
|
element_list = ["X", "H", "He", "Li", "Be", "B", "C", "N", "O", "F", "Ne", "Na", "Mg", "Al", "Si", "P", "S", "Cl", "Ar",
"K", "Ca", "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn", "Ga", "Ge", "As", "Se", "Br",
"Kr", "Rb", "Sr", "Y", "Zr", "Nb", "Mo", "Tc", "Ru", "Rh", "Pd", "Ag", "Cd", "In", "Sn", "Sb", "Te",
"I", "Xe", "Cs", "Ba", "La", "Ce", "Pr", "Nd", "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho", "Er", "Tm",
"Yb", "Lu", "Hf", "Ta", "W", "Re", "Os", "Ir", "Pt", "Au", "Hg", "Tl", "Pb", "Bi", "Po", "At", "Rn",
"Fr", "Ra", "Ac", "Th", "Pa", "U", "Np", "Pu", "Am", "Cm", "Bk", "Cf", "Es", "Fm", "Md", "No", "Lr",
"15N", "14N", "Hex", "HexNAc", "dHex", "NeuAc", "Pent", "18O", "Hep", "NeuGc", "2H", "13C"]
| 101.625
| 120
| 0.306273
|
element_list = ["X", "H", "He", "Li", "Be", "B", "C", "N", "O", "F", "Ne", "Na", "Mg", "Al", "Si", "P", "S", "Cl", "Ar",
"K", "Ca", "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn", "Ga", "Ge", "As", "Se", "Br",
"Kr", "Rb", "Sr", "Y", "Zr", "Nb", "Mo", "Tc", "Ru", "Rh", "Pd", "Ag", "Cd", "In", "Sn", "Sb", "Te",
"I", "Xe", "Cs", "Ba", "La", "Ce", "Pr", "Nd", "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho", "Er", "Tm",
"Yb", "Lu", "Hf", "Ta", "W", "Re", "Os", "Ir", "Pt", "Au", "Hg", "Tl", "Pb", "Bi", "Po", "At", "Rn",
"Fr", "Ra", "Ac", "Th", "Pa", "U", "Np", "Pu", "Am", "Cm", "Bk", "Cf", "Es", "Fm", "Md", "No", "Lr",
"15N", "14N", "Hex", "HexNAc", "dHex", "NeuAc", "Pent", "18O", "Hep", "NeuGc", "2H", "13C"]
| true
| true
|
7905813833e1a1d2bd03b59cec4012115c6135a5
| 702
|
py
|
Python
|
src/decisionengine/framework/modules/tests/test_Publisher.py
|
moibenko/decisionengine
|
4c458e0c225ec2ce1e82d56e752724983331b7d1
|
[
"Apache-2.0"
] | 9
|
2018-06-11T20:06:50.000Z
|
2020-10-01T17:02:02.000Z
|
src/decisionengine/framework/modules/tests/test_Publisher.py
|
moibenko/decisionengine
|
4c458e0c225ec2ce1e82d56e752724983331b7d1
|
[
"Apache-2.0"
] | 551
|
2018-06-25T21:06:37.000Z
|
2022-03-31T13:47:32.000Z
|
src/decisionengine/framework/modules/tests/test_Publisher.py
|
goodenou/decisionengine
|
b203e2c493cf501562accf1013c6257c348711b7
|
[
"Apache-2.0"
] | 70
|
2018-06-11T20:07:01.000Z
|
2022-02-10T16:18:24.000Z
|
# SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
from decisionengine.framework.modules.Publisher import Publisher
def test_publisher_structure():
"""
The module.publisher itself is a bit of a skeleton...
"""
params = {"1": 1, "2": 2, "channel_name": "test"}
test_publisher = Publisher(params)
assert test_publisher.get_parameters() == {"1": 1, "2": 2, "channel_name": "test"}
test_publisher.set_data_block("example")
assert test_publisher.get_data_block() == "example"
assert test_publisher._consumes == {}
test_publisher.publish()
test_publisher.publish(data_block="asdf")
test_publisher.shutdown()
| 31.909091
| 86
| 0.706553
|
from decisionengine.framework.modules.Publisher import Publisher
def test_publisher_structure():
params = {"1": 1, "2": 2, "channel_name": "test"}
test_publisher = Publisher(params)
assert test_publisher.get_parameters() == {"1": 1, "2": 2, "channel_name": "test"}
test_publisher.set_data_block("example")
assert test_publisher.get_data_block() == "example"
assert test_publisher._consumes == {}
test_publisher.publish()
test_publisher.publish(data_block="asdf")
test_publisher.shutdown()
| true
| true
|
7905813c1a802be246df68221752b8b3b9928ce2
| 733
|
py
|
Python
|
intel_query.py
|
sudo-rushil/DGA_Intel
|
6fcdba787dda999661cc2ee4f34da4feacd6e012
|
[
"MIT"
] | 3
|
2019-11-27T08:06:12.000Z
|
2020-12-10T06:54:41.000Z
|
intel_query.py
|
sudo-rushil/DGA_Intel
|
6fcdba787dda999661cc2ee4f34da4feacd6e012
|
[
"MIT"
] | null | null | null |
intel_query.py
|
sudo-rushil/DGA_Intel
|
6fcdba787dda999661cc2ee4f34da4feacd6e012
|
[
"MIT"
] | 3
|
2020-07-23T12:47:08.000Z
|
2021-12-26T23:58:26.000Z
|
import whois
def get_whois(domain):
try:
query = whois.query(domain)
assert isinstance(query, whois._3_adjust.Domain)
return query.__dict__
except:
pass
return None
def get_scans(domain):
    # NOTE: `vt` is not defined or imported in this module; it is assumed to be
    # a pre-configured VirusTotal-style API client supplied by the caller.
    url = "http://" + domain
    scans = vt.get_url_reports([url])[url]['scans']
positive, negative = [], []
for key, val in scans.items():
if val["detected"]:
negative.append(key)
else:
positive.append(key)
return positive, negative, len(positive), len(negative)
if __name__ == '__main__':
# print('test domain: microsoft.com')
# print(get_whois('microsoft.com'))
# print(get_scans('pxxfmjhosgqqs.com'))
pass
| 22.90625
| 59
| 0.601637
|
import whois
def get_whois(domain):
try:
query = whois.query(domain)
assert isinstance(query, whois._3_adjust.Domain)
return query.__dict__
except:
pass
return None
def get_scans(domain):
url = "http://" + domain
urls = [url]
scans = vt.get_url_reports([url])[url]['scans']
positive, negative = [], []
for key, val in scans.items():
if val["detected"]:
negative.append(key)
else:
positive.append(key)
return positive, negative, len(positive), len(negative)
if __name__ == '__main__':
pass
| true
| true
|
7905816a75a88e6c20a927fb765a527b85b73e51
| 5,050
|
py
|
Python
|
Transfer/YOLOv4-pytorch/eval_voc.py
|
chakkritte/EEEA-Net
|
260c2a5c673a806315fc5b529b9c9112c48ca8ae
|
[
"Apache-2.0"
] | 3
|
2021-08-30T01:36:52.000Z
|
2021-11-05T07:36:28.000Z
|
Transfer/YOLOv4-pytorch/eval_voc.py
|
chakkritte/EEEA-Net
|
260c2a5c673a806315fc5b529b9c9112c48ca8ae
|
[
"Apache-2.0"
] | 1
|
2021-11-29T12:00:56.000Z
|
2021-11-30T04:07:28.000Z
|
Transfer/YOLOv4-pytorch/eval_voc.py
|
chakkritte/EEEA-Net
|
260c2a5c673a806315fc5b529b9c9112c48ca8ae
|
[
"Apache-2.0"
] | 2
|
2021-08-17T10:06:59.000Z
|
2021-08-30T01:36:57.000Z
|
import utils.gpu as gpu
from model.build_model import Build_Model
from utils.tools import *
from eval.evaluator import Evaluator
import argparse
import time
import logging
import config.yolov4_config as cfg
from utils.visualize import *
from utils.torch_utils import *
from utils.log import Logger
import pooraka as prk
class Evaluation(object):
def __init__(self,
gpu_id=0,
weight_path=None,
visiual=None,
eval=False,
mode_path=None
):
self.__num_class = cfg.VOC_DATA["NUM"]
self.__conf_threshold = cfg.VAL["CONF_THRESH"]
self.__nms_threshold = cfg.VAL["NMS_THRESH"]
self.__device = gpu.select_device(gpu_id)
self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
self.__flip_val = cfg.VAL["FLIP_VAL"]
self.__visiual = visiual
self.__eval = eval
self.__classes = cfg.VOC_DATA["CLASSES"]
if cfg.MODEL_TYPE["TYPE"] == 'NSGA-YOLOv4':
self.__model = Build_Model(weight_path=mode_path).to(self.__device)
else:
self.__model = Build_Model(weight_path=weight_path).to(self.__device)
self.__load_model_weights(weight_path)
self.__evalter = Evaluator(self.__model, showatt=False)
def __load_model_weights(self, weight_path):
print("loading weight file from : {}".format(weight_path))
weight = os.path.join(weight_path)
chkpt = torch.load(weight, map_location=self.__device)
self.__model.load_state_dict(chkpt)
print("loading weight file is done")
flops, params = prk.get_flops_params(self.__model.cpu(), (1, 3, 416, 416))
print(flops, params )
self.__model = self.__model.cuda()
del chkpt
def val(self):
global logger
if self.__eval:
logger.info("***********Start Evaluation****************")
start = time.time()
mAP = 0
with torch.no_grad():
APs, inference_time = Evaluator(self.__model, showatt=False).APs_voc(self.__multi_scale_val, self.__flip_val)
for i in APs:
logger.info("{} --> mAP : {}".format(i, APs[i]))
mAP += APs[i]
mAP = mAP / self.__num_class
logger.info('mAP:{}'.format(mAP))
logger.info("inference time: {:.2f} ms".format(inference_time))
end = time.time()
logger.info(" ===val cost time:{:.4f}s".format(end - start))
def detection(self):
global logger
if self.__visiual:
imgs = os.listdir(self.__visiual)
logger.info("***********Start Detection****************")
for v in imgs:
path = os.path.join(self.__visiual, v)
logger.info("val images : {}".format(path))
img = cv2.imread(path)
assert img is not None
bboxes_prd = self.__evalter.get_bbox(img,v)
if bboxes_prd.shape[0] != 0:
boxes = bboxes_prd[..., :4]
class_inds = bboxes_prd[..., 5].astype(np.int32)
scores = bboxes_prd[..., 4]
visualize_boxes(image=img, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)
path = os.path.join(cfg.PROJECT_PATH, "detection_result/{}".format(v))
cv2.imwrite(path, img)
logger.info("saved images : {}".format(path))
if __name__ == "__main__":
global logger
parser = argparse.ArgumentParser()
parser.add_argument('--weight_path', type=str, default='weight/best.pt', help='weight file path')
parser.add_argument('--model_path', type=str, default='', help='weight file path')
parser.add_argument('--log_val_path', type=str, default='log_val',
help='weight file path')
    parser.add_argument('--gpu_id', type=int, default=-1, help='whether to use GPU(eg:0,1,2,3,4,5,6,7,8) or CPU(-1)')
parser.add_argument('--visiual', type=str, default='VOCtest-2007/VOC2007/JPEGImages', help='val data path or None')
parser.add_argument('--eval', action='store_true', default=True, help='eval the mAP or not')
parser.add_argument('--mode', type=str, default='val',
help='val or det')
opt = parser.parse_args()
logger = Logger(log_file_name=opt.log_val_path + '/log_voc_val.txt', log_level=logging.DEBUG, logger_name='YOLOv4').get_log()
if opt.mode == 'val':
Evaluation(gpu_id=opt.gpu_id,
weight_path=opt.weight_path,
eval=opt.eval,
visiual=opt.visiual,
mode_path = opt.model_path).val()
else:
Evaluation(gpu_id=opt.gpu_id,
weight_path=opt.weight_path,
eval=opt.eval,
visiual=opt.visiual, mode_path = opt.model_path).detection()
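# --- Illustrative CLI usage (not part of the original file) ---
# Flags mirror the argparse definitions above; paths are placeholders.
#   python eval_voc.py --weight_path weight/best.pt --gpu_id 0 --mode val
#   python eval_voc.py --weight_path weight/best.pt --visiual path/to/images --mode det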
| 39.76378
| 129
| 0.577426
|
import utils.gpu as gpu
from model.build_model import Build_Model
from utils.tools import *
from eval.evaluator import Evaluator
import argparse
import time
import logging
import config.yolov4_config as cfg
from utils.visualize import *
from utils.torch_utils import *
from utils.log import Logger
import pooraka as prk
class Evaluation(object):
def __init__(self,
gpu_id=0,
weight_path=None,
visiual=None,
eval=False,
mode_path=None
):
self.__num_class = cfg.VOC_DATA["NUM"]
self.__conf_threshold = cfg.VAL["CONF_THRESH"]
self.__nms_threshold = cfg.VAL["NMS_THRESH"]
self.__device = gpu.select_device(gpu_id)
self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
self.__flip_val = cfg.VAL["FLIP_VAL"]
self.__visiual = visiual
self.__eval = eval
self.__classes = cfg.VOC_DATA["CLASSES"]
if cfg.MODEL_TYPE["TYPE"] == 'NSGA-YOLOv4':
self.__model = Build_Model(weight_path=mode_path).to(self.__device)
else:
self.__model = Build_Model(weight_path=weight_path).to(self.__device)
self.__load_model_weights(weight_path)
self.__evalter = Evaluator(self.__model, showatt=False)
def __load_model_weights(self, weight_path):
print("loading weight file from : {}".format(weight_path))
weight = os.path.join(weight_path)
chkpt = torch.load(weight, map_location=self.__device)
self.__model.load_state_dict(chkpt)
print("loading weight file is done")
flops, params = prk.get_flops_params(self.__model.cpu(), (1, 3, 416, 416))
print(flops, params )
self.__model = self.__model.cuda()
del chkpt
def val(self):
global logger
if self.__eval:
logger.info("***********Start Evaluation****************")
start = time.time()
mAP = 0
with torch.no_grad():
APs, inference_time = Evaluator(self.__model, showatt=False).APs_voc(self.__multi_scale_val, self.__flip_val)
for i in APs:
logger.info("{} --> mAP : {}".format(i, APs[i]))
mAP += APs[i]
mAP = mAP / self.__num_class
logger.info('mAP:{}'.format(mAP))
logger.info("inference time: {:.2f} ms".format(inference_time))
end = time.time()
logger.info(" ===val cost time:{:.4f}s".format(end - start))
def detection(self):
global logger
if self.__visiual:
imgs = os.listdir(self.__visiual)
logger.info("***********Start Detection****************")
for v in imgs:
path = os.path.join(self.__visiual, v)
logger.info("val images : {}".format(path))
img = cv2.imread(path)
assert img is not None
bboxes_prd = self.__evalter.get_bbox(img,v)
if bboxes_prd.shape[0] != 0:
boxes = bboxes_prd[..., :4]
class_inds = bboxes_prd[..., 5].astype(np.int32)
scores = bboxes_prd[..., 4]
visualize_boxes(image=img, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)
path = os.path.join(cfg.PROJECT_PATH, "detection_result/{}".format(v))
cv2.imwrite(path, img)
logger.info("saved images : {}".format(path))
if __name__ == "__main__":
global logger
parser = argparse.ArgumentParser()
parser.add_argument('--weight_path', type=str, default='weight/best.pt', help='weight file path')
parser.add_argument('--model_path', type=str, default='', help='weight file path')
parser.add_argument('--log_val_path', type=str, default='log_val',
help='weight file path')
    parser.add_argument('--gpu_id', type=int, default=-1, help='whether to use GPU(eg:0,1,2,3,4,5,6,7,8) or CPU(-1)')
parser.add_argument('--visiual', type=str, default='VOCtest-2007/VOC2007/JPEGImages', help='val data path or None')
parser.add_argument('--eval', action='store_true', default=True, help='eval the mAP or not')
parser.add_argument('--mode', type=str, default='val',
help='val or det')
opt = parser.parse_args()
logger = Logger(log_file_name=opt.log_val_path + '/log_voc_val.txt', log_level=logging.DEBUG, logger_name='YOLOv4').get_log()
if opt.mode == 'val':
Evaluation(gpu_id=opt.gpu_id,
weight_path=opt.weight_path,
eval=opt.eval,
visiual=opt.visiual,
mode_path = opt.model_path).val()
else:
Evaluation(gpu_id=opt.gpu_id,
weight_path=opt.weight_path,
eval=opt.eval,
visiual=opt.visiual, mode_path = opt.model_path).detection()
| true
| true
|
7905818eece2f476bcb4cf2567ee243c0368a91d
| 6,114
|
py
|
Python
|
PythonAPI/carissma_project/PID_apply_static_sp.py
|
AbdulHoffmann/carla_carissma
|
8d382769ffa02a6c61a22c57160285505f5ff0a4
|
[
"MIT"
] | null | null | null |
PythonAPI/carissma_project/PID_apply_static_sp.py
|
AbdulHoffmann/carla_carissma
|
8d382769ffa02a6c61a22c57160285505f5ff0a4
|
[
"MIT"
] | null | null | null |
PythonAPI/carissma_project/PID_apply_static_sp.py
|
AbdulHoffmann/carla_carissma
|
8d382769ffa02a6c61a22c57160285505f5ff0a4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# file trying to apply and test the pid controller on carla.
import glob
import os
import sys
import time
import matplotlib.pyplot as plt
from PID_controller import PID
import numpy as np
import speed_profile_reader as spr
try:
sys.path.append(glob.glob('../**/*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import random
import time
class TestData:
def __init__(self, total_duration, time_increment):
self._iter_num = 0
self.time = np.empty([int(total_duration / time_increment) + 1, 1])
self.setpoint = np.empty([int(total_duration / time_increment) + 1, 1])
self.actual_velocity = np.empty([int(total_duration / time_increment) + 1, 1])
self.error = np.empty([int(total_duration / time_increment) + 1, 1])
def append_data(self, t, sp, vel, error):
self.time[self._iter_num] = t
self.setpoint[self._iter_num] = sp
self.actual_velocity[self._iter_num] = vel
self.error[self._iter_num] = error
self._iter_num+=1
def plot(self):
plt.figure()
plt.plot(self.time, self.setpoint)
plt.plot(self.time, self.actual_velocity)
plt.xlabel('Time (s)')
plt.ylabel('Velocity (m/s)')
plt.title("PID Result")
plt.figure()
plt.plot(self.time, self.error, 'r--', label='error', alpha=0.75, linewidth=0.5)
plt.plot(self.time, np.zeros(len(self.time)), 'k--', linewidth=0.5)
plt.title("Controller Error")
plt.show()
class DataInit:
K = {
"Kp": 0.055734,
"Ki": 0.0114169,
"Kd": .00006
# For 10 m/s
# "Kp": 0.055734,
# "Ki": 0.0130169,
# "Kd": .000006
# "Kp": 1,
# "Ki": 0.0112,
# "Kd": 0.000006
}
total_duration = 20
sampling_period = 0.025
def main():
actor_list = []
verboseIsEnabled = None
try:
"""
Section for starting the client and connecting to the server
"""
client = carla.Client('localhost', 2000)
client.set_timeout(2.0)
for arg in sys.argv:
if (arg == '--verbose'):
verboseIsEnabled = True
if verboseIsEnabled:
print('client version: %s' % client.get_client_version())
print('server version: %s' % client.get_server_version())
print('client to server connection status: {}'.format(client.get_server_version()))
print('Retrieving the world data from server...')
world = client.get_world()
if verboseIsEnabled:
print('{} \n'.format(world))
"""
Section for retrieving the blueprints and spawn the actors
"""
blueprint_library = world.get_blueprint_library()
if verboseIsEnabled:
print('\nRetrieving CARLA blueprint library...')
print('\nobject: %s\n\nblueprint methods: %s\n\nblueprint list:' % (type(blueprint_library), dir(blueprint_library)) )
for blueprint in blueprint_library:
print(blueprint)
audi_blueprint = blueprint_library.find('vehicle.audi.tt')
print('\n%s\n' % audi_blueprint)
color = '191,191,191'
audi_blueprint.set_attribute('color', color)
transform = carla.Transform(
carla.Location(
x=10.5, y=-1.8,
z=38.5),carla.Rotation(yaw=0.0)
)
vehicleEgo = world.spawn_actor(audi_blueprint, transform)
actor_list.append(vehicleEgo)
print('created %s' % vehicleEgo.type_id)
color = random.choice(audi_blueprint.get_attribute('color').recommended_values)
audi_blueprint.set_attribute('color', color)
"""
Section for initializing the PID testing
"""
user_input_sp = None
while (not isinstance(user_input_sp, int)) and (not isinstance(user_input_sp, float)):
user_input_sp = input('Enter the desired Setpoint:\n')
data = TestData(DataInit.total_duration, DataInit.sampling_period)
start = time.time()
print('\nStarting test:\n\n' + 'Time(s) current_vel(m/s) setpoint_vel(m/s) throttle(%) pid_demand')
time.sleep(2.5)
print('.................................................................\n')
time.sleep(1)
# raise SystemExit
p = PID(
DataInit.K['Kp'],
DataInit.K['Ki'],
DataInit.K['Kd']
)
p.setPoint(user_input_sp)
p.Integrator_min = -5
p.Integrator_max = 40
pid = 0
for _ in range(int(DataInit.total_duration / DataInit.sampling_period) + 1):
measurement_value = vehicleEgo.get_velocity().x
vehicleEgo.apply_control(carla.VehicleControl(pid)) if 1 > pid > 0 else vehicleEgo.apply_control(carla.VehicleControl(1))
if 0 > pid: vehicleEgo.apply_control(carla.VehicleControl(brake=abs(pid)))
pid = p.update(measurement_value)
data.append_data(round(time.time() - start, 2), p.getSetPoint(), round(vehicleEgo.get_velocity().x, 5), p.getError())
time.sleep(DataInit.sampling_period)
print('%0.3f\t%0.2f\t\t\t%0.2f\t\t%0.2f\t%0.2f' % (time.time() - start,
vehicleEgo.get_velocity().x,
p.set_point,
vehicleEgo.get_control().throttle,
pid))
data.plot()
print('\nError Mean (Steady State):\n' +
str(round(np.absolute(np.mean(data.error[data.error.shape[0]/2:data.error.shape[0]])), 5)*100) +
'%\n')
finally:
print('destroying actors')
for actor in actor_list:
actor.destroy()
print('done.')
if __name__ == '__main__':
main()
| 33.593407
| 133
| 0.564279
|
import glob
import os
import sys
import time
import matplotlib.pyplot as plt
from PID_controller import PID
import numpy as np
import speed_profile_reader as spr
try:
sys.path.append(glob.glob('../**/*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import random
import time
class TestData:
def __init__(self, total_duration, time_increment):
self._iter_num = 0
self.time = np.empty([int(total_duration / time_increment) + 1, 1])
self.setpoint = np.empty([int(total_duration / time_increment) + 1, 1])
self.actual_velocity = np.empty([int(total_duration / time_increment) + 1, 1])
self.error = np.empty([int(total_duration / time_increment) + 1, 1])
def append_data(self, t, sp, vel, error):
self.time[self._iter_num] = t
self.setpoint[self._iter_num] = sp
self.actual_velocity[self._iter_num] = vel
self.error[self._iter_num] = error
self._iter_num+=1
def plot(self):
plt.figure()
plt.plot(self.time, self.setpoint)
plt.plot(self.time, self.actual_velocity)
plt.xlabel('Time (s)')
plt.ylabel('Velocity (m/s)')
plt.title("PID Result")
plt.figure()
plt.plot(self.time, self.error, 'r--', label='error', alpha=0.75, linewidth=0.5)
plt.plot(self.time, np.zeros(len(self.time)), 'k--', linewidth=0.5)
plt.title("Controller Error")
plt.show()
class DataInit:
K = {
"Kp": 0.055734,
"Ki": 0.0114169,
"Kd": .00006
}
total_duration = 20
sampling_period = 0.025
def main():
actor_list = []
verboseIsEnabled = None
try:
client = carla.Client('localhost', 2000)
client.set_timeout(2.0)
for arg in sys.argv:
if (arg == '--verbose'):
verboseIsEnabled = True
if verboseIsEnabled:
print('client version: %s' % client.get_client_version())
print('server version: %s' % client.get_server_version())
print('client to server connection status: {}'.format(client.get_server_version()))
print('Retrieving the world data from server...')
world = client.get_world()
if verboseIsEnabled:
print('{} \n'.format(world))
blueprint_library = world.get_blueprint_library()
if verboseIsEnabled:
print('\nRetrieving CARLA blueprint library...')
print('\nobject: %s\n\nblueprint methods: %s\n\nblueprint list:' % (type(blueprint_library), dir(blueprint_library)) )
for blueprint in blueprint_library:
print(blueprint)
audi_blueprint = blueprint_library.find('vehicle.audi.tt')
print('\n%s\n' % audi_blueprint)
color = '191,191,191'
audi_blueprint.set_attribute('color', color)
transform = carla.Transform(
carla.Location(
x=10.5, y=-1.8,
z=38.5),carla.Rotation(yaw=0.0)
)
vehicleEgo = world.spawn_actor(audi_blueprint, transform)
actor_list.append(vehicleEgo)
print('created %s' % vehicleEgo.type_id)
color = random.choice(audi_blueprint.get_attribute('color').recommended_values)
audi_blueprint.set_attribute('color', color)
user_input_sp = None
while (not isinstance(user_input_sp, int)) and (not isinstance(user_input_sp, float)):
user_input_sp = input('Enter the desired Setpoint:\n')
data = TestData(DataInit.total_duration, DataInit.sampling_period)
start = time.time()
print('\nStarting test:\n\n' + 'Time(s) current_vel(m/s) setpoint_vel(m/s) throttle(%) pid_demand')
time.sleep(2.5)
print('.................................................................\n')
time.sleep(1)
p = PID(
DataInit.K['Kp'],
DataInit.K['Ki'],
DataInit.K['Kd']
)
p.setPoint(user_input_sp)
p.Integrator_min = -5
p.Integrator_max = 40
pid = 0
for _ in range(int(DataInit.total_duration / DataInit.sampling_period) + 1):
measurement_value = vehicleEgo.get_velocity().x
vehicleEgo.apply_control(carla.VehicleControl(pid)) if 1 > pid > 0 else vehicleEgo.apply_control(carla.VehicleControl(1))
if 0 > pid: vehicleEgo.apply_control(carla.VehicleControl(brake=abs(pid)))
pid = p.update(measurement_value)
data.append_data(round(time.time() - start, 2), p.getSetPoint(), round(vehicleEgo.get_velocity().x, 5), p.getError())
time.sleep(DataInit.sampling_period)
print('%0.3f\t%0.2f\t\t\t%0.2f\t\t%0.2f\t%0.2f' % (time.time() - start,
vehicleEgo.get_velocity().x,
p.set_point,
vehicleEgo.get_control().throttle,
pid))
data.plot()
print('\nError Mean (Steady State):\n' +
str(round(np.absolute(np.mean(data.error[data.error.shape[0]/2:data.error.shape[0]])), 5)*100) +
'%\n')
finally:
print('destroying actors')
for actor in actor_list:
actor.destroy()
print('done.')
if __name__ == '__main__':
main()
| true
| true
|
790581dc3d8123de9299cda66837fb0fbb9494b3
| 5,477
|
py
|
Python
|
src/cogs/commands/music.py
|
Jonak-Adipta-Kalita/JAK-Discord-Bot
|
9e48654952b603aba581471773a24132f2f228fb
|
[
"MIT"
] | 4
|
2021-08-31T14:21:25.000Z
|
2022-03-01T10:01:34.000Z
|
src/cogs/commands/music.py
|
Jonak-Adipta-Kalita/JAK-Discord-Bot
|
9e48654952b603aba581471773a24132f2f228fb
|
[
"MIT"
] | 134
|
2021-11-03T05:14:07.000Z
|
2022-03-31T08:06:55.000Z
|
src/cogs/commands/music.py
|
Jonak-Adipta-Kalita/JAK-Discord-Bot
|
9e48654952b603aba581471773a24132f2f228fb
|
[
"MIT"
] | null | null | null |
import disnake, youtube_dl
import src.core.embeds as embeds
import src.core.functions as funcs
from disnake.ext import commands
prefix = funcs.get_prefix()
class Music(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.group(invoke_without_command=True, description="Connect/Leave VC")
@commands.has_guild_permissions(connect=True)
async def vc(self, ctx: commands.Context, command: str):
await ctx.reply("Command not Found!!")
@commands.group(
invoke_without_command=True, description="Play, Pause, Resume, Stop Music"
)
@commands.has_guild_permissions(connect=True)
async def music(self, ctx: commands.Context, command: str):
await ctx.reply("Command not Found!!")
@vc.command(
description="Joins the VC you are currently in", aliases=["connect", "c"]
)
@commands.has_guild_permissions(connect=True)
async def join(self, ctx: commands.Context):
if ctx.author.voice is None:
await ctx.reply("You are not Connected to a Voice Channel!!")
return
if ctx.voice_client is None:
voice_channel = ctx.author.voice.channel
try:
await voice_channel.connect()
await ctx.reply("Connected!!")
except disnake.HTTPException:
await ctx.reply("Can't Connect to this Voice Channel!!")
else:
await ctx.reply("I am already in a Voice Channel!!")
@vc.command(description="Leaves VC", aliases=["disconnect", "dc"])
@commands.has_guild_permissions(connect=True)
async def leave(self, ctx: commands.Context):
if ctx.voice_client:
await ctx.reply("Disconnected!!")
await ctx.voice_client.disconnect()
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
@music.command(description="Plays the Music")
@commands.has_guild_permissions(connect=True)
async def play(self, ctx: commands.Context, *, music_name: str):
vc = ctx.voice_client
if vc:
FFMPEG_OPTIONS = {
"before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5",
"options": "-vn",
}
YDL_OPTIONS = {"formats": "bestaudio"}
with youtube_dl.YoutubeDL(YDL_OPTIONS) as ydl:
info = {}
url = ""
if music_name.startswith("https://"):
info = ydl.extract_info(music_name, download=False)
url = info["formats"][0]["url"]
else:
info_ = ydl.extract_info(f"ytsearch:{music_name}", download=False)
url_ = info_["entries"][0]["webpage_url"]
info = ydl.extract_info(url_, download=False)
url = info["formats"][0]["url"]
if info:
await ctx.reply(embed=embeds.music_playing_embed(info))
source = disnake.FFmpegPCMAudio(url, **FFMPEG_OPTIONS)
vc.play(source)
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
@music.command(description="Pauses the Music")
@commands.has_guild_permissions(connect=True)
async def pause(self, ctx: commands.Context):
vc = ctx.voice_client
if vc:
if ctx.voice_client.is_playing():
await ctx.reply("Song Paused!!")
await ctx.voice_client.pause()
else:
await ctx.reply("No Song is Playing!!")
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
@music.command(description="Resumes the Music")
@commands.has_guild_permissions(connect=True)
async def resume(self, ctx: commands.Context):
vc = ctx.voice_client
if vc:
if ctx.voice_client.is_paused():
await ctx.reply("Song Resumed!!")
await ctx.voice_client.resume()
else:
await ctx.reply("No Song is Paused!!")
else:
await ctx.reply(" I am not Connected to any Voice Channel!!")
@music.command(description="Adjusts the Volume as per given amount")
@commands.has_guild_permissions(connect=True)
async def volume(self, ctx: commands.Context, volume: int):
vc = ctx.voice_client
if vc:
            if 0 <= volume <= 100:
volume = volume / 100
vc.source = disnake.PCMVolumeTransformer(original=vc.source, volume=1.0)
vc.source.volume = volume
await ctx.reply(f"Changed volume to {volume * 100}%")
else:
await ctx.reply("Volume must be between 0 to 100 (Inclusive)")
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
@music.command(description="Stops the Music")
@commands.has_guild_permissions(connect=True)
async def stop(self, ctx: commands.Context):
vc = ctx.voice_client
if vc:
if ctx.voice_client.is_playing() or ctx.voice_client.is_paused():
await ctx.reply("Song Stopped!!")
await ctx.voice_client.stop()
else:
await ctx.reply("No Song is Playing")
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
def setup(bot: commands.Bot):
bot.add_cog(Music(bot))
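# --- Illustrative wiring sketch (not part of the original file) ---
# The extension path and token below are placeholders; they depend on how the
# bot project is laid out and configured.
#   bot = commands.Bot(command_prefix=prefix)
#   bot.load_extension("src.cogs.commands.music")
#   bot.run("YOUR_BOT_TOKEN")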
| 37.258503
| 94
| 0.59266
|
import disnake, youtube_dl
import src.core.embeds as embeds
import src.core.functions as funcs
from disnake.ext import commands
prefix = funcs.get_prefix()
class Music(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.group(invoke_without_command=True, description="Connect/Leave VC")
@commands.has_guild_permissions(connect=True)
async def vc(self, ctx: commands.Context, command: str):
await ctx.reply("Command not Found!!")
@commands.group(
invoke_without_command=True, description="Play, Pause, Resume, Stop Music"
)
@commands.has_guild_permissions(connect=True)
async def music(self, ctx: commands.Context, command: str):
await ctx.reply("Command not Found!!")
@vc.command(
description="Joins the VC you are currently in", aliases=["connect", "c"]
)
@commands.has_guild_permissions(connect=True)
async def join(self, ctx: commands.Context):
if ctx.author.voice is None:
await ctx.reply("You are not Connected to a Voice Channel!!")
return
if ctx.voice_client is None:
voice_channel = ctx.author.voice.channel
try:
await voice_channel.connect()
await ctx.reply("Connected!!")
except disnake.HTTPException:
await ctx.reply("Can't Connect to this Voice Channel!!")
else:
await ctx.reply("I am already in a Voice Channel!!")
@vc.command(description="Leaves VC", aliases=["disconnect", "dc"])
@commands.has_guild_permissions(connect=True)
async def leave(self, ctx: commands.Context):
if ctx.voice_client:
await ctx.reply("Disconnected!!")
await ctx.voice_client.disconnect()
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
@music.command(description="Plays the Music")
@commands.has_guild_permissions(connect=True)
async def play(self, ctx: commands.Context, *, music_name: str):
vc = ctx.voice_client
if vc:
FFMPEG_OPTIONS = {
"before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5",
"options": "-vn",
}
YDL_OPTIONS = {"formats": "bestaudio"}
with youtube_dl.YoutubeDL(YDL_OPTIONS) as ydl:
info = {}
url = ""
if music_name.startswith("https://"):
info = ydl.extract_info(music_name, download=False)
url = info["formats"][0]["url"]
else:
info_ = ydl.extract_info(f"ytsearch:{music_name}", download=False)
url_ = info_["entries"][0]["webpage_url"]
info = ydl.extract_info(url_, download=False)
url = info["formats"][0]["url"]
if info:
await ctx.reply(embed=embeds.music_playing_embed(info))
source = disnake.FFmpegPCMAudio(url, **FFMPEG_OPTIONS)
vc.play(source)
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
@music.command(description="Pauses the Music")
@commands.has_guild_permissions(connect=True)
async def pause(self, ctx: commands.Context):
vc = ctx.voice_client
if vc:
if ctx.voice_client.is_playing():
await ctx.reply("Song Paused!!")
await ctx.voice_client.pause()
else:
await ctx.reply("No Song is Playing!!")
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
@music.command(description="Resumes the Music")
@commands.has_guild_permissions(connect=True)
async def resume(self, ctx: commands.Context):
vc = ctx.voice_client
if vc:
if ctx.voice_client.is_paused():
await ctx.reply("Song Resumed!!")
                ctx.voice_client.resume()
else:
await ctx.reply("No Song is Paused!!")
else:
await ctx.reply(" I am not Connected to any Voice Channel!!")
@music.command(description="Adjusts the Volume as per given amount")
@commands.has_guild_permissions(connect=True)
async def volume(self, ctx: commands.Context, volume: int):
vc = ctx.voice_client
if vc:
            if 0 <= volume <= 100:
volume = volume / 100
vc.source = disnake.PCMVolumeTransformer(original=vc.source, volume=1.0)
vc.source.volume = volume
await ctx.reply(f"Changed volume to {volume * 100}%")
else:
await ctx.reply("Volume must be between 0 to 100 (Inclusive)")
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
@music.command(description="Stops the Music")
@commands.has_guild_permissions(connect=True)
async def stop(self, ctx: commands.Context):
vc = ctx.voice_client
if vc:
if ctx.voice_client.is_playing() or ctx.voice_client.is_paused():
await ctx.reply("Song Stopped!!")
                ctx.voice_client.stop()
else:
await ctx.reply("No Song is Playing")
else:
await ctx.reply("I am not Connected to any Voice Channel!!")
def setup(bot: commands.Bot):
bot.add_cog(Music(bot))
| true
| true
|
79058316b63fb7cef0f0b151c39807830494756e
| 164
|
py
|
Python
|
03 Variable/cal.py
|
codewithsandy/Python-Basic-Exp
|
4c70ada4a042923a94301453c7bd76e704cd2989
|
[
"MIT"
] | 3
|
2021-05-08T13:11:41.000Z
|
2021-05-14T02:43:20.000Z
|
03 Variable/cal.py
|
codewithsandy/Python-Basic-Exp
|
4c70ada4a042923a94301453c7bd76e704cd2989
|
[
"MIT"
] | null | null | null |
03 Variable/cal.py
|
codewithsandy/Python-Basic-Exp
|
4c70ada4a042923a94301453c7bd76e704cd2989
|
[
"MIT"
] | null | null | null |
print("Enter 1st number")
n1 = input()
print("Enter 2nd number")
n2 = input()
print("Sum of Both = ", int(n1) + int(n2))
print("Sum of Both = ", int(n1) + int(n2))
| 23.428571
| 42
| 0.609756
|
print("Enter 1st number")
n1 = input()
print("Enter 2nd number")
n2 = input()
print("Sum of Both = ", int(n1) + int(n2))
print("Sum of Both = ", int(n1) + int(n2))
| true
| true
|
79058382308ced0197edcfd3915af02f502fc1d5
| 1,795
|
py
|
Python
|
Mean_Std_Calculation.py
|
SkyRd1/Statistical_Functions
|
3c7a4bba91e43110567f0d2fd1089699d9038206
|
[
"MIT"
] | null | null | null |
Mean_Std_Calculation.py
|
SkyRd1/Statistical_Functions
|
3c7a4bba91e43110567f0d2fd1089699d9038206
|
[
"MIT"
] | null | null | null |
Mean_Std_Calculation.py
|
SkyRd1/Statistical_Functions
|
3c7a4bba91e43110567f0d2fd1089699d9038206
|
[
"MIT"
] | null | null | null |
#Author: Sepehr Roudini.
#Date: 02/05/2018.
#University of Iowa.
#Department of Chemical Engineering.
#Purpose: Calculating mean and Std
#--------------------------------------------------------------------------------------------#
#Defining function and importing necessary libraries.
#--------------------------------------------------------------------------------------------#
##############################################################################################
#Libraries used in this function are: numpy and math.
##############################################################################################
#Data: A 1d array of data.
##############################################################################################
#This function returns the mean and standard
#deviation of the data.
##############################################################################################
def Calculate_Mean_Std(Data):
    #numpy is for data manipulation
import numpy as np
#--------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------#
    #Mean and standard deviation calculation
#--------------------------------------------------------------------------------------------#
#Calculating mean
mean = np.sum(Data)/len(Data)
#Calculating standard deviation
std = np.sqrt(np.sum(((Data-mean)**2))/(len(Data)-1))
return mean, std
#--------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------#
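#--------------------------------------------------------------------------------------------#
#A minimal self-check sketch (assuming numpy is installed): the function should
#agree with numpy's mean and sample standard deviation (ddof=1).
#--------------------------------------------------------------------------------------------#
if __name__ == "__main__":
    import numpy as np
    data = np.array([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
    mean, std = Calculate_Mean_Std(data)
    assert np.isclose(mean, np.mean(data))
    assert np.isclose(std, np.std(data, ddof=1))
    print("mean =", mean, "std =", std)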
| 48.513514
| 95
| 0.262953
| true
| true
|
|
790583b2c3acc0d327043db1ab6d3e03738f5d8d
| 1,466
|
py
|
Python
|
all_nba_team/api/hardcoded_queries.py
|
Voldy87/all-nba-team
|
d7d8eae20f79acfb2b09b419110a79aca1294784
|
[
"MIT"
] | null | null | null |
all_nba_team/api/hardcoded_queries.py
|
Voldy87/all-nba-team
|
d7d8eae20f79acfb2b09b419110a79aca1294784
|
[
"MIT"
] | 2
|
2020-02-11T22:30:42.000Z
|
2020-06-05T18:12:36.000Z
|
all_nba_team/api/hardcoded_queries.py
|
Voldy87/all-nba-team
|
d7d8eae20f79acfb2b09b419110a79aca1294784
|
[
"MIT"
] | null | null | null |
FRANCHISES = """
select t1.aliases, overall, firsts, seconds, third, y1,y2, unique_a, unique_1, unique_12
from
(select Count(A."PlayerID") as overall,T."Aliases" as aliases, MAX(A."year") as y1, MIN(A."year") as y2, Count (distinct A."PlayerID") as unique_a
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases")
group by T."Aliases"
order by T."Aliases"
) as t1
join
(
select Count(A."PlayerID") as firsts,T."Aliases" as aliases, Count (distinct A."PlayerID") as unique_1
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases") and A."type"=1
group by T."Aliases"
order by T."Aliases"
) as t2 on t1.aliases=t2.aliases
join
(
select Count(A."PlayerID") as seconds,T."Aliases" as aliases
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases") and A."type"=2
group by T."Aliases"
order by T."Aliases"
) as t3 on t1.aliases=t3.aliases
join
(
select Count(A."PlayerID") as third,T."Aliases" as aliases
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases") and A."type"=3
group by T."Aliases"
order by T."Aliases"
) as t4 on t1.aliases=t4.aliases
join
(
select Count (distinct A."PlayerID") as unique_12, T."Aliases" as aliases
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases") and A."type"in(1,2)
group by T."Aliases"
order by T."Aliases"
) as t5 on t1.aliases=t5.aliases
"""
| 34.904762
| 147
| 0.697817
|
FRANCHISES = """
select t1.aliases, overall, firsts, seconds, third, y1,y2, unique_a, unique_1, unique_12
from
(select Count(A."PlayerID") as overall,T."Aliases" as aliases, MAX(A."year") as y1, MIN(A."year") as y2, Count (distinct A."PlayerID") as unique_a
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases")
group by T."Aliases"
order by T."Aliases"
) as t1
join
(
select Count(A."PlayerID") as firsts,T."Aliases" as aliases, Count (distinct A."PlayerID") as unique_1
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases") and A."type"=1
group by T."Aliases"
order by T."Aliases"
) as t2 on t1.aliases=t2.aliases
join
(
select Count(A."PlayerID") as seconds,T."Aliases" as aliases
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases") and A."type"=2
group by T."Aliases"
order by T."Aliases"
) as t3 on t1.aliases=t3.aliases
join
(
select Count(A."PlayerID") as third,T."Aliases" as aliases
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases") and A."type"=3
group by T."Aliases"
order by T."Aliases"
) as t4 on t1.aliases=t4.aliases
join
(
select Count (distinct A."PlayerID") as unique_12, T."Aliases" as aliases
from public."all-nba-teams_list" A, public.teams T
where A."TeamID"=any(T."Aliases") and A."type"in(1,2)
group by T."Aliases"
order by T."Aliases"
) as t5 on t1.aliases=t5.aliases
"""
| true
| true
|
790584b8be63177d5ff89bbdf329e29694e4d791
| 1,105
|
py
|
Python
|
unittests/gccxml10184_tester.py
|
iMichka/pygccxml
|
f872d056f477ed2438cd22b422d60dc924469805
|
[
"BSL-1.0"
] | null | null | null |
unittests/gccxml10184_tester.py
|
iMichka/pygccxml
|
f872d056f477ed2438cd22b422d60dc924469805
|
[
"BSL-1.0"
] | null | null | null |
unittests/gccxml10184_tester.py
|
iMichka/pygccxml
|
f872d056f477ed2438cd22b422d60dc924469805
|
[
"BSL-1.0"
] | 1
|
2016-06-17T03:14:31.000Z
|
2016-06-17T03:14:31.000Z
|
# Copyright 2014-2017 Insight Software Consortium.
# Copyright 2004-2009 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
import unittest
import parser_test_case
from pygccxml import parser
from pygccxml import declarations
code = \
"""
class A {
public:
virtual ~A() = 0;
unsigned int a : 1;
unsigned int unused : 31;
};
"""
class Test(parser_test_case.parser_test_case_t):
def __init__(self, *args):
parser_test_case.parser_test_case_t.__init__(self, *args)
def test(self):
src_reader = parser.source_reader_t(self.config)
global_ns = declarations.get_global_namespace(
src_reader.read_string(code))
self.assertTrue(global_ns.variable('a').bits == 1)
self.assertTrue(global_ns.variable('unused').bits == 31)
def create_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run(create_suite())
if __name__ == "__main__":
run_suite()
| 23.510638
| 65
| 0.703167
|
import unittest
import parser_test_case
from pygccxml import parser
from pygccxml import declarations
code = \
"""
class A {
public:
virtual ~A() = 0;
unsigned int a : 1;
unsigned int unused : 31;
};
"""
class Test(parser_test_case.parser_test_case_t):
def __init__(self, *args):
parser_test_case.parser_test_case_t.__init__(self, *args)
def test(self):
src_reader = parser.source_reader_t(self.config)
global_ns = declarations.get_global_namespace(
src_reader.read_string(code))
self.assertTrue(global_ns.variable('a').bits == 1)
self.assertTrue(global_ns.variable('unused').bits == 31)
def create_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run(create_suite())
if __name__ == "__main__":
run_suite()
| true
| true
|
790586ff84fe73391e4d4066603c78ee07efaaea
| 4,054
|
py
|
Python
|
mldc/data/schema.py
|
qkrguswn2401/dstc8-meta-dialog
|
86a5ecb021719d49fcc5a7cd748984e12eb7e1bf
|
[
"MIT"
] | 76
|
2019-06-18T13:30:11.000Z
|
2021-12-25T06:08:05.000Z
|
mldc/data/schema.py
|
qkrguswn2401/dstc8-meta-dialog
|
86a5ecb021719d49fcc5a7cd748984e12eb7e1bf
|
[
"MIT"
] | 6
|
2019-07-22T22:48:46.000Z
|
2019-10-02T14:05:47.000Z
|
mldc/data/schema.py
|
qkrguswn2401/dstc8-meta-dialog
|
86a5ecb021719d49fcc5a7cd748984e12eb7e1bf
|
[
"MIT"
] | 13
|
2019-06-27T06:47:12.000Z
|
2021-09-13T12:48:37.000Z
|
""""
defines a class that maps to the JSON input format and can be used with pydantic.
"""
import json
import os
import pickle
from hashlib import md5
from typing import List, Optional
from pydantic import BaseModel
from mldc.util import NLGEvalOutput
class MetaDlgDataDialog(BaseModel):
id: Optional[str]
domain: str = ""
task_id: str = ""
user_id: str = ""
bot_id: str = ""
turns: List[str]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class MetaDlgDataDialogList(BaseModel):
dialogs: List[MetaDlgDataDialog]
class PartitionSpec(BaseModel):
domains: List[str] = []
tasks: List[str] = []
paths: List[str] = []
def _asdict(self):
# convert to list for json-serializability
return dict(domains=self.domains, tasks=self.tasks, paths=self.paths)
# the next few fields/functions are here to make PartitionSpec behave like
# a pytext ConfigBase object. This way, we can use it directly in a task
# config. It would be easier if we could just inherit from ConfigBase,
# but alas, ConfigBase's metaclass is not a metaclass of BaseModel.
_field_types = __annotations__ # noqa
@property
def _fields(cls):
return cls.__annotations__.keys()
@property
def _field_defaults(cls):
_, defaults = cls.annotations_and_defaults()
return defaults
def is_ok(self, dlg: MetaDlgDataDialog):
if self.tasks and dlg.task_id not in self.tasks:
return False
if self.domains and dlg.domain not in self.domains:
return False
return True
def __bool__(self):
return True if self.domains or self.tasks or self.paths else False
def add(self, other):
self.domains = list(set(self.domains + other.domains))
self.tasks = list(set(self.tasks + other.tasks))
self.paths = list(set(self.paths + other.paths))
@classmethod
def from_paths(cls, paths):
return cls(domains=[], paths=paths, tasks=[])
def iterate_paths(self):
for path in self.paths:
yield path, PartitionSpec(domains=[NLGEvalOutput._domain_name(path)],
paths=[path],
tasks=self.tasks)
def checksum(self, zipfile, featurizer_config, text_embedder_cfg):
checksum = md5(json.dumps(featurizer_config._asdict(), sort_keys=True).encode('utf-8'))
text_embedder_cfg = text_embedder_cfg._asdict()
del text_embedder_cfg['preproc_dir']
del text_embedder_cfg['use_cuda_if_available']
checksum.update(json.dumps(text_embedder_cfg, sort_keys=True).encode('utf-8'))
md5file = zipfile + ".md5"
# if md5file exists and is newer than zipfile, read md5 sum from it
# else calculate it for the zipfile.
if os.path.exists(md5file) and os.path.getmtime(zipfile) <= os.path.getmtime(md5file):
with open(md5file, 'rt') as f:
checksum.update(f.read().split()[0].strip().encode('utf-8'))
else:
with open(zipfile, 'rb') as f:
checksum.update(md5(f.read()).hexdigest().encode('utf-8'))
checksum.update(pickle.dumps(sorted(self.domains)))
checksum.update(pickle.dumps(sorted(self.paths)))
checksum.update(pickle.dumps(sorted(self.tasks)))
return checksum.hexdigest()
class DataSpec(BaseModel):
train: PartitionSpec = PartitionSpec()
validation: PartitionSpec = PartitionSpec()
test: PartitionSpec = PartitionSpec()
def unpack_domains(self):
return [list(p) for p in (self.train.domains, self.validation.domains, self.test.domains)]
def unpack_tasks(self):
return [list(p) for p in (self.train.tasks, self.validation.tasks, self.test.tasks)]
def unpack_paths(self):
return [list(p) for p in (self.train.paths, self.validation.paths, self.test.paths)]
def unpack(self):
return self.train._asdict(), self.validation._asdict(), self.test._asdict()
@classmethod
def load(cls, f):
kwargs = json.load(f)
# This just works with Pydantic
return cls(**kwargs)
def add(self, other):
self.train.add(other.train)
self.validation.add(other.validation)
self.test.add(other.test)
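# A minimal usage sketch: DataSpec.load() hands the parsed JSON to pydantic, which
# coerces nested dicts into PartitionSpec instances. The domain value below is a
# made-up example.
#
#   import io, json
#   raw = json.dumps({"train": {"domains": ["WEATHER_CHECK"], "tasks": [], "paths": []}})
#   spec = DataSpec.load(io.StringIO(raw))
#   spec.train.domains   # ['WEATHER_CHECK']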
| 31.92126
| 94
| 0.695856
|
import json
import os
import pickle
from hashlib import md5
from typing import List, Optional
from pydantic import BaseModel
from mldc.util import NLGEvalOutput
class MetaDlgDataDialog(BaseModel):
id: Optional[str]
domain: str = ""
task_id: str = ""
user_id: str = ""
bot_id: str = ""
turns: List[str]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class MetaDlgDataDialogList(BaseModel):
dialogs: List[MetaDlgDataDialog]
class PartitionSpec(BaseModel):
domains: List[str] = []
tasks: List[str] = []
paths: List[str] = []
def _asdict(self):
return dict(domains=self.domains, tasks=self.tasks, paths=self.paths)
_field_types = __annotations__ # noqa
@property
def _fields(cls):
return cls.__annotations__.keys()
@property
def _field_defaults(cls):
_, defaults = cls.annotations_and_defaults()
return defaults
def is_ok(self, dlg: MetaDlgDataDialog):
if self.tasks and dlg.task_id not in self.tasks:
return False
if self.domains and dlg.domain not in self.domains:
return False
return True
def __bool__(self):
return True if self.domains or self.tasks or self.paths else False
def add(self, other):
self.domains = list(set(self.domains + other.domains))
self.tasks = list(set(self.tasks + other.tasks))
self.paths = list(set(self.paths + other.paths))
@classmethod
def from_paths(cls, paths):
return cls(domains=[], paths=paths, tasks=[])
def iterate_paths(self):
for path in self.paths:
yield path, PartitionSpec(domains=[NLGEvalOutput._domain_name(path)],
paths=[path],
tasks=self.tasks)
def checksum(self, zipfile, featurizer_config, text_embedder_cfg):
checksum = md5(json.dumps(featurizer_config._asdict(), sort_keys=True).encode('utf-8'))
text_embedder_cfg = text_embedder_cfg._asdict()
del text_embedder_cfg['preproc_dir']
del text_embedder_cfg['use_cuda_if_available']
checksum.update(json.dumps(text_embedder_cfg, sort_keys=True).encode('utf-8'))
md5file = zipfile + ".md5"
# if md5file exists and is newer than zipfile, read md5 sum from it
# else calculate it for the zipfile.
if os.path.exists(md5file) and os.path.getmtime(zipfile) <= os.path.getmtime(md5file):
with open(md5file, 'rt') as f:
checksum.update(f.read().split()[0].strip().encode('utf-8'))
else:
with open(zipfile, 'rb') as f:
checksum.update(md5(f.read()).hexdigest().encode('utf-8'))
checksum.update(pickle.dumps(sorted(self.domains)))
checksum.update(pickle.dumps(sorted(self.paths)))
checksum.update(pickle.dumps(sorted(self.tasks)))
return checksum.hexdigest()
class DataSpec(BaseModel):
train: PartitionSpec = PartitionSpec()
validation: PartitionSpec = PartitionSpec()
test: PartitionSpec = PartitionSpec()
def unpack_domains(self):
return [list(p) for p in (self.train.domains, self.validation.domains, self.test.domains)]
def unpack_tasks(self):
return [list(p) for p in (self.train.tasks, self.validation.tasks, self.test.tasks)]
def unpack_paths(self):
return [list(p) for p in (self.train.paths, self.validation.paths, self.test.paths)]
def unpack(self):
return self.train._asdict(), self.validation._asdict(), self.test._asdict()
@classmethod
def load(cls, f):
kwargs = json.load(f)
# This just works with Pydantic
return cls(**kwargs)
def add(self, other):
self.train.add(other.train)
self.validation.add(other.validation)
self.test.add(other.test)
| true
| true
|
790587c615836f82f6c5850b1fa2b6843584abd0
| 6,548
|
py
|
Python
|
src/trainer.py
|
CvlabAssignment/WRcan
|
e77571472f5a3928b1e9cee5440d52f702e59a41
|
[
"MIT"
] | 10
|
2021-07-27T13:47:10.000Z
|
2022-03-02T16:41:41.000Z
|
src/trainer.py
|
CvlabAssignment/WRcan
|
e77571472f5a3928b1e9cee5440d52f702e59a41
|
[
"MIT"
] | null | null | null |
src/trainer.py
|
CvlabAssignment/WRcan
|
e77571472f5a3928b1e9cee5440d52f702e59a41
|
[
"MIT"
] | 1
|
2021-09-29T09:37:04.000Z
|
2021-09-29T09:37:04.000Z
|
import os
import math
from decimal import Decimal
import utility
import torch
import torch.nn.utils as utils
from tqdm import tqdm
class Trainer():
def __init__(self, args, loader, my_model, my_loss, ckp):
self.args = args
self.scale = args.scale
self.ckp = ckp
self.loader_train = loader.loader_train
self.loader_test = loader.loader_test
self.model = my_model
self.loss = my_loss
self.optimizer = utility.make_optimizer(args, self.model)
self.flag_ae_loss = True if args.loss.lower().find('ae') >= 0 else False
if self.args.precision == 'amp':
self.scaler = torch.cuda.amp.GradScaler()
if self.args.load != '':
# To avoid "UserWarning: Detected call of `lr_scheduler.step()` before `optimizer.step()`."
# The 0 gradient value will not update any parameter of the model to train.
self.optimizer.zero_grad()
self.optimizer.step()
self.optimizer.load(ckp.dir, epoch=len(ckp.log))
self.error_last = 1e8
def train(self):
self.loss.step()
epoch = self.optimizer.get_last_epoch() + 1
lr = self.optimizer.get_lr()
self.ckp.write_log(
'[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
)
self.loss.start_log()
self.model.train()
timer_data, timer_model = utility.timer(), utility.timer()
# TEMP
self.loader_train.dataset.set_scale(0)
for batch, (lr, hr, _,) in enumerate(self.loader_train):
lr, hr = self.prepare(lr, hr)
if self.flag_ae_loss:
hr, hr_ae = hr[:,:self.args.n_colors, ...], hr[:,self.args.n_colors:,...]
else:
hr_ae = None
timer_data.hold()
timer_model.tic()
self.optimizer.zero_grad()
if self.args.precision == 'amp':
with torch.cuda.amp.autocast():
sr = self.model(lr, 0)
if self.flag_ae_loss:
sr_ae = self._forward_auto_encoder(hr_ae, 0)
else:
sr_ae = None
loss = self.loss(sr, hr, sr_ae, hr_ae)
self.scaler.scale(loss).backward()
else:
sr = self.model(lr, 0)
if self.flag_ae_loss:
sr_ae = self._forward_auto_encoder(hr_ae, 0)
else:
sr_ae = None
loss = self.loss(sr, hr, sr_ae, hr_ae)
loss.backward()
if self.args.gclip > 0:
utils.clip_grad_value_(
self.model.parameters(),
self.args.gclip
)
if self.args.precision == 'amp':
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
timer_model.hold()
if (batch + 1) % self.args.print_every == 0:
self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
(batch + 1) * self.args.batch_size,
len(self.loader_train.dataset),
self.loss.display_loss(batch),
timer_model.release(),
timer_data.release()))
timer_data.tic()
self.loss.end_log(len(self.loader_train))
self.error_last = self.loss.log[-1, -1]
self.optimizer.schedule()
def test(self):
torch.set_grad_enabled(False)
epoch = self.optimizer.get_last_epoch()
self.ckp.write_log('\nEvaluation:')
self.ckp.add_log(
torch.zeros(1, len(self.loader_test), len(self.scale))
)
self.model.eval()
timer_test = utility.timer()
if self.args.save_results: self.ckp.begin_background()
for idx_data, d in enumerate(self.loader_test):
for idx_scale, scale in enumerate(self.scale):
d.dataset.set_scale(idx_scale)
for lr, hr, filename in tqdm(d, ncols=80):
lr, hr = self.prepare(lr, hr)
sr = self.model(lr, idx_scale)
sr = utility.quantize(sr, self.args.rgb_range)
save_list = [sr]
self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
sr, hr, scale, self.args.rgb_range, dataset=d
)
if self.args.save_gt:
save_list.extend([lr, hr])
if self.args.save_results:
self.ckp.save_results(d, filename[0], save_list, scale)
self.ckp.log[-1, idx_data, idx_scale] /= len(d)
best = self.ckp.log.max(0)
self.ckp.write_log(
'[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
d.dataset.name,
scale,
self.ckp.log[-1, idx_data, idx_scale],
best[0][idx_data, idx_scale],
best[1][idx_data, idx_scale] + 1
)
)
self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
self.ckp.write_log('Saving...')
if self.args.save_results:
self.ckp.end_background()
if not self.args.test_only:
self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))
self.ckp.write_log(
'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
)
torch.set_grad_enabled(True)
def prepare(self, *args):
device = torch.device('cpu' if self.args.cpu else 'cuda')
def _prepare(tensor):
if self.args.precision == 'half': tensor = tensor.half()
return tensor.to(device)
return [_prepare(a) for a in args]
def terminate(self):
if self.args.test_only:
self.test()
return True
else:
epoch = self.optimizer.get_last_epoch() + 1
return epoch > self.args.epochs
# return epoch >= self.args.epochs
def _forward_auto_encoder(self, x, idx_scale):
self.model.set_forward_ae_loss(True)
x = self.model(x, idx_scale)
self.model.set_forward_ae_loss(False)
return x
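# A minimal sketch of the mixed-precision branch used in Trainer.train() when
# args.precision == 'amp'; model, criterion and loader below are placeholders.
#
#   scaler = torch.cuda.amp.GradScaler()
#   for lr_batch, hr_batch in loader:
#       optimizer.zero_grad()
#       with torch.cuda.amp.autocast():
#           loss = criterion(model(lr_batch), hr_batch)
#       scaler.scale(loss).backward()
#       scaler.step(optimizer)
#       scaler.update()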
| 34.645503
| 105
| 0.51069
|
import os
import math
from decimal import Decimal
import utility
import torch
import torch.nn.utils as utils
from tqdm import tqdm
class Trainer():
def __init__(self, args, loader, my_model, my_loss, ckp):
self.args = args
self.scale = args.scale
self.ckp = ckp
self.loader_train = loader.loader_train
self.loader_test = loader.loader_test
self.model = my_model
self.loss = my_loss
self.optimizer = utility.make_optimizer(args, self.model)
self.flag_ae_loss = True if args.loss.lower().find('ae') >= 0 else False
if self.args.precision == 'amp':
self.scaler = torch.cuda.amp.GradScaler()
if self.args.load != '':
self.optimizer.zero_grad()
self.optimizer.step()
self.optimizer.load(ckp.dir, epoch=len(ckp.log))
self.error_last = 1e8
def train(self):
self.loss.step()
epoch = self.optimizer.get_last_epoch() + 1
lr = self.optimizer.get_lr()
self.ckp.write_log(
'[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
)
self.loss.start_log()
self.model.train()
timer_data, timer_model = utility.timer(), utility.timer()
self.loader_train.dataset.set_scale(0)
for batch, (lr, hr, _,) in enumerate(self.loader_train):
lr, hr = self.prepare(lr, hr)
if self.flag_ae_loss:
hr, hr_ae = hr[:,:self.args.n_colors, ...], hr[:,self.args.n_colors:,...]
else:
hr_ae = None
timer_data.hold()
timer_model.tic()
self.optimizer.zero_grad()
if self.args.precision == 'amp':
with torch.cuda.amp.autocast():
sr = self.model(lr, 0)
if self.flag_ae_loss:
sr_ae = self._forward_auto_encoder(hr_ae, 0)
else:
sr_ae = None
loss = self.loss(sr, hr, sr_ae, hr_ae)
self.scaler.scale(loss).backward()
else:
sr = self.model(lr, 0)
if self.flag_ae_loss:
sr_ae = self._forward_auto_encoder(hr_ae, 0)
else:
sr_ae = None
loss = self.loss(sr, hr, sr_ae, hr_ae)
loss.backward()
if self.args.gclip > 0:
utils.clip_grad_value_(
self.model.parameters(),
self.args.gclip
)
if self.args.precision == 'amp':
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
timer_model.hold()
if (batch + 1) % self.args.print_every == 0:
self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
(batch + 1) * self.args.batch_size,
len(self.loader_train.dataset),
self.loss.display_loss(batch),
timer_model.release(),
timer_data.release()))
timer_data.tic()
self.loss.end_log(len(self.loader_train))
self.error_last = self.loss.log[-1, -1]
self.optimizer.schedule()
def test(self):
torch.set_grad_enabled(False)
epoch = self.optimizer.get_last_epoch()
self.ckp.write_log('\nEvaluation:')
self.ckp.add_log(
torch.zeros(1, len(self.loader_test), len(self.scale))
)
self.model.eval()
timer_test = utility.timer()
if self.args.save_results: self.ckp.begin_background()
for idx_data, d in enumerate(self.loader_test):
for idx_scale, scale in enumerate(self.scale):
d.dataset.set_scale(idx_scale)
for lr, hr, filename in tqdm(d, ncols=80):
lr, hr = self.prepare(lr, hr)
sr = self.model(lr, idx_scale)
sr = utility.quantize(sr, self.args.rgb_range)
save_list = [sr]
self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
sr, hr, scale, self.args.rgb_range, dataset=d
)
if self.args.save_gt:
save_list.extend([lr, hr])
if self.args.save_results:
self.ckp.save_results(d, filename[0], save_list, scale)
self.ckp.log[-1, idx_data, idx_scale] /= len(d)
best = self.ckp.log.max(0)
self.ckp.write_log(
'[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
d.dataset.name,
scale,
self.ckp.log[-1, idx_data, idx_scale],
best[0][idx_data, idx_scale],
best[1][idx_data, idx_scale] + 1
)
)
self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
self.ckp.write_log('Saving...')
if self.args.save_results:
self.ckp.end_background()
if not self.args.test_only:
self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))
self.ckp.write_log(
'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
)
torch.set_grad_enabled(True)
def prepare(self, *args):
device = torch.device('cpu' if self.args.cpu else 'cuda')
def _prepare(tensor):
if self.args.precision == 'half': tensor = tensor.half()
return tensor.to(device)
return [_prepare(a) for a in args]
def terminate(self):
if self.args.test_only:
self.test()
return True
else:
epoch = self.optimizer.get_last_epoch() + 1
return epoch > self.args.epochs
def _forward_auto_encoder(self, x, idx_scale):
self.model.set_forward_ae_loss(True)
x = self.model(x, idx_scale)
self.model.set_forward_ae_loss(False)
return x
| true
| true
|
79058971dea1e3889bf6c81a306c2c5fc18b8adb
| 21,882
|
py
|
Python
|
virtual/lib/python3.8/site-packages/bootstrap4/renderers.py
|
devseme/Community-Watch
|
815c71431db52b85a7b6dc5bb27860c6066a6e4f
|
[
"MIT"
] | 1,060
|
2017-04-26T10:31:24.000Z
|
2022-03-29T03:58:00.000Z
|
virtual/lib/python3.8/site-packages/bootstrap4/renderers.py
|
devseme/Community-Watch
|
815c71431db52b85a7b6dc5bb27860c6066a6e4f
|
[
"MIT"
] | 298
|
2017-05-07T15:20:09.000Z
|
2022-03-28T09:01:42.000Z
|
virtual/lib/python3.8/site-packages/bootstrap4/renderers.py
|
devseme/Community-Watch
|
815c71431db52b85a7b6dc5bb27860c6066a6e4f
|
[
"MIT"
] | 282
|
2017-04-26T12:08:43.000Z
|
2022-02-16T06:06:45.000Z
|
from bs4 import BeautifulSoup
from django.forms import (
BaseForm,
BaseFormSet,
BoundField,
CheckboxInput,
CheckboxSelectMultiple,
DateInput,
EmailInput,
FileInput,
MultiWidget,
NumberInput,
PasswordInput,
RadioSelect,
Select,
SelectDateWidget,
TextInput,
URLInput,
)
from django.utils.html import conditional_escape, escape, strip_tags
from django.utils.safestring import mark_safe
from .bootstrap import get_bootstrap_setting
from .exceptions import BootstrapError
from .forms import (
FORM_GROUP_CLASS,
is_widget_with_placeholder,
render_field,
render_form,
render_form_group,
render_label,
)
from .text import text_value
from .utils import add_css_class, render_template_file
try:
# If Django is set up without a database, importing this widget gives RuntimeError
from django.contrib.auth.forms import ReadOnlyPasswordHashWidget
except RuntimeError:
ReadOnlyPasswordHashWidget = None
class BaseRenderer(object):
"""A content renderer."""
def __init__(self, *args, **kwargs):
self.layout = kwargs.get("layout", "")
self.form_group_class = kwargs.get("form_group_class", FORM_GROUP_CLASS)
self.field_class = kwargs.get("field_class", "")
self.label_class = kwargs.get("label_class", "")
self.show_help = kwargs.get("show_help", True)
self.show_label = kwargs.get("show_label", True)
self.exclude = kwargs.get("exclude", "")
self.set_placeholder = kwargs.get("set_placeholder", True)
self.size = self.parse_size(kwargs.get("size", ""))
self.horizontal_label_class = kwargs.get(
"horizontal_label_class", get_bootstrap_setting("horizontal_label_class")
)
self.horizontal_field_class = kwargs.get(
"horizontal_field_class", get_bootstrap_setting("horizontal_field_class")
)
def parse_size(self, size):
size = text_value(size).lower().strip()
if size in ("sm", "small"):
return "small"
if size in ("lg", "large"):
return "large"
if size in ("md", "medium", ""):
return "medium"
raise BootstrapError('Invalid value "%s" for parameter "size" (expected "sm", "md", "lg" or "").' % size)
def get_size_class(self, prefix="form-control"):
if self.size == "small":
return prefix + "-sm"
if self.size == "large":
return prefix + "-lg"
return ""
def _render(self):
return ""
def render(self):
return mark_safe(self._render())
class FormsetRenderer(BaseRenderer):
"""Default formset renderer."""
def __init__(self, formset, *args, **kwargs):
if not isinstance(formset, BaseFormSet):
raise BootstrapError('Parameter "formset" should contain a valid Django Formset.')
self.formset = formset
super().__init__(*args, **kwargs)
def render_management_form(self):
return text_value(self.formset.management_form)
def render_form(self, form, **kwargs):
return render_form(form, **kwargs)
def render_forms(self):
rendered_forms = []
for form in self.formset.forms:
rendered_forms.append(
self.render_form(
form,
layout=self.layout,
form_group_class=self.form_group_class,
field_class=self.field_class,
label_class=self.label_class,
show_label=self.show_label,
show_help=self.show_help,
exclude=self.exclude,
set_placeholder=self.set_placeholder,
size=self.size,
horizontal_label_class=self.horizontal_label_class,
horizontal_field_class=self.horizontal_field_class,
)
)
return "\n".join(rendered_forms)
def get_formset_errors(self):
return self.formset.non_form_errors()
def render_errors(self):
formset_errors = self.get_formset_errors()
if formset_errors:
return render_template_file(
"bootstrap4/form_errors.html",
context={"errors": formset_errors, "form": self.formset, "layout": self.layout},
)
return ""
def _render(self):
return "".join([self.render_errors(), self.render_management_form(), self.render_forms()])
class FormRenderer(BaseRenderer):
"""Default form renderer."""
def __init__(self, form, *args, **kwargs):
if not isinstance(form, BaseForm):
raise BootstrapError('Parameter "form" should contain a valid Django Form.')
self.form = form
super().__init__(*args, **kwargs)
self.error_css_class = kwargs.get("error_css_class", None)
self.required_css_class = kwargs.get("required_css_class", None)
self.bound_css_class = kwargs.get("bound_css_class", None)
self.alert_error_type = kwargs.get("alert_error_type", "non_fields")
self.form_check_class = kwargs.get("form_check_class", "form-check")
def render_fields(self):
rendered_fields = []
for field in self.form:
rendered_fields.append(
render_field(
field,
layout=self.layout,
form_group_class=self.form_group_class,
field_class=self.field_class,
label_class=self.label_class,
form_check_class=self.form_check_class,
show_label=self.show_label,
show_help=self.show_help,
exclude=self.exclude,
set_placeholder=self.set_placeholder,
size=self.size,
horizontal_label_class=self.horizontal_label_class,
horizontal_field_class=self.horizontal_field_class,
error_css_class=self.error_css_class,
required_css_class=self.required_css_class,
bound_css_class=self.bound_css_class,
)
)
return "\n".join(rendered_fields)
def get_fields_errors(self):
form_errors = []
for field in self.form:
if not field.is_hidden and field.errors:
form_errors += field.errors
return form_errors
def render_errors(self, type="all"):
form_errors = None
if type == "all":
form_errors = self.get_fields_errors() + self.form.non_field_errors()
elif type == "fields":
form_errors = self.get_fields_errors()
elif type == "non_fields":
form_errors = self.form.non_field_errors()
if form_errors:
return render_template_file(
"bootstrap4/form_errors.html",
context={"errors": form_errors, "form": self.form, "layout": self.layout, "type": type},
)
return ""
def _render(self):
return self.render_errors(self.alert_error_type) + self.render_fields()
class FieldRenderer(BaseRenderer):
"""Default field renderer."""
# These widgets will not be wrapped in a form-control class
WIDGETS_NO_FORM_CONTROL = (CheckboxInput, RadioSelect, CheckboxSelectMultiple, FileInput)
def __init__(self, field, *args, **kwargs):
if not isinstance(field, BoundField):
raise BootstrapError('Parameter "field" should contain a valid Django BoundField.')
self.field = field
super().__init__(*args, **kwargs)
self.widget = field.field.widget
self.is_multi_widget = isinstance(field.field.widget, MultiWidget)
self.initial_attrs = self.widget.attrs.copy()
self.field_help = text_value(mark_safe(field.help_text)) if self.show_help and field.help_text else ""
self.field_errors = [conditional_escape(text_value(error)) for error in field.errors]
self.form_check_class = kwargs.get("form_check_class", "form-check")
if "placeholder" in kwargs:
# Find the placeholder in kwargs, even if it's empty
self.placeholder = kwargs["placeholder"]
elif get_bootstrap_setting("set_placeholder"):
# If not found, see if we set the label
self.placeholder = field.label
else:
# Or just set it to empty
self.placeholder = ""
if self.placeholder:
self.placeholder = text_value(self.placeholder)
self.addon_before = kwargs.get("addon_before", self.widget.attrs.pop("addon_before", ""))
self.addon_after = kwargs.get("addon_after", self.widget.attrs.pop("addon_after", ""))
self.addon_before_class = kwargs.get(
"addon_before_class", self.widget.attrs.pop("addon_before_class", "input-group-text")
)
self.addon_after_class = kwargs.get(
"addon_after_class", self.widget.attrs.pop("addon_after_class", "input-group-text")
)
# These are set in Django or in the global BOOTSTRAP4 settings, and
# they can be overwritten in the template
error_css_class = kwargs.get("error_css_class", None)
required_css_class = kwargs.get("required_css_class", None)
bound_css_class = kwargs.get("bound_css_class", None)
if error_css_class is not None:
self.error_css_class = error_css_class
else:
self.error_css_class = getattr(field.form, "error_css_class", get_bootstrap_setting("error_css_class"))
if required_css_class is not None:
self.required_css_class = required_css_class
else:
self.required_css_class = getattr(
field.form, "required_css_class", get_bootstrap_setting("required_css_class")
)
if bound_css_class is not None:
self.success_css_class = bound_css_class
else:
self.success_css_class = getattr(field.form, "bound_css_class", get_bootstrap_setting("success_css_class"))
# If the form is marked as form.empty_permitted, do not set required class
if self.field.form.empty_permitted:
self.required_css_class = ""
def restore_widget_attrs(self):
self.widget.attrs = self.initial_attrs.copy()
def add_class_attrs(self, widget=None):
if widget is None:
widget = self.widget
classes = widget.attrs.get("class", "")
if ReadOnlyPasswordHashWidget is not None and isinstance(widget, ReadOnlyPasswordHashWidget):
            # Render this as a static control
classes = add_css_class(classes, "form-control-static", prepend=True)
elif not isinstance(widget, self.WIDGETS_NO_FORM_CONTROL):
classes = add_css_class(classes, "form-control", prepend=True)
# For these widget types, add the size class here
classes = add_css_class(classes, self.get_size_class())
elif isinstance(widget, CheckboxInput):
classes = add_css_class(classes, "form-check-input", prepend=True)
elif isinstance(widget, FileInput):
classes = add_css_class(classes, "form-control-file", prepend=True)
if self.field.errors:
if self.error_css_class:
classes = add_css_class(classes, self.error_css_class)
else:
if self.field.form.is_bound:
classes = add_css_class(classes, self.success_css_class)
widget.attrs["class"] = classes
def add_placeholder_attrs(self, widget=None):
if widget is None:
widget = self.widget
placeholder = widget.attrs.get("placeholder", self.placeholder)
if placeholder and self.set_placeholder and is_widget_with_placeholder(widget):
# TODO: Should this be stripped and/or escaped?
widget.attrs["placeholder"] = placeholder
def add_help_attrs(self, widget=None):
if widget is None:
widget = self.widget
if not isinstance(widget, CheckboxInput):
widget.attrs["title"] = widget.attrs.get("title", escape(strip_tags(self.field_help)))
def add_widget_attrs(self):
if self.is_multi_widget:
widgets = self.widget.widgets
else:
widgets = [self.widget]
for widget in widgets:
self.add_class_attrs(widget)
self.add_placeholder_attrs(widget)
self.add_help_attrs(widget)
def list_to_class(self, html, klass):
classes = add_css_class(klass, self.get_size_class())
mapping = [
("<ul", '<div class="{classes}"'.format(classes=classes)),
("</ul>", "</div>"),
("<li", '<div class="{form_check_class}"'.format(form_check_class=self.form_check_class)),
("</li>", "</div>"),
]
for k, v in mapping:
html = html.replace(k, v)
# Apply bootstrap4 classes to labels and inputs.
        # A simple 'replace' isn't enough as we don't want several 'class' attribute definitions, which would happen
# if we tried to 'html.replace("input", "input class=...")'
soup = BeautifulSoup(html, features="html.parser")
enclosing_div = soup.find("div", {"class": classes})
if enclosing_div:
for label in enclosing_div.find_all("label"):
label.attrs["class"] = label.attrs.get("class", []) + ["form-check-label"]
try:
label.input.attrs["class"] = label.input.attrs.get("class", []) + ["form-check-input"]
except AttributeError:
pass
return str(soup)
def add_checkbox_label(self, html):
return html + render_label(
content=self.field.label,
label_for=self.field.id_for_label,
label_title=escape(strip_tags(self.field_help)),
label_class="form-check-label",
)
def fix_date_select_input(self, html):
div1 = '<div class="col-4">'
div2 = "</div>"
html = html.replace("<select", div1 + "<select")
html = html.replace("</select>", "</select>" + div2)
return '<div class="row bootstrap4-multi-input">{html}</div>'.format(html=html)
def fix_file_input_label(self, html):
if self.layout != "horizontal":
html = "<br>" + html
return html
def post_widget_render(self, html):
if isinstance(self.widget, RadioSelect):
html = self.list_to_class(html, "radio radio-success")
elif isinstance(self.widget, CheckboxSelectMultiple):
html = self.list_to_class(html, "checkbox")
elif isinstance(self.widget, SelectDateWidget):
html = self.fix_date_select_input(html)
elif isinstance(self.widget, CheckboxInput):
html = self.add_checkbox_label(html)
elif isinstance(self.widget, FileInput):
html = self.fix_file_input_label(html)
return html
def wrap_widget(self, html):
if isinstance(self.widget, CheckboxInput):
# Wrap checkboxes
# Note checkboxes do not get size classes, see #318
html = '<div class="form-check">{html}</div>'.format(html=html)
return html
def make_input_group_addon(self, inner_class, outer_class, content):
if not content:
return ""
if inner_class:
content = '<span class="{inner_class}">{content}</span>'.format(inner_class=inner_class, content=content)
return '<div class="{outer_class}">{content}</div>'.format(outer_class=outer_class, content=content)
@property
def is_input_group(self):
allowed_widget_types = (TextInput, PasswordInput, DateInput, NumberInput, Select, EmailInput, URLInput)
return (self.addon_before or self.addon_after) and isinstance(self.widget, allowed_widget_types)
def make_input_group(self, html):
if self.is_input_group:
before = self.make_input_group_addon(self.addon_before_class, "input-group-prepend", self.addon_before)
after = self.make_input_group_addon(self.addon_after_class, "input-group-append", self.addon_after)
html = self.append_errors("{before}{html}{after}".format(before=before, html=html, after=after))
html = '<div class="input-group">{html}</div>'.format(html=html)
return html
def append_help(self, html):
field_help = self.field_help or None
if field_help:
help_html = render_template_file(
"bootstrap4/field_help_text.html",
context={
"field": self.field,
"field_help": field_help,
"layout": self.layout,
"show_help": self.show_help,
},
)
html += help_html
return html
def append_errors(self, html):
field_errors = self.field_errors
if field_errors:
errors_html = render_template_file(
"bootstrap4/field_errors.html",
context={
"field": self.field,
"field_errors": field_errors,
"layout": self.layout,
"show_help": self.show_help,
},
)
html += errors_html
return html
def append_to_field(self, html):
if isinstance(self.widget, CheckboxInput):
# we have already appended errors and help to checkboxes
# in append_to_checkbox_field
return html
if not self.is_input_group:
# we already appended errors for input groups in make_input_group
html = self.append_errors(html)
return self.append_help(html)
def append_to_checkbox_field(self, html):
if not isinstance(self.widget, CheckboxInput):
# we will append errors and help to normal fields later in append_to_field
return html
html = self.append_errors(html)
return self.append_help(html)
def get_field_class(self):
field_class = self.field_class
if not field_class and self.layout == "horizontal":
field_class = self.horizontal_field_class
return field_class
def wrap_field(self, html):
field_class = self.get_field_class()
if field_class:
html = '<div class="{field_class}">{html}</div>'.format(field_class=field_class, html=html)
return html
def get_label_class(self):
label_class = self.label_class
if not label_class and self.layout == "horizontal":
label_class = self.horizontal_label_class
label_class = add_css_class(label_class, "col-form-label")
label_class = text_value(label_class)
if not self.show_label or self.show_label == "sr-only":
label_class = add_css_class(label_class, "sr-only")
return label_class
def get_label(self):
if self.show_label == "skip":
return None
elif isinstance(self.widget, CheckboxInput):
label = None
else:
label = self.field.label
if self.layout == "horizontal" and not label:
return mark_safe(" ")
return label
def add_label(self, html):
label = self.get_label()
if label:
html = render_label(label, label_for=self.field.id_for_label, label_class=self.get_label_class()) + html
return html
def get_form_group_class(self):
form_group_class = self.form_group_class
if self.field.errors:
if self.error_css_class:
form_group_class = add_css_class(form_group_class, self.error_css_class)
else:
if self.field.form.is_bound:
form_group_class = add_css_class(form_group_class, self.success_css_class)
if self.field.field.required and self.required_css_class:
form_group_class = add_css_class(form_group_class, self.required_css_class)
if self.layout == "horizontal":
form_group_class = add_css_class(form_group_class, "row")
return form_group_class
def wrap_label_and_field(self, html):
return render_form_group(html, self.get_form_group_class())
def _render(self):
# See if we're not excluded
if self.field.name in self.exclude.replace(" ", "").split(","):
return ""
# Hidden input requires no special treatment
if self.field.is_hidden:
return text_value(self.field)
# Render the widget
self.add_widget_attrs()
html = self.field.as_widget(attrs=self.widget.attrs)
self.restore_widget_attrs()
# Start post render
html = self.post_widget_render(html)
html = self.append_to_checkbox_field(html)
html = self.wrap_widget(html)
html = self.make_input_group(html)
html = self.append_to_field(html)
html = self.wrap_field(html)
html = self.add_label(html)
html = self.wrap_label_and_field(html)
return html
class InlineFieldRenderer(FieldRenderer):
"""Inline field renderer."""
def add_error_attrs(self):
field_title = self.widget.attrs.get("title", "")
field_title += " " + " ".join([strip_tags(e) for e in self.field_errors])
self.widget.attrs["title"] = field_title.strip()
def add_widget_attrs(self):
super().add_widget_attrs()
self.add_error_attrs()
def append_to_field(self, html):
return html
def get_field_class(self):
return self.field_class
def get_label_class(self):
return add_css_class(self.label_class, "sr-only")
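# A minimal sketch of the class-append pattern used in list_to_class() above,
# which avoids duplicating the 'class' attribute the way a plain str.replace()
# would; the HTML fragment below is a made-up example.
#
#   from bs4 import BeautifulSoup
#
#   html = '<label class="x"><input type="checkbox"></label>'
#   soup = BeautifulSoup(html, features="html.parser")
#   label = soup.find("label")
#   label.attrs["class"] = label.attrs.get("class", []) + ["form-check-label"]
#   str(soup)  # '<label class="x form-check-label"><input type="checkbox"/></label>'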
| 39.285458
| 119
| 0.621744
|
from bs4 import BeautifulSoup
from django.forms import (
BaseForm,
BaseFormSet,
BoundField,
CheckboxInput,
CheckboxSelectMultiple,
DateInput,
EmailInput,
FileInput,
MultiWidget,
NumberInput,
PasswordInput,
RadioSelect,
Select,
SelectDateWidget,
TextInput,
URLInput,
)
from django.utils.html import conditional_escape, escape, strip_tags
from django.utils.safestring import mark_safe
from .bootstrap import get_bootstrap_setting
from .exceptions import BootstrapError
from .forms import (
FORM_GROUP_CLASS,
is_widget_with_placeholder,
render_field,
render_form,
render_form_group,
render_label,
)
from .text import text_value
from .utils import add_css_class, render_template_file
try:
from django.contrib.auth.forms import ReadOnlyPasswordHashWidget
except RuntimeError:
ReadOnlyPasswordHashWidget = None
class BaseRenderer(object):
def __init__(self, *args, **kwargs):
self.layout = kwargs.get("layout", "")
self.form_group_class = kwargs.get("form_group_class", FORM_GROUP_CLASS)
self.field_class = kwargs.get("field_class", "")
self.label_class = kwargs.get("label_class", "")
self.show_help = kwargs.get("show_help", True)
self.show_label = kwargs.get("show_label", True)
self.exclude = kwargs.get("exclude", "")
self.set_placeholder = kwargs.get("set_placeholder", True)
self.size = self.parse_size(kwargs.get("size", ""))
self.horizontal_label_class = kwargs.get(
"horizontal_label_class", get_bootstrap_setting("horizontal_label_class")
)
self.horizontal_field_class = kwargs.get(
"horizontal_field_class", get_bootstrap_setting("horizontal_field_class")
)
def parse_size(self, size):
size = text_value(size).lower().strip()
if size in ("sm", "small"):
return "small"
if size in ("lg", "large"):
return "large"
if size in ("md", "medium", ""):
return "medium"
raise BootstrapError('Invalid value "%s" for parameter "size" (expected "sm", "md", "lg" or "").' % size)
def get_size_class(self, prefix="form-control"):
if self.size == "small":
return prefix + "-sm"
if self.size == "large":
return prefix + "-lg"
return ""
def _render(self):
return ""
def render(self):
return mark_safe(self._render())
class FormsetRenderer(BaseRenderer):
def __init__(self, formset, *args, **kwargs):
if not isinstance(formset, BaseFormSet):
raise BootstrapError('Parameter "formset" should contain a valid Django Formset.')
self.formset = formset
super().__init__(*args, **kwargs)
def render_management_form(self):
return text_value(self.formset.management_form)
def render_form(self, form, **kwargs):
return render_form(form, **kwargs)
def render_forms(self):
rendered_forms = []
for form in self.formset.forms:
rendered_forms.append(
self.render_form(
form,
layout=self.layout,
form_group_class=self.form_group_class,
field_class=self.field_class,
label_class=self.label_class,
show_label=self.show_label,
show_help=self.show_help,
exclude=self.exclude,
set_placeholder=self.set_placeholder,
size=self.size,
horizontal_label_class=self.horizontal_label_class,
horizontal_field_class=self.horizontal_field_class,
)
)
return "\n".join(rendered_forms)
def get_formset_errors(self):
return self.formset.non_form_errors()
def render_errors(self):
formset_errors = self.get_formset_errors()
if formset_errors:
return render_template_file(
"bootstrap4/form_errors.html",
context={"errors": formset_errors, "form": self.formset, "layout": self.layout},
)
return ""
def _render(self):
return "".join([self.render_errors(), self.render_management_form(), self.render_forms()])
class FormRenderer(BaseRenderer):
def __init__(self, form, *args, **kwargs):
if not isinstance(form, BaseForm):
raise BootstrapError('Parameter "form" should contain a valid Django Form.')
self.form = form
super().__init__(*args, **kwargs)
self.error_css_class = kwargs.get("error_css_class", None)
self.required_css_class = kwargs.get("required_css_class", None)
self.bound_css_class = kwargs.get("bound_css_class", None)
self.alert_error_type = kwargs.get("alert_error_type", "non_fields")
self.form_check_class = kwargs.get("form_check_class", "form-check")
def render_fields(self):
rendered_fields = []
for field in self.form:
rendered_fields.append(
render_field(
field,
layout=self.layout,
form_group_class=self.form_group_class,
field_class=self.field_class,
label_class=self.label_class,
form_check_class=self.form_check_class,
show_label=self.show_label,
show_help=self.show_help,
exclude=self.exclude,
set_placeholder=self.set_placeholder,
size=self.size,
horizontal_label_class=self.horizontal_label_class,
horizontal_field_class=self.horizontal_field_class,
error_css_class=self.error_css_class,
required_css_class=self.required_css_class,
bound_css_class=self.bound_css_class,
)
)
return "\n".join(rendered_fields)
def get_fields_errors(self):
form_errors = []
for field in self.form:
if not field.is_hidden and field.errors:
form_errors += field.errors
return form_errors
def render_errors(self, type="all"):
form_errors = None
if type == "all":
form_errors = self.get_fields_errors() + self.form.non_field_errors()
elif type == "fields":
form_errors = self.get_fields_errors()
elif type == "non_fields":
form_errors = self.form.non_field_errors()
if form_errors:
return render_template_file(
"bootstrap4/form_errors.html",
context={"errors": form_errors, "form": self.form, "layout": self.layout, "type": type},
)
return ""
def _render(self):
return self.render_errors(self.alert_error_type) + self.render_fields()
class FieldRenderer(BaseRenderer):
WIDGETS_NO_FORM_CONTROL = (CheckboxInput, RadioSelect, CheckboxSelectMultiple, FileInput)
def __init__(self, field, *args, **kwargs):
if not isinstance(field, BoundField):
raise BootstrapError('Parameter "field" should contain a valid Django BoundField.')
self.field = field
super().__init__(*args, **kwargs)
self.widget = field.field.widget
self.is_multi_widget = isinstance(field.field.widget, MultiWidget)
self.initial_attrs = self.widget.attrs.copy()
self.field_help = text_value(mark_safe(field.help_text)) if self.show_help and field.help_text else ""
self.field_errors = [conditional_escape(text_value(error)) for error in field.errors]
self.form_check_class = kwargs.get("form_check_class", "form-check")
if "placeholder" in kwargs:
self.placeholder = kwargs["placeholder"]
elif get_bootstrap_setting("set_placeholder"):
# If not found, see if we set the label
self.placeholder = field.label
else:
# Or just set it to empty
self.placeholder = ""
if self.placeholder:
self.placeholder = text_value(self.placeholder)
self.addon_before = kwargs.get("addon_before", self.widget.attrs.pop("addon_before", ""))
self.addon_after = kwargs.get("addon_after", self.widget.attrs.pop("addon_after", ""))
self.addon_before_class = kwargs.get(
"addon_before_class", self.widget.attrs.pop("addon_before_class", "input-group-text")
)
self.addon_after_class = kwargs.get(
"addon_after_class", self.widget.attrs.pop("addon_after_class", "input-group-text")
)
# These are set in Django or in the global BOOTSTRAP4 settings, and
# they can be overwritten in the template
error_css_class = kwargs.get("error_css_class", None)
required_css_class = kwargs.get("required_css_class", None)
bound_css_class = kwargs.get("bound_css_class", None)
if error_css_class is not None:
self.error_css_class = error_css_class
else:
self.error_css_class = getattr(field.form, "error_css_class", get_bootstrap_setting("error_css_class"))
if required_css_class is not None:
self.required_css_class = required_css_class
else:
self.required_css_class = getattr(
field.form, "required_css_class", get_bootstrap_setting("required_css_class")
)
if bound_css_class is not None:
self.success_css_class = bound_css_class
else:
self.success_css_class = getattr(field.form, "bound_css_class", get_bootstrap_setting("success_css_class"))
# If the form is marked as form.empty_permitted, do not set required class
if self.field.form.empty_permitted:
self.required_css_class = ""
def restore_widget_attrs(self):
self.widget.attrs = self.initial_attrs.copy()
def add_class_attrs(self, widget=None):
if widget is None:
widget = self.widget
classes = widget.attrs.get("class", "")
if ReadOnlyPasswordHashWidget is not None and isinstance(widget, ReadOnlyPasswordHashWidget):
            # Render this as a static control
classes = add_css_class(classes, "form-control-static", prepend=True)
elif not isinstance(widget, self.WIDGETS_NO_FORM_CONTROL):
classes = add_css_class(classes, "form-control", prepend=True)
# For these widget types, add the size class here
classes = add_css_class(classes, self.get_size_class())
elif isinstance(widget, CheckboxInput):
classes = add_css_class(classes, "form-check-input", prepend=True)
elif isinstance(widget, FileInput):
classes = add_css_class(classes, "form-control-file", prepend=True)
if self.field.errors:
if self.error_css_class:
classes = add_css_class(classes, self.error_css_class)
else:
if self.field.form.is_bound:
classes = add_css_class(classes, self.success_css_class)
widget.attrs["class"] = classes
def add_placeholder_attrs(self, widget=None):
if widget is None:
widget = self.widget
placeholder = widget.attrs.get("placeholder", self.placeholder)
if placeholder and self.set_placeholder and is_widget_with_placeholder(widget):
# TODO: Should this be stripped and/or escaped?
widget.attrs["placeholder"] = placeholder
def add_help_attrs(self, widget=None):
if widget is None:
widget = self.widget
if not isinstance(widget, CheckboxInput):
widget.attrs["title"] = widget.attrs.get("title", escape(strip_tags(self.field_help)))
def add_widget_attrs(self):
if self.is_multi_widget:
widgets = self.widget.widgets
else:
widgets = [self.widget]
for widget in widgets:
self.add_class_attrs(widget)
self.add_placeholder_attrs(widget)
self.add_help_attrs(widget)
def list_to_class(self, html, klass):
classes = add_css_class(klass, self.get_size_class())
mapping = [
("<ul", '<div class="{classes}"'.format(classes=classes)),
("</ul>", "</div>"),
("<li", '<div class="{form_check_class}"'.format(form_check_class=self.form_check_class)),
("</li>", "</div>"),
]
for k, v in mapping:
html = html.replace(k, v)
# Apply bootstrap4 classes to labels and inputs.
        # A simple 'replace' isn't enough as we don't want several 'class' attribute definitions, which would happen
# if we tried to 'html.replace("input", "input class=...")'
soup = BeautifulSoup(html, features="html.parser")
enclosing_div = soup.find("div", {"class": classes})
if enclosing_div:
for label in enclosing_div.find_all("label"):
label.attrs["class"] = label.attrs.get("class", []) + ["form-check-label"]
try:
label.input.attrs["class"] = label.input.attrs.get("class", []) + ["form-check-input"]
except AttributeError:
pass
return str(soup)
def add_checkbox_label(self, html):
return html + render_label(
content=self.field.label,
label_for=self.field.id_for_label,
label_title=escape(strip_tags(self.field_help)),
label_class="form-check-label",
)
def fix_date_select_input(self, html):
div1 = '<div class="col-4">'
div2 = "</div>"
html = html.replace("<select", div1 + "<select")
html = html.replace("</select>", "</select>" + div2)
return '<div class="row bootstrap4-multi-input">{html}</div>'.format(html=html)
def fix_file_input_label(self, html):
if self.layout != "horizontal":
html = "<br>" + html
return html
def post_widget_render(self, html):
if isinstance(self.widget, RadioSelect):
html = self.list_to_class(html, "radio radio-success")
elif isinstance(self.widget, CheckboxSelectMultiple):
html = self.list_to_class(html, "checkbox")
elif isinstance(self.widget, SelectDateWidget):
html = self.fix_date_select_input(html)
elif isinstance(self.widget, CheckboxInput):
html = self.add_checkbox_label(html)
elif isinstance(self.widget, FileInput):
html = self.fix_file_input_label(html)
return html
def wrap_widget(self, html):
if isinstance(self.widget, CheckboxInput):
# Wrap checkboxes
# Note checkboxes do not get size classes, see #318
html = '<div class="form-check">{html}</div>'.format(html=html)
return html
def make_input_group_addon(self, inner_class, outer_class, content):
if not content:
return ""
if inner_class:
content = '<span class="{inner_class}">{content}</span>'.format(inner_class=inner_class, content=content)
return '<div class="{outer_class}">{content}</div>'.format(outer_class=outer_class, content=content)
@property
def is_input_group(self):
allowed_widget_types = (TextInput, PasswordInput, DateInput, NumberInput, Select, EmailInput, URLInput)
return (self.addon_before or self.addon_after) and isinstance(self.widget, allowed_widget_types)
def make_input_group(self, html):
if self.is_input_group:
before = self.make_input_group_addon(self.addon_before_class, "input-group-prepend", self.addon_before)
after = self.make_input_group_addon(self.addon_after_class, "input-group-append", self.addon_after)
html = self.append_errors("{before}{html}{after}".format(before=before, html=html, after=after))
html = '<div class="input-group">{html}</div>'.format(html=html)
return html
def append_help(self, html):
field_help = self.field_help or None
if field_help:
help_html = render_template_file(
"bootstrap4/field_help_text.html",
context={
"field": self.field,
"field_help": field_help,
"layout": self.layout,
"show_help": self.show_help,
},
)
html += help_html
return html
def append_errors(self, html):
field_errors = self.field_errors
if field_errors:
errors_html = render_template_file(
"bootstrap4/field_errors.html",
context={
"field": self.field,
"field_errors": field_errors,
"layout": self.layout,
"show_help": self.show_help,
},
)
html += errors_html
return html
def append_to_field(self, html):
if isinstance(self.widget, CheckboxInput):
# we have already appended errors and help to checkboxes
# in append_to_checkbox_field
return html
if not self.is_input_group:
# we already appended errors for input groups in make_input_group
html = self.append_errors(html)
return self.append_help(html)
def append_to_checkbox_field(self, html):
if not isinstance(self.widget, CheckboxInput):
# we will append errors and help to normal fields later in append_to_field
return html
html = self.append_errors(html)
return self.append_help(html)
def get_field_class(self):
field_class = self.field_class
if not field_class and self.layout == "horizontal":
field_class = self.horizontal_field_class
return field_class
def wrap_field(self, html):
field_class = self.get_field_class()
if field_class:
html = '<div class="{field_class}">{html}</div>'.format(field_class=field_class, html=html)
return html
def get_label_class(self):
label_class = self.label_class
if not label_class and self.layout == "horizontal":
label_class = self.horizontal_label_class
label_class = add_css_class(label_class, "col-form-label")
label_class = text_value(label_class)
if not self.show_label or self.show_label == "sr-only":
label_class = add_css_class(label_class, "sr-only")
return label_class
def get_label(self):
if self.show_label == "skip":
return None
elif isinstance(self.widget, CheckboxInput):
label = None
else:
label = self.field.label
if self.layout == "horizontal" and not label:
return mark_safe(" ")
return label
def add_label(self, html):
label = self.get_label()
if label:
html = render_label(label, label_for=self.field.id_for_label, label_class=self.get_label_class()) + html
return html
def get_form_group_class(self):
form_group_class = self.form_group_class
if self.field.errors:
if self.error_css_class:
form_group_class = add_css_class(form_group_class, self.error_css_class)
else:
if self.field.form.is_bound:
form_group_class = add_css_class(form_group_class, self.success_css_class)
if self.field.field.required and self.required_css_class:
form_group_class = add_css_class(form_group_class, self.required_css_class)
if self.layout == "horizontal":
form_group_class = add_css_class(form_group_class, "row")
return form_group_class
def wrap_label_and_field(self, html):
return render_form_group(html, self.get_form_group_class())
def _render(self):
# See if we're not excluded
if self.field.name in self.exclude.replace(" ", "").split(","):
return ""
if self.field.is_hidden:
return text_value(self.field)
self.add_widget_attrs()
html = self.field.as_widget(attrs=self.widget.attrs)
self.restore_widget_attrs()
html = self.post_widget_render(html)
html = self.append_to_checkbox_field(html)
html = self.wrap_widget(html)
html = self.make_input_group(html)
html = self.append_to_field(html)
html = self.wrap_field(html)
html = self.add_label(html)
html = self.wrap_label_and_field(html)
return html
class InlineFieldRenderer(FieldRenderer):
def add_error_attrs(self):
field_title = self.widget.attrs.get("title", "")
field_title += " " + " ".join([strip_tags(e) for e in self.field_errors])
self.widget.attrs["title"] = field_title.strip()
def add_widget_attrs(self):
super().add_widget_attrs()
self.add_error_attrs()
def append_to_field(self, html):
return html
def get_field_class(self):
return self.field_class
def get_label_class(self):
return add_css_class(self.label_class, "sr-only")
| true
| true
|
790589baeb8d74e89e928166764ecb0c256021a8
| 565
|
py
|
Python
|
Python OOP/test.py
|
zharmedia386/Data-Science-Stuff
|
40183c329e3b30c582c545c260ca7916f29e2f09
|
[
"MIT"
] | null | null | null |
Python OOP/test.py
|
zharmedia386/Data-Science-Stuff
|
40183c329e3b30c582c545c260ca7916f29e2f09
|
[
"MIT"
] | null | null | null |
Python OOP/test.py
|
zharmedia386/Data-Science-Stuff
|
40183c329e3b30c582c545c260ca7916f29e2f09
|
[
"MIT"
] | null | null | null |
class Hero:
def __init__(self,name,health,attackPower):
self.__name = name
self.__health = health
self.__attPower = attackPower
# getter
def getName(self):
return self.__name
def getHealth(self):
return self.__health
# setter
def diserang(self,serangPower):
self.__health -= serangPower
def setAttPower(self,nilaibaru):
self.__attPower = nilaibaru
# start of the game
earthshaker = Hero("earthshaker",50, 5)
# the game runs
print(earthshaker.getName())
print(earthshaker.getHealth())
earthshaker.diserang(5)
print(earthshaker.getHealth())
| 18.225806
| 44
| 0.748673
|
class Hero:
def __init__(self,name,health,attackPower):
self.__name = name
self.__health = health
self.__attPower = attackPower
def getName(self):
return self.__name
def getHealth(self):
return self.__health
def diserang(self,serangPower):
self.__health -= serangPower
def setAttPower(self,nilaibaru):
self.__attPower = nilaibaru
earthshaker = Hero("earthshaker",50, 5)
print(earthshaker.getName())
print(earthshaker.getHealth())
earthshaker.diserang(5)
print(earthshaker.getHealth())
| true
| true
|
79058bec3f80261eb2e0bae4d5f0d39cd0b75db9
| 5,987
|
py
|
Python
|
tests/image/segmentation/test_model.py
|
sumanmichael/lightning-flash
|
4c69c1bf49fa74d0f2fdb9c4dbdcdfd5942352db
|
[
"Apache-2.0"
] | null | null | null |
tests/image/segmentation/test_model.py
|
sumanmichael/lightning-flash
|
4c69c1bf49fa74d0f2fdb9c4dbdcdfd5942352db
|
[
"Apache-2.0"
] | null | null | null |
tests/image/segmentation/test_model.py
|
sumanmichael/lightning-flash
|
4c69c1bf49fa74d0f2fdb9c4dbdcdfd5942352db
|
[
"Apache-2.0"
] | 1
|
2021-07-14T09:17:46.000Z
|
2021-07-14T09:17:46.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from typing import Tuple
from unittest import mock
import numpy as np
import pytest
import torch
from flash import Trainer
from flash.__main__ import main
from flash.core.data.data_pipeline import DataPipeline
from flash.core.data.data_source import DefaultDataKeys
from flash.core.utilities.imports import _IMAGE_AVAILABLE
from flash.image import SemanticSegmentation
from flash.image.segmentation.data import SemanticSegmentationPreprocess
from tests.helpers.utils import _IMAGE_TESTING, _SERVE_TESTING
# ======== Mock functions ========
class DummyDataset(torch.utils.data.Dataset):
size: Tuple[int, int] = (224, 224)
num_classes: int = 8
def __getitem__(self, index):
return {
DefaultDataKeys.INPUT: torch.rand(3, *self.size),
DefaultDataKeys.TARGET: torch.randint(self.num_classes - 1, self.size),
}
def __len__(self) -> int:
return 10
# ==============================
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_smoke():
model = SemanticSegmentation(num_classes=1)
assert model is not None
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
@pytest.mark.parametrize("num_classes", [8, 256])
@pytest.mark.parametrize("img_shape", [(1, 3, 224, 192), (2, 3, 128, 256)])
def test_forward(num_classes, img_shape):
model = SemanticSegmentation(
num_classes=num_classes,
backbone="resnet50",
head="fpn",
)
B, C, H, W = img_shape
img = torch.rand(B, C, H, W)
out = model(img)
assert out.shape == (B, num_classes, H, W)
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_init_train(tmpdir):
model = SemanticSegmentation(num_classes=10)
train_dl = torch.utils.data.DataLoader(DummyDataset())
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.finetune(model, train_dl, strategy="freeze_unfreeze")
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_non_existent_backbone():
with pytest.raises(KeyError):
SemanticSegmentation(2, "i am never going to implement this lol")
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_freeze():
model = SemanticSegmentation(2)
model.freeze()
for p in model.backbone.parameters():
assert p.requires_grad is False
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_unfreeze():
model = SemanticSegmentation(2)
model.unfreeze()
for p in model.backbone.parameters():
assert p.requires_grad is True
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_predict_tensor():
img = torch.rand(1, 3, 64, 64)
model = SemanticSegmentation(2, backbone="mobilenetv3_large_100")
data_pipe = DataPipeline(preprocess=SemanticSegmentationPreprocess(num_classes=1))
out = model.predict(img, data_source="tensors", data_pipeline=data_pipe)
assert isinstance(out[0], list)
assert len(out[0]) == 64
assert len(out[0][0]) == 64
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_predict_numpy():
img = np.ones((1, 3, 64, 64))
model = SemanticSegmentation(2, backbone="mobilenetv3_large_100")
data_pipe = DataPipeline(preprocess=SemanticSegmentationPreprocess(num_classes=1))
out = model.predict(img, data_source="numpy", data_pipeline=data_pipe)
assert isinstance(out[0], list)
assert len(out[0]) == 64
assert len(out[0][0]) == 64
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
@pytest.mark.parametrize("jitter, args", [(torch.jit.trace, (torch.rand(1, 3, 32, 32),))])
def test_jit(tmpdir, jitter, args):
path = os.path.join(tmpdir, "test.pt")
model = SemanticSegmentation(2)
model.eval()
model = jitter(model, *args)
torch.jit.save(model, path)
model = torch.jit.load(path)
out = model(torch.rand(1, 3, 32, 32))
assert isinstance(out, torch.Tensor)
assert out.shape == torch.Size([1, 2, 32, 32])
@pytest.mark.skipif(not _SERVE_TESTING, reason="serve libraries aren't installed.")
@mock.patch("flash._IS_TESTING", True)
def test_serve():
model = SemanticSegmentation(2)
# TODO: Currently only servable once a preprocess has been attached
model._preprocess = SemanticSegmentationPreprocess()
model.eval()
model.serve()
@pytest.mark.skipif(_IMAGE_AVAILABLE, reason="image libraries are installed.")
def test_load_from_checkpoint_dependency_error():
with pytest.raises(ModuleNotFoundError, match=re.escape("'lightning-flash[image]'")):
SemanticSegmentation.load_from_checkpoint("not_a_real_checkpoint.pt")
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_available_pretrained_weights():
assert SemanticSegmentation.available_pretrained_weights("resnet18") == ["imagenet", "ssl", "swsl"]
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_cli():
cli_args = ["flash", "semantic-segmentation", "--trainer.fast_dev_run", "True"]
with mock.patch("sys.argv", cli_args):
try:
main()
except SystemExit:
pass
| 34.408046
| 103
| 0.719559
|
import os
import re
from typing import Tuple
from unittest import mock
import numpy as np
import pytest
import torch
from flash import Trainer
from flash.__main__ import main
from flash.core.data.data_pipeline import DataPipeline
from flash.core.data.data_source import DefaultDataKeys
from flash.core.utilities.imports import _IMAGE_AVAILABLE
from flash.image import SemanticSegmentation
from flash.image.segmentation.data import SemanticSegmentationPreprocess
from tests.helpers.utils import _IMAGE_TESTING, _SERVE_TESTING
class DummyDataset(torch.utils.data.Dataset):
size: Tuple[int, int] = (224, 224)
num_classes: int = 8
def __getitem__(self, index):
return {
DefaultDataKeys.INPUT: torch.rand(3, *self.size),
DefaultDataKeys.TARGET: torch.randint(self.num_classes - 1, self.size),
}
def __len__(self) -> int:
return 10
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_smoke():
model = SemanticSegmentation(num_classes=1)
assert model is not None
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
@pytest.mark.parametrize("num_classes", [8, 256])
@pytest.mark.parametrize("img_shape", [(1, 3, 224, 192), (2, 3, 128, 256)])
def test_forward(num_classes, img_shape):
model = SemanticSegmentation(
num_classes=num_classes,
backbone="resnet50",
head="fpn",
)
B, C, H, W = img_shape
img = torch.rand(B, C, H, W)
out = model(img)
assert out.shape == (B, num_classes, H, W)
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_init_train(tmpdir):
model = SemanticSegmentation(num_classes=10)
train_dl = torch.utils.data.DataLoader(DummyDataset())
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.finetune(model, train_dl, strategy="freeze_unfreeze")
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_non_existent_backbone():
with pytest.raises(KeyError):
SemanticSegmentation(2, "i am never going to implement this lol")
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_freeze():
model = SemanticSegmentation(2)
model.freeze()
for p in model.backbone.parameters():
assert p.requires_grad is False
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_unfreeze():
model = SemanticSegmentation(2)
model.unfreeze()
for p in model.backbone.parameters():
assert p.requires_grad is True
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_predict_tensor():
img = torch.rand(1, 3, 64, 64)
model = SemanticSegmentation(2, backbone="mobilenetv3_large_100")
data_pipe = DataPipeline(preprocess=SemanticSegmentationPreprocess(num_classes=1))
out = model.predict(img, data_source="tensors", data_pipeline=data_pipe)
assert isinstance(out[0], list)
assert len(out[0]) == 64
assert len(out[0][0]) == 64
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_predict_numpy():
img = np.ones((1, 3, 64, 64))
model = SemanticSegmentation(2, backbone="mobilenetv3_large_100")
data_pipe = DataPipeline(preprocess=SemanticSegmentationPreprocess(num_classes=1))
out = model.predict(img, data_source="numpy", data_pipeline=data_pipe)
assert isinstance(out[0], list)
assert len(out[0]) == 64
assert len(out[0][0]) == 64
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
@pytest.mark.parametrize("jitter, args", [(torch.jit.trace, (torch.rand(1, 3, 32, 32),))])
def test_jit(tmpdir, jitter, args):
path = os.path.join(tmpdir, "test.pt")
model = SemanticSegmentation(2)
model.eval()
model = jitter(model, *args)
torch.jit.save(model, path)
model = torch.jit.load(path)
out = model(torch.rand(1, 3, 32, 32))
assert isinstance(out, torch.Tensor)
assert out.shape == torch.Size([1, 2, 32, 32])
@pytest.mark.skipif(not _SERVE_TESTING, reason="serve libraries aren't installed.")
@mock.patch("flash._IS_TESTING", True)
def test_serve():
model = SemanticSegmentation(2)
model._preprocess = SemanticSegmentationPreprocess()
model.eval()
model.serve()
@pytest.mark.skipif(_IMAGE_AVAILABLE, reason="image libraries are installed.")
def test_load_from_checkpoint_dependency_error():
with pytest.raises(ModuleNotFoundError, match=re.escape("'lightning-flash[image]'")):
SemanticSegmentation.load_from_checkpoint("not_a_real_checkpoint.pt")
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_available_pretrained_weights():
assert SemanticSegmentation.available_pretrained_weights("resnet18") == ["imagenet", "ssl", "swsl"]
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_cli():
cli_args = ["flash", "semantic-segmentation", "--trainer.fast_dev_run", "True"]
with mock.patch("sys.argv", cli_args):
try:
main()
except SystemExit:
pass
| true
| true
|
79058bef7a99d4d4210fb03d8456134c3422b2ee
| 29,968
|
py
|
Python
|
mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
|
marcovalenti/mmdetection
|
215ea4174c1234ac4c3e23bf29020fc1cefc36ad
|
[
"Apache-2.0"
] | 1
|
2021-09-30T11:30:40.000Z
|
2021-09-30T11:30:40.000Z
|
mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
|
marcovalenti/mmdetection
|
215ea4174c1234ac4c3e23bf29020fc1cefc36ad
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
|
marcovalenti/mmdetection
|
215ea4174c1234ac4c3e23bf29020fc1cefc36ad
|
[
"Apache-2.0"
] | null | null | null |
import torch.nn as nn
import torch.nn.functional as F
import torch
from mmcv.cnn import ConvModule
from mmcv.runner import force_fp32
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.losses import accuracy
from .bbox_head import BBoxHead
from mmdet.core import multi_apply, multiclass_nms
from mmdet.core.bbox.iou_calculators.builder import build_iou_calculator
@HEADS.register_module()
class ConvFCBBoxHead(BBoxHead):
r"""More general bbox head, with shared conv and fc layers and two optional
separated branches.
.. code-block:: none
/-> cls convs -> cls fcs -> cls
shared convs -> shared fcs
\-> reg convs -> reg fcs -> reg
(\-> dis convs -> dis fcs -> dis)
""" # noqa: W605
def __init__(self,
num_shared_convs=0,
num_shared_fcs=0,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
conv_out_channels=256,
fc_out_channels=1024,
conv_cfg=None,
norm_cfg=None,
with_dis=False, #for leaves
num_dis_convs=0,
num_dis_fcs=0,
*args,
**kwargs):
super(ConvFCBBoxHead, self).__init__(*args, **kwargs)
#only for leaves
self.with_dis = with_dis
self.num_dis_convs = num_dis_convs
self.num_dis_fcs = num_dis_fcs
assert (num_shared_convs + num_shared_fcs + num_cls_convs +
num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
if num_cls_convs > 0 or num_reg_convs > 0:
assert num_shared_fcs == 0
if not self.with_cls:
assert num_cls_convs == 0 and num_cls_fcs == 0
if not self.with_reg:
assert num_reg_convs == 0 and num_reg_fcs == 0
if not self.with_dis:
assert num_dis_convs == 0 and num_dis_fcs == 0
self.num_shared_convs = num_shared_convs
self.num_shared_fcs = num_shared_fcs
self.num_cls_convs = num_cls_convs
self.num_cls_fcs = num_cls_fcs
self.num_reg_convs = num_reg_convs
self.num_reg_fcs = num_reg_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
# add shared convs and fcs
self.shared_convs, self.shared_fcs, last_layer_dim = \
self._add_conv_fc_branch(
self.num_shared_convs, self.num_shared_fcs, self.in_channels,
True)
self.shared_out_channels = last_layer_dim
# add cls specific branch
self.cls_convs, self.cls_fcs, self.cls_last_dim = \
self._add_conv_fc_branch(
self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
# add reg specific branch
self.reg_convs, self.reg_fcs, self.reg_last_dim = \
self._add_conv_fc_branch(
self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
#add dis branch(only for leaves)
if self.with_dis:
self.dis_convs, self.dis_fcs, self.dis_last_dim = \
self._add_conv_fc_branch(
self.num_dis_convs, self.num_dis_fcs, self.shared_out_channels)
if self.num_shared_fcs == 0 and not self.with_avg_pool:
if self.num_cls_fcs == 0:
self.cls_last_dim *= self.roi_feat_area
if self.num_reg_fcs == 0:
self.reg_last_dim *= self.roi_feat_area
self.relu = nn.ReLU(inplace=True)
# reconstruct fc_cls and fc_reg since input channels are changed
if self.with_cls:
self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes + 1)
if self.with_reg:
out_dim_reg = (4 if self.reg_class_agnostic else 4 *
self.num_classes)
self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)
if self.with_dis:
if self.dis_selector == 0 or self.dis_selector == 1:
self.fc_dis = nn.Linear(self.cls_last_dim, 1)
elif self.dis_selector == 2:
self.fc_dis = nn.Linear(self.cls_last_dim, 4)
def _add_conv_fc_branch(self,
num_branch_convs,
num_branch_fcs,
in_channels,
is_shared=False):
"""Add shared or separable branch.
convs -> avg pool (optional) -> fcs
"""
last_layer_dim = in_channels
# add branch specific conv layers
branch_convs = nn.ModuleList()
if num_branch_convs > 0:
for i in range(num_branch_convs):
conv_in_channels = (
last_layer_dim if i == 0 else self.conv_out_channels)
branch_convs.append(
ConvModule(
conv_in_channels,
self.conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
last_layer_dim = self.conv_out_channels
# add branch specific fc layers
branch_fcs = nn.ModuleList()
if num_branch_fcs > 0:
# for shared branch, only consider self.with_avg_pool
# for separated branches, also consider self.num_shared_fcs
if (is_shared
or self.num_shared_fcs == 0) and not self.with_avg_pool:
last_layer_dim *= self.roi_feat_area
for i in range(num_branch_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
branch_fcs.append(
nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return branch_convs, branch_fcs, last_layer_dim
def init_weights(self):
super(ConvFCBBoxHead, self).init_weights()
# conv layers are already initialized by ConvModule
if self.with_dis:
for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs, self.dis_fcs]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
else:
for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
def forward(self, x):
# shared part
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
# separate branches
x_cls = x
x_reg = x
if self.with_dis:
x_dis = x
for conv in self.dis_convs:
x_dis = conv(x_dis)
if x_dis.dim() > 2:
if self.with_avg_pool:
x_dis = self.avg_pool(x_dis)
x_dis = x_dis.flatten(1)
for fc in self.dis_fcs:
x_dis = self.relu(fc(x_dis))
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
dis_pred = self.fc_dis(x_dis) if self.with_dis else None
return cls_score, bbox_pred, dis_pred
@HEADS.register_module()
class Shared2FCBBoxHead(ConvFCBBoxHead):
def __init__(self, fc_out_channels=1024, *args, **kwargs):
super(Shared2FCBBoxHead, self).__init__(
num_shared_convs=0,
num_shared_fcs=2,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
*args,
**kwargs)
@HEADS.register_module()
class Shared2FCBBoxHeadLeaves(ConvFCBBoxHead):
def __init__(self, fc_out_channels=1024, *args, **kwargs):
loss_dis = kwargs['loss_dis']
self.reference_labels = kwargs['reference_labels']
self.classes = kwargs['classes']
self.dis_selector = kwargs['dis_selector']
assert self.dis_selector in (0, 1, 2)
kwargs.pop('loss_dis')
kwargs.pop('reference_labels')
kwargs.pop('classes')
kwargs.pop('dis_selector')
super(Shared2FCBBoxHeadLeaves, self).__init__(
num_shared_convs=0,
num_shared_fcs=2,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
with_dis=True, #only for leaves
num_dis_convs=0,
num_dis_fcs=0,
*args,
**kwargs)
if self.dis_selector == 0 or self.dis_selector == 1:
assert loss_dis['use_sigmoid'], "used invalid loss_dis"
elif self.dis_selector == 2:
assert not loss_dis['use_sigmoid'], "used invalid loss_dis"
self.loss_dis = build_loss(loss_dis)
#DEBUG
#loss_dis_py =dict(type='py_FocalLoss',
# alpha=torch.tensor(self.dis_weights, device=torch.device('cpu')),
# gamma = 2.0,
# reduction = 'mean')
#self.loss_dis_py = build_loss(loss_dis_py)
#Override
def get_targets(self,
sampling_results,
gt_bboxes,
gt_labels,
rcnn_train_cfg,
reference_labels,
classes,
concat=True):
"""Calculate the ground truth for all samples in a batch according to
the sampling_results.
Almost the same as the implementation in bbox_head; in addition, this
override computes disease targets (``dis_targets``) for the leaves task.
Args:
sampling_results (List[obj:SamplingResults]): Assign results of
all images in a batch after sampling.
gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
each tensor has shape (num_gt, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
gt_labels (list[Tensor]): Gt_labels of all images in a batch,
each tensor has shape (num_gt,).
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
concat (bool): Whether to concatenate the results of all
the images in a single batch.
Returns:
Tuple[Tensor]: Ground truth for proposals in a single image.
Containing the following list of Tensors:
- labels (list[Tensor],Tensor): Gt_labels for all
proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
- label_weights (list[Tensor]): Labels_weights for
all proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
- bbox_targets (list[Tensor],Tensor): Regression target
for all proposals in a batch, each tensor in list
has shape (num_proposals, 4) when `concat=False`,
otherwise just a single tensor has shape
(num_all_proposals, 4), the last dimension 4 represents
[tl_x, tl_y, br_x, br_y].
- bbox_weights (list[tensor],Tensor): Regression weights for
all proposals in a batch, each tensor in list has shape
(num_proposals, 4) when `concat=False`, otherwise just a
single tensor has shape (num_all_proposals, 4).
- dis_targets (list[tensor], Tensor): Gt_dis for all
proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
"""
pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
self._get_target_single,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=rcnn_train_cfg)
#processing for dis_target
iou_calculator=dict(type='BboxOverlaps2D')
iou_calculator = build_iou_calculator(iou_calculator)
isolation_thr = 0.45  # TODO: pass this as an argument
# retrieve the gt superclass bboxes
dis_targets = []
for i, res in enumerate(sampling_results):
ref_grap_list =[]
ref_leav_list =[]
ref_grap_dis_list =[]
ref_leav_dis_list =[]
for j, bbox in enumerate(gt_bboxes[i]):
if self.dis_selector == 0:
if 'grappolo' in classes[gt_labels[i][j]] and gt_labels[i][j] != reference_labels['grappolo_vite']:
ref_grap_dis_list.append(bbox)
elif (('foglia' in classes[gt_labels[i][j]] or classes[gt_labels[i][j]] == 'malattia_esca'\
or classes[gt_labels[i][j]] == 'virosi_pinot_grigio')
and gt_labels[i][j] != reference_labels['foglia_vite']):
ref_leav_dis_list.append(bbox)
elif self.dis_selector == 1:
if gt_labels[i][j] == reference_labels['grappolo_vite']:
ref_grap_list.append(bbox)
elif gt_labels[i][j] == reference_labels['foglia_vite']:
ref_leav_list.append(bbox)
elif self.dis_selector == 2:
if gt_labels[i][j] == reference_labels['grappolo_vite']:
ref_grap_list.append(bbox)
elif gt_labels[i][j] == reference_labels['foglia_vite']:
ref_leav_list.append(bbox)
elif 'grappolo' in classes[gt_labels[i][j]]:
ref_grap_dis_list.append(bbox)
elif 'foglia' in classes[gt_labels[i][j]] or classes[gt_labels[i][j]] == 'malattia_esca'\
or classes[gt_labels[i][j]] == 'virosi_pinot_grigio':
ref_leav_dis_list.append(bbox)
'''
if 'grappolo' in classes[gt_labels[i][j]] and gt_labels[i][j] != reference_labels['grappolo_vite']:
ref_grap_dis_list.append(bbox)
elif (('foglia' in classes[gt_labels[i][j]] or classes[gt_labels[i][j]] == 'malattia_esca'\
or classes[gt_labels[i][j]] == 'virosi_pinot_grigio')
and gt_labels[i][j] != reference_labels['foglia_vite']):
ref_leav_dis_list.append(bbox)
'''
if len(ref_grap_list) > 0:
ref_grap_tensor = torch.cat(ref_grap_list)
ref_grap_tensor = torch.reshape(ref_grap_tensor, (len(ref_grap_list), 4))
if len(ref_leav_list) > 0:
ref_leav_tensor = torch.cat(ref_leav_list)
ref_leav_tensor = torch.reshape(ref_leav_tensor, (len(ref_leav_list), 4))
if len(ref_grap_dis_list) > 0:
ref_grap_dis_tensor = torch.cat(ref_grap_dis_list)
ref_grap_dis_tensor = torch.reshape(ref_grap_dis_tensor, (len(ref_grap_dis_list), 4))
if len(ref_leav_dis_list) > 0:
ref_leav_dis_tensor = torch.cat(ref_leav_dis_list)
ref_leav_dis_tensor = torch.reshape(ref_leav_dis_tensor, (len(ref_leav_dis_list), 4))
num_pos = res.pos_bboxes.size(0)
num_neg = res.neg_bboxes.size(0)
num_samples = num_pos + num_neg
dis_tensor= res.pos_bboxes.new_full((num_samples, ), -1, dtype=torch.long)
dis_list = []
for j, bbox in enumerate(res.pos_bboxes):
#trick for using the iof calculator
bbox = bbox.unsqueeze(0)
if res.pos_gt_labels[j] == reference_labels['grappolo_vite']:
if self.dis_selector == 0:
dis_list.append(-1) #the grape is not considered
elif self.dis_selector == 1 or self.dis_selector == 2:
if len(ref_grap_dis_list) > 0:
overlaps = iou_calculator(ref_grap_dis_tensor, bbox, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(0) #the grape is healthy
else:
dis_list.append(1) #the grape is affected by a disease
else:
dis_list.append(0) #the grape is healthy
elif res.pos_gt_labels[j] == reference_labels['foglia_vite']:
if self.dis_selector == 0:
dis_list.append(-1) #the leaf is not considered
elif self.dis_selector == 1 or self.dis_selector == 2:
if len(ref_leav_dis_list) > 0:
overlaps = iou_calculator(ref_leav_dis_tensor, bbox, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(0) #the leaf is healthy
else:
dis_list.append(1) #the leaf is affected by a disease
else:
dis_list.append(0) #the leaf is healthy
elif 'grappolo' in classes[res.pos_gt_labels[j]] and res.pos_gt_labels[j] != reference_labels['grappolo_vite']:
if self.dis_selector == 1:
dis_list.append(-1) #the disease is not considered
elif self.dis_selector == 0:
if len(ref_grap_list) > 0:
overlaps = iou_calculator(bbox, ref_grap_tensor, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(0) #the disease is isolated
else:
dis_list.append(1) #the disease is inside a leaf or grape
else:
dis_list.append(0) #the disease is isolated
elif self.dis_selector == 2:
if len(ref_grap_list) > 0:
overlaps = iou_calculator(bbox, ref_grap_tensor, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(2) #the disease is isolated
else:
dis_list.append(3) #the disease is inside a leaf or grape
else:
dis_list.append(2) #the disease is isolated
elif (('foglia' in classes[res.pos_gt_labels[j]] or classes[res.pos_gt_labels[j]] == 'malattia_esca'
or classes[res.pos_gt_labels[j]] == 'virosi_pinot_grigio')
and res.pos_gt_labels[j] != reference_labels['foglia_vite']):
if self.dis_selector == 1:
dis_list.append(-1) #the disease is not considered
elif self.dis_selector == 0:
if len(ref_leav_list) > 0:
overlaps = iou_calculator(bbox, ref_leav_tensor, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(0) #the disease is isolated
else:
dis_list.append(1) #the disease is inside a leaf or grape
else:
dis_list.append(0) #the disease is isolated
elif self.dis_selector == 2:
if len(ref_leav_list) > 0:
overlaps = iou_calculator(bbox, ref_leav_tensor, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(2) #the disease is isolated
else:
dis_list.append(3) #the disease is inside a leaf or grape
else:
dis_list.append(2) #the disease is isolated
#elif res.pos_gt_labels[j] == reference_labels['oidio_tralci']:
# dis_list.append(-1) #the disease is not considered
dis_tensor[:num_pos] = torch.tensor(dis_list)
dis_targets.append(dis_tensor)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
dis_targets = torch.cat(dis_targets, 0)
#del dis_tensor
#torch.cuda.empty_cache()
return labels, label_weights, bbox_targets, bbox_weights, dis_targets
#Override
@force_fp32(apply_to=('cls_score', 'bbox_pred', 'dis_pred'))
def loss(self,
cls_score,
bbox_pred,
dis_pred,
rois,
labels,
label_weights,
bbox_targets,
bbox_weights,
dis_targets,
reduction_override=None):
losses = dict()
if cls_score is not None:
avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
if cls_score.numel() > 0:
losses['loss_cls'] = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=avg_factor,
reduction_override=reduction_override)
losses['acc'] = accuracy(cls_score, labels)
if bbox_pred is not None:
bg_class_ind = self.num_classes
# 0~self.num_classes-1 are FG, self.num_classes is BG
pos_inds = (labels >= 0) & (labels < bg_class_ind)
# do not perform bounding box regression for BG anymore.
if pos_inds.any():
if self.reg_decoded_bbox:
# When the regression loss (e.g. `IouLoss`,
# `GIouLoss`, `DIouLoss`) is applied directly on
# the decoded bounding boxes, it decodes the
# already encoded coordinates to absolute format.
bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)
if self.reg_class_agnostic:
pos_bbox_pred = bbox_pred.view(
bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]
else:
pos_bbox_pred = bbox_pred.view(
bbox_pred.size(0), -1,
4)[pos_inds.type(torch.bool),
labels[pos_inds.type(torch.bool)]]
losses['loss_bbox'] = self.loss_bbox(
pos_bbox_pred,
bbox_targets[pos_inds.type(torch.bool)],
bbox_weights[pos_inds.type(torch.bool)],
avg_factor=bbox_targets.size(0),
reduction_override=reduction_override)
else:
losses['loss_bbox'] = bbox_pred[pos_inds].sum()
if dis_pred is not None:
pos_inds = dis_targets != -1
if pos_inds.any():
pos_dis_pred = dis_pred[pos_inds.type(torch.bool)]
pos_dis_targets = dis_targets[pos_inds.type(torch.bool)]
avg_factor = dis_pred.size(0)
losses['loss_dis'] = self.loss_dis(
pos_dis_pred,
pos_dis_targets,
avg_factor=avg_factor,
reduction_override=reduction_override)
#DEBUG
#loss_py = self.loss_dis_py(pos_dis_pred,
# pos_dis_targets)
#from mmcv.utils import print_log
#import logging
#logger = logging.getLogger(__name__)
#print_log("loss_dis:{:0.4f}, loss_dis_py:{:0.4f}".format(losses['loss_dis'], loss_py), logger = logger)
return losses
#Override
@force_fp32(apply_to=('cls_score', 'bbox_pred', 'dis_pred'))
def get_bboxes(self,
rois,
cls_score,
bbox_pred,
dis_pred,
img_shape,
scale_factor,
rescale=False,
cfg=None):
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
if bbox_pred is not None:
bboxes = self.bbox_coder.decode(
rois[:, 1:], bbox_pred, max_shape=img_shape)
else:
bboxes = rois[:, 1:].clone()
if img_shape is not None:
bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])
bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])
if rescale and bboxes.size(0) > 0:
if isinstance(scale_factor, float):
bboxes /= scale_factor
else:
scale_factor = bboxes.new_tensor(scale_factor)
bboxes = (bboxes.view(bboxes.size(0), -1, 4) /
scale_factor).view(bboxes.size()[0], -1)
if dis_pred is not None:
if self.dis_selector == 0 or self.dis_selector == 1:
diseases = F.sigmoid(dis_pred)
elif self.dis_selector == 2:
diseases = F.softmax(dis_pred, dim=1)
if cfg is None:
return bboxes, scores, diseases
else:
det_bboxes, det_labels, inds = multiclass_nms(bboxes, scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img,
return_inds=True)
if self.dis_selector == 0 or self.dis_selector == 1:
diseases = diseases.expand(bboxes.size(0), scores.size(1) - 1)
diseases = diseases.reshape(-1)
elif self.dis_selector == 2:
diseases = diseases[:, None].expand(bboxes.size(0), scores.size(1) - 1, 4)
diseases = diseases.reshape(-1, 4)
det_dis = diseases[inds]
return det_bboxes, det_labels, det_dis
@HEADS.register_module()
class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
def __init__(self, fc_out_channels=1024, *args, **kwargs):
super(Shared4Conv1FCBBoxHead, self).__init__(
num_shared_convs=4,
num_shared_fcs=1,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
*args,
**kwargs)
| 45.200603
| 154
| 0.521523
|
import torch.nn as nn
import torch.nn.functional as F
import torch
from mmcv.cnn import ConvModule
from mmcv.runner import force_fp32
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.losses import accuracy
from .bbox_head import BBoxHead
from mmdet.core import multi_apply, multiclass_nms
from mmdet.core.bbox.iou_calculators.builder import build_iou_calculator
@HEADS.register_module()
class ConvFCBBoxHead(BBoxHead):
def __init__(self,
num_shared_convs=0,
num_shared_fcs=0,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
conv_out_channels=256,
fc_out_channels=1024,
conv_cfg=None,
norm_cfg=None,
with_dis=False,
num_dis_convs=0,
num_dis_fcs=0,
*args,
**kwargs):
super(ConvFCBBoxHead, self).__init__(*args, **kwargs)
self.with_dis = with_dis
self.num_dis_convs = num_dis_convs
self.num_dis_fcs = num_dis_fcs
assert (num_shared_convs + num_shared_fcs + num_cls_convs +
num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
if num_cls_convs > 0 or num_reg_convs > 0:
assert num_shared_fcs == 0
if not self.with_cls:
assert num_cls_convs == 0 and num_cls_fcs == 0
if not self.with_reg:
assert num_reg_convs == 0 and num_reg_fcs == 0
if not self.with_dis:
assert num_dis_convs == 0 and num_dis_fcs == 0
self.num_shared_convs = num_shared_convs
self.num_shared_fcs = num_shared_fcs
self.num_cls_convs = num_cls_convs
self.num_cls_fcs = num_cls_fcs
self.num_reg_convs = num_reg_convs
self.num_reg_fcs = num_reg_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.shared_convs, self.shared_fcs, last_layer_dim = \
self._add_conv_fc_branch(
self.num_shared_convs, self.num_shared_fcs, self.in_channels,
True)
self.shared_out_channels = last_layer_dim
self.cls_convs, self.cls_fcs, self.cls_last_dim = \
self._add_conv_fc_branch(
self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
self.reg_convs, self.reg_fcs, self.reg_last_dim = \
self._add_conv_fc_branch(
self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
if self.with_dis:
self.dis_convs, self.dis_fcs, self.dis_last_dim = \
self._add_conv_fc_branch(
self.num_dis_convs, self.num_dis_fcs, self.shared_out_channels)
if self.num_shared_fcs == 0 and not self.with_avg_pool:
if self.num_cls_fcs == 0:
self.cls_last_dim *= self.roi_feat_area
if self.num_reg_fcs == 0:
self.reg_last_dim *= self.roi_feat_area
self.relu = nn.ReLU(inplace=True)
if self.with_cls:
self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes + 1)
if self.with_reg:
out_dim_reg = (4 if self.reg_class_agnostic else 4 *
self.num_classes)
self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)
if self.with_dis:
if self.dis_selector == 0 or self.dis_selector == 1:
self.fc_dis = nn.Linear(self.cls_last_dim, 1)
elif self.dis_selector == 2:
self.fc_dis = nn.Linear(self.cls_last_dim, 4)
def _add_conv_fc_branch(self,
num_branch_convs,
num_branch_fcs,
in_channels,
is_shared=False):
last_layer_dim = in_channels
branch_convs = nn.ModuleList()
if num_branch_convs > 0:
for i in range(num_branch_convs):
conv_in_channels = (
last_layer_dim if i == 0 else self.conv_out_channels)
branch_convs.append(
ConvModule(
conv_in_channels,
self.conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
last_layer_dim = self.conv_out_channels
branch_fcs = nn.ModuleList()
if num_branch_fcs > 0:
if (is_shared
or self.num_shared_fcs == 0) and not self.with_avg_pool:
last_layer_dim *= self.roi_feat_area
for i in range(num_branch_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
branch_fcs.append(
nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return branch_convs, branch_fcs, last_layer_dim
def init_weights(self):
super(ConvFCBBoxHead, self).init_weights()
if self.with_dis:
for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs, self.dis_fcs]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
else:
for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
def forward(self, x):
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
x_cls = x
x_reg = x
if self.with_dis:
x_dis = x
for conv in self.dis_convs:
x_dis = conv(x_dis)
if x_dis.dim() > 2:
if self.with_avg_pool:
x_dis = self.avg_pool(x_dis)
x_dis = x_dis.flatten(1)
for fc in self.dis_fcs:
x_dis = self.relu(fc(x_dis))
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
dis_pred = self.fc_dis(x_dis) if self.with_dis else None
return cls_score, bbox_pred, dis_pred
@HEADS.register_module()
class Shared2FCBBoxHead(ConvFCBBoxHead):
def __init__(self, fc_out_channels=1024, *args, **kwargs):
super(Shared2FCBBoxHead, self).__init__(
num_shared_convs=0,
num_shared_fcs=2,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
*args,
**kwargs)
@HEADS.register_module()
class Shared2FCBBoxHeadLeaves(ConvFCBBoxHead):
def __init__(self, fc_out_channels=1024, *args, **kwargs):
loss_dis = kwargs['loss_dis']
self.reference_labels = kwargs['reference_labels']
self.classes = kwargs['classes']
self.dis_selector = kwargs['dis_selector']
assert self.dis_selector in (0, 1, 2)
kwargs.pop('loss_dis')
kwargs.pop('reference_labels')
kwargs.pop('classes')
kwargs.pop('dis_selector')
super(Shared2FCBBoxHeadLeaves, self).__init__(
num_shared_convs=0,
num_shared_fcs=2,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
with_dis=True,
num_dis_convs=0,
num_dis_fcs=0,
*args,
**kwargs)
if self.dis_selector == 0 or self.dis_selector == 1:
assert loss_dis['use_sigmoid'], "used invalid loss_dis"
elif self.dis_selector == 2:
assert not loss_dis['use_sigmoid'], "used invalid loss_dis"
self.loss_dis = build_loss(loss_dis)
def get_targets(self,
sampling_results,
gt_bboxes,
gt_labels,
rcnn_train_cfg,
reference_labels,
classes,
concat=True):
pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
self._get_target_single,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=rcnn_train_cfg)
iou_calculator=dict(type='BboxOverlaps2D')
iou_calculator = build_iou_calculator(iou_calculator)
isolation_thr = 0.45
dis_targets = []
for i, res in enumerate(sampling_results):
ref_grap_list =[]
ref_leav_list =[]
ref_grap_dis_list =[]
ref_leav_dis_list =[]
for j, bbox in enumerate(gt_bboxes[i]):
if self.dis_selector == 0:
if 'grappolo' in classes[gt_labels[i][j]] and gt_labels[i][j] != reference_labels['grappolo_vite']:
ref_grap_dis_list.append(bbox)
elif (('foglia' in classes[gt_labels[i][j]] or classes[gt_labels[i][j]] == 'malattia_esca'\
or classes[gt_labels[i][j]] == 'virosi_pinot_grigio')
and gt_labels[i][j] != reference_labels['foglia_vite']):
ref_leav_dis_list.append(bbox)
elif self.dis_selector == 1:
if gt_labels[i][j] == reference_labels['grappolo_vite']:
ref_grap_list.append(bbox)
elif gt_labels[i][j] == reference_labels['foglia_vite']:
ref_leav_list.append(bbox)
elif self.dis_selector == 2:
if gt_labels[i][j] == reference_labels['grappolo_vite']:
ref_grap_list.append(bbox)
elif gt_labels[i][j] == reference_labels['foglia_vite']:
ref_leav_list.append(bbox)
elif 'grappolo' in classes[gt_labels[i][j]]:
ref_grap_dis_list.append(bbox)
elif 'foglia' in classes[gt_labels[i][j]] or classes[gt_labels[i][j]] == 'malattia_esca'\
or classes[gt_labels[i][j]] == 'virosi_pinot_grigio':
ref_leav_dis_list.append(bbox)
if len(ref_grap_list) > 0:
ref_grap_tensor = torch.cat(ref_grap_list)
ref_grap_tensor = torch.reshape(ref_grap_tensor, (len(ref_grap_list), 4))
if len(ref_leav_list) > 0:
ref_leav_tensor = torch.cat(ref_leav_list)
ref_leav_tensor = torch.reshape(ref_leav_tensor, (len(ref_leav_list), 4))
if len(ref_grap_dis_list) > 0:
ref_grap_dis_tensor = torch.cat(ref_grap_dis_list)
ref_grap_dis_tensor = torch.reshape(ref_grap_dis_tensor, (len(ref_grap_dis_list), 4))
if len(ref_leav_dis_list) > 0:
ref_leav_dis_tensor = torch.cat(ref_leav_dis_list)
ref_leav_dis_tensor = torch.reshape(ref_leav_dis_tensor, (len(ref_leav_dis_list), 4))
num_pos = res.pos_bboxes.size(0)
num_neg = res.neg_bboxes.size(0)
num_samples = num_pos + num_neg
dis_tensor= res.pos_bboxes.new_full((num_samples, ), -1, dtype=torch.long)
dis_list = []
for j, bbox in enumerate(res.pos_bboxes):
bbox = bbox.unsqueeze(0)
if res.pos_gt_labels[j] == reference_labels['grappolo_vite']:
if self.dis_selector == 0:
dis_list.append(-1)
elif self.dis_selector == 1 or self.dis_selector == 2:
if len(ref_grap_dis_list) > 0:
overlaps = iou_calculator(ref_grap_dis_tensor, bbox, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(0)
else:
dis_list.append(1)
else:
dis_list.append(0)
elif res.pos_gt_labels[j] == reference_labels['foglia_vite']:
if self.dis_selector == 0:
dis_list.append(-1)
elif self.dis_selector == 1 or self.dis_selector == 2:
if len(ref_leav_dis_list) > 0:
overlaps = iou_calculator(ref_leav_dis_tensor, bbox, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(0)
else:
dis_list.append(1)
else:
dis_list.append(0)
elif 'grappolo' in classes[res.pos_gt_labels[j]] and res.pos_gt_labels[j] != reference_labels['grappolo_vite']:
if self.dis_selector == 1:
dis_list.append(-1)
elif self.dis_selector == 0:
if len(ref_grap_list) > 0:
overlaps = iou_calculator(bbox, ref_grap_tensor, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(0)
else:
dis_list.append(1)
else:
dis_list.append(0)
elif self.dis_selector == 2:
if len(ref_grap_list) > 0:
overlaps = iou_calculator(bbox, ref_grap_tensor, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(2)
else:
dis_list.append(3)
else:
dis_list.append(2)
elif (('foglia' in classes[res.pos_gt_labels[j]] or classes[res.pos_gt_labels[j]] == 'malattia_esca'
or classes[res.pos_gt_labels[j]] == 'virosi_pinot_grigio')
and res.pos_gt_labels[j] != reference_labels['foglia_vite']):
if self.dis_selector == 1:
dis_list.append(-1)
elif self.dis_selector == 0:
if len(ref_leav_list) > 0:
overlaps = iou_calculator(bbox, ref_leav_tensor, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(0)
else:
dis_list.append(1)
else:
dis_list.append(0)
elif self.dis_selector == 2:
if len(ref_leav_list) > 0:
overlaps = iou_calculator(bbox, ref_leav_tensor, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(2)
else:
dis_list.append(3)
else:
dis_list.append(2)
dis_tensor[:num_pos] = torch.tensor(dis_list)
dis_targets.append(dis_tensor)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
dis_targets = torch.cat(dis_targets, 0)
return labels, label_weights, bbox_targets, bbox_weights, dis_targets
@force_fp32(apply_to=('cls_score', 'bbox_pred', 'dis_pred'))
def loss(self,
cls_score,
bbox_pred,
dis_pred,
rois,
labels,
label_weights,
bbox_targets,
bbox_weights,
dis_targets,
reduction_override=None):
losses = dict()
if cls_score is not None:
avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
if cls_score.numel() > 0:
losses['loss_cls'] = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=avg_factor,
reduction_override=reduction_override)
losses['acc'] = accuracy(cls_score, labels)
if bbox_pred is not None:
bg_class_ind = self.num_classes
pos_inds = (labels >= 0) & (labels < bg_class_ind)
if pos_inds.any():
if self.reg_decoded_bbox:
bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)
if self.reg_class_agnostic:
pos_bbox_pred = bbox_pred.view(
bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]
else:
pos_bbox_pred = bbox_pred.view(
bbox_pred.size(0), -1,
4)[pos_inds.type(torch.bool),
labels[pos_inds.type(torch.bool)]]
losses['loss_bbox'] = self.loss_bbox(
pos_bbox_pred,
bbox_targets[pos_inds.type(torch.bool)],
bbox_weights[pos_inds.type(torch.bool)],
avg_factor=bbox_targets.size(0),
reduction_override=reduction_override)
else:
losses['loss_bbox'] = bbox_pred[pos_inds].sum()
if dis_pred is not None:
pos_inds = dis_targets != -1
if pos_inds.any():
pos_dis_pred = dis_pred[pos_inds.type(torch.bool)]
pos_dis_targets = dis_targets[pos_inds.type(torch.bool)]
avg_factor = dis_pred.size(0)
losses['loss_dis'] = self.loss_dis(
pos_dis_pred,
pos_dis_targets,
avg_factor=avg_factor,
reduction_override=reduction_override)
return losses
@force_fp32(apply_to=('cls_score', 'bbox_pred', 'dis_pred'))
def get_bboxes(self,
rois,
cls_score,
bbox_pred,
dis_pred,
img_shape,
scale_factor,
rescale=False,
cfg=None):
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
if bbox_pred is not None:
bboxes = self.bbox_coder.decode(
rois[:, 1:], bbox_pred, max_shape=img_shape)
else:
bboxes = rois[:, 1:].clone()
if img_shape is not None:
bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])
bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])
if rescale and bboxes.size(0) > 0:
if isinstance(scale_factor, float):
bboxes /= scale_factor
else:
scale_factor = bboxes.new_tensor(scale_factor)
bboxes = (bboxes.view(bboxes.size(0), -1, 4) /
scale_factor).view(bboxes.size()[0], -1)
if dis_pred is not None:
if self.dis_selector == 0 or self.dis_selector == 1:
diseases = F.sigmoid(dis_pred)
elif self.dis_selector == 2:
diseases = F.softmax(dis_pred, dim=1)
if cfg is None:
return bboxes, scores, diseases
else:
det_bboxes, det_labels, inds = multiclass_nms(bboxes, scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img,
return_inds=True)
if self.dis_selector == 0 or self.dis_selector == 1:
diseases = diseases.expand(bboxes.size(0), scores.size(1) - 1)
diseases = diseases.reshape(-1)
elif self.dis_selector == 2:
diseases = diseases[:, None].expand(bboxes.size(0), scores.size(1) - 1, 4)
diseases = diseases.reshape(-1, 4)
det_dis = diseases[inds]
return det_bboxes, det_labels, det_dis
@HEADS.register_module()
class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
def __init__(self, fc_out_channels=1024, *args, **kwargs):
super(Shared4Conv1FCBBoxHead, self).__init__(
num_shared_convs=4,
num_shared_fcs=1,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
*args,
**kwargs)
| true
| true
|
79058c9537ecebf0b7ca925ac34b01a00b522dcd
| 2,405
|
py
|
Python
|
telethon/sync.py
|
bb010g/Telethon
|
278f0e9e983d938589b6d541e71135ad5b6857c5
|
[
"MIT"
] | 2
|
2021-04-29T14:19:25.000Z
|
2021-09-17T07:13:49.000Z
|
telethon/sync.py
|
exceloo/Telethon
|
30a0e390603072d3ec57a2f0eef0a297a9b0321b
|
[
"MIT"
] | 5
|
2021-04-30T21:14:18.000Z
|
2022-03-12T00:21:58.000Z
|
telethon/sync.py
|
exceloo/Telethon
|
30a0e390603072d3ec57a2f0eef0a297a9b0321b
|
[
"MIT"
] | 1
|
2020-04-16T22:02:26.000Z
|
2020-04-16T22:02:26.000Z
|
"""
This magical module will rewrite all public methods in the public interface
of the library so they can run the loop on their own if it's not already
running. This rewrite may not be desirable if the end user always uses the
methods the way they should be run, but it's incredibly useful for quick
scripts and the runtime overhead is relatively low.
Some really common methods which are hardly used offer this ability by
default, such as ``.start()`` and ``.run_until_disconnected()`` (since
you may want to start, and then run until disconnected while using async
event handlers).
"""
import asyncio
import functools
import inspect
from . import connection
from .client.account import _TakeoutClient
from .client.telegramclient import TelegramClient
from .tl import types, functions, custom
from .tl.custom import (
Draft, Dialog, MessageButton, Forward, Button,
Message, InlineResult, Conversation
)
from .tl.custom.chatgetter import ChatGetter
from .tl.custom.sendergetter import SenderGetter
def _syncify_wrap(t, method_name):
method = getattr(t, method_name)
@functools.wraps(method)
def syncified(*args, **kwargs):
coro = method(*args, **kwargs)
loop = asyncio.get_event_loop()
if loop.is_running():
return coro
else:
return loop.run_until_complete(coro)
# Save an accessible reference to the original method
setattr(syncified, '__tl.sync', method)
setattr(t, method_name, syncified)
def syncify(*types):
"""
Converts all the methods in the given types (class definitions)
into synchronous, which return either the coroutine or the result
based on whether ``asyncio's`` event loop is running.
"""
# Our asynchronous generators all are `RequestIter`, which already
# provide a synchronous iterator variant, so we don't need to worry
# about asyncgenfunctions here.
for t in types:
for name in dir(t):
if not name.startswith('_') or name == '__call__':
if inspect.iscoroutinefunction(getattr(t, name)):
_syncify_wrap(t, name)
syncify(TelegramClient, _TakeoutClient, Draft, Dialog, MessageButton,
ChatGetter, SenderGetter, Forward, Message, InlineResult, Conversation)
__all__ = [
'TelegramClient', 'Button',
'types', 'functions', 'custom', 'errors',
'events', 'utils', 'connection'
]
| 33.873239
| 79
| 0.710603
|
import asyncio
import functools
import inspect
from . import connection
from .client.account import _TakeoutClient
from .client.telegramclient import TelegramClient
from .tl import types, functions, custom
from .tl.custom import (
Draft, Dialog, MessageButton, Forward, Button,
Message, InlineResult, Conversation
)
from .tl.custom.chatgetter import ChatGetter
from .tl.custom.sendergetter import SenderGetter
def _syncify_wrap(t, method_name):
method = getattr(t, method_name)
@functools.wraps(method)
def syncified(*args, **kwargs):
coro = method(*args, **kwargs)
loop = asyncio.get_event_loop()
if loop.is_running():
return coro
else:
return loop.run_until_complete(coro)
setattr(syncified, '__tl.sync', method)
setattr(t, method_name, syncified)
def syncify(*types):
    # about asyncgenfunctions here.
for t in types:
for name in dir(t):
if not name.startswith('_') or name == '__call__':
if inspect.iscoroutinefunction(getattr(t, name)):
_syncify_wrap(t, name)
syncify(TelegramClient, _TakeoutClient, Draft, Dialog, MessageButton,
ChatGetter, SenderGetter, Forward, Message, InlineResult, Conversation)
__all__ = [
'TelegramClient', 'Button',
'types', 'functions', 'custom', 'errors',
'events', 'utils', 'connection'
]
| true
| true
|
79058e1b90023f1994f12d9db036003e0c9f794e
| 6,506
|
py
|
Python
|
utils.py
|
smtnkc/gcn4epi
|
2b9dd973b2d5120f618d3c36e8aa9d7d4a4e6b69
|
[
"MIT"
] | null | null | null |
utils.py
|
smtnkc/gcn4epi
|
2b9dd973b2d5120f618d3c36e8aa9d7d4a4e6b69
|
[
"MIT"
] | null | null | null |
utils.py
|
smtnkc/gcn4epi
|
2b9dd973b2d5120f618d3c36e8aa9d7d4a4e6b69
|
[
"MIT"
] | null | null | null |
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
    return np.array(mask, dtype=bool)
def load_data(cell_line, cross_cell_line, label_rate, k_mer):
"""
Load input data from data/cell_line directory.
| x_20.index | the indices (IDs) of labeled train instances as list object (for label_rate = 20%) |
| ux_20.index | the indices (IDs) of unlabeled train instances as list object (for label_rate = 20%) |
| vx_20.index | the indices (IDs) of validation instances as list object (for label_rate = 20%) |
| tx_20.index | the indices (IDs) of test instances as list object (for label_rate = 20%) |
| features_5mer | the feature vectors of all instances as scipy.sparse.csr.csr_matrix object (for k_mer = 5) |
| nodes | a dict in the format {chromosome_name: ID} as collections.defaultdict object |
| labels | the one-hot labels of all instances as numpy.ndarray object |
| graph | a dict in the format {ID: [IDs_of_neighbor_nodes]} as collections.defaultdict object |
    All objects above must be saved using the Python pickle module.
    :param cell_line: Name of the cell line to which the datasets belong
    :return: All data input files loaded (as well as the training/test data).
"""
if (cross_cell_line != None) and (cross_cell_line != cell_line):
read_dir = 'data/{}_{}/'.format(cell_line, cross_cell_line)
else:
read_dir = 'data/{}/'.format(cell_line)
# STEP 1: Load all feature vectors, class labels and graph
features_file = open('{}/features_{}mer'.format(read_dir, k_mer), "rb")
features = pkl.load(features_file)
features_file.close()
labels_file = open('{}/labels'.format(read_dir), "rb")
labels = pkl.load(labels_file)
labels_file.close()
graph_file = open('{}/graph'.format(read_dir), "rb")
graph = pkl.load(graph_file)
graph_file.close()
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
# STEP 2: Load IDs of labeled_train/unlabeled_train/validation/test nodes
    lr = '{:.2f}'.format(label_rate).split('.')[1]
idx_x_file = open('{}/x_{}.index'.format(read_dir, lr), "rb")
idx_x = pkl.load(idx_x_file)
idx_x_file.close()
idx_ux_file = open('{}/ux_{}.index'.format(read_dir, lr), "rb")
idx_ux = pkl.load(idx_ux_file)
idx_ux_file.close()
idx_vx_file = open('{}/vx_{}.index'.format(read_dir, lr), "rb")
idx_vx = pkl.load(idx_vx_file)
idx_vx_file.close()
idx_tx_file = open('{}/tx_{}.index'.format(read_dir, lr), "rb")
idx_tx = pkl.load(idx_tx_file)
idx_tx_file.close()
# STEP 3: Take subsets from loaded features and class labels using loaded IDs
x = features[idx_x]
y = labels[idx_x]
ux = features[idx_ux]
uy = labels[idx_ux]
vx = features[idx_vx]
vy = labels[idx_vx]
tx = features[idx_tx]
ty = labels[idx_tx]
print("x={} ux={} vx={} tx={}".format(x.shape[0], ux.shape[0], vx.shape[0], tx.shape[0]))
# STEP 4: Mask labels
train_mask = sample_mask(idx_x, labels.shape[0])
val_mask = sample_mask(idx_vx, labels.shape[0])
test_mask = sample_mask(idx_tx, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
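# Editor's illustrative call sketch (not part of the original file). The cell-line
# name and hyperparameters below are placeholders; with label_rate=0.2 the function
# reads the *_20.index files described in the docstring above.
#
#     adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = \
#         load_data('GM12878', None, label_rate=0.2, k_mer=5)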
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def preprocess_features(features):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return sparse_to_tuple(features)
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
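# Editor's note: normalize_adj computes the symmetric normalization
# D^(-1/2) * A * D^(-1/2), with D the diagonal degree matrix of A. For example,
# for A = [[0, 1], [1, 0]] both degrees are 1, so the normalized matrix equals A;
# isolated nodes keep all-zero rows because their inverse-sqrt degree is set to 0 above.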
def preprocess_adj(adj):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
return sparse_to_tuple(adj_normalized)
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
"""Construct feed dictionary."""
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
return feed_dict
def chebyshev_polynomials(adj, k):
"""Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
print("Calculating Chebyshev polynomials up to order {}...".format(k))
adj_normalized = normalize_adj(adj)
laplacian = sp.eye(adj.shape[0]) - adj_normalized
largest_eigval, _ = eigsh(laplacian, 1, which='LM')
scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(scaled_laplacian)
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
for i in range(2, k+1):
t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
return sparse_to_tuple(t_k)
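# Editor's note: chebyshev_polynomials returns [T_0, T_1, ..., T_k] with
# T_0 = I, T_1 = L_scaled = 2L / lambda_max - I, and the standard Chebyshev
# recurrence T_k = 2 * L_scaled * T_{k-1} - T_{k-2}, as used for ChebNet-style
# spectral graph convolutions.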
| 36.144444
| 114
| 0.67553
|
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
def sample_mask(idx, l):
mask = np.zeros(l)
mask[idx] = 1
    return np.array(mask, dtype=bool)
def load_data(cell_line, cross_cell_line, label_rate, k_mer):
if (cross_cell_line != None) and (cross_cell_line != cell_line):
read_dir = 'data/{}_{}/'.format(cell_line, cross_cell_line)
else:
read_dir = 'data/{}/'.format(cell_line)
features_file = open('{}/features_{}mer'.format(read_dir, k_mer), "rb")
features = pkl.load(features_file)
features_file.close()
labels_file = open('{}/labels'.format(read_dir), "rb")
labels = pkl.load(labels_file)
labels_file.close()
graph_file = open('{}/graph'.format(read_dir), "rb")
graph = pkl.load(graph_file)
graph_file.close()
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    lr = '{:.2f}'.format(label_rate).split('.')[1]
idx_x_file = open('{}/x_{}.index'.format(read_dir, lr), "rb")
idx_x = pkl.load(idx_x_file)
idx_x_file.close()
idx_ux_file = open('{}/ux_{}.index'.format(read_dir, lr), "rb")
idx_ux = pkl.load(idx_ux_file)
idx_ux_file.close()
idx_vx_file = open('{}/vx_{}.index'.format(read_dir, lr), "rb")
idx_vx = pkl.load(idx_vx_file)
idx_vx_file.close()
idx_tx_file = open('{}/tx_{}.index'.format(read_dir, lr), "rb")
idx_tx = pkl.load(idx_tx_file)
idx_tx_file.close()
x = features[idx_x]
y = labels[idx_x]
ux = features[idx_ux]
uy = labels[idx_ux]
vx = features[idx_vx]
vy = labels[idx_vx]
tx = features[idx_tx]
ty = labels[idx_tx]
print("x={} ux={} vx={} tx={}".format(x.shape[0], ux.shape[0], vx.shape[0], tx.shape[0]))
train_mask = sample_mask(idx_x, labels.shape[0])
val_mask = sample_mask(idx_vx, labels.shape[0])
test_mask = sample_mask(idx_tx, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def preprocess_features(features):
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return sparse_to_tuple(features)
def normalize_adj(adj):
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def preprocess_adj(adj):
adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
return sparse_to_tuple(adj_normalized)
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
return feed_dict
def chebyshev_polynomials(adj, k):
print("Calculating Chebyshev polynomials up to order {}...".format(k))
adj_normalized = normalize_adj(adj)
laplacian = sp.eye(adj.shape[0]) - adj_normalized
largest_eigval, _ = eigsh(laplacian, 1, which='LM')
scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(scaled_laplacian)
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
for i in range(2, k+1):
t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
return sparse_to_tuple(t_k)
| true
| true
|
79058e7e2837f17f953fc9a88bbe6347313214c1
| 640
|
py
|
Python
|
src/util/summary_logging.py
|
wooseoklee4/AP-BSN
|
210013cfe0657e678e4b940fd4d5719ac0ac87c6
|
[
"MIT"
] | 8
|
2022-03-23T08:07:19.000Z
|
2022-03-30T17:08:17.000Z
|
src/util/summary_logging.py
|
wooseoklee4/AP-BSN
|
210013cfe0657e678e4b940fd4d5719ac0ac87c6
|
[
"MIT"
] | 1
|
2022-03-25T13:26:58.000Z
|
2022-03-26T10:35:04.000Z
|
src/util/summary_logging.py
|
wooseoklee4/AP-BSN
|
210013cfe0657e678e4b940fd4d5719ac0ac87c6
|
[
"MIT"
] | 1
|
2022-03-29T03:34:38.000Z
|
2022-03-29T03:34:38.000Z
|
import time
from torch.utils.tensorboard import SummaryWriter
import numpy as np
class LossWriter(SummaryWriter):
def __init__(self, log_dir=None, comment=''):
if log_dir == None:
log_dir = './logs/tensorboard/' + time.strftime('%Y-%m-%d--%H-%M-%S', time.localtime(time.time()))
super(LossWriter, self).__init__(log_dir=log_dir, comment=comment)
def write_loss(self, loss_name, scalar, n_iter):
self.add_scalar('Loss/'+loss_name, scalar, n_iter)
if __name__=='__main__':
testwriter = LossWriter()
for n_iter in range(100):
        testwriter.write_loss('random_loss', np.random.random(), n_iter)  # placeholder tag added so the call matches write_loss(loss_name, scalar, n_iter)
| 29.090909
| 110
| 0.676563
|
import time
from torch.utils.tensorboard import SummaryWriter
import numpy as np
class LossWriter(SummaryWriter):
def __init__(self, log_dir=None, comment=''):
if log_dir == None:
log_dir = './logs/tensorboard/' + time.strftime('%Y-%m-%d--%H-%M-%S', time.localtime(time.time()))
super(LossWriter, self).__init__(log_dir=log_dir, comment=comment)
def write_loss(self, loss_name, scalar, n_iter):
self.add_scalar('Loss/'+loss_name, scalar, n_iter)
if __name__=='__main__':
testwriter = LossWriter()
for n_iter in range(100):
        testwriter.write_loss('random_loss', np.random.random(), n_iter)
| true
| true
|
79058ebaf9276f397750a8afd5394d8d67191355
| 2,324
|
py
|
Python
|
test/test_get_import_data_response.py
|
idaholab/Deep-Lynx-Python-Package
|
99927cc877eba8e2ee396feec807da1c48c64893
|
[
"MIT"
] | 3
|
2021-06-16T20:34:41.000Z
|
2021-06-16T23:54:36.000Z
|
test/test_get_import_data_response.py
|
idaholab/Deep-Lynx-Python-Package
|
99927cc877eba8e2ee396feec807da1c48c64893
|
[
"MIT"
] | null | null | null |
test/test_get_import_data_response.py
|
idaholab/Deep-Lynx-Python-Package
|
99927cc877eba8e2ee396feec807da1c48c64893
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Deep Lynx
The construction of megaprojects has consistently demonstrated challenges for project managers in regard to meeting cost, schedule, and performance requirements. Megaproject construction challenges are common place within megaprojects with many active projects in the United States failing to meet cost and schedule efforts by significant margins. Currently, engineering teams operate in siloed tools and disparate teams where connections across design, procurement, and construction systems are translated manually or over brittle point-to-point integrations. The manual nature of data exchange increases the risk of silent errors in the reactor design, with each silent error cascading across the design. These cascading errors lead to uncontrollable risk during construction, resulting in significant delays and cost overruns. Deep Lynx allows for an integrated platform during design and operations of mega projects. The Deep Lynx Core API delivers a few main features. 1. Provides a set of methods and endpoints for manipulating data in an object oriented database. This allows us to store complex datatypes as records and then to compile them into actual, modifiable objects at run-time. Users can store taxonomies or ontologies in a readable format. 2. Provides methods for storing and retrieving data in a graph database. This data is structured and validated against the aformentioned object oriented database before storage. # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.get_import_data_response import GetImportDataResponse # noqa: E501
from swagger_client.rest import ApiException
class TestGetImportDataResponse(unittest.TestCase):
"""GetImportDataResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetImportDataResponse(self):
"""Test GetImportDataResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.get_import_data_response.GetImportDataResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 58.1
| 1,455
| 0.790017
|
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.get_import_data_response import GetImportDataResponse
from swagger_client.rest import ApiException
class TestGetImportDataResponse(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testGetImportDataResponse(self):
        pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
79058f13bf14612075f27b34498a476ca0a7c841
| 35,496
|
py
|
Python
|
alphazero/network/policies.py
|
timoklein/A0C
|
2825193f424bd5b74b654c929ef73775b0914ee5
|
[
"MIT"
] | 6
|
2021-02-17T18:04:17.000Z
|
2022-02-15T11:08:22.000Z
|
alphazero/network/policies.py
|
timoklein/A0C
|
2825193f424bd5b74b654c929ef73775b0914ee5
|
[
"MIT"
] | 1
|
2021-08-15T12:19:33.000Z
|
2021-08-23T16:41:43.000Z
|
alphazero/network/policies.py
|
timoklein/A0C
|
2825193f424bd5b74b654c929ef73775b0914ee5
|
[
"MIT"
] | 1
|
2021-09-28T03:47:53.000Z
|
2021-09-28T03:47:53.000Z
|
from typing import ClassVar, List, Optional, Tuple, Callable, Union, cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from alphazero.network.distributions import SquashedNormal, GeneralizedBeta
from alphazero.network.utils import (
_map_nonlinearities,
_process_str,
)
__all__ = [
"make_policy",
"DiagonalNormalPolicy",
"DiagonalGMMPolicy",
"GeneralizedBetaPolicy",
"DiscretePolicy",
]
class Policy(nn.Module):
"""Base policy class.
    The base policy is responsible for instantiating the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.action_bound = action_bound
        # boundaries for the log standard deviation to increase training stability
self.log_param_min = log_param_min
self.log_param_max = log_param_max
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
components: int = getattr(self, "num_components", 1)
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, components={components}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, "
f"log_std_bounds={self.log_param_bounds}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
@property
def bounds(self) -> np.ndarray:
if self.action_bound is None:
return np.array([-np.inf, np.inf], dtype=np.float32)
else:
return np.array([-self.action_bound, self.action_bound], dtype=np.float32)
@torch.no_grad()
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
raise NotImplementedError
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
raise NotImplementedError
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
x = self.trunk(x)
V_hat = self.value_head(x)
self.train()
return V_hat.detach().cpu().numpy()
class DiscretePolicy(nn.Module):
"""Base policy class.
    The base policy is responsible for instantiating the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
num_actions: int
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
# class variable
distribution_type: ClassVar[str] = "Categorical"
def __init__(
self,
representation_dim: int,
action_dim: int,
num_actions: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.num_actions = num_actions
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
self.distribution = D.Categorical
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], num_actions)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, "
f"hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
def _get_dist_params(
self, x: torch.Tensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[torch.FloatTensor, torch.FloatTensor]
            Categorical logits over the available actions (pi_logits), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # dist_head returns a tensor of shape [batch_size, num_actions]
        # holding the logits of the categorical action distribution
pi_logits = self.dist_head(x)
return pi_logits, V_hat
def forward(self, x: torch.FloatTensor) -> Tuple[D.Categorical, torch.FloatTensor]:
"""Forward pass of the model.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[D.Categorical, torch.FloatTensor]
            Categorical action distribution (dist), State value estimate (V_hat).
"""
pi_logits, V_hat = self._get_dist_params(x)
dist = D.Categorical(logits=pi_logits)
        # samples from dist have shape [batch_size] (one discrete action index per state)
return dist, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pi_logits, V_hat = self._get_dist_params(states)
# This creates an independent distribution for each action possibility
# so that the batch_shape of the distribution is identical to the shape of actions
# It's needed so that the log_probs are of the proper shape [batch_size, num_actions]
# else this throws since the distribution's batch_shape=[batch_shape] doesn't match
# the shape of the actions tensor, which is [batch_size, num_actions]
num_actions = actions.shape[1]
pi_hat = D.Categorical(
logits=pi_logits.unsqueeze(dim=1).repeat((1, num_actions, 1))
)
log_probs = pi_hat.log_prob(actions)
entropy = pi_hat.entropy()
return log_probs, entropy, V_hat
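    # Editor's shape sketch (illustrative numbers, not from the original repo):
    # with a batch of 4 states and 3 sampled actions per state (actions: [4, 3]),
    # pi_logits has shape [4, num_actions]; after unsqueeze/repeat the Categorical
    # has batch_shape [4, 3], so both log_probs and entropy come out as [4, 3].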
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
_, V_hat = self(x)
self.train()
return V_hat.detach().cpu().numpy()
@torch.no_grad()
def predict_pi(self, x: torch.Tensor) -> np.ndarray:
self.eval()
logits, _ = self._get_dist_params(x)
self.train()
return F.softmax(logits, dim=-1).detach().cpu().numpy()
class DiagonalNormalPolicy(Policy):
"""Policy class for factorized normal distributions.
Learns parameters for a factorized normal distribution of types
Normal, TanhSquashedNormal or GeneralizedSquashedNormal.
Factorized means that a conditionally independent (given a state) 1D Normal distribution is
learned for each dimension of the action space instead of a Multivariate Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be a Normallike distribution.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviation.
log_param_max : int
Upper bound for learned log standard deviation.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalNormal"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns a tensor of shape [batch_size, 2*action_dim]
# split this tensor along the last dimension into parameters for mu and sigma
mu, log_std = self.dist_head(x).chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, V_hat
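    # Editor's note: a quick numeric illustration of the clamp-then-exp trick above
    # (numbers are arbitrary): with log_param_min=-5 and log_param_max=2, a raw
    # head output of 3.7 is clamped to 2.0 so sigma = e^2 ~ 7.39, while -9.0 is
    # clamped to -5.0 so sigma = e^-5 ~ 0.0067.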
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, V_hat = self(states)
# This aligns the distribution batch_shape with the number of actions at the root
# It can be thought of as generating num_actions identical normal distributions for each agent
# and then sampling the log_prob for action from the distribution
# num_actions = actions.shape[-1]
# mu = mu.expand((-1, num_actions))
# sigma = sigma.expand((-1, num_actions))
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
log_probs = normal.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, _ = self(x)
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
action = normal.sample()
self.train()
return action.detach().cpu().numpy()
class DiagonalGMMPolicy(Policy):
"""Policy class for learning a factorized GMM.
Learns a 1D GMM for each dimension of the action space.
The components of the GMM are either Normal or squashed Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be Normallike.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
num_components : int
Number of mixture components.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviations.
log_param_max : int
Upper bound for learned log standard deviations.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
num_components: int
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalGMM"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
num_components: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.num_components = num_components
# calculate the number of parameters needed for the GMM
        # 2 comes from each distribution being specified by 2 parameters
dist_params = num_components * (2 * self.action_dim + 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], dist_params)
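        # Editor's worked example (illustrative numbers): with action_dim=2 and
        # num_components=3 the head outputs 3 * (2*2 + 1) = 15 values per state:
        # 6 means, 6 log standard deviations and 3 mixture logits.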
def forward(
self, x: torch.FloatTensor
) -> Tuple[
torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor
]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma),
Logits for the categorical distribution parameterizing the components (log_coeffs),
State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# mixture_params is a tensor of shape [batch_size, num_agents, 2*action_dim*num_components + num_components]
# the elements in the first term (2*action_dim*num_components) are the parameters for the mixture components
# the elements in the second term (+ num_components) are the mixture coefficients
mixture_params = self.dist_head(x)
# get mixture parameters and reorder to [batch_size, num_agents, 2*num_components, action_dim]
dist_params = mixture_params[
..., : self.num_components * 2 * self.action_dim
].view(x.shape[0], -1)
# get the num_components last tensor elements as logits for the mixture coefficients
log_coeff = mixture_params[..., -self.num_components :]
# split the dist_params along the middle dimension (2*num_components) into means and log stddevs
mu, log_std = dist_params.chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, log_coeff, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, log_coeff, V_hat = self(states)
# We need num_actions identical gmms to sample log_probs for each action
num_actions = actions.shape[-1]
mu = mu.unsqueeze(dim=1).expand((-1, num_actions, -1))
sigma = sigma.unsqueeze(dim=1).expand((-1, num_actions, -1))
log_coeff = log_coeff.unsqueeze(dim=1).expand((-1, num_actions, -1))
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
log_probs = gmm.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, log_coeff, _ = self(x)
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
action = gmm.sample()
self.train()
return action.detach().cpu().numpy()
class GeneralizedBetaPolicy(Policy):
"""Policy class for a generalized Beta distribution.
The beta distribution used by this class is generalized in that it has support
[-c, c] instead of [0,1].
    This is achieved via a location-scale transformation (2c)x - c, where c is the desired bound.
Since both parameters alpha, beta > 0, the log-learning-trick for the Normal standard deviation
is applied to both parameters.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log_alpha and log_beta.
log_param_max : int
Upper bound for learned log_alpha and log_beta.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: float
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "GeneralizedBeta"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: float,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
assert action_bound, "Beta policy needs action bounds specified."
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# create distribution parameters
dist_params = self.dist_head(x)
# Use the log_std_dev trick for alpha and beta
# since both alpha > 0 and beta > 0
dist_params = torch.clamp(
dist_params, min=self.log_param_min, max=self.log_param_max
)
alpha, beta = dist_params.exp().chunk(2, dim=-1)
return alpha, beta, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
alpha, beta, V_hat = self(states)
# ensure that the distribution batch_shape fits the number of actions taken for
# each agent at the root
num_actions = actions.shape[-1]
alpha = alpha.expand(-1, num_actions)
beta = beta.expand(-1, num_actions)
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
log_probs = beta_dist.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
alpha, beta, _ = self(x)
beta_dist = D.Beta(alpha, beta)
action = beta_dist.sample()
self.train()
return action.detach().cpu().numpy()
def make_policy(
representation_dim: int,
action_dim: int,
distribution: str,
hidden_dimensions: List[int],
nonlinearity: str,
num_components: Optional[int] = None,
num_actions: Optional[int] = None,
action_bound: Optional[float] = None,
layernorm: bool = False,
log_param_min: float = -5,
log_param_max: float = 2,
) -> Union[
DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy
]:
"""Constructs a policy network from a given config.
The following config keys need to be specified:
- "representation_dim": int
- "action_dim": int
- "distribution": str
- "num_components": int
- "action_bound": float
- "hidden_dimensions": List[int]
- "nonlinearity": str
- "layernorm": bool
- "log_param_min": Optional[float]
- "log_param_max": Optional[float]
Parameters
----------
representation_dim: int
Dimensionality of the vector state space of the environment.
action_dim: int
Number of action dimensions in the environment.
distribution: str
Name of the policy distribution as string ["discrete", "beta", "normal"].
hidden_dimensions: List[int]
List specification of the MLP policy. Each int element in the list represents a hidden
layer in the network with the respective number of neurons.
nonlinearity: str
Nonlinearity (activation function) used in the policy network.
num_components: Optional[int] = None
Number of components for mixture distributions.
num_actions: Optional[int] = None
Number of available actions. Used in the discrete policy.
action_bound: Optional[float] = None
Action bounds for the squashed normal or squashed GMM policy.
layernorm: bool = False
Use Layernorm in the policy network if set to True.
log_param_min: float = -5
Lower bound of the learned log parameters (standard deviation for Normal distributions).
log_param_max: float = 2
Upper bound of the learned log parameters.
Returns
-------
Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]
        Policy network instance.
"""
# basic config string preprocessing to ensure mapping works later
distribution = _process_str(distribution)
nonlinearity = _process_str(nonlinearity)
if distribution == "discrete":
return DiscretePolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_actions=cast(int, num_actions),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
)
elif distribution == "beta":
assert num_components
return GeneralizedBetaPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=cast(float, action_bound),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
assert num_components
if 1 < num_components:
return DiagonalGMMPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_components=num_components,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
return DiagonalNormalPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
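# --- Editor's illustrative usage sketch (not part of the original file) ---
# The dimensions and hyperparameters below are placeholder values chosen for the
# example; with a single component and an action bound this resolves to a
# DiagonalNormalPolicy wrapping a SquashedNormal.
if __name__ == "__main__":
    example_policy = make_policy(
        representation_dim=8,
        action_dim=1,
        distribution="normal",
        hidden_dimensions=[64, 64],
        nonlinearity="relu",
        num_components=1,
        action_bound=1.0,
    )
    example_states = torch.randn(4, 8)
    example_actions = example_policy.sample_action(example_states)  # ndarray of shape (4, 1)
    print(example_actions.shape)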
| 38.708833
| 130
| 0.647087
|
from typing import ClassVar, List, Optional, Tuple, Callable, Union, cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from alphazero.network.distributions import SquashedNormal, GeneralizedBeta
from alphazero.network.utils import (
_map_nonlinearities,
_process_str,
)
__all__ = [
"make_policy",
"DiagonalNormalPolicy",
"DiagonalGMMPolicy",
"GeneralizedBetaPolicy",
"DiscretePolicy",
]
class Policy(nn.Module):
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.action_bound = action_bound
self.log_param_min = log_param_min
self.log_param_max = log_param_max
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
def __repr__(self) -> str:
components: int = getattr(self, "num_components", 1)
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, components={components}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, "
f"log_std_bounds={self.log_param_bounds}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
@property
def bounds(self) -> np.ndarray:
if self.action_bound is None:
return np.array([-np.inf, np.inf], dtype=np.float32)
else:
return np.array([-self.action_bound, self.action_bound], dtype=np.float32)
@torch.no_grad()
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
raise NotImplementedError
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
raise NotImplementedError
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
x = self.trunk(x)
V_hat = self.value_head(x)
self.train()
return V_hat.detach().cpu().numpy()
class DiscretePolicy(nn.Module):
# member type annotations
state_dim: int
action_dim: int
num_actions: int
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
# class variable
distribution_type: ClassVar[str] = "Categorical"
def __init__(
self,
representation_dim: int,
action_dim: int,
num_actions: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.num_actions = num_actions
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
self.distribution = D.Categorical
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], num_actions)
def __repr__(self) -> str:
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, "
f"hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
def _get_dist_params(
self, x: torch.Tensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
x = self.trunk(x)
V_hat = self.value_head(x)
pi_logits = self.dist_head(x)
return pi_logits, V_hat
def forward(self, x: torch.FloatTensor) -> Tuple[D.Categorical, torch.FloatTensor]:
pi_logits, V_hat = self._get_dist_params(x)
dist = D.Categorical(logits=pi_logits)
return dist, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pi_logits, V_hat = self._get_dist_params(states)
# else this throws since the distribution's batch_shape=[batch_shape] doesn't match
# the shape of the actions tensor, which is [batch_size, num_actions]
num_actions = actions.shape[1]
pi_hat = D.Categorical(
logits=pi_logits.unsqueeze(dim=1).repeat((1, num_actions, 1))
)
log_probs = pi_hat.log_prob(actions)
entropy = pi_hat.entropy()
return log_probs, entropy, V_hat
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
_, V_hat = self(x)
self.train()
return V_hat.detach().cpu().numpy()
@torch.no_grad()
def predict_pi(self, x: torch.Tensor) -> np.ndarray:
self.eval()
logits, _ = self._get_dist_params(x)
self.train()
return F.softmax(logits, dim=-1).detach().cpu().numpy()
class DiagonalNormalPolicy(Policy):
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalNormal"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns a tensor of shape [batch_size, 2*action_dim]
# split this tensor along the last dimension into parameters for mu and sigma
mu, log_std = self.dist_head(x).chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, V_hat = self(states)
# This aligns the distribution batch_shape with the number of actions at the root
# It can be thought of as generating num_actions identical normal distributions for each agent
# and then sampling the log_prob for action from the distribution
# num_actions = actions.shape[-1]
# mu = mu.expand((-1, num_actions))
# sigma = sigma.expand((-1, num_actions))
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
log_probs = normal.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, _ = self(x)
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
action = normal.sample()
self.train()
return action.detach().cpu().numpy()
class DiagonalGMMPolicy(Policy):
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
num_components: int
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalGMM"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
num_components: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.num_components = num_components
# calculate the number of parameters needed for the GMM
        # 2 comes from each distribution being specified by 2 parameters
dist_params = num_components * (2 * self.action_dim + 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], dist_params)
def forward(
self, x: torch.FloatTensor
) -> Tuple[
torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor
]:
x = self.trunk(x)
V_hat = self.value_head(x)
# mixture_params is a tensor of shape [batch_size, num_agents, 2*action_dim*num_components + num_components]
# the elements in the first term (2*action_dim*num_components) are the parameters for the mixture components
# the elements in the second term (+ num_components) are the mixture coefficients
mixture_params = self.dist_head(x)
# get mixture parameters and reorder to [batch_size, num_agents, 2*num_components, action_dim]
dist_params = mixture_params[
..., : self.num_components * 2 * self.action_dim
].view(x.shape[0], -1)
# get the num_components last tensor elements as logits for the mixture coefficients
log_coeff = mixture_params[..., -self.num_components :]
# split the dist_params along the middle dimension (2*num_components) into means and log stddevs
mu, log_std = dist_params.chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, log_coeff, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, log_coeff, V_hat = self(states)
# We need num_actions identical gmms to sample log_probs for each action
num_actions = actions.shape[-1]
mu = mu.unsqueeze(dim=1).expand((-1, num_actions, -1))
sigma = sigma.unsqueeze(dim=1).expand((-1, num_actions, -1))
log_coeff = log_coeff.unsqueeze(dim=1).expand((-1, num_actions, -1))
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
log_probs = gmm.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, log_coeff, _ = self(x)
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
action = gmm.sample()
self.train()
return action.detach().cpu().numpy()
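# --- Illustrative sketch, not part of the original module ---------------------
# The clamp-then-exp parameterisation used in forward() above keeps the learned
# standard deviation strictly positive and numerically bounded. The tensor
# values below are made up purely for demonstration; torch is already imported
# by this module.
def _log_std_trick_demo() -> None:
    raw = torch.tensor([-10.0, 0.0, 10.0])         # unbounded network outputs
    log_std = torch.clamp(raw, min=-5.0, max=2.0)  # constrain to [log_param_min, log_param_max]
    sigma = log_std.exp()                          # sigma lies in [exp(-5), exp(2)] and is always > 0
    assert (sigma > 0).all()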
class GeneralizedBetaPolicy(Policy):
# member annotations
state_dim: int
action_dim: int
action_bound: float
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "GeneralizedBeta"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: float,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
assert action_bound, "Beta policy needs action bounds specified."
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
x = self.trunk(x)
V_hat = self.value_head(x)
# create distribution parameters
dist_params = self.dist_head(x)
# Use the log_std_dev trick for alpha and beta
# since both alpha > 0 and beta > 0
dist_params = torch.clamp(
dist_params, min=self.log_param_min, max=self.log_param_max
)
alpha, beta = dist_params.exp().chunk(2, dim=-1)
return alpha, beta, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
alpha, beta, V_hat = self(states)
# ensure that the distribution batch_shape fits the number of actions taken for
# each agent at the root
num_actions = actions.shape[-1]
alpha = alpha.expand(-1, num_actions)
beta = beta.expand(-1, num_actions)
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
log_probs = beta_dist.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
alpha, beta, _ = self(x)
beta_dist = D.Beta(alpha, beta)
action = beta_dist.sample()
self.train()
return action.detach().cpu().numpy()
def make_policy(
representation_dim: int,
action_dim: int,
distribution: str,
hidden_dimensions: List[int],
nonlinearity: str,
num_components: Optional[int] = None,
num_actions: Optional[int] = None,
action_bound: Optional[float] = None,
layernorm: bool = False,
log_param_min: float = -5,
log_param_max: float = 2,
) -> Union[
DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy
]:
# basic config string preprocessing to ensure mapping works later
distribution = _process_str(distribution)
nonlinearity = _process_str(nonlinearity)
if distribution == "discrete":
return DiscretePolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_actions=cast(int, num_actions),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
)
elif distribution == "beta":
assert action_bound is not None
return GeneralizedBetaPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=cast(float, action_bound),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
assert num_components
if num_components > 1:
return DiagonalGMMPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_components=num_components,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
return DiagonalNormalPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
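# --- Illustrative usage sketch, not part of the original module ---------------
# Shows how the factory dispatches on `distribution` and `num_components`.
# All argument values here are hypothetical, and "relu" is assumed to be an
# accepted nonlinearity key in the Policy base class.
def _make_policy_demo():
    # num_components == 1 together with a non-discrete, non-beta distribution
    # selects DiagonalNormalPolicy
    return make_policy(
        representation_dim=64,
        action_dim=2,
        distribution="normal",
        hidden_dimensions=[128, 128],
        nonlinearity="relu",
        num_components=1,
        action_bound=1.0,
    )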
| true
| true
|
79058f2911553e7e243d913a93293fd27deb9840
| 10,499
|
py
|
Python
|
tests/test_cufft.py
|
ajkxyz/cuda4py
|
3f04dd5d72d64e5bd68dee91de1193a7bb6e8033
|
[
"BSD-2-Clause-FreeBSD"
] | 8
|
2016-03-12T00:36:04.000Z
|
2017-04-17T22:44:11.000Z
|
tests/test_cufft.py
|
Samsung/cuda4py
|
3f04dd5d72d64e5bd68dee91de1193a7bb6e8033
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2017-02-12T21:03:57.000Z
|
2020-11-13T13:34:29.000Z
|
tests/test_cufft.py
|
Samsung/cuda4py
|
3f04dd5d72d64e5bd68dee91de1193a7bb6e8033
|
[
"BSD-2-Clause-FreeBSD"
] | 4
|
2016-03-05T04:40:37.000Z
|
2020-02-12T18:37:27.000Z
|
"""
Copyright (c) 2014, Samsung Electronics Co.,Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Samsung Electronics Co.,Ltd..
"""
"""
cuda4py - CUDA cffi bindings and helper classes.
URL: https://github.com/ajkxyz/cuda4py
Original author: Alexey Kazantsev <a.kazantsev@samsung.com>
"""
"""
Tests some of the api in cuda4py.cufft package.
"""
import cuda4py as cu
import cuda4py.cufft as cufft
import gc
import logging
import numpy
import os
import unittest
class Test(unittest.TestCase):
def setUp(self):
logging.basicConfig(level=logging.DEBUG)
self.old_env = os.environ.get("CUDA_DEVICE")
if self.old_env is None:
os.environ["CUDA_DEVICE"] = "0"
self.ctx = cu.Devices().create_some_context()
self.path = os.path.dirname(__file__)
if not len(self.path):
self.path = "."
def tearDown(self):
if self.old_env is None:
del os.environ["CUDA_DEVICE"]
else:
os.environ["CUDA_DEVICE"] = self.old_env
del self.old_env
del self.ctx
gc.collect()
def test_constants(self):
self.assertEqual(cufft.CUFFT_SUCCESS, 0)
self.assertEqual(cufft.CUFFT_INVALID_PLAN, 1)
self.assertEqual(cufft.CUFFT_ALLOC_FAILED, 2)
self.assertEqual(cufft.CUFFT_INVALID_TYPE, 3)
self.assertEqual(cufft.CUFFT_INVALID_VALUE, 4)
self.assertEqual(cufft.CUFFT_INTERNAL_ERROR, 5)
self.assertEqual(cufft.CUFFT_EXEC_FAILED, 6)
self.assertEqual(cufft.CUFFT_SETUP_FAILED, 7)
self.assertEqual(cufft.CUFFT_INVALID_SIZE, 8)
self.assertEqual(cufft.CUFFT_UNALIGNED_DATA, 9)
self.assertEqual(cufft.CUFFT_INCOMPLETE_PARAMETER_LIST, 10)
self.assertEqual(cufft.CUFFT_INVALID_DEVICE, 11)
self.assertEqual(cufft.CUFFT_PARSE_ERROR, 12)
self.assertEqual(cufft.CUFFT_NO_WORKSPACE, 13)
self.assertEqual(cufft.CUFFT_R2C, 0x2a)
self.assertEqual(cufft.CUFFT_C2R, 0x2c)
self.assertEqual(cufft.CUFFT_C2C, 0x29)
self.assertEqual(cufft.CUFFT_D2Z, 0x6a)
self.assertEqual(cufft.CUFFT_Z2D, 0x6c)
self.assertEqual(cufft.CUFFT_Z2Z, 0x69)
self.assertEqual(cufft.CUFFT_FORWARD, -1)
self.assertEqual(cufft.CUFFT_INVERSE, 1)
def test_errors(self):
idx = cu.CU.ERRORS[cufft.CUFFT_INVALID_PLAN].find(" | ")
self.assertGreater(idx, 0)
def test_version(self):
fft = cufft.CUFFT(self.ctx)
ver = fft.version
logging.debug("cuFFT version is %d", ver)
self.assertTrue(ver == int(ver))
def test_auto_allocation(self):
fft = cufft.CUFFT(self.ctx)
self.assertTrue(fft.auto_allocation)
fft.auto_allocation = False
self.assertFalse(fft.auto_allocation)
fft.auto_allocation = True
self.assertTrue(fft.auto_allocation)
def test_make_plan_many(self):
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many((256, 128), 8, cufft.CUFFT_C2C)
logging.debug(
"make_plan_many (default layout) for 256x128 x8 returned %d", sz)
logging.debug("size is %d", fft.size)
self.assertEqual(fft.execute, fft.exec_c2c)
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many((256, 128), 8, cufft.CUFFT_C2C,
(256, 128), 1, 256 * 128,
(256, 128), 1, 256 * 128)
logging.debug(
"make_plan_many (tight layout) for 256x128 x8 returned is %d", sz)
logging.debug("size is %d", fft.size)
def _test_exec(self, dtype):
x = numpy.zeros([32, 64], dtype=dtype)
x[:] = numpy.random.rand(x.size).reshape(x.shape) - 0.5
y = numpy.ones((x.shape[0], x.shape[1] // 2 + 1),
dtype={numpy.float32: numpy.complex64,
numpy.float64: numpy.complex128}[dtype])
x_gold = x.copy()
try:
y_gold = numpy.fft.rfft2(x)
except TypeError:
y_gold = None # for pypy
xbuf = cu.MemAlloc(self.ctx, x)
ybuf = cu.MemAlloc(self.ctx, y)
# Forward transform
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many(x.shape, 1,
{numpy.float32: cufft.CUFFT_R2C,
numpy.float64: cufft.CUFFT_D2Z}[dtype])
tmp = cu.MemAlloc(self.ctx, sz)
fft.workarea = tmp
self.assertEqual(fft.workarea, tmp)
self.assertEqual(fft.execute,
{numpy.float32: fft.exec_r2c,
numpy.float64: fft.exec_d2z}[dtype])
fft.execute(xbuf, ybuf)
ybuf.to_host(y)
if y_gold is not None:
delta = y - y_gold
max_diff = numpy.fabs(numpy.sqrt(delta.real * delta.real +
delta.imag * delta.imag)).max()
logging.debug("Forward max_diff is %.6e", max_diff)
self.assertLess(max_diff, {numpy.float32: 1.0e-3,
numpy.float64: 1.0e-6}[dtype])
# Inverse transform
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many(x.shape, 1,
{numpy.float32: cufft.CUFFT_C2R,
numpy.float64: cufft.CUFFT_Z2D}[dtype])
fft.workarea = cu.MemAlloc(self.ctx, sz)
y /= x.size # correct scale before inverting
ybuf.to_device_async(y)
xbuf.memset32_async(0) # reset the resulting vector
self.assertEqual(fft.execute,
{numpy.float32: fft.exec_c2r,
numpy.float64: fft.exec_z2d}[dtype])
fft.execute(ybuf, xbuf)
xbuf.to_host(x)
max_diff = numpy.fabs(x - x_gold).max()
logging.debug("Inverse max_diff is %.6e", max_diff)
self.assertLess(max_diff, {numpy.float32: 1.0e-3,
numpy.float64: 1.0e-6}[dtype])
def test_exec_float(self):
logging.debug("ENTER: test_exec_float")
self._test_exec(numpy.float32)
logging.debug("EXIT: test_exec_float")
def test_exec_double(self):
logging.debug("ENTER: test_exec_double")
self._test_exec(numpy.float64)
logging.debug("EXIT: test_exec_double")
def _test_exec_complex(self, dtype):
x = numpy.zeros([32, 64], dtype=dtype)
x.real = numpy.random.rand(x.size).reshape(x.shape) - 0.5
x.imag = numpy.random.rand(x.size).reshape(x.shape) - 0.5
y = numpy.ones_like(x)
x_gold = x.copy()
try:
y_gold = numpy.fft.fft2(x)
except TypeError:
y_gold = None # for pypy
xbuf = cu.MemAlloc(self.ctx, x)
ybuf = cu.MemAlloc(self.ctx, y)
# Forward transform
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many(x.shape, 1,
{numpy.complex64: cufft.CUFFT_C2C,
numpy.complex128: cufft.CUFFT_Z2Z}[dtype])
tmp = cu.MemAlloc(self.ctx, sz)
fft.workarea = tmp
self.assertEqual(fft.workarea, tmp)
self.assertEqual(fft.execute, {numpy.complex64: fft.exec_c2c,
numpy.complex128: fft.exec_z2z}[dtype])
fft.execute(xbuf, ybuf, cufft.CUFFT_FORWARD)
ybuf.to_host(y)
if y_gold is not None:
delta = y - y_gold
max_diff = numpy.fabs(numpy.sqrt(delta.real * delta.real +
delta.imag * delta.imag)).max()
logging.debug("Forward max_diff is %.6e", max_diff)
self.assertLess(max_diff, {numpy.complex64: 1.0e-3,
numpy.complex128: 1.0e-6}[dtype])
# Inverse transform
y /= x.size # correct scale before inverting
ybuf.to_device_async(y)
xbuf.memset32_async(0) # reset the resulting vector
fft.execute(ybuf, xbuf, cufft.CUFFT_INVERSE)
xbuf.to_host(x)
delta = x - x_gold
max_diff = numpy.fabs(numpy.sqrt(delta.real * delta.real +
delta.imag * delta.imag)).max()
logging.debug("Inverse max_diff is %.6e", max_diff)
self.assertLess(max_diff, {numpy.complex64: 1.0e-3,
numpy.complex128: 1.0e-6}[dtype])
def test_exec_complex_float(self):
logging.debug("ENTER: test_exec_complex_float")
self._test_exec_complex(numpy.complex64)
logging.debug("EXIT: test_exec_complex_float")
def test_exec_complex_double(self):
logging.debug("ENTER: test_exec_complex_double")
self._test_exec_complex(numpy.complex128)
logging.debug("EXIT: test_exec_complex_double")
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
unittest.main()
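# --- Illustrative sketch, not part of the original tests ----------------------
# cuFFT's inverse transform is unnormalized, which is why _test_exec divides y
# by x.size before inverting. The same relationship can be checked with numpy,
# whose ifft IS normalized: an unnormalized inverse behaves like ifft(...) * N.
def _unnormalized_inverse_demo():
    a = numpy.random.rand(8)
    spectrum = numpy.fft.fft(a)
    # pre-scaling the spectrum by 1/N compensates for an inverse that
    # multiplies by N (emulated here as ifft(...) * N)
    recovered = numpy.fft.ifft(spectrum / a.size) * a.size
    assert numpy.allclose(recovered, a)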
| 39.768939
| 79
| 0.620916
|
import cuda4py as cu
import cuda4py.cufft as cufft
import gc
import logging
import numpy
import os
import unittest
class Test(unittest.TestCase):
def setUp(self):
logging.basicConfig(level=logging.DEBUG)
self.old_env = os.environ.get("CUDA_DEVICE")
if self.old_env is None:
os.environ["CUDA_DEVICE"] = "0"
self.ctx = cu.Devices().create_some_context()
self.path = os.path.dirname(__file__)
if not len(self.path):
self.path = "."
def tearDown(self):
if self.old_env is None:
del os.environ["CUDA_DEVICE"]
else:
os.environ["CUDA_DEVICE"] = self.old_env
del self.old_env
del self.ctx
gc.collect()
def test_constants(self):
self.assertEqual(cufft.CUFFT_SUCCESS, 0)
self.assertEqual(cufft.CUFFT_INVALID_PLAN, 1)
self.assertEqual(cufft.CUFFT_ALLOC_FAILED, 2)
self.assertEqual(cufft.CUFFT_INVALID_TYPE, 3)
self.assertEqual(cufft.CUFFT_INVALID_VALUE, 4)
self.assertEqual(cufft.CUFFT_INTERNAL_ERROR, 5)
self.assertEqual(cufft.CUFFT_EXEC_FAILED, 6)
self.assertEqual(cufft.CUFFT_SETUP_FAILED, 7)
self.assertEqual(cufft.CUFFT_INVALID_SIZE, 8)
self.assertEqual(cufft.CUFFT_UNALIGNED_DATA, 9)
self.assertEqual(cufft.CUFFT_INCOMPLETE_PARAMETER_LIST, 10)
self.assertEqual(cufft.CUFFT_INVALID_DEVICE, 11)
self.assertEqual(cufft.CUFFT_PARSE_ERROR, 12)
self.assertEqual(cufft.CUFFT_NO_WORKSPACE, 13)
self.assertEqual(cufft.CUFFT_R2C, 0x2a)
self.assertEqual(cufft.CUFFT_C2R, 0x2c)
self.assertEqual(cufft.CUFFT_C2C, 0x29)
self.assertEqual(cufft.CUFFT_D2Z, 0x6a)
self.assertEqual(cufft.CUFFT_Z2D, 0x6c)
self.assertEqual(cufft.CUFFT_Z2Z, 0x69)
self.assertEqual(cufft.CUFFT_FORWARD, -1)
self.assertEqual(cufft.CUFFT_INVERSE, 1)
def test_errors(self):
idx = cu.CU.ERRORS[cufft.CUFFT_INVALID_PLAN].find(" | ")
self.assertGreater(idx, 0)
def test_version(self):
fft = cufft.CUFFT(self.ctx)
ver = fft.version
logging.debug("cuFFT version is %d", ver)
self.assertTrue(ver == int(ver))
def test_auto_allocation(self):
fft = cufft.CUFFT(self.ctx)
self.assertTrue(fft.auto_allocation)
fft.auto_allocation = False
self.assertFalse(fft.auto_allocation)
fft.auto_allocation = True
self.assertTrue(fft.auto_allocation)
def test_make_plan_many(self):
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many((256, 128), 8, cufft.CUFFT_C2C)
logging.debug(
"make_plan_many (default layout) for 256x128 x8 returned %d", sz)
logging.debug("size is %d", fft.size)
self.assertEqual(fft.execute, fft.exec_c2c)
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many((256, 128), 8, cufft.CUFFT_C2C,
(256, 128), 1, 256 * 128,
(256, 128), 1, 256 * 128)
logging.debug(
"make_plan_many (tight layout) for 256x128 x8 returned is %d", sz)
logging.debug("size is %d", fft.size)
def _test_exec(self, dtype):
x = numpy.zeros([32, 64], dtype=dtype)
x[:] = numpy.random.rand(x.size).reshape(x.shape) - 0.5
y = numpy.ones((x.shape[0], x.shape[1] // 2 + 1),
dtype={numpy.float32: numpy.complex64,
numpy.float64: numpy.complex128}[dtype])
x_gold = x.copy()
try:
y_gold = numpy.fft.rfft2(x)
except TypeError:
y_gold = None
xbuf = cu.MemAlloc(self.ctx, x)
ybuf = cu.MemAlloc(self.ctx, y)
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many(x.shape, 1,
{numpy.float32: cufft.CUFFT_R2C,
numpy.float64: cufft.CUFFT_D2Z}[dtype])
tmp = cu.MemAlloc(self.ctx, sz)
fft.workarea = tmp
self.assertEqual(fft.workarea, tmp)
self.assertEqual(fft.execute,
{numpy.float32: fft.exec_r2c,
numpy.float64: fft.exec_d2z}[dtype])
fft.execute(xbuf, ybuf)
ybuf.to_host(y)
if y_gold is not None:
delta = y - y_gold
max_diff = numpy.fabs(numpy.sqrt(delta.real * delta.real +
delta.imag * delta.imag)).max()
logging.debug("Forward max_diff is %.6e", max_diff)
self.assertLess(max_diff, {numpy.float32: 1.0e-3,
numpy.float64: 1.0e-6}[dtype])
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many(x.shape, 1,
{numpy.float32: cufft.CUFFT_C2R,
numpy.float64: cufft.CUFFT_Z2D}[dtype])
fft.workarea = cu.MemAlloc(self.ctx, sz)
y /= x.size
ybuf.to_device_async(y)
xbuf.memset32_async(0)
self.assertEqual(fft.execute,
{numpy.float32: fft.exec_c2r,
numpy.float64: fft.exec_z2d}[dtype])
fft.execute(ybuf, xbuf)
xbuf.to_host(x)
max_diff = numpy.fabs(x - x_gold).max()
logging.debug("Inverse max_diff is %.6e", max_diff)
self.assertLess(max_diff, {numpy.float32: 1.0e-3,
numpy.float64: 1.0e-6}[dtype])
def test_exec_float(self):
logging.debug("ENTER: test_exec_float")
self._test_exec(numpy.float32)
logging.debug("EXIT: test_exec_float")
def test_exec_double(self):
logging.debug("ENTER: test_exec_double")
self._test_exec(numpy.float64)
logging.debug("EXIT: test_exec_double")
def _test_exec_complex(self, dtype):
x = numpy.zeros([32, 64], dtype=dtype)
x.real = numpy.random.rand(x.size).reshape(x.shape) - 0.5
x.imag = numpy.random.rand(x.size).reshape(x.shape) - 0.5
y = numpy.ones_like(x)
x_gold = x.copy()
try:
y_gold = numpy.fft.fft2(x)
except TypeError:
y_gold = None
xbuf = cu.MemAlloc(self.ctx, x)
ybuf = cu.MemAlloc(self.ctx, y)
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many(x.shape, 1,
{numpy.complex64: cufft.CUFFT_C2C,
numpy.complex128: cufft.CUFFT_Z2Z}[dtype])
tmp = cu.MemAlloc(self.ctx, sz)
fft.workarea = tmp
self.assertEqual(fft.workarea, tmp)
self.assertEqual(fft.execute, {numpy.complex64: fft.exec_c2c,
numpy.complex128: fft.exec_z2z}[dtype])
fft.execute(xbuf, ybuf, cufft.CUFFT_FORWARD)
ybuf.to_host(y)
if y_gold is not None:
delta = y - y_gold
max_diff = numpy.fabs(numpy.sqrt(delta.real * delta.real +
delta.imag * delta.imag)).max()
logging.debug("Forward max_diff is %.6e", max_diff)
self.assertLess(max_diff, {numpy.complex64: 1.0e-3,
numpy.complex128: 1.0e-6}[dtype])
y /= x.size
ybuf.to_device_async(y)
xbuf.memset32_async(0)
fft.execute(ybuf, xbuf, cufft.CUFFT_INVERSE)
xbuf.to_host(x)
delta = x - x_gold
max_diff = numpy.fabs(numpy.sqrt(delta.real * delta.real +
delta.imag * delta.imag)).max()
logging.debug("Inverse max_diff is %.6e", max_diff)
self.assertLess(max_diff, {numpy.complex64: 1.0e-3,
numpy.complex128: 1.0e-6}[dtype])
def test_exec_complex_float(self):
logging.debug("ENTER: test_exec_complex_float")
self._test_exec_complex(numpy.complex64)
logging.debug("EXIT: test_exec_complex_float")
def test_exec_complex_double(self):
logging.debug("ENTER: test_exec_complex_double")
self._test_exec_complex(numpy.complex128)
logging.debug("EXIT: test_exec_complex_double")
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| true
| true
|
79058f4721ac290d415e6df94c9376327041466d
| 2,470
|
py
|
Python
|
app/core/migrations/0001_initial.py
|
ergauravsoni/final-year-backend
|
473ba9e75101d25f41adfe0b756344ec23fa413c
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
ergauravsoni/final-year-backend
|
473ba9e75101d25f41adfe0b756344ec23fa413c
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
ergauravsoni/final-year-backend
|
473ba9e75101d25f41adfe0b756344ec23fa413c
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.4 on 2021-07-04 11:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Game',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tower_blocks_score', models.IntegerField(default=0)),
('bounce_score', models.IntegerField(default=0)),
('kill_birds_score', models.IntegerField(default=0)),
('snake_score', models.IntegerField(default=0)),
('last_updated', models.DateTimeField(auto_now_add=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 51.458333
| 266
| 0.631174
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Game',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tower_blocks_score', models.IntegerField(default=0)),
('bounce_score', models.IntegerField(default=0)),
('kill_birds_score', models.IntegerField(default=0)),
('snake_score', models.IntegerField(default=0)),
('last_updated', models.DateTimeField(auto_now_add=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true
| true
|
79058f7ac41742757c7d9d6f859988cc9b35f5f5
| 2,369
|
py
|
Python
|
test/test_no_ssl.py
|
balabit-deps/balabit-os-6-python-urllib3
|
03fadded88b3631953f261ca8ed91121ee5383d1
|
[
"MIT"
] | 6
|
2017-10-25T14:19:18.000Z
|
2021-11-15T10:22:21.000Z
|
test/test_no_ssl.py
|
balabit-deps/balabit-os-6-python-urllib3
|
03fadded88b3631953f261ca8ed91121ee5383d1
|
[
"MIT"
] | 2
|
2018-09-04T20:59:45.000Z
|
2018-09-07T09:36:30.000Z
|
test/test_no_ssl.py
|
balabit-deps/balabit-os-6-python-urllib3
|
03fadded88b3631953f261ca8ed91121ee5383d1
|
[
"MIT"
] | 9
|
2017-10-25T14:19:24.000Z
|
2022-01-31T17:09:16.000Z
|
"""
Test what happens if Python was built without SSL
* Everything that does not involve HTTPS should still work
* HTTPS requests must fail with an error that points at the ssl module
"""
import sys
import unittest
class ImportBlocker(object):
"""
Block Imports
To be placed on ``sys.meta_path``. This ensures that the modules
specified cannot be imported, even if they are builtins.
"""
def __init__(self, *namestoblock):
self.namestoblock = namestoblock
def find_module(self, fullname, path=None):
if fullname in self.namestoblock:
return self
return None
def load_module(self, fullname):
raise ImportError('import of {0} is blocked'.format(fullname))
class ModuleStash(object):
"""
Stashes away previously imported modules
If we reimport a module, the data from coverage is lost, so we reuse the old
modules
"""
def __init__(self, namespace, modules=sys.modules):
self.namespace = namespace
self.modules = modules
self._data = {}
def stash(self):
self._data[self.namespace] = self.modules.pop(self.namespace, None)
for module in list(self.modules.keys()):
if module.startswith(self.namespace + '.'):
self._data[module] = self.modules.pop(module)
def pop(self):
self.modules.pop(self.namespace, None)
for module in list(self.modules.keys()):
if module.startswith(self.namespace + '.'):
self.modules.pop(module)
self.modules.update(self._data)
ssl_blocker = ImportBlocker('ssl', '_ssl')
module_stash = ModuleStash('urllib3')
class TestWithoutSSL(unittest.TestCase):
def setUp(self):
sys.modules.pop('ssl', None)
sys.modules.pop('_ssl', None)
module_stash.stash()
sys.meta_path.insert(0, ssl_blocker)
def tearDown(self):
sys.meta_path.remove(ssl_blocker)
module_stash.pop()
class TestImportWithoutSSL(TestWithoutSSL):
def test_cannot_import_ssl(self):
# python26 has neither contextmanagers (for assertRaises) nor
# importlib.
# 'import' inside 'lambda' is invalid syntax.
def import_ssl():
import ssl
self.assertRaises(ImportError, import_ssl)
def test_import_urllib3(self):
import urllib3
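# --- Illustrative sketch, not part of the original tests ----------------------
# The same blocking mechanism used by TestWithoutSSL, outside the unittest
# fixture. The cached modules must be dropped from sys.modules first, otherwise
# the import is served from the cache and the blocker never runs. (The real
# tests also stash and restore urllib3 modules via ModuleStash in tearDown.)
def _import_blocker_demo():
    sys.modules.pop('ssl', None)
    sys.modules.pop('_ssl', None)
    sys.meta_path.insert(0, ssl_blocker)
    try:
        try:
            import ssl  # noqa: F401
            return False
        except ImportError:
            return True
    finally:
        sys.meta_path.remove(ssl_blocker)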
| 26.322222
| 79
| 0.653018
|
import sys
import unittest
class ImportBlocker(object):
def __init__(self, *namestoblock):
self.namestoblock = namestoblock
def find_module(self, fullname, path=None):
if fullname in self.namestoblock:
return self
return None
def load_module(self, fullname):
raise ImportError('import of {0} is blocked'.format(fullname))
class ModuleStash(object):
def __init__(self, namespace, modules=sys.modules):
self.namespace = namespace
self.modules = modules
self._data = {}
def stash(self):
self._data[self.namespace] = self.modules.pop(self.namespace, None)
for module in list(self.modules.keys()):
if module.startswith(self.namespace + '.'):
self._data[module] = self.modules.pop(module)
def pop(self):
self.modules.pop(self.namespace, None)
for module in list(self.modules.keys()):
if module.startswith(self.namespace + '.'):
self.modules.pop(module)
self.modules.update(self._data)
ssl_blocker = ImportBlocker('ssl', '_ssl')
module_stash = ModuleStash('urllib3')
class TestWithoutSSL(unittest.TestCase):
def setUp(self):
sys.modules.pop('ssl', None)
sys.modules.pop('_ssl', None)
module_stash.stash()
sys.meta_path.insert(0, ssl_blocker)
def tearDown(self):
sys.meta_path.remove(ssl_blocker)
module_stash.pop()
class TestImportWithoutSSL(TestWithoutSSL):
def test_cannot_import_ssl(self):
def import_ssl():
import ssl
self.assertRaises(ImportError, import_ssl)
def test_import_urllib3(self):
import urllib3
| true
| true
|
79058f9802c3b5bba3732dfd902a4855f279dbaa
| 297
|
py
|
Python
|
py25/bacpypes/service/test.py
|
amih90/bacpypes
|
27ab4f18aa252ceb6ffdc32d53af2995a2e92647
|
[
"MIT"
] | 240
|
2015-07-17T16:27:54.000Z
|
2022-03-29T13:53:06.000Z
|
py25/bacpypes/service/test.py
|
amih90/bacpypes
|
27ab4f18aa252ceb6ffdc32d53af2995a2e92647
|
[
"MIT"
] | 400
|
2015-07-23T05:37:52.000Z
|
2022-03-29T12:32:30.000Z
|
py25/bacpypes/service/test.py
|
amih90/bacpypes
|
27ab4f18aa252ceb6ffdc32d53af2995a2e92647
|
[
"MIT"
] | 143
|
2015-07-17T18:22:27.000Z
|
2022-03-22T01:21:24.000Z
|
#!/usr/bin/env python
"""
Test Service
"""
from ..debugging import bacpypes_debugging, ModuleLogger
# some debugging
_debug = 0
_log = ModuleLogger(globals())
def some_function(*args):
if _debug: some_function._debug("f %r", args)
return args[0] + 1
bacpypes_debugging(some_function)
| 16.5
| 56
| 0.720539
|
from ..debugging import bacpypes_debugging, ModuleLogger
_debug = 0
_log = ModuleLogger(globals())
def some_function(*args):
if _debug: some_function._debug("f %r", args)
return args[0] + 1
bacpypes_debugging(some_function)
| true
| true
|
7905910cd437177895c7ea56ca90edf0ff9764a1
| 2,668
|
py
|
Python
|
qtensor/optimisation/RGreedy.py
|
marwahaha/QTensor
|
936d078825a6418f9d32d2c176332422d8a4c137
|
[
"BSD-3-Clause"
] | 20
|
2020-09-08T20:32:44.000Z
|
2022-03-18T11:27:57.000Z
|
qtensor/optimisation/RGreedy.py
|
marwahaha/QTensor
|
936d078825a6418f9d32d2c176332422d8a4c137
|
[
"BSD-3-Clause"
] | 21
|
2020-10-09T04:44:48.000Z
|
2021-10-05T03:32:35.000Z
|
qtensor/optimisation/RGreedy.py
|
marwahaha/QTensor
|
936d078825a6418f9d32d2c176332422d8a4c137
|
[
"BSD-3-Clause"
] | 4
|
2020-12-18T01:37:10.000Z
|
2021-07-26T21:24:20.000Z
|
import numpy as np
import copy, operator
from qtensor.optimisation.Optimizer import OrderingOptimizer
from qtensor import utils
from functools import reduce
import networkx as nx
import qtree
def reducelist(f, lst, x=0):
prev = x
for i in lst:
prev = f(prev, i)
yield prev
class RGreedyOptimizer(OrderingOptimizer):
"""
An orderer that greedy selects vertices
using boltzman probabilities.
"""
def __init__(self, *args, temp=0.002, repeats=10, **kwargs):
super().__init__(*args, **kwargs)
self.temp = temp
self.repeats = repeats
def _get_ordering(self, graph, **kwargs):
node_names = nx.get_node_attributes(graph, 'name')
node_sizes = nx.get_node_attributes(graph, 'size')
peo, path = self._get_ordering_ints(graph)
peo = [qtree.optimizer.Var(var, size=node_sizes[var],
name=node_names[var])
for var in peo]
#print('tw=', max(path))
return peo, path
def _get_ordering_ints(self, old_graph, free_vars=[]):
best_peo = None
best_width = np.inf
best_widths = None
for i in range(self.repeats):
graph = copy.deepcopy(old_graph)
peo = []
widths = []
while graph.number_of_nodes():
ngs = np.array(list(
map(len, map(operator.itemgetter(1), graph.adjacency()))
))
weights = np.exp(-(ngs - np.min(ngs))/self.temp)
#print(ngs)
#print(weights)
# 1, 3, 5, 2, 1
distrib = np.array([0]+list(reducelist(lambda x, y:x+y, weights, 0)))
#print(distrib)
# 0, 1, 4, 9, 11, 12
rnd = np.random.random()*distrib[-1]
# between 0 and 12 = say, 5
# find the smallest value that larger than rnd
bool_map = distrib < rnd
# True, True, True, False, False, False
select_map = bool_map[1:] ^ bool_map[:-1]
selected_elem = np.array(list(graph.nodes))[select_map]
assert len(selected_elem)==1, 'Error in algorithm, please submit an issue'
selected_node = selected_elem[0]
utils.eliminate_node_no_structure(graph, selected_node)
peo.append(int(selected_node))
widths.append(int(ngs[select_map][0]))
if max(widths) < best_width:
best_peo = peo
best_widths = widths
best_width = max(widths)
return best_peo, best_widths
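# --- Illustrative sketch, not part of the original module ---------------------
# The roulette-wheel selection used in _get_ordering_ints, shown on the toy
# weights from the comments above (1, 3, 5, 2, 1 -> cumulative 0, 1, 4, 9, 11, 12).
def _roulette_pick_demo() -> int:
    weights = np.array([1.0, 3.0, 5.0, 2.0, 1.0])
    distrib = np.array([0] + list(reducelist(lambda x, y: x + y, weights, 0)))
    rnd = np.random.random() * distrib[-1]
    # the interval [distrib[i], distrib[i+1]) that contains rnd marks the pick
    bool_map = distrib < rnd
    select_map = bool_map[1:] ^ bool_map[:-1]
    return int(np.argmax(select_map))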
| 34.205128
| 90
| 0.552099
|
import numpy as np
import copy, operator
from qtensor.optimisation.Optimizer import OrderingOptimizer
from qtensor import utils
from functools import reduce
import networkx as nx
import qtree
def reducelist(f, lst, x=0):
prev = x
for i in lst:
prev = f(prev, i)
yield prev
class RGreedyOptimizer(OrderingOptimizer):
def __init__(self, *args, temp=0.002, repeats=10, **kwargs):
super().__init__(*args, **kwargs)
self.temp = temp
self.repeats = repeats
def _get_ordering(self, graph, **kwargs):
node_names = nx.get_node_attributes(graph, 'name')
node_sizes = nx.get_node_attributes(graph, 'size')
peo, path = self._get_ordering_ints(graph)
peo = [qtree.optimizer.Var(var, size=node_sizes[var],
name=node_names[var])
for var in peo]
return peo, path
def _get_ordering_ints(self, old_graph, free_vars=[]):
best_peo = None
best_width = np.inf
best_widths = None
for i in range(self.repeats):
graph = copy.deepcopy(old_graph)
peo = []
widths = []
while graph.number_of_nodes():
ngs = np.array(list(
map(len, map(operator.itemgetter(1), graph.adjacency()))
))
weights = np.exp(-(ngs - np.min(ngs))/self.temp)
distrib = np.array([0]+list(reducelist(lambda x, y:x+y, weights, 0)))
rnd = np.random.random()*distrib[-1]
bool_map = distrib < rnd
select_map = bool_map[1:] ^ bool_map[:-1]
selected_elem = np.array(list(graph.nodes))[select_map]
assert len(selected_elem)==1, 'Error in algorithm, please submit an issue'
selected_node = selected_elem[0]
utils.eliminate_node_no_structure(graph, selected_node)
peo.append(int(selected_node))
widths.append(int(ngs[select_map][0]))
if max(widths) < best_width:
best_peo = peo
best_widths = widths
best_width = max(widths)
return best_peo, best_widths
| true
| true
|
790591255c8418d80b1a9a3e1c9688f36153f42d
| 10,225
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/graph_objs/scatter3d/_textfont.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
env/lib/python3.8/site-packages/plotly/graph_objs/scatter3d/_textfont.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
env/lib/python3.8/site-packages/plotly/graph_objs/scatter3d/_textfont.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatter3d"
_path_str = "scatter3d.textfont"
_valid_props = {"color", "colorsrc", "family", "size", "sizesrc"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.Textfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 34.427609
| 82
| 0.557653
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d"
_path_str = "scatter3d.textfont"
_valid_props = {"color", "colorsrc", "family", "size", "sizesrc"}
@property
def color(self):
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def size(self):
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
size=None,
sizesrc=None,
**kwargs
):
super(Textfont, self).__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| true
| true
|
7905919d08d1078de9d56ab3d9bf43e0af85b7de
| 3,582
|
py
|
Python
|
conda/_vendor/auxlib/collection.py
|
peschue/conda
|
dc25e8c8765c5dfd1f99d697617bc6148224e194
|
[
"BSD-3-Clause"
] | 1
|
2018-12-21T22:11:55.000Z
|
2018-12-21T22:11:55.000Z
|
conda/_vendor/auxlib/collection.py
|
peschue/conda
|
dc25e8c8765c5dfd1f99d697617bc6148224e194
|
[
"BSD-3-Clause"
] | 1
|
2019-04-02T23:35:13.000Z
|
2019-04-02T23:35:13.000Z
|
conda/_vendor/auxlib/collection.py
|
peschue/conda
|
dc25e8c8765c5dfd1f99d697617bc6148224e194
|
[
"BSD-3-Clause"
] | 2
|
2018-03-02T19:55:14.000Z
|
2019-02-14T22:37:28.000Z
|
# -*- coding: utf-8 -*-
"""Common collection classes."""
from __future__ import print_function, division, absolute_import
from functools import reduce
from collections import Mapping, Set
from .compat import isiterable, iteritems, odict, text_type
def make_immutable(value):
# this function is recursive, and if nested data structures fold back on themselves,
# there will likely be recursion errors
if isinstance(value, Mapping):
if isinstance(value, frozendict):
return value
return frozendict((k, make_immutable(v)) for k, v in iteritems(value))
elif isinstance(value, Set):
if isinstance(value, frozenset):
return value
return frozenset(make_immutable(v) for v in value)
elif isiterable(value):
if isinstance(value, tuple):
return value
return tuple(make_immutable(v) for v in value)
else:
return value
# http://stackoverflow.com/a/14620633/2127762
class AttrDict(dict):
"""Sub-classes dict, and further allows attribute-like access to dictionary items.
Examples:
>>> d = AttrDict({'a': 1})
>>> d.a, d['a'], d.get('a')
(1, 1, 1)
>>> d.b = 2
>>> d.b, d['b']
(2, 2)
"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class frozendict(odict):
def __key(self):
return tuple((k, self[k]) for k in sorted(self))
def __hash__(self):
return hash(self.__key())
def __eq__(self, other):
try:
return self.__key() == other.__key()
except AttributeError:
if isinstance(other, Mapping):
return self.__key() == frozendict(other).__key()
return False
def first(seq, key=lambda x: bool(x), default=None, apply=lambda x: x):
"""Give the first value that satisfies the key test.
Args:
seq (iterable):
key (callable): test for each element of iterable
default: returned when all elements fail test
apply (callable): applied to element before return, but not to default value
Returns: first element in seq that passes key, mutated with optional apply
Examples:
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional `key` argument specifies a one-argument predicate function
like that used for `filter()`. The `key` argument, if supplied, must be
in keyword form. For example:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4
"""
return next((apply(x) for x in seq if key(x)), default() if callable(default) else default)
def firstitem(map, key=lambda k, v: bool(k), default=None, apply=lambda k, v: (k, v)):
return next((apply(k, v) for k, v in map if key(k, v)), default)
def last(seq, key=lambda x: bool(x), default=None, apply=lambda x: x):
return next((apply(x) for x in reversed(seq) if key(x)), default)
def call_each(seq):
"""Calls each element of sequence to invoke the side effect.
Args:
seq:
Returns: None
"""
try:
reduce(lambda _, y: y(), seq)
except TypeError as e:
if text_type(e) != "reduce() of empty sequence with no initial value":
raise
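# --- Illustrative sketch, not part of the original module ---------------------
# frozendict hashes on a sorted (key, value) tuple, so insertion order does not
# affect equality or the hash, unlike the underlying ordered dict.
def _frozendict_demo():
    a = frozendict([("x", 1), ("y", 2)])
    b = frozendict([("y", 2), ("x", 1)])
    assert a == b and hash(a) == hash(b)
    return {a: "usable as a dictionary key"}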
| 30.355932
| 95
| 0.595757
|
from __future__ import print_function, division, absolute_import
from functools import reduce
from collections import Mapping, Set
from .compat import isiterable, iteritems, odict, text_type
def make_immutable(value):
if isinstance(value, Mapping):
if isinstance(value, frozendict):
return value
return frozendict((k, make_immutable(v)) for k, v in iteritems(value))
elif isinstance(value, Set):
if isinstance(value, frozenset):
return value
return frozenset(make_immutable(v) for v in value)
elif isiterable(value):
if isinstance(value, tuple):
return value
return tuple(make_immutable(v) for v in value)
else:
return value
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class frozendict(odict):
def __key(self):
return tuple((k, self[k]) for k in sorted(self))
def __hash__(self):
return hash(self.__key())
def __eq__(self, other):
try:
return self.__key() == other.__key()
except AttributeError:
if isinstance(other, Mapping):
return self.__key() == frozendict(other).__key()
return False
def first(seq, key=lambda x: bool(x), default=None, apply=lambda x: x):
return next((apply(x) for x in seq if key(x)), default() if callable(default) else default)
def firstitem(map, key=lambda k, v: bool(k), default=None, apply=lambda k, v: (k, v)):
return next((apply(k, v) for k, v in map if key(k, v)), default)
def last(seq, key=lambda x: bool(x), default=None, apply=lambda x: x):
return next((apply(x) for x in reversed(seq) if key(x)), default)
def call_each(seq):
try:
reduce(lambda _, y: y(), seq)
except TypeError as e:
if text_type(e) != "reduce() of empty sequence with no initial value":
raise
| true
| true
|
7905920f96a2533d5e180884ee6a4c005481232a
| 15,464
|
py
|
Python
|
plugins/modules/oci_vault_secret_actions.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_vault_secret_actions.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_vault_secret_actions.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2017, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_vault_secret_actions
short_description: Perform actions on a Secret resource in Oracle Cloud Infrastructure
description:
- Perform actions on a Secret resource in Oracle Cloud Infrastructure
- For I(action=cancel_secret_deletion), cancels the pending deletion of the specified secret. Canceling
a scheduled deletion restores the secret's lifecycle state to what
it was before you scheduled the secret for deletion.
- For I(action=schedule_secret_deletion), schedules the deletion of the specified secret. This sets the lifecycle state of the secret
to `PENDING_DELETION` and then deletes it after the specified retention period ends.
version_added: "2.9"
author: Oracle (@oracle)
options:
secret_id:
description:
- The OCID of the secret.
type: str
aliases: ["id"]
required: true
time_of_deletion:
description:
- An optional property indicating when to delete the secret version, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format.
- Applicable only for I(action=schedule_secret_deletion).
type: str
action:
description:
- The action to perform on the Secret.
type: str
required: true
choices:
- "cancel_secret_deletion"
- "schedule_secret_deletion"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Perform action cancel_secret_deletion on secret
oci_vault_secret_actions:
secret_id: ocid1.secret.oc1..xxxxxxEXAMPLExxxxxx
action: cancel_secret_deletion
- name: Perform action schedule_secret_deletion on secret
oci_vault_secret_actions:
time_of_deletion: 2018-04-03T21:10:29.600Z
secret_id: ocid1.secret.oc1..xxxxxxEXAMPLExxxxxx
action: schedule_secret_deletion
"""
RETURN = """
secret:
description:
- Details of the Secret resource acted upon by the current operation
returned: on success
type: complex
contains:
compartment_id:
description:
- The OCID of the compartment where you want to create the secret.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
current_version_number:
description:
- The version number of the secret version that's currently in use.
returned: on success
type: int
sample: 56
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
description:
description:
- A brief description of the secret. Avoid entering confidential information.
returned: on success
type: string
sample: description_example
freeform_tags:
description:
- "Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
id:
description:
- The OCID of the secret.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
key_id:
description:
- The OCID of the master encryption key that is used to encrypt the secret.
returned: on success
type: string
sample: ocid1.key.oc1..xxxxxxEXAMPLExxxxxx
lifecycle_details:
description:
- Additional information about the current lifecycle state of the secret.
returned: on success
type: string
sample: lifecycle_details_example
lifecycle_state:
description:
- The current lifecycle state of the secret.
returned: on success
type: string
sample: CREATING
metadata:
description:
- Additional metadata that you can use to provide context about how to use the secret or during rotation or
other administrative tasks. For example, for a secret that you use to connect to a database, the additional
metadata might specify the connection endpoint and the connection string. Provide additional metadata as key-value pairs.
returned: on success
type: dict
sample: {}
secret_name:
description:
- The user-friendly name of the secret. Avoid entering confidential information.
returned: on success
type: string
sample: secret_name_example
secret_rules:
description:
- A list of rules that control how the secret is used and managed.
returned: on success
type: complex
contains:
rule_type:
description:
- The type of rule, which either controls when the secret contents expire or whether they can be reused.
returned: on success
type: string
sample: SECRET_EXPIRY_RULE
secret_version_expiry_interval:
description:
- A property indicating how long the secret contents will be considered valid, expressed in
L(ISO 8601,https://en.wikipedia.org/wiki/ISO_8601#Time_intervals) format. The secret needs to be
updated when the secret content expires. No enforcement mechanism exists at this time, but audit logs
record the expiration on the appropriate date, according to the time interval specified in the rule.
The timer resets after you update the secret contents.
The minimum value is 1 day and the maximum value is 90 days for this property. Currently, only intervals expressed in days are
supported.
For example, pass `P3D` to have the secret version expire every 3 days.
returned: on success
type: string
sample: secret_version_expiry_interval_example
time_of_absolute_expiry:
description:
- "An optional property indicating the absolute time when this secret will expire, expressed in L(RFC
3339,https://tools.ietf.org/html/rfc3339) timestamp format.
The minimum number of days from current time is 1 day and the maximum number of days from current time is 365 days.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
is_secret_content_retrieval_blocked_on_expiry:
description:
- A property indicating whether to block retrieval of the secret content, on expiry. The default is false.
If the secret has already expired and you would like to retrieve the secret contents,
you need to edit the secret rule to disable this property, to allow reading the secret content.
returned: on success
type: bool
sample: true
is_enforced_on_deleted_secret_versions:
description:
- A property indicating whether the rule is applied even if the secret version with the content you are trying to reuse was deleted.
returned: on success
type: bool
sample: true
time_created:
description:
- "A property indicating when the secret was created, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
time_of_current_version_expiry:
description:
- "An optional property indicating when the current secret version will expire, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339)
timestamp format.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
time_of_deletion:
description:
- "An optional property indicating when to delete the secret, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
vault_id:
description:
- The OCID of the vault where the secret exists.
returned: on success
type: string
sample: ocid1.vault.oc1..xxxxxxEXAMPLExxxxxx
sample: {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"current_version_number": 56,
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"description": "description_example",
"freeform_tags": {'Department': 'Finance'},
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"key_id": "ocid1.key.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_details": "lifecycle_details_example",
"lifecycle_state": "CREATING",
"metadata": {},
"secret_name": "secret_name_example",
"secret_rules": [{
"rule_type": "SECRET_EXPIRY_RULE",
"secret_version_expiry_interval": "secret_version_expiry_interval_example",
"time_of_absolute_expiry": "2019-04-03T21:10:29.600Z",
"is_secret_content_retrieval_blocked_on_expiry": true,
"is_enforced_on_deleted_secret_versions": true
}],
"time_created": "2019-04-03T21:10:29.600Z",
"time_of_current_version_expiry": "2019-04-03T21:10:29.600Z",
"time_of_deletion": "2019-04-03T21:10:29.600Z",
"vault_id": "ocid1.vault.oc1..xxxxxxEXAMPLExxxxxx"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.vault import VaultsClient
from oci.vault.models import ScheduleSecretDeletionDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class SecretActionsHelperGen(OCIActionsHelperBase):
"""
Supported actions:
cancel_secret_deletion
schedule_secret_deletion
"""
@staticmethod
def get_module_resource_id_param():
return "secret_id"
def get_module_resource_id(self):
return self.module.params.get("secret_id")
def get_get_fn(self):
return self.client.get_secret
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_secret, secret_id=self.module.params.get("secret_id"),
)
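    # Each value accepted by the `action` module parameter maps to a helper
    # method of the same name below; the base helper presumably dispatches to
    # that method when the action is performed.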
def cancel_secret_deletion(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.cancel_secret_deletion,
call_fn_args=(),
call_fn_kwargs=dict(secret_id=self.module.params.get("secret_id"),),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
def schedule_secret_deletion(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, ScheduleSecretDeletionDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.schedule_secret_deletion,
call_fn_args=(),
call_fn_kwargs=dict(
secret_id=self.module.params.get("secret_id"),
schedule_secret_deletion_details=action_details,
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
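# get_custom_class() presumably returns a hand-written override class if the
# collection ships one, so ResourceHelper can layer customizations on top of
# the generated SecretActionsHelperGen.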
SecretActionsHelperCustom = get_custom_class("SecretActionsHelperCustom")
class ResourceHelper(SecretActionsHelperCustom, SecretActionsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=False
)
module_args.update(
dict(
secret_id=dict(aliases=["id"], type="str", required=True),
time_of_deletion=dict(type="str"),
action=dict(
type="str",
required=True,
choices=["cancel_secret_deletion", "schedule_secret_deletion"],
),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="secret",
service_client_class=VaultsClient,
namespace="vault",
)
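    # perform_action() is expected to validate the requested action, honor
    # check mode, call the matching helper method, and return the resulting
    # resource state for exit_json().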
result = resource_helper.perform_action(module.params.get("action"))
module.exit_json(**result)
if __name__ == "__main__":
main()
| 41.237333
| 159
| 0.620215
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_vault_secret_actions
short_description: Perform actions on a Secret resource in Oracle Cloud Infrastructure
description:
- Perform actions on a Secret resource in Oracle Cloud Infrastructure
- For I(action=cancel_secret_deletion), cancels the pending deletion of the specified secret. Canceling
a scheduled deletion restores the secret's lifecycle state to what
it was before you scheduled the secret for deletion.
- For I(action=schedule_secret_deletion), schedules the deletion of the specified secret. This sets the lifecycle state of the secret
to `PENDING_DELETION` and then deletes it after the specified retention period ends.
version_added: "2.9"
author: Oracle (@oracle)
options:
secret_id:
description:
- The OCID of the secret.
type: str
aliases: ["id"]
required: true
time_of_deletion:
description:
- An optional property indicating when to delete the secret version, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format.
- Applicable only for I(action=schedule_secret_deletion).
type: str
action:
description:
- The action to perform on the Secret.
type: str
required: true
choices:
- "cancel_secret_deletion"
- "schedule_secret_deletion"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Perform action cancel_secret_deletion on secret
oci_vault_secret_actions:
secret_id: ocid1.secret.oc1..xxxxxxEXAMPLExxxxxx
action: cancel_secret_deletion
- name: Perform action schedule_secret_deletion on secret
oci_vault_secret_actions:
time_of_deletion: 2018-04-03T21:10:29.600Z
secret_id: ocid1.secret.oc1..xxxxxxEXAMPLExxxxxx
action: schedule_secret_deletion
"""
RETURN = """
secret:
description:
- Details of the Secret resource acted upon by the current operation
returned: on success
type: complex
contains:
compartment_id:
description:
- The OCID of the compartment where you want to create the secret.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
current_version_number:
description:
- The version number of the secret version that's currently in use.
returned: on success
type: int
sample: 56
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
description:
description:
- A brief description of the secret. Avoid entering confidential information.
returned: on success
type: string
sample: description_example
freeform_tags:
description:
- "Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
id:
description:
- The OCID of the secret.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
key_id:
description:
- The OCID of the master encryption key that is used to encrypt the secret.
returned: on success
type: string
sample: ocid1.key.oc1..xxxxxxEXAMPLExxxxxx
lifecycle_details:
description:
- Additional information about the current lifecycle state of the secret.
returned: on success
type: string
sample: lifecycle_details_example
lifecycle_state:
description:
- The current lifecycle state of the secret.
returned: on success
type: string
sample: CREATING
metadata:
description:
- Additional metadata that you can use to provide context about how to use the secret or during rotation or
other administrative tasks. For example, for a secret that you use to connect to a database, the additional
metadata might specify the connection endpoint and the connection string. Provide additional metadata as key-value pairs.
returned: on success
type: dict
sample: {}
secret_name:
description:
- The user-friendly name of the secret. Avoid entering confidential information.
returned: on success
type: string
sample: secret_name_example
secret_rules:
description:
- A list of rules that control how the secret is used and managed.
returned: on success
type: complex
contains:
rule_type:
description:
- The type of rule, which either controls when the secret contents expire or whether they can be reused.
returned: on success
type: string
sample: SECRET_EXPIRY_RULE
secret_version_expiry_interval:
description:
- A property indicating how long the secret contents will be considered valid, expressed in
L(ISO 8601,https://en.wikipedia.org/wiki/ISO_8601#Time_intervals) format. The secret needs to be
updated when the secret content expires. No enforcement mechanism exists at this time, but audit logs
record the expiration on the appropriate date, according to the time interval specified in the rule.
The timer resets after you update the secret contents.
The minimum value is 1 day and the maximum value is 90 days for this property. Currently, only intervals expressed in days are
supported.
For example, pass `P3D` to have the secret version expire every 3 days.
returned: on success
type: string
sample: secret_version_expiry_interval_example
time_of_absolute_expiry:
description:
- "An optional property indicating the absolute time when this secret will expire, expressed in L(RFC
3339,https://tools.ietf.org/html/rfc3339) timestamp format.
The minimum number of days from current time is 1 day and the maximum number of days from current time is 365 days.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
is_secret_content_retrieval_blocked_on_expiry:
description:
- A property indicating whether to block retrieval of the secret content, on expiry. The default is false.
If the secret has already expired and you would like to retrieve the secret contents,
you need to edit the secret rule to disable this property, to allow reading the secret content.
returned: on success
type: bool
sample: true
is_enforced_on_deleted_secret_versions:
description:
- A property indicating whether the rule is applied even if the secret version with the content you are trying to reuse was deleted.
returned: on success
type: bool
sample: true
time_created:
description:
- "A property indicating when the secret was created, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
time_of_current_version_expiry:
description:
- "An optional property indicating when the current secret version will expire, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339)
timestamp format.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
time_of_deletion:
description:
- "An optional property indicating when to delete the secret, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
vault_id:
description:
- The OCID of the vault where the secret exists.
returned: on success
type: string
sample: ocid1.vault.oc1..xxxxxxEXAMPLExxxxxx
sample: {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"current_version_number": 56,
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"description": "description_example",
"freeform_tags": {'Department': 'Finance'},
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"key_id": "ocid1.key.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_details": "lifecycle_details_example",
"lifecycle_state": "CREATING",
"metadata": {},
"secret_name": "secret_name_example",
"secret_rules": [{
"rule_type": "SECRET_EXPIRY_RULE",
"secret_version_expiry_interval": "secret_version_expiry_interval_example",
"time_of_absolute_expiry": "2019-04-03T21:10:29.600Z",
"is_secret_content_retrieval_blocked_on_expiry": true,
"is_enforced_on_deleted_secret_versions": true
}],
"time_created": "2019-04-03T21:10:29.600Z",
"time_of_current_version_expiry": "2019-04-03T21:10:29.600Z",
"time_of_deletion": "2019-04-03T21:10:29.600Z",
"vault_id": "ocid1.vault.oc1..xxxxxxEXAMPLExxxxxx"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.vault import VaultsClient
from oci.vault.models import ScheduleSecretDeletionDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class SecretActionsHelperGen(OCIActionsHelperBase):
@staticmethod
def get_module_resource_id_param():
return "secret_id"
def get_module_resource_id(self):
return self.module.params.get("secret_id")
def get_get_fn(self):
return self.client.get_secret
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_secret, secret_id=self.module.params.get("secret_id"),
)
def cancel_secret_deletion(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.cancel_secret_deletion,
call_fn_args=(),
call_fn_kwargs=dict(secret_id=self.module.params.get("secret_id"),),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
def schedule_secret_deletion(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, ScheduleSecretDeletionDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.schedule_secret_deletion,
call_fn_args=(),
call_fn_kwargs=dict(
secret_id=self.module.params.get("secret_id"),
schedule_secret_deletion_details=action_details,
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
SecretActionsHelperCustom = get_custom_class("SecretActionsHelperCustom")
class ResourceHelper(SecretActionsHelperCustom, SecretActionsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=False
)
module_args.update(
dict(
secret_id=dict(aliases=["id"], type="str", required=True),
time_of_deletion=dict(type="str"),
action=dict(
type="str",
required=True,
choices=["cancel_secret_deletion", "schedule_secret_deletion"],
),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="secret",
service_client_class=VaultsClient,
namespace="vault",
)
result = resource_helper.perform_action(module.params.get("action"))
module.exit_json(**result)
if __name__ == "__main__":
main()
| true
| true
|
790592a6e26ff673eddd59f451cc08a38e138677
| 1,136
|
py
|
Python
|
tests/unit/utils/test_youtube.py
|
ConorSheehan1/YouTubeTimestampRedditBot
|
5f36d96f6dca2d5f42b4c4d121008097c1c4f537
|
[
"MIT"
] | 1
|
2021-12-31T15:38:55.000Z
|
2021-12-31T15:38:55.000Z
|
tests/unit/utils/test_youtube.py
|
ConorSheehan1/YouTubeTimestampRedditBot
|
5f36d96f6dca2d5f42b4c4d121008097c1c4f537
|
[
"MIT"
] | 1
|
2022-01-17T18:48:12.000Z
|
2022-01-17T18:48:12.000Z
|
tests/unit/utils/test_youtube.py
|
ConorSheehan1/YouTubeTimestampRedditBot
|
5f36d96f6dca2d5f42b4c4d121008097c1c4f537
|
[
"MIT"
] | null | null | null |
# Standard Library
import unittest
# YouTubeTimestampRedditBot
from src.utils.youtube import is_youtube_url_without_timestamp
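# A typical way to run just these cases (assuming the repository root is on
# PYTHONPATH) would be: python -m unittest tests.unit.utils.test_youtube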
class Youtube(unittest.TestCase):
def test_is_youtube_url_without_timestamp(self):
dicts = [
# no timestamps
{"input": "https://youtube.com/asdf", "expected_output": True},
{"input": "wwww.youtube.com/asdf", "expected_output": True},
{"input": "wwww.youtu.be/asdf", "expected_output": True},
# has timestamps
{"input": "https://youtube.com/asdf?t=1m", "expected_output": False},
{"input": "wwww.youtube.com?watch=asdf&t=1m", "expected_output": False},
{"input": "wwww.youtu.be/asdf?t=12s", "expected_output": False},
# not youtube
{"input": "wwww.asdf.com", "expected_output": False},
{"input": "https://youfoo.com", "expected_output": False},
]
for (i, d) in enumerate(dicts):
with self.subTest(i=i):
assert (
is_youtube_url_without_timestamp(d["input"]) == d["expected_output"]
)
| 39.172414
| 88
| 0.582746
|
import unittest
from src.utils.youtube import is_youtube_url_without_timestamp
class Youtube(unittest.TestCase):
def test_is_youtube_url_without_timestamp(self):
dicts = [
{"input": "https://youtube.com/asdf", "expected_output": True},
{"input": "wwww.youtube.com/asdf", "expected_output": True},
{"input": "wwww.youtu.be/asdf", "expected_output": True},
{"input": "https://youtube.com/asdf?t=1m", "expected_output": False},
{"input": "wwww.youtube.com?watch=asdf&t=1m", "expected_output": False},
{"input": "wwww.youtu.be/asdf?t=12s", "expected_output": False},
{"input": "wwww.asdf.com", "expected_output": False},
{"input": "https://youfoo.com", "expected_output": False},
]
for (i, d) in enumerate(dicts):
with self.subTest(i=i):
assert (
is_youtube_url_without_timestamp(d["input"]) == d["expected_output"]
)
| true
| true
|
790592ca158e7920ce9710711ea9d87850f00b5e
| 4,040
|
py
|
Python
|
samples/v1/language_entities_gcs.py
|
busunkim96/python-language
|
f16bd6dae66990516320941748325b59f4eeebc6
|
[
"Apache-2.0"
] | null | null | null |
samples/v1/language_entities_gcs.py
|
busunkim96/python-language
|
f16bd6dae66990516320941748325b59f4eeebc6
|
[
"Apache-2.0"
] | 40
|
2019-07-16T10:04:48.000Z
|
2020-01-20T09:04:59.000Z
|
samples/v1/language_entities_gcs.py
|
busunkim96/python-language
|
f16bd6dae66990516320941748325b59f4eeebc6
|
[
"Apache-2.0"
] | 2
|
2019-07-18T00:05:31.000Z
|
2019-11-27T14:17:22.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "language_entities_gcs")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-language
# sample-metadata
# title: Analyzing Entities (GCS)
# description: Analyzing Entities in text file stored in Cloud Storage
# usage: python3 samples/v1/language_entities_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/entity.txt"]
# [START language_entities_gcs]
from google.cloud import language_v1
from google.cloud.language_v1 import enums
def sample_analyze_entities(gcs_content_uri):
"""
Analyzing Entities in text file stored in Cloud Storage
Args:
gcs_content_uri Google Cloud Storage URI where the file content is located.
e.g. gs://[Your Bucket]/[Path to File]
"""
client = language_v1.LanguageServiceClient()
# gcs_content_uri = 'gs://cloud-samples-data/language/entity.txt'
# Available types: PLAIN_TEXT, HTML
type_ = enums.Document.Type.PLAIN_TEXT
# Optional. If not specified, the language is automatically detected.
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = enums.EncodingType.UTF8
response = client.analyze_entities(document, encoding_type=encoding_type)
    # Loop through entities returned from the API
for entity in response.entities:
print(u"Representative name for the entity: {}".format(entity.name))
# Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name))
# Get the salience score associated with the entity in the [0, 1.0] range
print(u"Salience score: {}".format(entity.salience))
# Loop over the metadata associated with entity. For many known entities,
# the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid).
# Some entity types may have additional metadata, e.g. ADDRESS entities
# may have metadata for the address street_name, postal_code, et al.
for metadata_name, metadata_value in entity.metadata.items():
print(u"{}: {}".format(metadata_name, metadata_value))
# Loop over the mentions of this entity in the input document.
# The API currently supports proper noun mentions.
for mention in entity.mentions:
print(u"Mention text: {}".format(mention.text.content))
# Get the mention type, e.g. PROPER for proper noun
print(
u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name)
)
# Get the language of the text, which will be the same as
# the language specified in the request or, if not specified,
# the automatically-detected language.
print(u"Language of the text: {}".format(response.language))
# [END language_entities_gcs]
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--gcs_content_uri",
type=str,
default="gs://cloud-samples-data/language/entity.txt",
)
args = parser.parse_args()
sample_analyze_entities(args.gcs_content_uri)
if __name__ == "__main__":
main()
| 38.113208
| 120
| 0.70297
|
from google.cloud import language_v1
from google.cloud.language_v1 import enums
def sample_analyze_entities(gcs_content_uri):
client = language_v1.LanguageServiceClient()
type_ = enums.Document.Type.PLAIN_TEXT
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
encoding_type = enums.EncodingType.UTF8
response = client.analyze_entities(document, encoding_type=encoding_type)
for entity in response.entities:
print(u"Representative name for the entity: {}".format(entity.name))
print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name))
print(u"Salience score: {}".format(entity.salience))
for metadata_name, metadata_value in entity.metadata.items():
print(u"{}: {}".format(metadata_name, metadata_value))
for mention in entity.mentions:
print(u"Mention text: {}".format(mention.text.content))
print(
u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name)
)
print(u"Language of the text: {}".format(response.language))
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--gcs_content_uri",
type=str,
default="gs://cloud-samples-data/language/entity.txt",
)
args = parser.parse_args()
sample_analyze_entities(args.gcs_content_uri)
if __name__ == "__main__":
main()
| true
| true
|
7905931b1d56097f7faadcc6929bf37a6fd624b9
| 3,396
|
py
|
Python
|
azure/mgmt/monitor/models/rule_management_event_data_source.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2
|
2020-07-29T14:22:17.000Z
|
2020-11-06T18:47:40.000Z
|
azure/mgmt/monitor/models/rule_management_event_data_source.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2016-08-01T07:37:04.000Z
|
2016-08-01T07:37:04.000Z
|
azure/mgmt/monitor/models/rule_management_event_data_source.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2020-12-12T21:04:41.000Z
|
2020-12-12T21:04:41.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .rule_data_source import RuleDataSource
class RuleManagementEventDataSource(RuleDataSource):
"""A rule management event data source. The discriminator fields is always
RuleManagementEventDataSource in this case.
:param resource_uri: the resource identifier of the resource the rule
monitors.
:type resource_uri: str
:param odatatype: Polymorphic Discriminator
:type odatatype: str
:param event_name: the event name.
:type event_name: str
:param event_source: the event source.
:type event_source: str
:param level: the level.
:type level: str
:param operation_name: The name of the operation that should be checked
for. If no name is provided, any operation will match.
:type operation_name: str
:param resource_group_name: the resource group name.
:type resource_group_name: str
:param resource_provider_name: the resource provider name.
:type resource_provider_name: str
:param status: The status of the operation that should be checked for. If
no status is provided, any status will match.
:type status: str
:param sub_status: the substatus.
:type sub_status: str
:param claims: the claims.
:type claims: :class:`RuleManagementEventClaimsDataSource
<azure.mgmt.monitor.models.RuleManagementEventClaimsDataSource>`
"""
_validation = {
'odatatype': {'required': True},
}
_attribute_map = {
'resource_uri': {'key': 'resourceUri', 'type': 'str'},
'odatatype': {'key': 'odata\\.type', 'type': 'str'},
'event_name': {'key': 'eventName', 'type': 'str'},
'event_source': {'key': 'eventSource', 'type': 'str'},
'level': {'key': 'level', 'type': 'str'},
'operation_name': {'key': 'operationName', 'type': 'str'},
'resource_group_name': {'key': 'resourceGroupName', 'type': 'str'},
'resource_provider_name': {'key': 'resourceProviderName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'sub_status': {'key': 'subStatus', 'type': 'str'},
'claims': {'key': 'claims', 'type': 'RuleManagementEventClaimsDataSource'},
}
def __init__(self, resource_uri=None, event_name=None, event_source=None, level=None, operation_name=None, resource_group_name=None, resource_provider_name=None, status=None, sub_status=None, claims=None):
super(RuleManagementEventDataSource, self).__init__(resource_uri=resource_uri)
self.event_name = event_name
self.event_source = event_source
self.level = level
self.operation_name = operation_name
self.resource_group_name = resource_group_name
self.resource_provider_name = resource_provider_name
self.status = status
self.sub_status = sub_status
self.claims = claims
self.odatatype = 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource'
| 44.103896
| 209
| 0.659305
|
from .rule_data_source import RuleDataSource
class RuleManagementEventDataSource(RuleDataSource):
_validation = {
'odatatype': {'required': True},
}
_attribute_map = {
'resource_uri': {'key': 'resourceUri', 'type': 'str'},
'odatatype': {'key': 'odata\\.type', 'type': 'str'},
'event_name': {'key': 'eventName', 'type': 'str'},
'event_source': {'key': 'eventSource', 'type': 'str'},
'level': {'key': 'level', 'type': 'str'},
'operation_name': {'key': 'operationName', 'type': 'str'},
'resource_group_name': {'key': 'resourceGroupName', 'type': 'str'},
'resource_provider_name': {'key': 'resourceProviderName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'sub_status': {'key': 'subStatus', 'type': 'str'},
'claims': {'key': 'claims', 'type': 'RuleManagementEventClaimsDataSource'},
}
def __init__(self, resource_uri=None, event_name=None, event_source=None, level=None, operation_name=None, resource_group_name=None, resource_provider_name=None, status=None, sub_status=None, claims=None):
super(RuleManagementEventDataSource, self).__init__(resource_uri=resource_uri)
self.event_name = event_name
self.event_source = event_source
self.level = level
self.operation_name = operation_name
self.resource_group_name = resource_group_name
self.resource_provider_name = resource_provider_name
self.status = status
self.sub_status = sub_status
self.claims = claims
self.odatatype = 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource'
| true
| true
|
790593cfe62b7bcbe5c632cb438fd0aadd8ff5d1
| 621
|
py
|
Python
|
exit/losses.py
|
exitudio/neural-network-pytorch
|
2831eb92d396187cc0e043234c2dfd17fc83ae3b
|
[
"MIT"
] | null | null | null |
exit/losses.py
|
exitudio/neural-network-pytorch
|
2831eb92d396187cc0e043234c2dfd17fc83ae3b
|
[
"MIT"
] | null | null | null |
exit/losses.py
|
exitudio/neural-network-pytorch
|
2831eb92d396187cc0e043234c2dfd17fc83ae3b
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
import numpy as np
from .constants import EPSILON
import torch
class Loss(ABC):
def __init__(self, expected_output, predict_output):
self._expected_output = expected_output
self._predict_output = predict_output
@abstractmethod
def get_loss(self):
pass
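# EPSILON keeps the log() calls away from log(0) when predictions saturate at
# exactly 0 or 1, which would otherwise yield -inf/NaN losses.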
def crossEntropy(expected_output, predict_output):
    return -(expected_output * torch.log(predict_output + EPSILON) +
             (1-expected_output) * torch.log(1-predict_output+EPSILON)).mean()
def l2(expected_output, predict_output):
return ((predict_output - expected_output) ** 2).mean()
| 25.875
| 78
| 0.724638
|
from abc import ABC, abstractmethod
import numpy as np
from .constants import EPSILON
import torch
class Loss(ABC):
def __init__(self, expected_output, predict_output):
self._expected_output = expected_output
self._predict_output = predict_output
@abstractmethod
def get_loss(self):
pass
def crossEntropy(expected_output, predict_output):
    return -(expected_output * torch.log(predict_output + EPSILON) +
             (1-expected_output) * torch.log(1-predict_output+EPSILON)).mean()
def l2(expected_output, predict_output):
return ((predict_output - expected_output) ** 2).mean()
| true
| true
|