import os
import matplotlib
from pyspark.sql import SparkSession
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pyspark.sql.functions as F
import numpy as np
import statsmodels.api as sm
import pandas as pd
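# Plots the empirical CDF (ECDF) of task wait times for one WTA workload. Wait time is
# taken here to be the per-task "wait_time" column of the trace, in seconds; tasks with a
# negative wait time are filtered out (the trace appears to use -1 as a missing-value marker).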
class TaskWaitTimeCDF(object):
def __init__(self, workload_name, df, image_folder_location):
self.workload_name = workload_name
self.df = df
self.folder = image_folder_location
def generate_content(self):
plot_location = self.generate_graphs()
return None, plot_location
def generate_graphs(self, show=False):
filename = "task_wait_time_cdf_{0}.png".format(self.workload_name)
if os.path.isfile(os.path.join(self.folder, filename)):
return filename
plt.figure()
df = self.df.filter(F.col("wait_time") >= 0)
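        # For large DataFrames, approximate per-mille quantiles summarize the distribution
        # instead of collecting every wait time to the driver.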
if df.count() > 1000:
            permilles = df.approxQuantile("wait_time", [float(i) / 1000 for i in range(0, 1001)], 0.001)
task_wait_times = sorted(pd.DataFrame({"wait_time": permilles})["wait_time"].tolist())
else:
task_wait_times = sorted(df.toPandas()["wait_time"].tolist())
if len(task_wait_times) == 0 or max(task_wait_times) == -1:
            plt.text(0.5, 0.5, 'Not available;\nTrace does not contain this information.',
                     horizontalalignment='center', verticalalignment='center',
                     transform=plt.gca().transAxes, fontsize=16)
plt.grid(False)
else:
ecdf = sm.distributions.ECDF(task_wait_times)
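            # ecdf(x) gives, for each x, the fraction of sampled tasks whose wait time is <= x.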
            # Replace min(task_wait_times) with 0 here to make the curve start at x = 0.
x = np.linspace(min(task_wait_times), max(task_wait_times))
y = ecdf(x)
plt.step(x, y)
plt.xlabel('Wait time (s)', fontsize=18)
plt.ylabel('P', fontsize=18)
        plt.gca().set_xlim(0, None)
plt.margins(0.05)
plt.tight_layout()
plt.savefig(os.path.join(self.folder, filename), dpi=200, format='png')
if show:
plt.show()
return filename
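# The block below is a small local smoke test: it points tasks_loc at a machine-specific
# parquet directory and starts a local Spark session, so it is not meant to run unchanged
# elsewhere.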
if __name__ == '__main__':
tasks_loc = "/media/lfdversluis/datastore/SC19-data/parquet-flattened/pegasus_P1_parquet/tasks/schema-1.0"
spark = (SparkSession.builder
.master("local[5]")
.appName("WTA Analysis")
.config("spark.executor.memory", "3G")
.config("spark.driver.memory", "12G")
.getOrCreate())
task_df = spark.read.parquet(tasks_loc)
gne = TaskWaitTimeCDF("test", task_df, ".")
gne.generate_graphs(show=True)
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import subprocess
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from freezegun import freeze_time
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
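# The test configuration is loaded before the remaining airflow imports so that modules
# which read the configuration at import time pick up the unit-test settings.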
configuration.conf.load_test_config()
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
import six
NUM_EXAMPLE_DAGS = 20
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
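# reset() below deletes any leftover TaskInstance rows for the test DAG so that this
# module starts from a clean metadata database; it is invoked once at import time.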
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
reset()
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
    def execute(self, *args, **kwargs):
pass
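# OperatorSubclass lets the tests below exercise Jinja templating: whatever is passed as
# some_templated_field is rendered before execute() runs because that attribute is listed
# in template_fields. A minimal, hypothetical usage sketch:
#   OperatorSubclass(task_id='t', some_templated_field='{{ ds }}', dag=dag)
# would have some_templated_field rendered to the execution date string at run time.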
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
configuration.conf.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
        Test scheduling a dag when a prior DagRun already exists with the
        run_id that the next scheduled run would receive
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
@freeze_time('2016-01-01')
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
start_date = DEFAULT_DATE
runs = 365
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
t = BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
'Invalid arguments were passed to BashOperator.',
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
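        # Embedding the test process pid in the sleep duration gives the child process a
        # unique command line, so it can be found again via psutil below.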
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_trigger_dagrun(self):
        def trigga(context, obj):
            return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
        # Check that both the returned value and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
configuration.conf.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.conf.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get("core", "FERNET_KEY")
configuration.conf.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.conf.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
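        # e.g. scale_time_units([130, 5400, 10], 'minutes') divides by 60:
        # 130 / 60 = 2.167, 5400 / 60 = 90.0, 10 / 60 = 0.167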
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existant",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
print(f_fails)
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
        # fail_sleepy should have run for at least its 3-second execution_timeout before failing
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
models.DagStat.update([], session=session)
run1 = self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
run2 = self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE + timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_trigger_dagrun_with_execution_date(self):
utc_now = timezone.utcnow()
run_id = 'trig__' + utc_now.isoformat()
def payload_generator(context, object):
object.run_id = run_id
return object
task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = models.DagRun.find(dag_id='example_bash_operator',
run_id=run_id)
self.assertEquals(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEquals(dag_run.execution_date, utc_now)
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
configuration.load_test_config()
app = application.create_app()
app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
self.session = Session()
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test1', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@foo.com', '-r', 'Viewer', '--use_random_password'
])
cli.create_user(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test2', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@apache.org', '-r', 'Viewer', '-p', 'test'
])
cli.create_user(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
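        # The comprehension below pulls the first two quoted strings (conn_id and conn_type)
        # from every other line of the printed connections table.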
conns = [[x.strip("'") for x in re.findall("'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', " +
"'conn_type', 'conn_host', 'conn_login', " +
"'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', '--list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
# Prepare to add connections
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
# Add connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
        # Attempt to delete a non-existing connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
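    # _wait_pidfile polls until the pid file written by the daemonized process becomes
    # readable, then returns the pid it contains.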
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as f:
return int(f.read())
except:
sleep(1)
def test_cli_webserver_foreground(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten the timeout so that this test does not take too long
configuration.conf.set("webserver", "web_server_master_timeout", "10")
args = self.parser.parse_args(['webserver'])
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
except:
            # exception is expected here since the dag doesn't exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_python = self.dagbag.dags['example_python_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_python = self.dag_python.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
        # The HTML should contain data for the last run: a link to the specific run
        # and the text of the date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_python.dag_id,
"execution_date": self.dagrun_python.execution_date,
}).replace("&", "&")
self.assertIn(url, resp_html)
self.assertIn(
self.dagrun_python.execution_date.strftime("%Y-%m-%d %H:%M"),
resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
# confirm that the graph page loads when execution_date is blank
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator&execution_date=')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_python_operator')
self.assertIn("example_python_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=print_the_context&"
"dag_id=example_python_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
response = self.app.get(
'/admin/airflow/clear?task_id=print_the_context&'
'dag_id=example_python_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(DEFAULT_DATE_DS))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=print_the_context&"
"dag_id=example_python_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=example_python_operator&"
"execution_date={}".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("print_the_context", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("core", "secure_mode", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
def tearDown(self):
configuration.conf.remove_option("core", "SECURE_MODE")
class PasswordUserTest(unittest.TestCase):
def setUp(self):
user = models.User()
from airflow.contrib.auth.backends.password_auth import PasswordUser
self.password_user = PasswordUser(user)
self.password_user.username = "password_test"
@mock.patch('airflow.contrib.auth.backends.password_auth.generate_password_hash')
def test_password_setter(self, mock_gen_pass_hash):
mock_gen_pass_hash.return_value = b"hashed_pass" if six.PY3 else "hashed_pass"
self.password_user.password = "secure_password"
mock_gen_pass_hash.assert_called_with("secure_password", 12)
def test_password_unicode(self):
# In python2.7 no conversion is required back to str
# In python >= 3 the method must convert from bytes to str
self.password_user.password = "secure_password"
self.assertIsInstance(self.password_user.password, str)
def test_password_user_authenticate(self):
self.password_user.password = "secure_password"
self.assertTrue(self.password_user.authenticate("secure_password"))
def test_password_authenticate_session(self):
from airflow.contrib.auth.backends.password_auth import PasswordUser
self.password_user.password = 'test_password'
session = Session()
session.add(self.password_user)
session.commit()
query_user = session.query(PasswordUser).filter_by(
username=self.password_user.username).first()
self.assertTrue(query_user.authenticate('test_password'))
session.query(models.User).delete()
session.commit()
session.close()
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
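        # Pull the hidden _csrf_token value out of the rendered login form so it
        # can be posted back together with the credentials.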
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
        except Exception:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
        except Exception:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
        Fake snakebite client listing used by the HDFS sensor tests.
        :param path: list of paths to check
        :param include_toplevel: whether to include the top-level directory info
        :return: a list of path entries for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
                'path': '/datadirectory/not_empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
                'modification_time': 1481122343862,
                'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = models.Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
HDFSHook = None
if six.PY2:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = ('hdfs://localhost:8020')
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = models.Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.conf.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_charset='us-ascii', mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
filename = u'attachment; filename="' + os.path.basename(attachment.name) + '"'
self.assertEqual(filename, msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp_with_multibyte_content(self, mock_send_mime):
utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
msg = call_args[2]
mimetext = MIMEText('🔥', 'mixed', 'utf-8')
self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.conf.get('smtp', 'SMTP_USER'),
configuration.conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.conf.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertFalse(mock_smtp.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
import csv
import collections, itertools
import nltk.classify.util, nltk.metrics
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import movie_reviews, stopwords
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from nltk.probability import FreqDist, ConditionalFreqDist
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
import random
from nltk.stem.snowball import EnglishStemmer
stopset = set(stopwords.words('english'))
lmtzr = EnglishStemmer(True)
#Feature extractor mixing unigrams with the n highest-scoring bigrams (chi-squared by default)
def bigram_word_feats(words, score_fn=BigramAssocMeasures.chi_sq, n=150):
bigram_finder = BigramCollocationFinder.from_words(words)
bigrams = bigram_finder.nbest(score_fn, n)
return dict([(ngram, True) for ngram in itertools.chain(words, bigrams)])
#Build the feature dictionary, skipping stopwords
def stopword_filtered_word_feats(words):
return dict([(word, True) for word in words if word not in stopset])
#Build the feature dictionary from stemmed words
def stemming_word_feats(words):
return dict([(lmtzr.stem(word), True) for word in words])
# Build the dictionary with a boolean flag indicating the presence of each word
def word_feats(words):
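    # e.g. word_feats(['good', 'movie']) -> {'good': True, 'movie': True}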
return dict([(word, True) for word in words])
#CSV dialect used to extract the sentences from the pipe-delimited file
class PipeDialect(csv.Dialect):
delimiter = "|"
quotechar = None
escapechar = None
doublequote = None
lineterminator = "\r\n"
quoting = csv.QUOTE_NONE
skipinitialspace = False
#Basic binary classifier evaluation
def evaluate_classifier(featx):
fneg = "data.neg.txt"
fpos = "data.pos.txt"
f = "data.txt"
fileNeg = open(fneg, "rb")
filePos = open(fpos, "rb")
file = open(f, "rb")
reader = csv.reader(file, PipeDialect())
readerNeg = csv.reader(fileNeg, PipeDialect())
readerPos = csv.reader(filePos, PipeDialect())
sentencesNeg = []
sentencesPos = []
wordsNeg = []
wordsPos = []
for row in readerNeg:
sentencesNeg.append(row[2].lower())
for row in readerPos:
sentencesPos.append(row[2].lower())
tokenizer = RegexpTokenizer(r'\w+')
    for sentence in sentencesNeg:
        wordsNeg.append(tokenizer.tokenize(sentence))
    for sentence in sentencesPos:
        wordsPos.append(tokenizer.tokenize(sentence))
words = wordsNeg + wordsPos
    print len(set([y for x in words for y in x]))  # vocabulary size
    negfeats = [(featx(w), 'neg') for w in wordsNeg]
    posfeats = [(featx(w), 'pos') for w in wordsPos]
    print len(set([lmtzr.stem(y) for x in words for y in x]))  # vocabulary size after stemming
random.shuffle(negfeats)
random.shuffle(posfeats)
negcutoff = len(negfeats)*3/4
poscutoff = len(posfeats)*3/4
trainfeats = negfeats[:negcutoff] + posfeats[:poscutoff]
testfeats = negfeats[negcutoff:] + posfeats[poscutoff:]
print 'train on %d instances, test on %d instances' % (len(trainfeats), len(testfeats))
classifier = NaiveBayesClassifier.train(trainfeats)
refsets = collections.defaultdict(set)
testsets = collections.defaultdict(set)
for i, (feats, label) in enumerate(testfeats):
refsets[label].add(i)
observed = classifier.classify(feats)
testsets[observed].add(i)
print 'accuracy:', nltk.classify.util.accuracy(classifier, testfeats)
print 'pos precision:', nltk.metrics.precision(refsets['pos'], testsets['pos'])
print 'pos recall:', nltk.metrics.recall(refsets['pos'], testsets['pos'])
print 'neg precision:', nltk.metrics.precision(refsets['neg'], testsets['neg'])
print 'neg recall:', nltk.metrics.recall(refsets['neg'], testsets['neg'])
classifier.show_most_informative_features()
file.close()
filePos.close()
fileNeg.close()
print 'evaluating single word features'
evaluate_classifier(word_feats)
print 'evaluating single word features with no stopword'
evaluate_classifier(stopword_filtered_word_feats)
print 'evaluating single word features with no stopword and stemming'
evaluate_classifier(stemming_word_feats)
|
import torch.nn as nn
import torch
from utils import conv_out_size, Flatten
import constants as c
class DiscriminatorModel(nn.Module):
def __init__(self):
super(DiscriminatorModel, self).__init__()
self.d_model = []
for scale_num in xrange(c.NUM_SCALE_NETS):
scale_factor = 1. / 2 ** ((c.NUM_SCALE_NETS - 1) - scale_num)
scale_model = self.d_single_scale_model(scale_num, int(c.HEIGHT * scale_factor), int(c.WIDTH * scale_factor))
self.d_model.append(scale_model)
self.d_model = nn.Sequential(*self.d_model)
def d_single_scale_model(self, scale_index, height, width):
"""
        Builds the discriminator network for a single scale (PyTorch port of the original TensorFlow graph setup).
"""
##
# Layer setup
##
# convolution
ws = []
fc_layer_sizes = c.SCALE_FC_LAYER_SIZES_D[scale_index]
last_out_height = height
last_out_width = width
for i in xrange(len(c.SCALE_KERNEL_SIZES_D[scale_index])):
ws.append(nn.Conv2d(c.SCALE_CONV_FMS_D[scale_index][i], c.SCALE_CONV_FMS_D[scale_index][i + 1],
kernel_size=c.SCALE_KERNEL_SIZES_D[scale_index][i],
padding=c.SCALE_PADDING_SIZES_D[scale_index][i]))
ws.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=1))
ws.append(nn.LeakyReLU(c.LEAK))
last_out_height = conv_out_size(input=last_out_height, padding=c.SCALE_PADDING_SIZES_D[scale_index][i],
kernel=c.SCALE_KERNEL_SIZES_D[scale_index][i], stride=1)
last_out_width = conv_out_size(
last_out_width, c.SCALE_PADDING_SIZES_D[scale_index][i], c.SCALE_KERNEL_SIZES_D[scale_index][i], 1)
last_out_height = conv_out_size(input=last_out_height, kernel=2, padding=1, stride=2)
last_out_width = conv_out_size(input=last_out_width, kernel=2, padding=1, stride=2)
ws.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=1))
last_out_height = conv_out_size(input=last_out_height, kernel=2, padding=1, stride=2)
last_out_width = conv_out_size(input=last_out_width, kernel=2, padding=1, stride=2)
# fully-connected
        # Add the flattened size of the last conv/pool output as the input size of
        # the first fully-connected layer; last_out_height/width already account
        # for the pooling layers applied above.
fc_layer_sizes.insert(0, last_out_height * last_out_width * c.SCALE_CONV_FMS_D[scale_index][-1])
ws.append(Flatten())
for i in xrange(len(fc_layer_sizes) - 1):
ws.append(nn.Linear(fc_layer_sizes[i], fc_layer_sizes[i + 1]))
if i == len(fc_layer_sizes) - 2:
ws.append(nn.Sigmoid())
else:
ws.append(nn.LeakyReLU(c.LEAK, inplace=True))
d_single_scale = nn.Sequential(*ws)
return d_single_scale
def forward(self, input_):
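        # input_ is expected to be a sequence with one image tensor per scale;
        # each scale network outputs a clamped real/fake score.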
scale_preds = []
for scale_num in xrange(c.NUM_SCALE_NETS):
# get predictions from the scale network
single_scale_pred = self.d_model[scale_num](input_[scale_num])
single_scale_pred = torch.clamp(single_scale_pred, 0.001, 0.999) # for stability
scale_preds.append(single_scale_pred)
return scale_preds
|
from math import ceil
import matplotlib.pyplot as plt
from skimage import io
from skimage.morphology import disk
from examples.utils import Timer
from softcolor.morphology import MorphologyInCIELab, soften_structuring_element
if __name__ == "__main__":
img = io.imread('images/lena-512.gif')
img = img[100:200, 100:200, :]
morphology = MorphologyInCIELab()
se = disk(3)
se = soften_structuring_element(se)
with Timer() as t:
img_contrasted, img_contrasted_steps = morphology.contrast_mapping_with_steps(img,
structuring_element=se,
num_iterations=3)
print('Time elapsed contrast mapping: {} s'.format(t.interval))
_, axs = plt.subplots(nrows=2, ncols=2)
[a.axis('off') for a in axs.flat]
axs[0, 1].imshow(se)
axs[0, 0].imshow(img)
axs[1, 0].imshow(img_contrasted)
plt.show()
_, axs = plt.subplots(nrows=3, ncols=ceil(len(img_contrasted_steps)/3))
[a.axis('off') for a in axs.flat]
for idx_step, step in enumerate(img_contrasted_steps):
axs.flat[idx_step].imshow(step)
plt.show()
|
#!/usr/bin/env python
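# Demonstrates closure late binding: lambdas created in a loop or comprehension
# capture the variable i itself, not its value at creation time.  list1 and
# list3 therefore evaluate to [81]*9 (i ends at 9), list5 to [100]*9 (the while
# loop leaves i at 10), while list2 and list4 bind i per element and yield
# [1, 4, 9, 16, 25, 36, 49, 64, 81].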
list1 = [func() for func in [lambda:i*i for i in range(1,10)]]
list2 = [(lambda:i*i)() for i in range(1,10)]
print 'list1 = ' + str(list1)
print 'list2 = ' + str(list2)
funcs = []
for i in range(1,10):
funcs.append(lambda:i*i)
list3 = [func() for func in funcs]
print 'list3 = ' + str(list3)
list4 = list(map(lambda f:f(), map(lambda i:(lambda:i*i), range(1,10)) ))
print 'list4 = ' + str(list4)
i = 1
funcs = []
while i<10:
funcs.append(lambda:i*i)
i += 1
list5 = [func() for func in funcs]
print 'list5 = ' + str(list5)
|
import streamlit as st
import streamlit.components.v1 as components
import pandas as pd
import networkx as nx
import pickle
from pyvis.network import Network
from pages import make_network
from transformers import DistilBertTokenizerFast
from transformers import TFDistilBertForSequenceClassification
# Streamlit page for running a custom product query against the review network
def app():
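    # Page entry point: load the pre-trained models, take a free-text product
    # query, build the review network (make_network is expected to write the
    # pyvis graph to nodes.html) and embed it alongside the reviews sorted by rating.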
## load models
# pre-trained attention model should be saved in a folder name sentiment
# pre-trained clustering model should be saved as a file named k_means_model.pickle
attention_model = TFDistilBertForSequenceClassification.from_pretrained("./pages/sentiment/", output_attentions=True)
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
clustering_model = pickle.load(open('./pages/k_means_model.pickle', 'rb'))
st.markdown("### Input a Custom Query")
st.markdown("What kind of mobile electronic product are you interested in?")
query = st.text_input("Enter a custom query")
if not query:
st.warning("Please enter a custom query")
else:
st.write("Your query is: ", query)
df = make_network.make_network(query, tokenizer, attention_model, clustering_model)
HtmlFile = open('nodes.html','r',encoding='utf-8')
source_code = HtmlFile.read()
components.html(source_code, height = 1000, width = 1200)
st.dataframe(df.head(100).sort_values("rating", ascending = False))
#st.dataframe(df.head(15).sort_values("helpful_votes", ascending = False).sort_values("star_rating", ascending = False))
|
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# License: BSD 3 clause
# extensions and modifications (c) Martin Werner
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
#import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups, load_files
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier, LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier,ExtraTreesClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--resample",
action="store_true", dest="resample",
help="Use balanced sample.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
#print(__doc__)
#op.print_help()
#print()
# #############################################################################
# Load some categories from the training set
print("Loading LA")
data_train = load_files("./la/left")
data_test = load_files("./la/right")
# order of labels in `target_names` can be different from `categories`
target_names = data_train.target_names
def size_mb(docs):
return sum(len(s) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', alternate_sign=False,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
# #############################################################################
# Benchmark classifiers
def benchmark(clf):
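    # Fit the classifier, time training and prediction, and report accuracy plus
    # the optional per-class, top-feature and confusion-matrix diagnostics.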
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, label in enumerate(target_names):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s" % (label, " ".join(feature_names[top10]))))
print()
pred_train = clf.predict(X_train)
print("trainig")
print(metrics.classification_report(y_train, pred_train, target_names=target_names))
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=target_names))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="auto"), "Ridge Classifier"),
(Perceptron(max_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(max_iter=50), "Passive-Aggressive"),
(LogisticRegression(penalty="l2",
dual=True,
tol=0.0001,
C=0.1,
fit_intercept=True,
intercept_scaling=1,
class_weight=None,
random_state=None,
solver="liblinear",
max_iter=100,
multi_class="ovr",
verbose=0,
warm_start=False,
n_jobs=1), "MaxEnt"),
# (KNeighborsClassifier(n_neighbors=10), "kNN"),
# (RandomForestClassifier(n_estimators=100), "Random forest"),
# (ExtraTreesClassifier(n_estimators=100), "ExtraTree")
):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(penalty=penalty, dual=False,
tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False,
tol=1e-3))),
('classification', LinearSVC(penalty="l2"))])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
#plt.figure(figsize=(12, 8))
#plt.title("Score")
#plt.barh(indices, score, .2, label="score", color='navy')
#plt.barh(indices + .3, training_time, .2, label="training time",
# color='c')
#plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
#plt.yticks(())
#plt.legend(loc='best')
#plt.subplots_adjust(left=.25)
#plt.subplots_adjust(top=.95)
#plt.subplots_adjust(bottom=.05)
#for i, c in zip(indices, clf_names):
# plt.text(-.3, i, c)#
#plt.show()
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
import requests
import redis
import time
import datetime
import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
cache = redis.Redis(host='redis', port=6379)
# TODO: extract these IPs from a configuration file.
picUrls = {'photo_garage12' : 'https://192.168.2.248/cgi-bin/currentpic.cgi',
'photo_garage13' : 'https://192.168.2.249/cgi-bin/currentpic.cgi',
'photo_office4' : 'https://192.168.100.201/cgi-bin/currentpic.cgi'}
def update(msg):
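    # Keyspace-event callback: for every camera whose request key is present,
    # delete the request, fetch the current picture over HTTPS and cache it
    # under the requested target key with a 10-second expiry.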
for key in picUrls:
val = cache.get(key)
if val is not None:
cache.delete(key)
valStr = val.decode("utf-8")
resultTarget = valStr.split(',')[1]
logging.info("loading photo from: " + picUrls[key])
            # TODO: move the authentication credentials out of this file.
content = requests.get(picUrls[key], verify=False, auth=('root','')).content
logging.info("sending photo to: (" + resultTarget + ')')
cache.setex(resultTarget, datetime.timedelta(seconds=10), value=content)
pubsub = cache.pubsub()
# Configure Redis keyspace notifications so key events (e.g. expirations) invoke the update() callback
cache.config_set('notify-keyspace-events', 'Exsg')
pubsub.psubscribe(**{"__key*__:*": update})
pubsub.run_in_thread(sleep_time=0.01)
if __name__ == "__main__":
logging.info("Starting image loader")
while True:
time.sleep(1)
|
from conans import ConanFile, CMake, tools
from conans.errors import ConanException, ConanInvalidConfiguration
from collections import namedtuple, OrderedDict
import os
required_conan_version = ">=1.33.0"
class PocoConan(ConanFile):
name = "poco"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://pocoproject.org"
topics = ("conan", "poco", "building", "networking", "server", "mobile", "embedded")
exports_sources = "CMakeLists.txt", "patches/**"
generators = "cmake", "cmake_find_package"
settings = "os", "arch", "compiler", "build_type"
license = "BSL-1.0"
description = "Modern, powerful open source C++ class libraries for building network- and internet-based " \
"applications that run on desktop, server, mobile and embedded systems."
options = {
"shared": [True, False],
"fPIC": [True, False],
"enable_fork": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"enable_fork": True,
}
_PocoComponent = namedtuple("_PocoComponent", ("option", "default_option", "dependencies", "is_lib"))
_poco_component_tree = {
"mod_poco": _PocoComponent("enable_apacheconnector", False, ("PocoUtil", "PocoNet", ), False), # also external apr and apr-util
"PocoCppParser": _PocoComponent("enable_cppparser", False, ("PocoFoundation", ), False),
# "PocoCppUnit": _PocoComponent("enable_cppunit", False, ("PocoFoundation", ), False)),
"PocoCrypto": _PocoComponent("enable_crypto", True, ("PocoFoundation", ), True), # also external openssl
"PocoData": _PocoComponent("enable_data", True, ("PocoFoundation", ), True),
"PocoDataMySQL": _PocoComponent("enable_data_mysql", True, ("PocoData", ), True),
"PocoDataODBC": _PocoComponent("enable_data_odbc", False, ("PocoData", ), True),
"PocoDataPostgreSQL": _PocoComponent("enable_data_postgresql", True, ("PocoData", ), True), # also external postgresql
"PocoDataSQLite": _PocoComponent("enable_data_sqlite", True, ("PocoData", ), True), # also external sqlite3
"PocoEncodings": _PocoComponent("enable_encodings", True, ("PocoFoundation", ), True),
# "PocoEncodingsCompiler": _PocoComponent("enable_encodingscompiler", False, ("PocoNet", "PocoUtil", ), False),
"PocoFoundation": _PocoComponent(None, "PocoFoundation", (), True),
"PocoJSON": _PocoComponent("enable_json", True, ("PocoFoundation", ), True),
"PocoJWT": _PocoComponent("enable_jwt", True, ("PocoJSON", "PocoCrypto", ), True),
"PocoMongoDB": _PocoComponent("enable_mongodb", True, ("PocoNet", ), True),
"PocoNet": _PocoComponent("enable_net", True, ("PocoFoundation", ), True),
"PocoNetSSL": _PocoComponent("enable_netssl", True, ("PocoCrypto", "PocoUtil", "PocoNet", ), True), # also external openssl
"PocoNetSSLWin": _PocoComponent("enable_netssl_win", False, ("PocoNet", "PocoUtil", ), True),
"PocoPDF": _PocoComponent("enable_pdf", False, ("PocoXML", "PocoUtil", ), True),
"PocoPageCompiler": _PocoComponent("enable_pagecompiler", False, ("PocoNet", "PocoUtil", ), False),
"PocoFile2Page": _PocoComponent("enable_pagecompiler_file2page", False, ("PocoNet", "PocoUtil", "PocoXML", "PocoJSON", ), False),
"PocoPocoDoc": _PocoComponent("enable_pocodoc", False, ("PocoUtil", "PocoXML", "PocoCppParser", ), False),
"PocoRedis": _PocoComponent("enable_redis", True, ("PocoNet", ), True),
"PocoSevenZip": _PocoComponent("enable_sevenzip", False, ("PocoUtil", "PocoXML", ), True),
"PocoUtil": _PocoComponent("enable_util", True, ("PocoFoundation", "PocoXML", "PocoJSON", ), True),
"PocoXML": _PocoComponent("enable_xml", True, ("PocoFoundation", ), True),
"PocoZip": _PocoComponent("enable_zip", True, ("PocoUtil", "PocoXML", ), True),
"PocoActiveRecord": _PocoComponent("enable_active_record", True, ("PocoFoundation", "PocoData", ), True),
}
for comp in _poco_component_tree.values():
if comp.option:
options[comp.option] = [True, False]
default_options[comp.option] = comp.default_option
del comp
@property
def _poco_ordered_components(self):
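        # Kahn-style topological sort of the component dependency tree: repeatedly
        # peel off components whose dependencies are all resolved, then reverse so
        # that each component precedes the components it depends on.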
remaining_components = dict((compname, set(compopts.dependencies)) for compname, compopts in self._poco_component_tree.items())
ordered_components = []
while remaining_components:
components_no_deps = set(compname for compname, compopts in remaining_components.items() if not compopts)
if not components_no_deps:
raise ConanException("The poco dependency tree is invalid and contains a cycle")
for c in components_no_deps:
remaining_components.pop(c)
ordered_components.extend(components_no_deps)
for rname in remaining_components.keys():
remaining_components[rname] = remaining_components[rname].difference(components_no_deps)
ordered_components.reverse()
return ordered_components
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
del self.options.enable_fork
else:
del self.options.enable_netssl_win
if tools.Version(self.version) < "1.9":
del self.options.enable_encodings
if tools.Version(self.version) < "1.10":
del self.options.enable_data_mysql
del self.options.enable_data_postgresql
del self.options.enable_jwt
if tools.Version(self.version) < "1.11":
del self.options.enable_active_record
def configure(self):
if self.options.shared:
del self.options.fPIC
if not self.options.enable_xml:
util_dependencies = self._poco_component_tree["PocoUtil"].dependencies
self._poco_component_tree["PocoUtil"] = self._poco_component_tree["PocoUtil"]._replace(dependencies = tuple(x for x in util_dependencies if x != "PocoXML"))
if not self.options.enable_json:
util_dependencies = self._poco_component_tree["PocoUtil"].dependencies
self._poco_component_tree["PocoUtil"] = self._poco_component_tree["PocoUtil"]._replace(dependencies = tuple(x for x in util_dependencies if x != "PocoJSON"))
def validate(self):
if self.options.enable_apacheconnector:
raise ConanInvalidConfiguration("Apache connector not supported: https://github.com/pocoproject/poco/issues/1764")
if self.settings.compiler == "Visual Studio":
if self.options.shared and "MT" in str(self.settings.compiler.runtime):
raise ConanInvalidConfiguration("Cannot build shared poco libraries with MT(d) runtime")
for compopt in self._poco_component_tree.values():
if not compopt.option:
continue
if self.options.get_safe(compopt.option, False):
for compdep in compopt.dependencies:
if not self._poco_component_tree[compdep].option:
continue
if not self.options.get_safe(self._poco_component_tree[compdep].option, False):
raise ConanInvalidConfiguration("option {} requires also option {}".format(compopt.option, self._poco_component_tree[compdep].option))
if self.options.enable_data_sqlite:
if self.options["sqlite3"].threadsafe == 0:
raise ConanInvalidConfiguration("sqlite3 must be built with threadsafe enabled")
if self.options.enable_netssl and self.options.get_safe("enable_netssl_win", False):
raise ConanInvalidConfiguration("Conflicting enable_netssl[_win] settings")
def requirements(self):
self.requires("pcre/8.44")
self.requires("zlib/1.2.11")
if self.options.enable_xml:
self.requires("expat/2.4.1")
if self.options.enable_data_sqlite:
self.requires("sqlite3/3.35.5")
if self.options.enable_apacheconnector:
self.requires("apr/1.7.0")
self.requires("apr-util/1.6.1")
# FIXME: missing apache2 recipe
raise ConanInvalidConfiguration("apache2 is not (yet) available on CCI")
if self.options.enable_netssl or \
self.options.enable_crypto or \
self.options.get_safe("enable_jwt", False):
self.requires("openssl/1.1.1k")
if self.options.enable_data_odbc and self.settings.os != "Windows":
self.requires("odbc/2.3.9")
if self.options.get_safe("enable_data_postgresql", False):
self.requires("libpq/13.2")
if self.options.get_safe("enable_data_mysql", False):
self.requires("apr/1.7.0")
self.requires("apr-util/1.6.1")
self.requires("libmysqlclient/8.0.25")
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["CMAKE_BUILD_TYPE"] = self.settings.build_type
if tools.Version(self.version) < "1.10.1":
self._cmake.definitions["POCO_STATIC"] = not self.options.shared
for comp in self._poco_component_tree.values():
if not comp.option:
continue
self._cmake.definitions[comp.option.upper()] = self.options.get_safe(comp.option, False)
self._cmake.definitions["POCO_UNBUNDLED"] = True
self._cmake.definitions["CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP"] = True
if self.settings.os == "Windows" and self.settings.compiler == "Visual Studio": # MT or MTd
self._cmake.definitions["POCO_MT"] = "ON" if "MT" in str(self.settings.compiler.runtime) else "OFF"
if self.options.get_safe("enable_data_postgresql", False):
self._cmake.definitions["PostgreSQL_ROOT_DIR"] = self.deps_cpp_info["libpq"].rootpath
self._cmake.definitions["PostgreSQL_ROOT_INCLUDE_DIRS"] = ";".join(self.deps_cpp_info["libpq"].include_paths)
self._cmake.definitions["PostgreSQL_ROOT_LIBRARY_DIRS"] = ";".join(self.deps_cpp_info["libpq"].lib_paths)
if self.options.get_safe("enable_data_mysql", False):
self._cmake.definitions["MYSQL_ROOT_DIR"] = self.deps_cpp_info["libmysqlclient"].rootpath
self._cmake.definitions["MYSQL_ROOT_INCLUDE_DIRS"] = ";".join(self.deps_cpp_info["libmysqlclient"].include_paths)
self._cmake.definitions["MYSQL_INCLUDE_DIR"] = ";".join(self.deps_cpp_info["libmysqlclient"].include_paths)
self._cmake.definitions["MYSQL_ROOT_LIBRARY_DIRS"] = ";".join(self.deps_cpp_info["libmysqlclient"].lib_paths)
self._cmake.definitions["APR_ROOT_DIR"] = self.deps_cpp_info["apr"].rootpath
self._cmake.definitions["APR_ROOT_INCLUDE_DIRS"] = ";".join(self.deps_cpp_info["apr"].include_paths)
self._cmake.definitions["APR_ROOT_LIBRARY_DIRS"] = ";".join(self.deps_cpp_info["apr"].lib_paths)
self._cmake.definitions["APRUTIL_ROOT_DIR"] = self.deps_cpp_info["apr-util"].rootpath
self._cmake.definitions["APRUTIL_ROOT_INCLUDE_DIRS"] = ";".join(self.deps_cpp_info["apr-util"].include_paths)
self._cmake.definitions["APRUTIL_ROOT_LIBRARY_DIRS"] = ";".join(self.deps_cpp_info["apr-util"].lib_paths)
self.output.info(self._cmake.definitions)
        # Disable fork; the definition must be set before configure() to take effect
        if not self.options.get_safe("enable_fork", True):
            self._cmake.definitions["POCO_NO_FORK_EXEC"] = True
        # On Windows, Poco needs a message (MC) compiler.
        with tools.vcvars(self.settings) if self.settings.compiler == "Visual Studio" else tools.no_op():
            self._cmake.configure(build_dir=self._build_subfolder)
        return self._cmake
def build(self):
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "cmake"))
tools.remove_files_by_mask(os.path.join(self.package_folder, "bin"), "*.pdb")
@property
def _ordered_libs(self):
libs = []
for compname in self._poco_ordered_components:
comp_options = self._poco_component_tree[compname]
if comp_options.is_lib:
if not comp_options.option:
libs.append(compname)
elif self.options.get_safe(comp_options.option, False):
libs.append(compname)
return libs
def package_info(self):
suffix = str(self.settings.compiler.runtime).lower() \
if self.settings.compiler == "Visual Studio" and not self.options.shared \
else ("d" if self.settings.build_type == "Debug" else "")
self.cpp_info.libs = list("{}{}".format(lib, suffix) for lib in self._ordered_libs)
if self.settings.os == "Linux":
self.cpp_info.system_libs.extend(["pthread", "dl", "rt"])
if self.settings.compiler == "Visual Studio":
self.cpp_info.defines.append("POCO_NO_AUTOMATIC_LIBS")
if not self.options.shared:
self.cpp_info.defines.append("POCO_STATIC=ON")
if self.settings.os == "Windows":
self.cpp_info.system_libs.extend(["ws2_32", "iphlpapi", "crypt32"])
if self.options.enable_data_odbc:
self.cpp_info.system_libs.extend(["odbc32", "odbccp32"])
self.cpp_info.defines.append("POCO_UNBUNDLED")
if self.options.enable_util:
if not self.options.enable_json:
self.cpp_info.defines.append("POCO_UTIL_NO_JSONCONFIGURATION")
if not self.options.enable_xml:
self.cpp_info.defines.append("POCO_UTIL_NO_XMLCONFIGURATION")
self.cpp_info.names["cmake_find_package"] = "Poco"
self.cpp_info.names["cmake_find_package_multi"] = "Poco"
|
# encoding: utf8
from __future__ import unicode_literals
from django.db import models, migrations
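# Note: the breed names below are Russian data values and are kept verbatim, since the
# '0002_fill_cat_breeds' migration this one depends on presumably seeded them that way:
# 'Бульдог' = Bulldog, 'Такса' = Dachshund, 'Сиамская' = Siamese.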
def fill_dogs(apps, schema_editor):
CatsBreed = apps.get_model('main', 'CatsBreed')
Dog = apps.get_model('main', 'Dog')
bulldog, created = Dog.objects.get_or_create(breed=u'Бульдог')
dachshund, created = Dog.objects.get_or_create(breed=u'Такса')
CatsBreed.objects.get(name=u'Сиамская').can_live_with.add(bulldog, dachshund)
class Migration(migrations.Migration):
dependencies = [
('main', '0002_fill_cat_breeds'),
]
operations = [
migrations.RunPython(fill_dogs)
]
|
import torch
import mmdet2trt.core.post_processing.batched_nms as batched_nms
import mmdet2trt.ops.util_ops as mm2trt_util
from mmdet2trt.core.bbox import batched_distance2bbox
from mmdet2trt.models.builder import register_wraper
from mmdet2trt.models.dense_heads.anchor_free_head import AnchorFreeHeadWraper
@register_wraper('mmdet.models.VFNetHead')
class VFNetHeadWraper(AnchorFreeHeadWraper):
def __init__(self, module):
super(VFNetHeadWraper, self).__init__(module)
self.rcnn_nms = batched_nms.BatchedNMS(
module.test_cfg.score_thr,
module.test_cfg.nms.iou_threshold,
backgroundLabelId=-1)
def _get_points_single(self, feat, stride, flatten=False):
y, x = super()._get_points_single(feat, stride, flatten)
if self.module.use_atss:
points = torch.stack(
(x.reshape(-1) * stride, y.reshape(-1) * stride),
dim=-1) + stride * self.module.anchor_center_offset
else:
points = torch.stack(
(x.reshape(-1) * stride, y.reshape(-1) * stride),
dim=-1) + stride // 2
return points
def forward(self, feat, x):
module = self.module
cfg = self.test_cfg
cls_scores, bbox_preds, bbox_preds_refine = module(feat)
mlvl_points = self.get_points(cls_scores)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, points in zip(cls_scores, bbox_preds_refine,
mlvl_points):
scores = cls_score.permute(0, 2, 3, 1).reshape(
cls_score.shape[0], -1, module.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(0, 2, 3,
1).reshape(bbox_pred.shape[0], -1, 4)
points = points.unsqueeze(0)
points = points.expand_as(bbox_pred[:, :, :2])
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0:
                # pad the tensors so topk always has at least nms_pre entries;
                # crude workaround, a cleaner approach may follow later
scores = mm2trt_util.pad_with_value(scores, 1, nms_pre, 0.)
bbox_pred = mm2trt_util.pad_with_value(bbox_pred, 1, nms_pre)
points = mm2trt_util.pad_with_value(points, 1, nms_pre)
# do topk
max_scores, _ = scores.max(dim=2)
_, topk_inds = max_scores.topk(nms_pre, dim=1)
points = mm2trt_util.gather_topk(points, 1, topk_inds)
bbox_pred = mm2trt_util.gather_topk(bbox_pred, 1, topk_inds)
scores = mm2trt_util.gather_topk(scores, 1, topk_inds)
bboxes = batched_distance2bbox(points, bbox_pred, x.shape[2:])
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
mlvl_scores = torch.cat(mlvl_scores, dim=1)
mlvl_proposals = mlvl_bboxes.unsqueeze(2)
max_scores, _ = mlvl_scores.max(dim=2)
topk_pre = max(1000, nms_pre)
_, topk_inds = max_scores.topk(min(topk_pre, mlvl_scores.shape[1]),
dim=1)
mlvl_proposals = mm2trt_util.gather_topk(mlvl_proposals, 1, topk_inds)
mlvl_scores = mm2trt_util.gather_topk(mlvl_scores, 1, topk_inds)
num_bboxes = mlvl_proposals.shape[1]
num_detected, proposals, scores, cls_id = self.rcnn_nms(
mlvl_scores, mlvl_proposals, num_bboxes, self.test_cfg.max_per_img)
return num_detected, proposals, scores, cls_id
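# Note: the register_wraper decorator above registers this class for the import
# path 'mmdet.models.VFNetHead', so mmdet2trt's builder can (roughly speaking)
# substitute it for the original head when converting a model for TensorRT;
# the exact lookup mechanics depend on the mmdet2trt version in use.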
|
#!/usr/bin/env python
# Copyright (c) 2015-2017 The Bitcoin Money developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
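# Typical invocation (illustrative paths; READELF/OBJDUMP can be overridden via
# the environment variables read just below):
#   READELF=/usr/bin/readelf python security-check.py ./bitcoind ./bitcoin-cli
# A silent run with exit status 0 means every binary passed every check.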
from __future__ import division,print_function,unicode_literals
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
NONFATAL = {'HIGH_ENTROPY_VA'} # checks which are non-fatal for now but only generate a warning
def check_ELF_PIE(executable):
'''
Check for position independent executable (PIE), allowing for address space randomization.
'''
p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>=2 and line[0] == b'Type:' and line[1] == b'DYN':
ok = True
return ok
def get_ELF_program_headers(executable):
'''Return type and flags for ELF program headers'''
p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
in_headers = False
count = 0
headers = []
for line in stdout.split(b'\n'):
if line.startswith(b'Program Headers:'):
in_headers = True
if line == b'':
in_headers = False
if in_headers:
if count == 1: # header line
ofs_typ = line.find(b'Type')
ofs_offset = line.find(b'Offset')
ofs_flags = line.find(b'Flg')
ofs_align = line.find(b'Align')
if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
                    raise ValueError('Cannot parse readelf -lW output')
elif count > 1:
typ = line[ofs_typ:ofs_offset].rstrip()
flags = line[ofs_flags:ofs_align].rstrip()
headers.append((typ, flags))
count += 1
return headers
def check_ELF_NX(executable):
'''
Check that no sections are writable and executable (including the stack)
'''
have_wx = False
have_gnu_stack = False
for (typ, flags) in get_ELF_program_headers(executable):
if typ == b'GNU_STACK':
have_gnu_stack = True
if b'W' in flags and b'E' in flags: # section is both writable and executable
have_wx = True
return have_gnu_stack and not have_wx
def check_ELF_RELRO(executable):
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
have_gnu_relro = False
for (typ, flags) in get_ELF_program_headers(executable):
# Note: not checking flags == 'R': here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
# However, the dynamic linker need to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347
if typ == b'GNU_RELRO':
have_gnu_relro = True
have_bindnow = False
p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.split(b'\n'):
tokens = line.split()
        if (len(tokens)>1 and tokens[1] == b'(BIND_NOW)') or (len(tokens)>2 and tokens[1] == b'(FLAGS)' and b'BIND_NOW' in tokens[2]):
have_bindnow = True
return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
'''
Check for use of stack canary
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split(b'\n'):
if b'__stack_chk_fail' in line:
ok = True
return ok
def get_PE_dll_characteristics(executable):
'''
Get PE DllCharacteristics bits.
Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386'
and bits is the DllCharacteristics value.
'''
p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
arch = ''
bits = 0
    # objdump output is bytes under Python 3; decode before splitting on a str separator
    for line in stdout.decode('utf-8', errors='replace').split('\n'):
tokens = line.split()
if len(tokens)>=2 and tokens[0] == 'architecture:':
arch = tokens[1].rstrip(',')
if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
bits = int(tokens[1],16)
return (arch,bits)
IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100
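# Example: a DllCharacteristics value of 0x0160 has all three bits above set
# (0x0020 | 0x0040 | 0x0100), i.e. high-entropy ASLR, dynamic base and NX compat.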
def check_PE_DYNAMIC_BASE(executable):
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
(arch,bits) = get_PE_dll_characteristics(executable)
reqbits = IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
return (bits & reqbits) == reqbits
# On 64 bit, must support high-entropy 64-bit address space layout randomization in addition to DYNAMIC_BASE
# to have secure ASLR.
def check_PE_HIGH_ENTROPY_VA(executable):
'''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
(arch,bits) = get_PE_dll_characteristics(executable)
if arch == 'i386:x86-64':
reqbits = IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA
else: # Unnecessary on 32-bit
assert(arch == 'i386')
reqbits = 0
return (bits & reqbits) == reqbits
def check_PE_NX(executable):
'''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
(arch,bits) = get_PE_dll_characteristics(executable)
return (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
CHECKS = {
'ELF': [
('PIE', check_ELF_PIE),
('NX', check_ELF_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary)
],
'PE': [
('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
('NX', check_PE_NX)
]
}
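# When run as a script (see the __main__ block below), a failing binary is
# reported as e.g. "./bitcoind: failed PIE RELRO" (path illustrative) and the
# exit status is 1; checks listed in NONFATAL only produce a warning line.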
def identify_executable(executable):
    with open(executable, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('%s: unknown format' % filename)
retval = 1
continue
failed = []
warning = []
for (name, func) in CHECKS[etype]:
if not func(filename):
if name in NONFATAL:
warning.append(name)
else:
failed.append(name)
if failed:
print('%s: failed %s' % (filename, ' '.join(failed)))
retval = 1
if warning:
print('%s: warning %s' % (filename, ' '.join(warning)))
except IOError:
print('%s: cannot open' % filename)
retval = 1
sys.exit(retval)
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\sims\sim_info_utils.py
# Compiled at: 2017-04-18 21:11:56
# Size of source mod 2**32: 1876 bytes
import functools, inspect, services
def sim_info_auto_finder(fn):
is_generator = inspect.isgeneratorfunction(fn)
if is_generator:
@functools.wraps(fn)
def wrapped(*args, **kwargs):
sim_info_manager = services.sim_info_manager()
for sim_id in fn(*args, **kwargs):
sim_info = sim_info_manager.get(sim_id)
if sim_info is not None:
yield sim_info
else:
@functools.wraps(fn)
def wrapped(*args, **kwargs):
sim_ids = fn(*args, **kwargs)
if sim_ids is None:
return
sim_info_manager = services.sim_info_manager()
sim_infos = []
for sim_id in sim_ids:
sim_info = sim_info_manager.get(sim_id)
if sim_info is not None:
sim_infos.append(sim_info)
return tuple(sim_infos)
return wrapped
def apply_super_affordance_commodity_flags(sim, key, super_affordances):
if sim is not None:
if super_affordances:
flags = set()
for affordance in super_affordances:
flags |= affordance.commodity_flags
if flags:
sim.add_dynamic_commodity_flags(key, flags)
def remove_super_affordance_commodity_flags(sim, key):
if sim is not None:
sim.remove_dynamic_commodity_flags(key)
|
from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTestJITStats
from rpython.translator.translator import TranslationContext
from rpython.config.translationoption import DEFL_GC
from rpython.jit.backend.x86.arch import WORD
import sys
class TestTranslationJITStatsX86(TranslationTestJITStats):
def _check_cbuilder(self, cbuilder):
#We assume here that we have sse2. If not, the CPUClass
# needs to be changed to CPU386_NO_SSE2, but well.
if WORD == 4 and sys.platform != 'win32':
assert '-msse2' in cbuilder.eci.compile_extra
assert '-mfpmath=sse' in cbuilder.eci.compile_extra
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-19 16:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('climatemodels', '0014_auto_20160418_1100'),
]
operations = [
migrations.CreateModel(
name='AgroEconomicModelling',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('impact_model', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='climatemodels.ImpactModel')),
],
options={
'verbose_name_plural': 'Agro-Economic Modelling',
'verbose_name': 'Agro-Economic Modelling',
},
),
migrations.CreateModel(
name='ComputableGeneralEquilibriumModelling',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('impact_model', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='climatemodels.ImpactModel')),
],
options={
'abstract': False,
},
),
migrations.RenameField(
model_name='inputdata',
old_name='data_set',
new_name='name',
),
migrations.AddField(
model_name='inputdata',
name='caveats',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='inputdata',
name='scenario',
field=models.CharField(blank=True, max_length=500, null=True),
),
migrations.AddField(
model_name='inputdata',
name='variables',
field=models.ManyToManyField(blank=True, to='climatemodels.ClimateVariable'),
),
]
|
# -*- coding: UTF-8 -*-
# NVDAObjects/behaviors.py
# A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2006-2019 NV Access Limited, Peter Vágner, Joseph Lee, Bill Dengler
"""Mix-in classes which provide common behaviour for particular types of controls across different APIs.
Behaviors described in this mix-in include providing table navigation commands for certain table rows, terminal input and output support, announcing notifications and suggestion items and so on.
"""
import os
import time
import threading
import difflib
import tones
import queueHandler
import eventHandler
import controlTypes
import speech
import characterProcessing
import config
from . import NVDAObject, NVDAObjectTextInfo
import textInfos
import editableText
from logHandler import log
from scriptHandler import script
import api
import ui
import braille
import nvwave
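# Rough sketch (not part of this module) of how these behaviour mix-ins are
# usually applied: an app module inserts one of them at the front of an
# object's class list via the standard overlay-class hook. The window class
# name used here is made up for illustration.
#
#   def chooseNVDAObjectOverlayClasses(self, obj, clsList):
#       if obj.windowClassName == "MyTerminalWindow":
#           clsList.insert(0, Terminal)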
class ProgressBar(NVDAObject):
progressValueCache={} #key is made of "speech" or "beep" and an x,y coordinate, value is the last percentage
def event_valueChange(self):
pbConf=config.conf["presentation"]["progressBarUpdates"]
states=self.states
if pbConf["progressBarOutputMode"]=="off" or controlTypes.STATE_INVISIBLE in states or controlTypes.STATE_OFFSCREEN in states:
return super(ProgressBar,self).event_valueChange()
val=self.value
try:
percentage = min(max(0.0, float(val.strip("%\0"))), 100.0)
except (AttributeError, ValueError):
log.debugWarning("Invalid value: %r" % val)
return super(ProgressBar, self).event_valueChange()
braille.handler.handleUpdate(self)
if not pbConf["reportBackgroundProgressBars"] and not self.isInForeground:
return
try:
left,top,width,height=self.location
except:
left=top=width=height=0
x = left + (width // 2)
y = top+ (height // 2)
lastBeepProgressValue=self.progressValueCache.get("beep,%d,%d"%(x,y),None)
if pbConf["progressBarOutputMode"] in ("beep","both") and (lastBeepProgressValue is None or abs(percentage-lastBeepProgressValue)>=pbConf["beepPercentageInterval"]):
tones.beep(pbConf["beepMinHZ"]*2**(percentage/25.0),40)
self.progressValueCache["beep,%d,%d"%(x,y)]=percentage
lastSpeechProgressValue=self.progressValueCache.get("speech,%d,%d"%(x,y),None)
if pbConf["progressBarOutputMode"] in ("speak","both") and (lastSpeechProgressValue is None or abs(percentage-lastSpeechProgressValue)>=pbConf["speechPercentageInterval"]):
queueHandler.queueFunction(queueHandler.eventQueue,speech.speakMessage,_("%d percent")%percentage)
self.progressValueCache["speech,%d,%d"%(x,y)]=percentage
class Dialog(NVDAObject):
"""Overrides the description property to obtain dialog text.
"""
@classmethod
def getDialogText(cls,obj,allowFocusedDescendants=True):
"""This classmethod walks through the children of the given object, and collects up and returns any text that seems to be part of a dialog's message text.
		@param obj: the object whose children you want to collect the text from
@type obj: L{IAccessible}
@param allowFocusedDescendants: if false no text will be returned at all if one of the descendants is focused.
@type allowFocusedDescendants: boolean
"""
children=obj.children
textList=[]
childCount=len(children)
for index in range(childCount):
child=children[index]
childStates=child.states
childRole=child.role
#We don't want to handle invisible or unavailable objects
if controlTypes.STATE_INVISIBLE in childStates or controlTypes.STATE_UNAVAILABLE in childStates:
continue
#For particular objects, we want to descend in to them and get their children's message text
if childRole in (
controlTypes.ROLE_PROPERTYPAGE,
controlTypes.ROLE_PANE,
controlTypes.ROLE_PANEL,
controlTypes.ROLE_WINDOW,
controlTypes.ROLE_GROUPING,
controlTypes.ROLE_PARAGRAPH,
controlTypes.ROLE_SECTION,
controlTypes.ROLE_TEXTFRAME,
controlTypes.ROLE_UNKNOWN
):
#Grab text from descendants, but not for a child which inherits from Dialog and has focusable descendants
#Stops double reporting when focus is in a property page in a dialog
childText=cls.getDialogText(child,not isinstance(child,Dialog))
if childText:
textList.append(childText)
elif childText is None:
return None
continue
#If the child is focused we should just stop and return None
if not allowFocusedDescendants and controlTypes.STATE_FOCUSED in child.states:
return None
# We only want text from certain controls.
if not (
# Static text, labels and links
childRole in (controlTypes.ROLE_STATICTEXT,controlTypes.ROLE_LABEL,controlTypes.ROLE_LINK)
# Read-only, non-multiline edit fields
or (childRole==controlTypes.ROLE_EDITABLETEXT and controlTypes.STATE_READONLY in childStates and controlTypes.STATE_MULTILINE not in childStates)
):
continue
#We should ignore a text object directly after a grouping object, as it's probably the grouping's description
if index>0 and children[index-1].role==controlTypes.ROLE_GROUPING:
continue
#Like the last one, but a graphic might be before the grouping's description
if index>1 and children[index-1].role==controlTypes.ROLE_GRAPHIC and children[index-2].role==controlTypes.ROLE_GROUPING:
continue
childName=child.name
if childName and index<(childCount-1) and children[index+1].role not in (controlTypes.ROLE_GRAPHIC,controlTypes.ROLE_STATICTEXT,controlTypes.ROLE_SEPARATOR,controlTypes.ROLE_WINDOW,controlTypes.ROLE_PANE,controlTypes.ROLE_BUTTON) and children[index+1].name==childName:
# This is almost certainly the label for the next object, so skip it.
continue
isNameIncluded=child.TextInfo is NVDAObjectTextInfo or childRole in (controlTypes.ROLE_LABEL,controlTypes.ROLE_STATICTEXT)
childText=child.makeTextInfo(textInfos.POSITION_ALL).text
if not childText or childText.isspace() and child.TextInfo is not NVDAObjectTextInfo:
childText=child.basicText
isNameIncluded=True
if not isNameIncluded:
# The label isn't in the text, so explicitly include it first.
if childName:
textList.append(childName)
if childText:
textList.append(childText)
return "\n".join(textList)
def _get_description(self):
superDesc = super(Dialog, self).description
if superDesc and not superDesc.isspace():
# The object already provides a useful description, so don't override it.
return superDesc
return self.getDialogText(self)
value = None
def _get_isPresentableFocusAncestor(self):
# Only fetch this the first time it is requested,
# as it is very slow due to getDialogText and the answer shouldn't change anyway.
self.isPresentableFocusAncestor = res = super(Dialog, self).isPresentableFocusAncestor
return res
class EditableText(editableText.EditableText, NVDAObject):
"""Provides scripts to report appropriately when moving the caret in editable text fields.
This does not handle selection changes.
To handle selection changes, use either L{EditableTextWithAutoSelectDetection} or L{EditableTextWithoutAutoSelectDetection}.
"""
shouldFireCaretMovementFailedEvents = True
def initOverlayClass(self):
# #4264: the caret_newLine script can only be bound for processes other than NVDA's process
		# As pressing enter on an edit field can cause modal dialogs to appear, and gesture.send and api.processPendingEvents may call wx.Yield, which can end in a freeze.
if self.announceNewLineText and self.processID!=os.getpid():
self.bindGesture("kb:enter","caret_newLine")
self.bindGesture("kb:numpadEnter","caret_newLine")
def _caretScriptPostMovedHelper(self, speakUnit, gesture, info=None):
if eventHandler.isPendingEvents("gainFocus"):
return
super()._caretScriptPostMovedHelper(speakUnit, gesture, info)
class EditableTextWithAutoSelectDetection(EditableText):
"""In addition to L{EditableText}, handles reporting of selection changes for objects which notify of them.
To have selection changes reported, the object must notify of selection changes via the caret event.
Optionally, it may notify of changes to content via the textChange, textInsert and textRemove events.
If the object supports selection but does not notify of selection changes, L{EditableTextWithoutAutoSelectDetection} should be used instead.
"""
def event_gainFocus(self):
super(EditableText, self).event_gainFocus()
self.initAutoSelectDetection()
def event_caret(self):
super(EditableText, self).event_caret()
if self is api.getFocusObject() and not eventHandler.isPendingEvents('gainFocus'):
self.detectPossibleSelectionChange()
def event_textChange(self):
self.hasContentChangedSinceLastSelection = True
def event_textInsert(self):
self.hasContentChangedSinceLastSelection = True
def event_textRemove(self):
self.hasContentChangedSinceLastSelection = True
class EditableTextWithoutAutoSelectDetection(editableText.EditableTextWithoutAutoSelectDetection, EditableText):
"""In addition to L{EditableText}, provides scripts to report appropriately when the selection changes.
This should be used when an object does not notify of selection changes.
"""
initOverlayClass = editableText.EditableTextWithoutAutoSelectDetection.initClass
class LiveText(NVDAObject):
"""An object for which new text should be reported automatically.
These objects present text as a single chunk
and only fire an event indicating that some part of the text has changed; i.e. they don't provide the new text.
Monitoring must be explicitly started and stopped using the L{startMonitoring} and L{stopMonitoring} methods.
The object should notify of text changes using the textChange event.
"""
#: The time to wait before fetching text after a change event.
STABILIZE_DELAY = 0
# If the text is live, this is definitely content.
presentationType = NVDAObject.presType_content
announceNewLineText=False
def initOverlayClass(self):
self._event = threading.Event()
self._monitorThread = None
self._keepMonitoring = False
def startMonitoring(self):
"""Start monitoring for new text.
New text will be reported when it is detected.
@note: If monitoring has already been started, this will have no effect.
@see: L{stopMonitoring}
"""
if self._monitorThread:
return
thread = self._monitorThread = threading.Thread(
name=f"{self.__class__.__qualname__}._monitorThread",
target=self._monitor
)
thread.daemon = True
self._keepMonitoring = True
self._event.clear()
thread.start()
def stopMonitoring(self):
"""Stop monitoring previously started with L{startMonitoring}.
@note: If monitoring has not been started, this will have no effect.
@see: L{startMonitoring}
"""
if not self._monitorThread:
return
self._keepMonitoring = False
self._event.set()
self._monitorThread = None
def event_textChange(self):
"""Fired when the text changes.
@note: It is safe to call this directly from threads other than the main thread.
"""
self._event.set()
def _getTextLines(self):
"""Retrieve the text of this object in lines.
This will be used to determine the new text to speak.
The base implementation uses the L{TextInfo}.
However, subclasses should override this if there is a better way to retrieve the text.
@return: The current lines of text.
@rtype: list of str
"""
return list(self.makeTextInfo(textInfos.POSITION_ALL).getTextInChunks(textInfos.UNIT_LINE))
def _reportNewLines(self, lines):
"""
Reports new lines of text using _reportNewText for each new line.
Subclasses may override this method to provide custom filtering of new text,
where logic depends on multiple lines.
"""
for line in lines:
self._reportNewText(line)
def _reportNewText(self, line):
"""Report a line of new text.
"""
speech.speakText(line)
def _monitor(self):
try:
oldLines = self._getTextLines()
except:
log.exception("Error getting initial lines")
oldLines = []
while self._keepMonitoring:
self._event.wait()
if not self._keepMonitoring:
break
if self.STABILIZE_DELAY > 0:
# wait for the text to stabilise.
time.sleep(self.STABILIZE_DELAY)
if not self._keepMonitoring:
# Monitoring was stopped while waiting for the text to stabilise.
break
self._event.clear()
try:
newLines = self._getTextLines()
if config.conf["presentation"]["reportDynamicContentChanges"]:
outLines = self._calculateNewText(newLines, oldLines)
if len(outLines) == 1 and len(outLines[0].strip()) == 1:
# This is only a single character,
# which probably means it is just a typed character,
# so ignore it.
del outLines[0]
if outLines:
queueHandler.queueFunction(queueHandler.eventQueue, self._reportNewLines, outLines)
oldLines = newLines
except:
log.exception("Error getting lines or calculating new text")
def _calculateNewText(self, newLines, oldLines):
outLines = []
prevLine = None
for line in difflib.ndiff(oldLines, newLines):
if line[0] == "?":
# We're never interested in these.
continue
if line[0] != "+":
# We're only interested in new lines.
prevLine = line
continue
text = line[2:]
if not text or text.isspace():
prevLine = line
continue
if prevLine and prevLine[0] == "-" and len(prevLine) > 2:
# It's possible that only a few characters have changed in this line.
# If so, we want to speak just the changed section, rather than the entire line.
prevText = prevLine[2:]
textLen = len(text)
prevTextLen = len(prevText)
# Find the first character that differs between the two lines.
for pos in range(min(textLen, prevTextLen)):
if text[pos] != prevText[pos]:
start = pos
break
else:
# We haven't found a differing character so far and we've hit the end of one of the lines.
# This means that the differing text starts here.
start = pos + 1
# Find the end of the differing text.
if textLen != prevTextLen:
# The lines are different lengths, so assume the rest of the line changed.
end = textLen
else:
for pos in range(textLen - 1, start - 1, -1):
if text[pos] != prevText[pos]:
end = pos + 1
break
if end - start < 15:
# Less than 15 characters have changed, so only speak the changed chunk.
text = text[start:end]
if text and not text.isspace():
outLines.append(text)
prevLine = line
return outLines
class Terminal(LiveText, EditableText):
"""An object which both accepts text input and outputs text which should be reported automatically.
This is an L{EditableText} object,
	as well as an L{LiveText} object for which monitoring is automatically enabled and disabled based on whether it has focus.
"""
role = controlTypes.ROLE_TERMINAL
def event_gainFocus(self):
super(Terminal, self).event_gainFocus()
self.startMonitoring()
def event_loseFocus(self):
super(Terminal, self).event_loseFocus()
self.stopMonitoring()
def _get_caretMovementDetectionUsesEvents(self):
"""Using caret events in consoles sometimes causes the last character of the
prompt to be read when quickly deleting text."""
return False
class KeyboardHandlerBasedTypedCharSupport(Terminal):
"""A Terminal object that also provides typed character support for
console applications via keyboardHandler events.
These events are queued from NVDA's global keyboard hook.
Therefore, an event is fired for every single character that is being typed,
even when a character is not written to the console (e.g. in read only console applications).
This approach is an alternative to monitoring the console output for
characters close to the caret, or injecting in-process with NVDAHelper.
This class relies on the toUnicodeEx Windows function, and in particular
the flag to preserve keyboard state available in Windows 10 1607
and later."""
#: Whether this object quickly and reliably sends textChange events
#: when its contents update.
#: Timely and reliable textChange events are required
#: to support password suppression.
_supportsTextChange = True
#: A queue of typed characters, to be dispatched on C{textChange}.
#: This queue allows NVDA to suppress typed passwords when needed.
_queuedChars = []
#: Whether the last typed character is a tab.
#: If so, we should temporarily disable filtering as completions may
#: be short.
_hasTab = False
def _reportNewLines(self, lines):
# Perform typed character filtering, as typed characters are handled with events.
if (
len(lines) == 1
and not self._hasTab
and len(lines[0].strip()) < max(len(speech.curWordChars) + 1, 3)
):
return
super()._reportNewLines(lines)
def event_typedCharacter(self, ch):
if ch == '\t':
self._hasTab = True
# Clear the typed word buffer for tab completion.
speech.clearTypedWordBuffer()
else:
self._hasTab = False
if (
(
config.conf['keyboard']['speakTypedCharacters']
or config.conf['keyboard']['speakTypedWords']
)
and not config.conf['terminals']['speakPasswords']
and self._supportsTextChange
):
self._queuedChars.append(ch)
else:
super().event_typedCharacter(ch)
def event_textChange(self):
self._dispatchQueue()
super().event_textChange()
@script(gestures=[
"kb:enter",
"kb:numpadEnter",
"kb:tab",
"kb:control+c",
"kb:control+d",
"kb:control+pause"
])
def script_flush_queuedChars(self, gesture):
"""
Flushes the typed word buffer and queue of typedCharacter events if present.
Since these gestures clear the current word/line, we should flush the
queue to avoid erroneously reporting these chars.
"""
self._queuedChars = []
speech.clearTypedWordBuffer()
gesture.send()
def _calculateNewText(self, newLines, oldLines):
hasNewLines = (
self._findNonBlankIndices(newLines)
!= self._findNonBlankIndices(oldLines)
)
if hasNewLines:
# Clear the typed word buffer for new text lines.
speech.clearTypedWordBuffer()
self._queuedChars = []
return super()._calculateNewText(newLines, oldLines)
def _dispatchQueue(self):
"""Sends queued typedCharacter events through to NVDA."""
while self._queuedChars:
ch = self._queuedChars.pop(0)
super().event_typedCharacter(ch)
def _findNonBlankIndices(self, lines):
"""
Given a list of strings, returns a list of indices where the strings
are not empty.
"""
return [index for index, line in enumerate(lines) if line]
class CandidateItem(NVDAObject):
def getFormattedCandidateName(self,number,candidate):
if config.conf["inputComposition"]["alwaysIncludeShortCharacterDescriptionInCandidateName"]:
describedSymbols=[]
for symbol in candidate:
try:
symbolDescriptions=characterProcessing.getCharacterDescription(speech.getCurrentLanguage(),symbol) or []
except TypeError:
symbolDescriptions=[]
if len(symbolDescriptions)>=1:
description=symbolDescriptions[0]
if description.startswith('(') and description.endswith(')'):
describedSymbols.append(description[1:-1])
else:
# Translators: a message announcing a candidate's character and description.
describedSymbols.append(_(u"{symbol} as in {description}").format(symbol=symbol,description=description))
else:
describedSymbols.append(symbol)
candidate=u", ".join(describedSymbols)
# Translators: a formatted message announcing a candidate's number and candidate text.
return _(u"{number} {candidate}").format(number=number,candidate=candidate)
def getFormattedCandidateDescription(self,candidate):
descriptions=[]
numSymbols=len(candidate) if candidate else 0
if numSymbols!=1: return u""
symbol=candidate[0]
try:
symbolDescriptions=characterProcessing.getCharacterDescription(speech.getCurrentLanguage(),symbol) or []
except TypeError:
symbolDescriptions=[]
if config.conf["inputComposition"]["alwaysIncludeShortCharacterDescriptionInCandidateName"]:
symbolDescriptions=symbolDescriptions[1:]
if len(symbolDescriptions)<1: return u""
return u", ".join(symbolDescriptions)
def reportFocus(self):
if not config.conf["inputComposition"]["announceSelectedCandidate"]: return
text=self.name
desc=self.description
if desc:
text+=u", "+desc
speech.speakText(text)
def _get_visibleCandidateItemsText(self):
obj=self
textList=[]
while isinstance(obj,CandidateItem) and isinstance(obj.candidateNumber,int) and controlTypes.STATE_INVISIBLE not in obj.states:
textList.append(obj.name)
obj=obj.previous
textList.reverse()
obj=self.next
while isinstance(obj,CandidateItem) and isinstance(obj.candidateNumber,int) and controlTypes.STATE_INVISIBLE not in obj.states:
textList.append(obj.name)
obj=obj.next
if len(textList)<=1: return None
self.visibleCandidateItemsText=(u", ".join(textList))+u", "
return self.visibleCandidateItemsText
class RowWithFakeNavigation(NVDAObject):
"""Provides table navigation commands for a row which doesn't support them natively.
The cells must be exposed as children and they must support the table cell properties.
"""
_savedColumnNumber = None
def _moveToColumn(self, obj):
if not obj:
ui.message(_("Edge of table"))
return
if obj is not self:
# Use the focused copy of the row as the parent for all cells to make comparison faster.
obj.parent = self
api.setNavigatorObject(obj)
speech.speakObject(obj, reason=controlTypes.REASON_FOCUS)
def _moveToColumnNumber(self, column):
child = column - 1
if child >= self.childCount:
return
cell = self.getChild(child)
self._moveToColumn(cell)
def script_moveToNextColumn(self, gesture):
cur = api.getNavigatorObject()
if cur == self:
new = self.firstChild
elif cur.parent != self:
self._moveToColumn(self)
return
else:
new = cur.next
while new and new.hasIrrelevantLocation:
new = new.next
self._moveToColumn(new)
script_moveToNextColumn.canPropagate = True
# Translators: The description of an NVDA command.
script_moveToNextColumn.__doc__ = _("Moves the navigator object to the next column")
def script_moveToPreviousColumn(self, gesture):
cur = api.getNavigatorObject()
if cur == self:
new = None
elif cur.parent != self or not cur.previous:
new = self
else:
new = cur.previous
while new and new.hasIrrelevantLocation:
new = new.previous
self._moveToColumn(new)
script_moveToPreviousColumn.canPropagate = True
# Translators: The description of an NVDA command.
script_moveToPreviousColumn.__doc__ = _("Moves the navigator object to the previous column")
def reportFocus(self):
col = self._savedColumnNumber
if not col:
return super(RowWithFakeNavigation, self).reportFocus()
self.__class__._savedColumnNumber = None
self._moveToColumnNumber(col)
def _moveToRow(self, row):
if not row:
return self._moveToColumn(None)
nav = api.getNavigatorObject()
if nav != self and nav.parent == self:
self.__class__._savedColumnNumber = nav.columnNumber
row.setFocus()
def script_moveToNextRow(self, gesture):
self._moveToRow(self.next)
script_moveToNextRow.canPropagate = True
# Translators: The description of an NVDA command.
script_moveToNextRow.__doc__ = _("Moves the navigator object and focus to the next row")
def script_moveToPreviousRow(self, gesture):
self._moveToRow(self.previous)
script_moveToPreviousRow.canPropagate = True
# Translators: The description of an NVDA command.
script_moveToPreviousRow.__doc__ = _("Moves the navigator object and focus to the previous row")
__gestures = {
"kb:control+alt+rightArrow": "moveToNextColumn",
"kb:control+alt+leftArrow": "moveToPreviousColumn",
"kb:control+alt+downArrow": "moveToNextRow",
"kb:control+alt+upArrow": "moveToPreviousRow",
}
class RowWithoutCellObjects(NVDAObject):
"""An abstract class which creates cell objects for table rows which don't natively expose them.
Subclasses must override L{_getColumnContent} and can optionally override L{_getColumnHeader}
to retrieve information about individual columns.
The parent (table) must support the L{columnCount} property.
"""
def _get_childCount(self):
return self.parent.columnCount
def _getColumnLocation(self,column):
"""Get the screen location for the given column.
Subclasses may optionally override this method.
@param column: The index of the column, starting at 1.
@type column: int
@rtype: tuple
"""
raise NotImplementedError
def _getColumnContent(self, column):
"""Get the text content for a given column of this row.
Subclasses must override this method.
@param column: The index of the column, starting at 1.
@type column: int
@rtype: str
"""
raise NotImplementedError
def _getColumnHeader(self, column):
"""Get the header text for this column.
@param column: The index of the column, starting at 1.
@type column: int
@rtype: str
"""
raise NotImplementedError
def _makeCell(self, column):
if column == 0 or column > self.childCount:
return None
return _FakeTableCell(parent=self, column=column)
def _get_firstChild(self):
return self._makeCell(1)
def _get_children(self):
return [self._makeCell(column) for column in range(1, self.childCount + 1)]
def getChild(self, index):
return self._makeCell(index + 1)
class _FakeTableCell(NVDAObject):
role = controlTypes.ROLE_TABLECELL
def __init__(self, parent=None, column=None):
super(_FakeTableCell, self).__init__()
self.parent = parent
self.columnNumber = column
try:
self.rowNumber = self.parent.positionInfo["indexInGroup"]
except KeyError:
pass
self.processID = parent.processID
try:
# HACK: Some NVDA code depends on window properties, even for non-Window objects.
self.windowHandle = parent.windowHandle
self.windowClassName = parent.windowClassName
self.windowControlID = parent.windowControlID
except AttributeError:
pass
def _get_next(self):
return self.parent._makeCell(self.columnNumber + 1)
def _get_previous(self):
return self.parent._makeCell(self.columnNumber - 1)
firstChild = None
def _get_location(self):
try:
return self.parent._getColumnLocation(self.columnNumber)
except NotImplementedError:
return None
def _get_name(self):
return self.parent._getColumnContent(self.columnNumber)
def _get_columnHeaderText(self):
return self.parent._getColumnHeader(self.columnNumber)
def _get_tableID(self):
return id(self.parent.parent)
def _get_states(self):
states = self.parent.states.copy()
if not self.location or self.location.width == 0:
states.add(controlTypes.STATE_INVISIBLE)
return states
class FocusableUnfocusableContainer(NVDAObject):
"""Makes an unfocusable container focusable using its first focusable descendant.
One instance where this is useful is ARIA applications on the web where the author hasn't set a tabIndex.
"""
isFocusable = True
def setFocus(self):
for obj in self.recursiveDescendants:
if obj.isFocusable:
obj.setFocus()
break
class ToolTip(NVDAObject):
"""Provides information about an item over which the user is hovering a cursor.
The object should fire a show event when it appears.
"""
role = controlTypes.ROLE_TOOLTIP
def event_show(self):
if not config.conf["presentation"]["reportTooltips"]:
return
speech.speakObject(self, reason=controlTypes.REASON_FOCUS)
# Ideally, we wouldn't use getPropertiesBraille directly.
braille.handler.message(braille.getPropertiesBraille(name=self.name, role=self.role))
class Notification(NVDAObject):
"""Informs the user of non-critical information that does not require immediate action.
This is primarily for notifications displayed in the system notification area, and for Windows 8 and later, toasts.
	The object should fire an alert or show event when the user should be notified.
"""
def event_alert(self):
if not config.conf["presentation"]["reportHelpBalloons"]:
return
speech.speakObject(self, reason=controlTypes.REASON_FOCUS)
# Ideally, we wouldn't use getPropertiesBraille directly.
braille.handler.message(braille.getPropertiesBraille(name=self.name, role=self.role))
event_show = event_alert
class EditableTextWithSuggestions(NVDAObject):
"""Allows NvDA to announce appearance/disappearance of suggestions as text is entered.
This is used in various places, including Windows 10 search edit fields and others.
Subclasses should provide L{event_suggestionsOpened} and can optionally override L{event_suggestionsClosed}.
These events are fired when suggestions appear and disappear, respectively.
"""
def event_suggestionsOpened(self):
"""Called when suggestions appear when text is entered e.g. search suggestions.
Subclasses should provide custom implementations if possible.
By default NVDA will announce appearance of suggestions using speech, braille or a sound will be played.
"""
# Translators: Announced in braille when suggestions appear when search term is entered in various search fields such as Start search box in Windows 10.
braille.handler.message(_("Suggestions"))
if config.conf["presentation"]["reportAutoSuggestionsWithSound"]:
nvwave.playWaveFile(r"waves\suggestionsOpened.wav")
def event_suggestionsClosed(self):
"""Called when suggestions list or container is closed.
Subclasses should provide custom implementations if possible.
By default NVDA will announce this via speech, braille or via a sound.
"""
if config.conf["presentation"]["reportAutoSuggestionsWithSound"]:
nvwave.playWaveFile(r"waves\suggestionsClosed.wav")
class WebDialog(NVDAObject):
"""
A dialog that will use a treeInterceptor if its parent currently does.
This can be used to ensure that dialogs on the web get browseMode by default, unless inside an ARIA application
"""
def _get_shouldCreateTreeInterceptor(self):
if self.parent.treeInterceptor:
return True
return False
|
# Copyright 2020 Ericsson TEI, Fabio Ubaldi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from urllib.parse import urlparse
from jsonschema import validate
from jsonschema.exceptions import ValidationError, SchemaError
from adaptation_layer import create_app
from .request_mock import mock_ns
from .response_schemas import ns_lcm_op_occ_schema, \
ns_list_schema, ns_schema, ns_lcm_op_occ_list_schema
# AUTHORIZATION unsupported by EVER driver
# scale a ns instance unsupported by EVER driver
class EverTestCase(unittest.TestCase):
client = None
@classmethod
def setUpClass(cls):
"""Define test variables and initialize app."""
cls.client = create_app().test_client
# Check status codes 200, 404, headers and payload for get_ns_list()
def test_get_ns_list_200(self):
res = self.client().get('/rano/3/ns_instances?__code=200')
print(res)
#print (ns_list_schema)
try:
validate(res.json, ns_list_schema)
except (ValidationError, SchemaError) as e:
self.fail(msg=e.message)
self.assertEqual(res.status_code, 200)
# Check status codes 200, 404, headers and payload for get_ns()
def test_get_ns_200(self):
res = self.client().get(
'/rano/3/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7?__code=200')
try:
validate(res.json, ns_schema)
except (ValidationError, SchemaError) as e:
self.fail(msg=e.message)
self.assertEqual(res.status_code, 200)
def test_get_ns_404(self):
res = self.client().get(
'/rano/3/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7?__code=404')
self.assertEqual(res.status_code, 404)
# Check status codes 201, 404, headers and payload for create_ns()
def test_create_ns_201(self):
res = self.client().post('/rano/3/ns_instances?__code=201', json=mock_ns)
self.assertEqual(res.status_code, 201)
self.assertIn('Location', res.headers)
validate_url = urlparse(res.headers["Location"])
self.assertTrue(all([validate_url.scheme, validate_url.netloc, validate_url.path]))
try:
validate(res.json, ns_schema)
except (ValidationError, SchemaError) as e:
self.fail(msg=e.message)
def test_create_ns_400(self):
res = self.client().post('/rano/3/ns_instances?__code=400', json=mock_ns)
self.assertEqual(res.status_code, 400)
# Check status codes 202, 400, 404, headers and payload for instantiate_ns()
def test_instantiate_ns_202(self):
res = self.client().post('/rano/3/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7/instantiate?__code=202')
self.assertEqual(res.status_code, 202)
self.assertIn('Location', res.headers)
validate_url = urlparse(res.headers["Location"])
self.assertTrue(all([validate_url.scheme, validate_url.netloc, validate_url.path]))
def test_instantiate_ns_400(self):
res = self.client().post('/rano/3/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7/instantiate?__code=400')
self.assertEqual(res.status_code, 400)
def test_instantiate_ns_404(self):
res = self.client().post('/rano/3/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7/instantiate?__code=404')
self.assertEqual(res.status_code, 404)
# Check status codes 202, 404, headers and payload for terminate_ns()
def test_terminate_ns_202(self):
res = self.client().post('/rano/3/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7/terminate?__code=202')
self.assertEqual(res.status_code, 202)
self.assertIn('Location', res.headers)
validate_url = urlparse(res.headers["Location"])
self.assertTrue(all([validate_url.scheme, validate_url.netloc, validate_url.path]))
def test_terminate_ns_404(self):
res = self.client().post('/rano/3/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7/terminate?__code=404')
self.assertEqual(res.status_code, 404)
# Check status codes 204, 404, headers and payload for delete_ns()
def test_delete_ns_204(self):
res = self.client().delete(
'/rano/3/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7?__code=204')
self.assertEqual(res.status_code, 204)
def test_delete_ns_404(self):
res = self.client().delete(
'/rano/3/ns_instances/49ccb6a2-5bcd-4f35-a2cf-7728c54e48b7?__code=404')
self.assertEqual(res.status_code, 404)
# Check status codes 200, 404, headers and payload for get_ns_lcm_op_occs_()
def test_get_ns_lcm_op_occs_200(self):
res = self.client().get('/rano/3/ns_lcm_op_occs/49ccb6a2-5bcd-4f35-a2cf-7728c54c48b7?__code=200')
try:
validate(res.json, ns_lcm_op_occ_schema)
except (ValidationError, SchemaError) as e:
self.fail(msg=e.message)
self.assertEqual(res.status_code, 200)
def test_get_ns_lcm_op_occs_404(self):
res = self.client().get('/rano/3/ns_lcm_op_occs/49ccb6a2-5bcd-4f35-a2cf-7728c54c48b7?__code=404')
self.assertEqual(res.status_code, 404)
# Check status codes 200, headers and payload for get_ns_lcm_op_occs_list()
def test_get_ns_lcm_op_occs_list_200(self):
res = self.client().get('/rano/3/ns_lcm_op_occs?__code=200')
try:
validate(res.json, ns_lcm_op_occ_list_schema)
except (ValidationError, SchemaError) as e:
self.fail(msg=e.message)
self.assertEqual(res.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
'''
Created on 2020-07-04
@author: wf
'''
import csv
import html
import io
import json
import re
import time
from lodstorage.entity import EntityManager
from lodstorage.storageconfig import StoreMode, StorageConfig
import pyparsing as pp
class EventManager(EntityManager):
''' handle a catalog of events '''
debug=False
def __init__(self,name,url=None,title=None,config=None):
'''
Constructor
Args:
name(string): the name of this event manager e.g. "confref"
url(string): the url of the event source e.g. "http://portal.confref.org/"
title(string): title of the event source e.g. "confref.org"
'''
if config is None:
config=StorageConfig.getDefault()
config.tableName="Event_%s" % name
super().__init__(name,entityName="Event",entityPluralName="Events",config=config)
self.url=url
self.title=title
self.events={}
self.eventsByAcronym={}
self.eventsByCheckedAcronym={}
def add(self,event):
''' add the given event '''
self.events[event.eventId]=event
if hasattr(event,"lookupAcronym"):
self.eventsByAcronym[event.lookupAcronym]=event
def lookup(self,acronym):
''' lookup the given event '''
foundEvents=[]
if acronym in self.events:
foundEvent=self.events[acronym]
foundEvent.lookedUpIn="events"
foundEvents=[foundEvent]
elif acronym in self.eventsByCheckedAcronym:
foundEvents=self.eventsByCheckedAcronym[acronym]
for foundEvent in foundEvents:
foundEvent.lookedUpIn="checked acronyms"
elif acronym in self.eventsByAcronym:
foundEvent=self.eventsByAcronym[acronym]
foundEvent.lookedUpIn="acronyms"
foundEvents=[foundEvent]
return foundEvents
def extractCheckedAcronyms(self):
''' extract checked acronyms '''
self.showProgress("extracting acronyms for %s" % (self.name))
self.eventsByCheckedAcronym={}
grammar= pp.Regex(r'^(([1-9][0-9]?)th\s)?(?P<acronym>[A-Z/_-]{2,11})[ -]*(19|20)[0-9][0-9]$')
for event in self.events.values():
if hasattr(event, 'acronym') and event.acronym is not None:
try:
val=grammar.parseString(event.acronym).asDict()
if "acronym" in val:
acronym=val['acronym']
if acronym in self.eventsByCheckedAcronym:
self.eventsByCheckedAcronym[acronym].append(event)
else:
self.eventsByCheckedAcronym[acronym]=[event]
except pp.ParseException as pe:
if EventManager.debug:
print(event.acronym)
print(pe)
pass
self.showProgress ("found %d checked acronyms for %s of %d events with acronyms" % (len(self.eventsByCheckedAcronym),self.name,len(self.eventsByAcronym)))
def fromEventList(self,eventList):
'''
        restore my events from the given ListOfDicts
Args:
eventList(list): the list of event Records/Dicts
'''
for eventRecord in eventList:
event=Event()
event.fromDict(eventRecord)
self.add(event)
def fromStore(self,cacheFile=None):
'''
restore me from the store
Args:
cacheFile(String): the cacheFile to use if None use the preconfigured Cachefile
'''
startTime=time.time()
listOfDicts=super().fromStore(cacheFile)
if self.config.mode is StoreMode.JSON:
em=listOfDicts
if em is not None:
if em.events is not None:
self.events=em.events
self.showProgress("read %d %s from %s in %5.1f s" % (len(em.events),self.entityPluralName,self.name,time.time()-startTime))
if em.eventsByAcronym:
self.eventsByAcronym=em.eventsByAcronym
else:
self.fromEventList(listOfDicts)
def getListOfDicts(self):
'''
get the list of Dicts for me
'''
eventList=[]
for event in self.events.values():
d=event.__dict__
eventList.append(d)
return eventList
def store(self,cacheFile=None,batchSize=2000,limit=None,sampleRecordCount=100):
'''
store my events
Args:
cacheFile(string): the cacheFile to use
batchSize(int): size of batch
limit(int): maximum number of records
            sampleRecordCount(int): how many records to sample for type detection
'''
super().store(self.getListOfDicts(),cacheFile=cacheFile,batchSize=batchSize,limit=limit,sampleRecordCount=sampleRecordCount)
@staticmethod
def asWikiSon(eventDicts):
wikison=""
for eventDict in eventDicts:
wikison+=EventManager.eventDictToWikiSon(eventDict)
return wikison
@staticmethod
def asCsv(eventDicts):
''' convert the given event dicts to CSV
see https://stackoverflow.com/a/9157370/1497139'''
output=io.StringIO()
fieldNameSet=set()
for eventDict in eventDicts:
for key in eventDict.keys():
fieldNameSet.add(key)
writer=csv.DictWriter(output,fieldnames=list(fieldNameSet),quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
for eventDict in eventDicts:
writer.writerow(eventDict)
return output.getvalue()
@staticmethod
def eventDictToWikiSon(eventDict):
wikison="{{Event\n"
for key,value in eventDict.items():
if key not in ['foundBy','source','creation_date','modification_date']:
if value is not None:
wikison+="|%s=%s\n" % (key,value)
wikison+="}}\n"
return wikison
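# Minimal illustration of the WikiSon rendering above (record values made up):
#   EventManager.eventDictToWikiSon({"acronym": "ISWC 2020", "city": "Athens"})
# returns:
#   {{Event
#   |acronym=ISWC 2020
#   |city=Athens
#   }}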
class Event(object):
'''
an Event
'''
def __init__(self):
'''
Constructor
'''
self.foundBy=None
self.homepage=None
self.acronym=None
self.city=None
self.country=None
def hasUrl(self):
result=False
if (hasattr(self,'homepage') and self.homepage is not None):
result=True
else:
result=hasattr(self,'url')
return result
def getUrl(self):
if hasattr(self,'url'):
return self.url
else:
return self.homepage
def fromTitle(self,title,debug=False):
'''
fill my data from the given Title
Args:
title(Title): the title to get the information from
debug(boolean): True if debugging should be activated
'''
md=title.metadata()
Event.fixEncodings(md,debug)
self.fromDict(md)
self.getLookupAcronym()
def fromDict(self,srcDict,withHtmlUnescape=False):
'''
fill my data from the given source Dict
Args:
srcDict(dict): the dict to fill my data from
withHtmlUnescape(boolean): True if HTML entities should be unescaped e.g. Montréal
'''
d=self.__dict__
for key in srcDict:
targetKey=key
if key=="id":
targetKey='eventId'
value=srcDict[key]
if withHtmlUnescape and value is not None and type(value) is str:
value=html.unescape(value)
d[targetKey]=value
self.getLookupAcronym()
def getLookupAcronym(self):
''' get the lookup acronym of this event e.g. add year information '''
if hasattr(self,'acronym') and self.acronym is not None:
self.lookupAcronym=self.acronym
else:
if hasattr(self,'event'):
self.lookupAcronym=self.event
if hasattr(self,'lookupAcronym'):
if self.lookupAcronym is not None:
try:
if hasattr(self, 'year') and self.year is not None and not re.search(r'[0-9]{4}',self.lookupAcronym):
self.lookupAcronym="%s %s" % (self.lookupAcronym,str(self.year))
except TypeError as te:
print ('Warning getLookupAcronym failed for year: %s and lookupAcronym %s' % (self.year,self.lookupAcronym))
def asJson(self):
''' return me as a JSON record
https://stackoverflow.com/a/36142844/1497139 '''
return json.dumps(self.__dict__,indent=4,sort_keys=True, default=str)
def __str__(self):
        ''' create a string representation of this event '''
text="%s (%s)" % (self.homepage,self.foundBy)
return text
|
"""
Dimension Data Cloud Module
===========================
This is a cloud module for the Dimension Data Cloud,
using the existing Libcloud driver for Dimension Data.
.. code-block:: yaml
# Note: This example is for /etc/salt/cloud.providers
# or any file in the
# /etc/salt/cloud.providers.d/ directory.
my-dimensiondata-config:
user_id: my_username
key: myPassword!
region: dd-na
driver: dimensiondata
:maintainer: Anthony Shaw <anthonyshaw@apache.org>
:depends: libcloud >= 1.2.1
"""
import logging
import pprint
import socket
import salt.config as config
import salt.utils.cloud
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.exceptions import (
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout,
SaltCloudSystemExit,
)
from salt.utils.functools import namespaced_function
from salt.utils.versions import LooseVersion as _LooseVersion
# Import libcloud
try:
import libcloud
from libcloud.compute.base import NodeDriver, NodeState
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.loadbalancer.base import Member
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
# This work-around for Issue #32743 is no longer needed for libcloud >=
# 1.4.0. However, older versions of libcloud must still be supported with
# this work-around. This work-around can be removed when the required
# minimum version of libcloud is 2.0.0 (See PR #40837 - which is
# implemented in Salt 2018.3.0).
if _LooseVersion(libcloud.__version__) < _LooseVersion("1.4.0"):
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append("/etc/ssl/certs/YaST-CA.pem")
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
try:
from netaddr import all_matching_cidrs # pylint: disable=unused-import
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
# Some of the libcloud functions need to be in the same namespace as the
# functions defined in the module, so we create new function objects inside
# this module namespace
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
destroy = namespaced_function(destroy, globals())
reboot = namespaced_function(reboot, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
get_node = namespaced_function(get_node, globals())
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = "dimensiondata"
def __virtual__():
"""
Set up the libcloud functions and check for dimensiondata configurations.
"""
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
for provider, details in __opts__["providers"].items():
if "dimensiondata" not in details:
continue
return __virtualname__
def get_configured_provider():
"""
Return the first configured instance.
"""
return config.is_provider_configured(
__opts__,
__active_provider_name__ or "dimensiondata",
("user_id", "key", "region"),
)
def get_dependencies():
"""
Warn if dependencies aren't met.
"""
deps = {"libcloud": HAS_LIBCLOUD, "netaddr": HAS_NETADDR}
return config.check_driver_dependencies(__virtualname__, deps)
def _query_node_data(vm_, data):
running = False
try:
node = show_instance(vm_["name"], "action") # pylint: disable=not-callable
running = node["state"] == NodeState.RUNNING
log.debug(
"Loaded node data for %s:\nname: %s\nstate: %s",
vm_["name"],
pprint.pformat(node["name"]),
node["state"],
)
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to get nodes list: %s",
err,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG,
)
# Trigger a failure in the wait for IP function
return running
if not running:
# Still not running, trigger another iteration
return
private = node["private_ips"]
public = node["public_ips"]
if private and not public:
log.warning(
"Private IPs returned, but not public. Checking for misidentified IPs."
)
for private_ip in private:
private_ip = preferred_ip(vm_, [private_ip])
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning("%s is a public IP", private_ip)
data.public_ips.append(private_ip)
else:
log.warning("%s is a private IP", private_ip)
if private_ip not in data.private_ips:
data.private_ips.append(private_ip)
if ssh_interface(vm_) == "private_ips" and data.private_ips:
return data
if private:
data.private_ips = private
if ssh_interface(vm_) == "private_ips":
return data
if public:
data.public_ips = public
if ssh_interface(vm_) != "private_ips":
return data
log.debug("Contents of the node data:")
log.debug(data)
def create(vm_):
"""
Create a single VM from a data dict
"""
try:
# Check for required profile parameters before sending any API calls.
if (
vm_["profile"]
and config.is_profile_configured(
__opts__, __active_provider_name__ or "dimensiondata", vm_["profile"]
)
is False
):
return False
except AttributeError:
pass
__utils__["cloud.fire_event"](
"event",
"starting create",
"salt/cloud/{}/creating".format(vm_["name"]),
args=__utils__["cloud.filter_event"](
"creating", vm_, ["name", "profile", "provider", "driver"]
),
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
log.info("Creating Cloud VM %s", vm_["name"])
conn = get_conn()
location = conn.ex_get_location_by_id(vm_["location"])
images = conn.list_images(location=location)
image = [x for x in images if x.id == vm_["image"]][0]
network_domains = conn.ex_list_network_domains(location=location)
try:
network_domain = [
y for y in network_domains if y.name == vm_["network_domain"]
][0]
except IndexError:
network_domain = conn.ex_create_network_domain(
location=location,
name=vm_["network_domain"],
plan="ADVANCED",
description="",
)
try:
vlan = [
y
for y in conn.ex_list_vlans(
location=location, network_domain=network_domain
)
if y.name == vm_["vlan"]
][0]
except (IndexError, KeyError):
# Use the first VLAN in the network domain
vlan = conn.ex_list_vlans(location=location, network_domain=network_domain)[0]
kwargs = {
"name": vm_["name"],
"image": image,
"ex_description": vm_["description"],
"ex_network_domain": network_domain,
"ex_vlan": vlan,
"ex_is_started": vm_["is_started"],
}
event_data = _to_event_data(kwargs)
__utils__["cloud.fire_event"](
"event",
"requesting instance",
"salt/cloud/{}/requesting".format(vm_["name"]),
args=__utils__["cloud.filter_event"](
"requesting", event_data, list(event_data)
),
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
# Initial password (excluded from event payload)
initial_password = NodeAuthPassword(vm_["auth"])
kwargs["auth"] = initial_password
try:
data = conn.create_node(**kwargs)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Error creating %s on DIMENSIONDATA\n\n"
"The following exception was thrown by libcloud when trying to "
"run the initial deployment: \n%s",
vm_["name"],
exc,
exc_info_on_loglevel=logging.DEBUG,
)
return False
try:
data = __utils__["cloud.wait_for_ip"](
_query_node_data,
update_args=(vm_, data),
timeout=config.get_cloud_config_value(
"wait_for_ip_timeout", vm_, __opts__, default=25 * 60
),
interval=config.get_cloud_config_value(
"wait_for_ip_interval", vm_, __opts__, default=30
),
max_failures=config.get_cloud_config_value(
"wait_for_ip_max_failures", vm_, __opts__, default=60
),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_["name"]) # pylint: disable=not-callable
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
log.debug("VM is now running")
if ssh_interface(vm_) == "private_ips":
ip_address = preferred_ip(vm_, data.private_ips)
else:
ip_address = preferred_ip(vm_, data.public_ips)
log.debug("Using IP address %s", ip_address)
if __utils__["cloud.get_salt_interface"](vm_, __opts__) == "private_ips":
salt_ip_address = preferred_ip(vm_, data.private_ips)
log.info("Salt interface set to: %s", salt_ip_address)
else:
salt_ip_address = preferred_ip(vm_, data.public_ips)
log.debug("Salt interface set to: %s", salt_ip_address)
if not ip_address:
raise SaltCloudSystemExit("No IP addresses could be found.")
vm_["salt_host"] = salt_ip_address
vm_["ssh_host"] = ip_address
vm_["password"] = vm_["auth"]
ret = __utils__["cloud.bootstrap"](vm_, __opts__)
ret.update(data.__dict__)
if "password" in data.extra:
del data.extra["password"]
log.info("Created Cloud VM '%s'", vm_["name"])
log.debug(
"'%s' VM creation details:\n%s", vm_["name"], pprint.pformat(data.__dict__)
)
__utils__["cloud.fire_event"](
"event",
"created instance",
"salt/cloud/{}/created".format(vm_["name"]),
args=__utils__["cloud.filter_event"](
"created", vm_, ["name", "profile", "provider", "driver"]
),
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
return ret
def create_lb(kwargs=None, call=None):
r"""
Create a load-balancer configuration.
CLI Example:
.. code-block:: bash
salt-cloud -f create_lb dimensiondata \
name=dev-lb port=80 protocol=http \
members=w1,w2,w3 algorithm=ROUND_ROBIN
"""
conn = get_conn()
if call != "function":
raise SaltCloudSystemExit(
"The create_lb function must be called with -f or --function."
)
if not kwargs or "name" not in kwargs:
log.error("A name must be specified when creating a health check.")
return False
if "port" not in kwargs:
log.error("A port or port-range must be specified for the load-balancer.")
return False
if "networkdomain" not in kwargs:
log.error("A network domain must be specified for the load-balancer.")
return False
if "members" in kwargs:
members = []
ip = ""
membersList = kwargs.get("members").split(",")
log.debug("MemberList: %s", membersList)
for member in membersList:
try:
log.debug("Member: %s", member)
node = get_node(conn, member) # pylint: disable=not-callable
log.debug("Node: %s", node)
ip = node.private_ips[0]
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to get node ip: %s",
err,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG,
)
members.append(Member(ip, ip, kwargs["port"]))
else:
members = None
log.debug("Members: %s", members)
networkdomain = kwargs["networkdomain"]
name = kwargs["name"]
port = kwargs["port"]
protocol = kwargs.get("protocol", None)
algorithm = kwargs.get("algorithm", None)
lb_conn = get_lb_conn(conn)
network_domains = conn.ex_list_network_domains()
network_domain = [y for y in network_domains if y.name == networkdomain][0]
log.debug("Network Domain: %s", network_domain.id)
lb_conn.ex_set_current_network_domain(network_domain.id)
event_data = _to_event_data(kwargs)
__utils__["cloud.fire_event"](
"event",
"create load_balancer",
"salt/cloud/loadbalancer/creating",
args=event_data,
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
lb = lb_conn.create_balancer(name, port, protocol, algorithm, members)
event_data = _to_event_data(kwargs)
__utils__["cloud.fire_event"](
"event",
"created load_balancer",
"salt/cloud/loadbalancer/created",
args=event_data,
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
return _expand_balancer(lb)
def _expand_balancer(lb):
"""
Convert the libcloud load-balancer object into something more serializable.
"""
ret = {}
ret.update(lb.__dict__)
return ret
def preferred_ip(vm_, ips):
"""
    Return the first IP address from ``ips`` that matches the configured
    protocol, either 'ipv4' (default) or 'ipv6'; returns False if none match.
"""
proto = config.get_cloud_config_value(
"protocol", vm_, __opts__, default="ipv4", search_global=False
)
family = socket.AF_INET
if proto == "ipv6":
family = socket.AF_INET6
for ip in ips:
try:
socket.inet_pton(family, ip)
return ip
except Exception: # pylint: disable=broad-except
continue
return False
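# Illustrative note (hypothetical values): with the default 'ipv4' protocol,
#   preferred_ip(vm_, ["2001:db8::1", "192.0.2.10"])
# skips the IPv6 address and returns "192.0.2.10"; if no address parses for the
# chosen family the function returns False.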
def ssh_interface(vm_):
"""
Return the ssh_interface type to connect to. Either 'public_ips' (default)
or 'private_ips'.
"""
return config.get_cloud_config_value(
"ssh_interface", vm_, __opts__, default="public_ips", search_global=False
)
def stop(name, call=None):
"""
Stop a VM in DimensionData.
name:
The name of the VM to stop.
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
"""
conn = get_conn()
node = get_node(conn, name) # pylint: disable=not-callable
log.debug("Node of Cloud VM: %s", node)
status = conn.ex_shutdown_graceful(node)
log.debug("Status of Cloud VM: %s", status)
return status
def start(name, call=None):
"""
    Start a VM in DimensionData.
    :param str name:
        The name of the VM to start.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a start vm_name
"""
conn = get_conn()
node = get_node(conn, name) # pylint: disable=not-callable
log.debug("Node of Cloud VM: %s", node)
status = conn.ex_start_node(node)
log.debug("Status of Cloud VM: %s", status)
return status
def get_conn():
"""
Return a conn object for the passed VM data
"""
vm_ = get_configured_provider()
driver = get_driver(Provider.DIMENSIONDATA)
region = config.get_cloud_config_value("region", vm_, __opts__)
user_id = config.get_cloud_config_value("user_id", vm_, __opts__)
key = config.get_cloud_config_value("key", vm_, __opts__)
if key is not None:
log.debug("DimensionData authenticating using password")
return driver(user_id, key, region=region)
def get_lb_conn(dd_driver=None):
"""
Return a load-balancer conn object
"""
vm_ = get_configured_provider()
region = config.get_cloud_config_value("region", vm_, __opts__)
user_id = config.get_cloud_config_value("user_id", vm_, __opts__)
key = config.get_cloud_config_value("key", vm_, __opts__)
if not dd_driver:
raise SaltCloudSystemExit(
"Missing dimensiondata_driver for get_lb_conn method."
)
return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region)
def _to_event_data(obj):
"""
Convert the specified object into a form that can be serialised by msgpack as event data.
:param obj: The object to convert.
"""
if obj is None:
return None
if isinstance(obj, bool):
return obj
if isinstance(obj, int):
return obj
if isinstance(obj, float):
return obj
if isinstance(obj, str):
return obj
if isinstance(obj, bytes):
return obj
if isinstance(obj, dict):
return obj
if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)
return obj.name
if isinstance(obj, list):
return [_to_event_data(item) for item in obj]
event_data = {}
for attribute_name in dir(obj):
if attribute_name.startswith("_"):
continue
attribute_value = getattr(obj, attribute_name)
if callable(attribute_value): # Strip out methods
continue
event_data[attribute_name] = _to_event_data(attribute_value)
return event_data
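def _example_to_event_data():
    """
    Illustrative sketch (not part of the original module): non-callable,
    non-underscore attributes of an arbitrary object are collected into a plain
    dict that msgpack can serialise. ``_FakeVlan`` is a hypothetical stand-in
    for a libcloud VLAN object.
    """
    class _FakeVlan:
        id = "1234"
        name = "example-vlan"
    return _to_event_data(_FakeVlan())  # -> {"id": "1234", "name": "example-vlan"}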
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Vultr Driver
"""
import time
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlencode
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.compute.types import Provider, NodeState
from libcloud.common.types import LibcloudError, InvalidCredsError
from libcloud.compute.base import NodeDriver
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation
class VultrResponse(JsonResponse):
def parse_error(self):
if self.status == httplib.OK:
body = self.parse_body()
return body
elif self.status == httplib.FORBIDDEN:
raise InvalidCredsError(self.body)
else:
raise LibcloudError(self.body)
class SSHKey(object):
def __init__(self, id, name, pub_key):
self.id = id
self.name = name
self.pub_key = pub_key
def __repr__(self):
return (('<SSHKey: id=%s, name=%s, pub_key=%s>') %
(self.id, self.name, self.pub_key))
class VultrConnection(ConnectionKey):
"""
Connection class for the Vultr driver.
"""
host = 'api.vultr.com'
responseCls = VultrResponse
def add_default_params(self, params):
"""
Add parameters that are necessary for every request
        This method adds ``api_key`` to the request.
"""
params['api_key'] = self.key
return params
def encode_data(self, data):
return urlencode(data)
def get(self, url):
return self.request(url)
def post(self, url, data):
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
return self.request(url, data=data, headers=headers, method='POST')
class VultrNodeDriver(NodeDriver):
"""
VultrNode node driver.
"""
connectionCls = VultrConnection
type = Provider.VULTR
name = 'Vultr'
website = 'https://www.vultr.com'
NODE_STATE_MAP = {'pending': NodeState.PENDING,
'active': NodeState.RUNNING}
EX_CREATE_YES_NO_ATTRIBUTES = ['enable_ipv6',
'enable_private_network',
'auto_backups',
'notify_activate',
'ddos_protection']
EX_CREATE_ID_ATTRIBUTES = {'iso_id': 'ISOID',
'script_id': 'SCRIPTID',
'snapshot_id': 'SNAPSHOTID',
'app_id': 'APPID'}
EX_CREATE_ATTRIBUTES = ['ipxe_chain_url',
'label',
'userdata',
'reserved_ip_v4',
'hostname',
'tag']
EX_CREATE_ATTRIBUTES.extend(EX_CREATE_YES_NO_ATTRIBUTES)
EX_CREATE_ATTRIBUTES.extend(EX_CREATE_ID_ATTRIBUTES.keys())
def list_nodes(self):
return self._list_resources('/v1/server/list', self._to_node)
def list_key_pairs(self):
"""
List all the available SSH keys.
:return: Available SSH keys.
:rtype: ``list`` of :class:`SSHKey`
"""
return self._list_resources('/v1/sshkey/list', self._to_ssh_key)
def create_key_pair(self, name, public_key=''):
"""
Create a new SSH key.
:param name: Name of the new SSH key
:type name: ``str``
        :param public_key: Public part of the new SSH key
        :type public_key: ``str``
:return: True on success
:rtype: ``bool``
"""
params = {'name': name, 'ssh_key': public_key}
res = self.connection.post('/v1/sshkey/create', params)
return res.status == httplib.OK
def delete_key_pair(self, key_pair):
"""
Delete an SSH key.
:param key_pair: The SSH key to delete
:type key_pair: :class:`SSHKey`
:return: True on success
:rtype: ``bool``
"""
params = {'SSHKEYID': key_pair.id}
res = self.connection.post('/v1/sshkey/destroy', params)
return res.status == httplib.OK
def list_locations(self):
return self._list_resources('/v1/regions/list', self._to_location)
def list_sizes(self):
return self._list_resources('/v1/plans/list', self._to_size)
def list_images(self):
return self._list_resources('/v1/os/list', self._to_image)
def create_node(self, name, size, image, location, ex_ssh_key_ids=None,
ex_create_attr=None):
"""
Create a node
:param name: Name for the new node
:type name: ``str``
:param size: Size of the new node
:type size: :class:`NodeSize`
:param image: Image for the new node
:type image: :class:`NodeImage`
:param location: Location of the new node
:type location: :class:`NodeLocation`
:param ex_ssh_key_ids: IDs of the SSH keys to initialize
        :type ex_ssh_key_ids: ``list`` of ``str``
:param ex_create_attr: Extra attributes for node creation
:type ex_create_attr: ``dict``
The `ex_create_attr` parameter can include the following dictionary
key and value pairs:
* `ipxe_chain_url`: ``str`` for specifying URL to boot via IPXE
* `iso_id`: ``str`` the ID of a specific ISO to mount,
only meaningful with the `Custom` `NodeImage`
* `script_id`: ``int`` ID of a startup script to execute on boot,
only meaningful when the `NodeImage` is not `Custom`
        * `snapshot_id`: ``str`` Snapshot ID to restore for the initial
installation, only meaningful with the `Snapshot` `NodeImage`
* `enable_ipv6`: ``bool`` Whether an IPv6 subnet should be assigned
* `enable_private_network`: ``bool`` Whether private networking
support should be added
* `label`: ``str`` Text label to be shown in the control panel
* `auto_backups`: ``bool`` Whether automatic backups should be enabled
* `app_id`: ``int`` App ID to launch if launching an application,
only meaningful when the `NodeImage` is `Application`
* `userdata`: ``str`` Base64 encoded cloud-init user-data
* `notify_activate`: ``bool`` Whether an activation email should be
sent when the server is ready
* `ddos_protection`: ``bool`` Whether DDOS protection should be enabled
* `reserved_ip_v4`: ``str`` IP address of the floating IP to use as
the main IP of this server
* `hostname`: ``str`` The hostname to assign to this server
* `tag`: ``str`` The tag to assign to this server
:return: The newly created node.
:rtype: :class:`Node`
"""
params = {'DCID': location.id, 'VPSPLANID': size.id,
'OSID': image.id, 'label': name}
if ex_ssh_key_ids is not None:
params['SSHKEYID'] = ','.join(ex_ssh_key_ids)
ex_create_attr = ex_create_attr or {}
for key, value in ex_create_attr.items():
if key in self.EX_CREATE_ATTRIBUTES:
if key in self.EX_CREATE_YES_NO_ATTRIBUTES:
params[key] = 'yes' if value else 'no'
else:
if key in self.EX_CREATE_ID_ATTRIBUTES:
key = self.EX_CREATE_ID_ATTRIBUTES[key]
params[key] = value
result = self.connection.post('/v1/server/create', params)
if result.status != httplib.OK:
return False
subid = result.object['SUBID']
retry_count = 3
created_node = None
for i in range(retry_count):
try:
nodes = self.list_nodes()
created_node = [n for n in nodes if n.id == subid][0]
except IndexError:
time.sleep(1)
pass
else:
break
return created_node
def reboot_node(self, node):
params = {'SUBID': node.id}
res = self.connection.post('/v1/server/reboot', params)
return res.status == httplib.OK
def destroy_node(self, node):
params = {'SUBID': node.id}
res = self.connection.post('/v1/server/destroy', params)
return res.status == httplib.OK
    def _list_resources(self, url, transform_func):
        data = self.connection.get(url).object
        sorted_keys = sorted(data)
        return [transform_func(data[key]) for key in sorted_keys]
def _to_node(self, data):
if 'status' in data:
state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN)
if state == NodeState.RUNNING and \
data['power_status'] != 'running':
state = NodeState.STOPPED
else:
state = NodeState.UNKNOWN
if 'main_ip' in data and data['main_ip'] is not None:
public_ips = [data['main_ip']]
else:
public_ips = []
extra_keys = []
extra = {}
for key in extra_keys:
if key in data:
extra[key] = data[key]
node = Node(id=data['SUBID'], name=data['label'], state=state,
public_ips=public_ips, private_ips=None, extra=extra,
driver=self)
return node
def _to_location(self, data):
return NodeLocation(id=data['DCID'], name=data['name'],
country=data['country'], driver=self)
def _to_size(self, data):
extra = {'vcpu_count': int(data['vcpu_count'])}
ram = int(data['ram'])
disk = int(data['disk'])
bandwidth = float(data['bandwidth'])
price = float(data['price_per_month'])
return NodeSize(id=data['VPSPLANID'], name=data['name'],
ram=ram, disk=disk,
bandwidth=bandwidth, price=price,
extra=extra, driver=self)
def _to_image(self, data):
extra = {'arch': data['arch'], 'family': data['family']}
return NodeImage(id=data['OSID'], name=data['name'], extra=extra,
driver=self)
def _to_ssh_key(self, data):
return SSHKey(id=data['SSHKEYID'], name=data['name'],
pub_key=data['ssh_key'])
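def _example_create_node(api_key):
    """
    Illustrative sketch (not part of the original driver): create a node with a
    couple of the optional ``ex_create_attr`` flags. ``api_key`` must be a valid
    Vultr API key; the first size/image/location returned by the API are used
    here purely for brevity.
    """
    driver = VultrNodeDriver(api_key)
    size = driver.list_sizes()[0]
    image = driver.list_images()[0]
    location = driver.list_locations()[0]
    return driver.create_node('example-node', size, image, location,
                              ex_create_attr={'enable_ipv6': True,
                                              'label': 'example'})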
|
def merger_first_into_second(arr1, arr2):
    """Merge sorted arr1 into sorted arr2 in place.

    arr2 is assumed to be padded with len(arr1) empty slots at its end,
    so the merged result fits entirely inside arr2.
    """
    p1 = len(arr1) - 1                 # last element of arr1
    p2 = len(arr2) - len(arr1) - 1     # last real (non-padding) element of arr2
    runner = len(arr2) - 1             # next position to fill, from the back
    while p1 >= 0 and p2 >= 0:
        if arr1[p1] > arr2[p2]:
            arr2[runner] = arr1[p1]
            p1 -= 1
        else:
            arr2[runner] = arr2[p2]
            p2 -= 1
        runner -= 1
    # Any leftovers from arr1 still need to be copied; leftovers from arr2
    # are already in their correct positions.
    while p1 >= 0:
        arr2[runner] = arr1[p1]
        p1 -= 1
        runner -= 1
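if __name__ == "__main__":
    # Small self-check (illustrative): merge [1, 3, 5] into the padded [2, 4, 6, 0, 0, 0].
    first = [1, 3, 5]
    second = [2, 4, 6, 0, 0, 0]
    merger_first_into_second(first, second)
    assert second == [1, 2, 3, 4, 5, 6], second
    print(second)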
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD+Patents license found in the
# LICENSE file in the root directory of this source tree.
#@nolint
# not linting this file because it imports * from swigfaiss, which
# causes a ton of useless warnings.
import numpy as np
import sys
import inspect
import pdb
# we import * so that the symbol X can be accessed as faiss.X
try:
from swigfaiss_gpu import *
except ImportError as e:
if 'No module named' not in e.args[0]:
# swigfaiss_gpu is there but failed to load: Warn user about it.
sys.stderr.write("Failed to load GPU Faiss: %s\n" % e.args[0])
sys.stderr.write("Faiss falling back to CPU-only.\n")
from swigfaiss import *
##################################################################
# The functions below add or replace some methods for classes
# this is to be able to pass in numpy arrays directly
# The C++ version of the classnames will be suffixed with _c
##################################################################
def replace_method(the_class, name, replacement, ignore_missing=False):
try:
orig_method = getattr(the_class, name)
except AttributeError:
if ignore_missing:
return
raise
if orig_method.__name__ == 'replacement_' + name:
# replacement was done in parent class
return
setattr(the_class, name + '_c', orig_method)
setattr(the_class, name, replacement)
def handle_Clustering():
def replacement_train(self, x, index):
assert x.flags.contiguous
n, d = x.shape
assert d == self.d
self.train_c(n, swig_ptr(x), index)
replace_method(Clustering, 'train', replacement_train)
handle_Clustering()
def handle_Quantizer(the_class):
def replacement_train(self, x):
n, d = x.shape
assert d == self.d
self.train_c(n, swig_ptr(x))
def replacement_compute_codes(self, x):
n, d = x.shape
assert d == self.d
codes = np.empty((n, self.code_size), dtype='uint8')
self.compute_codes_c(swig_ptr(x), swig_ptr(codes), n)
return codes
def replacement_decode(self, codes):
n, cs = codes.shape
assert cs == self.code_size
x = np.empty((n, self.d), dtype='float32')
self.decode_c(swig_ptr(codes), swig_ptr(x), n)
return x
replace_method(the_class, 'train', replacement_train)
replace_method(the_class, 'compute_codes', replacement_compute_codes)
replace_method(the_class, 'decode', replacement_decode)
handle_Quantizer(ProductQuantizer)
handle_Quantizer(ScalarQuantizer)
def handle_Index(the_class):
def replacement_add(self, x):
assert x.flags.contiguous
n, d = x.shape
assert d == self.d
self.add_c(n, swig_ptr(x))
def replacement_add_with_ids(self, x, ids):
n, d = x.shape
assert d == self.d
assert ids.shape == (n, ), 'not same nb of vectors as ids'
self.add_with_ids_c(n, swig_ptr(x), swig_ptr(ids))
def replacement_train(self, x):
assert x.flags.contiguous
n, d = x.shape
assert d == self.d
self.train_c(n, swig_ptr(x))
def replacement_search(self, x, k):
n, d = x.shape
assert d == self.d
distances = np.empty((n, k), dtype=np.float32)
labels = np.empty((n, k), dtype=np.int64)
self.search_c(n, swig_ptr(x),
k, swig_ptr(distances),
swig_ptr(labels))
return distances, labels
def replacement_search_and_reconstruct(self, x, k):
n, d = x.shape
assert d == self.d
distances = np.empty((n, k), dtype=np.float32)
labels = np.empty((n, k), dtype=np.int64)
recons = np.empty((n, k, d), dtype=np.float32)
self.search_and_reconstruct_c(n, swig_ptr(x),
k, swig_ptr(distances),
swig_ptr(labels),
swig_ptr(recons))
return distances, labels, recons
def replacement_remove_ids(self, x):
if isinstance(x, IDSelector):
sel = x
else:
assert x.ndim == 1
sel = IDSelectorBatch(x.size, swig_ptr(x))
return self.remove_ids_c(sel)
def replacement_reconstruct(self, key):
x = np.empty(self.d, dtype=np.float32)
self.reconstruct_c(key, swig_ptr(x))
return x
def replacement_reconstruct_n(self, n0, ni):
x = np.empty((ni, self.d), dtype=np.float32)
self.reconstruct_n_c(n0, ni, swig_ptr(x))
return x
def replacement_update_vectors(self, keys, x):
n = keys.size
assert keys.shape == (n, )
assert x.shape == (n, self.d)
self.update_vectors_c(n, swig_ptr(keys), swig_ptr(x))
def replacement_range_search(self, x, thresh):
n, d = x.shape
assert d == self.d
res = RangeSearchResult(n)
self.range_search_c(n, swig_ptr(x), thresh, res)
# get pointers and copy them
lims = rev_swig_ptr(res.lims, n + 1).copy()
nd = int(lims[-1])
D = rev_swig_ptr(res.distances, nd).copy()
I = rev_swig_ptr(res.labels, nd).copy()
return lims, D, I
replace_method(the_class, 'add', replacement_add)
replace_method(the_class, 'add_with_ids', replacement_add_with_ids)
replace_method(the_class, 'train', replacement_train)
replace_method(the_class, 'search', replacement_search)
replace_method(the_class, 'remove_ids', replacement_remove_ids)
replace_method(the_class, 'reconstruct', replacement_reconstruct)
replace_method(the_class, 'reconstruct_n', replacement_reconstruct_n)
replace_method(the_class, 'range_search', replacement_range_search)
replace_method(the_class, 'update_vectors', replacement_update_vectors,
ignore_missing=True)
replace_method(the_class, 'search_and_reconstruct',
replacement_search_and_reconstruct, ignore_missing=True)
def handle_VectorTransform(the_class):
def apply_method(self, x):
assert x.flags.contiguous
n, d = x.shape
assert d == self.d_in
y = np.empty((n, self.d_out), dtype=np.float32)
self.apply_noalloc(n, swig_ptr(x), swig_ptr(y))
return y
def replacement_reverse_transform(self, x):
n, d = x.shape
assert d == self.d_out
y = np.empty((n, self.d_in), dtype=np.float32)
self.reverse_transform_c(n, swig_ptr(x), swig_ptr(y))
return y
def replacement_vt_train(self, x):
assert x.flags.contiguous
n, d = x.shape
assert d == self.d_in
self.train_c(n, swig_ptr(x))
replace_method(the_class, 'train', replacement_vt_train)
    # apply is reserved in Python...
the_class.apply_py = apply_method
replace_method(the_class, 'reverse_transform',
replacement_reverse_transform)
def handle_AutoTuneCriterion(the_class):
    def replacement_set_groundtruth(self, D, I):
        if D is not None:
            assert I.shape == D.shape
        self.nq, self.gt_nnn = I.shape
        self.set_groundtruth_c(
            self.gt_nnn, swig_ptr(D) if D is not None else None, swig_ptr(I))
def replacement_evaluate(self, D, I):
assert I.shape == D.shape
assert I.shape == (self.nq, self.nnn)
return self.evaluate_c(swig_ptr(D), swig_ptr(I))
replace_method(the_class, 'set_groundtruth', replacement_set_groundtruth)
replace_method(the_class, 'evaluate', replacement_evaluate)
def handle_ParameterSpace(the_class):
def replacement_explore(self, index, xq, crit):
assert xq.shape == (crit.nq, index.d)
ops = OperatingPoints()
self.explore_c(index, crit.nq, swig_ptr(xq),
crit, ops)
return ops
replace_method(the_class, 'explore', replacement_explore)
this_module = sys.modules[__name__]
for symbol in dir(this_module):
obj = getattr(this_module, symbol)
# print symbol, isinstance(obj, (type, types.ClassType))
if inspect.isclass(obj):
the_class = obj
if issubclass(the_class, Index):
handle_Index(the_class)
if issubclass(the_class, VectorTransform):
handle_VectorTransform(the_class)
if issubclass(the_class, AutoTuneCriterion):
handle_AutoTuneCriterion(the_class)
if issubclass(the_class, ParameterSpace):
handle_ParameterSpace(the_class)
def index_cpu_to_gpu_multiple_py(resources, index, co=None):
"""builds the C++ vectors for the GPU indices and the
resources. Handles the common case where the resources are assigned to
the first len(resources) GPUs"""
vres = GpuResourcesVector()
vdev = IntVector()
for i, res in enumerate(resources):
vdev.push_back(i)
vres.push_back(res)
return index_cpu_to_gpu_multiple(vres, vdev, index, co)
def index_cpu_to_all_gpus(index, co=None, ngpu=-1):
if ngpu == -1:
ngpu = get_num_gpus()
res = [StandardGpuResources() for i in range(ngpu)]
index2 = index_cpu_to_gpu_multiple_py(res, index, co)
index2.dont_dealloc = res
return index2
# mapping from vector names in swigfaiss.swig and the numpy dtype names
vector_name_map = {
'Float': 'float32',
'Byte': 'uint8',
'Uint64': 'uint64',
'Long': 'int64',
'Int': 'int32',
'Double': 'float64'
}
def vector_to_array(v):
""" convert a C++ vector to a numpy array """
classname = v.__class__.__name__
assert classname.endswith('Vector')
dtype = np.dtype(vector_name_map[classname[:-6]])
a = np.empty(v.size(), dtype=dtype)
memcpy(swig_ptr(a), v.data(), a.nbytes)
return a
def vector_float_to_array(v):
return vector_to_array(v)
def copy_array_to_vector(a, v):
""" copy a numpy array to a vector """
n, = a.shape
classname = v.__class__.__name__
assert classname.endswith('Vector')
dtype = np.dtype(vector_name_map[classname[:-6]])
assert dtype == a.dtype, (
'cannot copy a %s array to a %s (should be %s)' % (
a.dtype, classname, dtype))
v.resize(n)
memcpy(v.data(), swig_ptr(a), a.nbytes)
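def _example_vector_roundtrip(n=10):
    # Illustrative sketch (not part of the original bindings): copy a numpy
    # array into a faiss FloatVector and read it back unchanged.
    a = np.arange(n, dtype='float32')
    v = FloatVector()
    copy_array_to_vector(a, v)
    b = vector_to_array(v)
    assert np.array_equal(a, b)
    return b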
class Kmeans:
def __init__(self, d, k, niter=25, verbose=False, spherical = False):
self.d = d
self.k = k
self.cp = ClusteringParameters()
self.cp.niter = niter
self.cp.verbose = verbose
self.cp.spherical = spherical
self.centroids = None
def train(self, x):
assert x.flags.contiguous
n, d = x.shape
assert d == self.d
clus = Clustering(d, self.k, self.cp)
if self.cp.spherical:
self.index = IndexFlatIP(d)
else:
self.index = IndexFlatL2(d)
clus.train(x, self.index)
centroids = vector_float_to_array(clus.centroids)
self.centroids = centroids.reshape(self.k, d)
self.obj = vector_float_to_array(clus.obj)
return self.obj[-1]
def assign(self, x):
assert self.centroids is not None, "should train before assigning"
index = IndexFlatL2(self.d)
index.add(self.centroids)
D, I = index.search(x, 1)
return D.ravel(), I.ravel()
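def _example_kmeans(d=64, k=16, n=1000):
    # Illustrative sketch (not part of the original bindings): train the Kmeans
    # wrapper on random float32 vectors and assign each vector to its centroid.
    x = np.random.rand(n, d).astype('float32')
    km = Kmeans(d, k, niter=20)
    km.train(x)
    D, I = km.assign(x)  # squared L2 distance and centroid id per vector
    return km.centroids, D, I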
def kmin(array, k):
"""return k smallest values (and their indices) of the lines of a
float32 array"""
m, n = array.shape
I = np.zeros((m, k), dtype='int64')
D = np.zeros((m, k), dtype='float32')
ha = float_maxheap_array_t()
ha.ids = swig_ptr(I)
ha.val = swig_ptr(D)
ha.nh = m
ha.k = k
ha.heapify()
ha.addn(n, swig_ptr(array))
ha.reorder()
return D, I
def kmax(array, k):
"""return k largest values (and their indices) of the lines of a
float32 array"""
m, n = array.shape
I = np.zeros((m, k), dtype='int64')
D = np.zeros((m, k), dtype='float32')
ha = float_minheap_array_t()
ha.ids = swig_ptr(I)
ha.val = swig_ptr(D)
ha.nh = m
ha.k = k
ha.heapify()
ha.addn(n, swig_ptr(array))
ha.reorder()
return D, I
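def _example_kmin_kmax(m=5, n=100, k=3):
    # Illustrative sketch (not part of the original bindings): per-row k smallest
    # and k largest values (with indices) of a float32 matrix, reusing the
    # module's own rand() helper defined below.
    x = rand(m * n).reshape(m, n)
    D_small, I_small = kmin(x, k)
    D_large, I_large = kmax(x, k)
    return D_small, I_small, D_large, I_large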
def rand(n, seed=12345):
res = np.empty(n, dtype='float32')
float_rand(swig_ptr(res), n, seed)
return res
def lrand(n, seed=12345):
res = np.empty(n, dtype='int64')
long_rand(swig_ptr(res), n, seed)
return res
def randn(n, seed=12345):
res = np.empty(n, dtype='float32')
float_randn(swig_ptr(res), n, seed)
return res
def eval_intersection(I1, I2):
""" size of intersection between each line of two result tables"""
n = I1.shape[0]
assert I2.shape[0] == n
k1, k2 = I1.shape[1], I2.shape[1]
ninter = 0
for i in range(n):
ninter += ranklist_intersection_size(
k1, swig_ptr(I1[i]), k2, swig_ptr(I2[i]))
return ninter
def normalize_L2(x):
fvec_renorm_L2(x.shape[1], x.shape[0], swig_ptr(x))
def replacement_map_add(self, keys, vals):
n, = keys.shape
    assert (n,) == vals.shape
self.add_c(n, swig_ptr(keys), swig_ptr(vals))
def replacement_map_search_multiple(self, keys):
n, = keys.shape
vals = np.empty(n, dtype='int64')
self.search_multiple_c(n, swig_ptr(keys), swig_ptr(vals))
return vals
replace_method(MapLong2Long, 'add', replacement_map_add)
replace_method(MapLong2Long, 'search_multiple', replacement_map_search_multiple)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Robert Cowham, Perforce Software Ltd
# ========================================
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PERFORCE
# SOFTWARE, INC. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
NAME:
GitP4Transfer.py
DESCRIPTION:
This python script (2.7/3.6+ compatible) will transfer Git changes into a Perforce
Helix Core Repository, somewhat similar to 'git p4' (not historical) and also GitFusion (now deprecated).
This script transfers changes in one direction - from a source Git server to a target p4 server.
It handles LFS files in the source server (assuming git LFS is suitably installed and enabled)
Requires Git version 2.7+ due to use of formatting flags
Usage:
python3 GitP4Transfer.py -h
The script requires a config file, by default transfer.yaml.
An initial example can be generated, e.g.
GitP4Transfer.py --sample-config > transfer.yaml
For full documentation/usage, see project doc:
https://github.com/rcowham/gitp4transfer/blob/main/doc/GitP4Transfer.adoc
"""
# Notes:
# Scan all commits for diffs
# Scan all commits for other key info
# Find start commit
# Process in reverse order
from __future__ import print_function, division
from os import error
import sys
import re
import subprocess
import stat
import pprint
from string import Template
import argparse
import textwrap
import os
from datetime import datetime
import logging
import time
import platform
import collections
# Non-standard modules
import P4
import logutils
import pytz
# Import yaml which will roundtrip comments
from ruamel.yaml import YAML
yaml = YAML()
subproc = subprocess # Could have a wrapper for use on Windows
VERSION = """$Id: 74939df934a7a660e6beff62870f65635918300b $"""
ANON_BRANCH_PREFIX = "_anon"
if bytes is not str:
# For python3, always encode and decode as appropriate
def decode_text_stream(s):
return s.decode() if isinstance(s, bytes) else s
else:
# For python2.7, pass read strings as-is
def decode_text_stream(s):
return s
def anonymousBranch(branch):
return branch.startswith(ANON_BRANCH_PREFIX)
def logrepr(self):
return pprint.pformat(self.__dict__, width=240)
alreadyLogged = {}
# Log messages just once per run
def logOnce(logger, *args):
global alreadyLogged
msg = ", ".join([str(x) for x in args])
if msg not in alreadyLogged:
alreadyLogged[msg] = 1
logger.debug(msg)
#
# P4 wildcards are not allowed in filenames. P4 complains
# if you simply add them, but you can force it with "-f", in
# which case it translates them into %xx encoding internally.
#
def wildcard_decode(path):
# Search for and fix just these four characters. Do % last so
# that fixing it does not inadvertently create new %-escapes.
# Cannot have * in a filename in windows; untested as to
# what p4 would do in such a case.
if not platform.system() == "Windows":
path = path.replace("%2A", "*")
path = path.replace("%23", "#") \
.replace("%40", "@") \
.replace("%25", "%")
return path
def wildcard_encode(path):
# do % first to avoid double-encoding the %s introduced here
path = path.replace("%", "%25") \
.replace("*", "%2A") \
.replace("#", "%23") \
.replace("@", "%40")
return path
def wildcard_present(path):
m = re.search("[*#@%]", path)
return m is not None
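def _example_wildcard_round_trip():
    # Illustrative sketch (not part of the original script): encode a path
    # containing P4 wildcard characters, then decode it again. On non-Windows
    # platforms the round trip returns the original path.
    encoded = wildcard_encode("dir/file@with#special%chars")
    # encoded == "dir/file%40with%23special%25chars"
    return wildcard_decode(encoded)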
def isModeExec(mode):
# Returns True if the given git mode represents an executable file,
# otherwise False.
return mode[-3:] == "755"
def isModeExecChanged(src_mode, dst_mode):
return isModeExec(src_mode) != isModeExec(dst_mode)
_diff_tree_pattern = None
def parseDiffTreeEntry(entry):
"""Parses a single diff tree entry into its component elements.
See git-diff-tree(1) manpage for details about the format of the diff
output. This method returns a dictionary with the following elements:
src_mode - The mode of the source file
dst_mode - The mode of the destination file
src_sha1 - The sha1 for the source file
    dst_sha1 - The sha1 for the destination file
status - The one letter status of the diff (i.e. 'A', 'M', 'D', etc)
status_score - The score for the status (applicable for 'C' and 'R'
statuses). This is None if there is no score.
src - The path for the source file.
dst - The path for the destination file. This is only present for
copy or renames. If it is not present, this is None.
If the pattern is not matched, None is returned."""
global _diff_tree_pattern
if not _diff_tree_pattern:
        _diff_tree_pattern = re.compile(r':(\d+) (\d+) (\w+) (\w+) ([A-Z])(\d+)?\t(.*?)((\t(.*))|$)')
match = _diff_tree_pattern.match(entry)
if match:
return {
'src_mode': match.group(1),
'dst_mode': match.group(2),
'src_sha1': match.group(3),
'dst_sha1': match.group(4),
'status': match.group(5),
'status_score': match.group(6),
'src': PathQuoting.dequote(match.group(7)),
'dst': PathQuoting.dequote(match.group(10))
}
return None
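def _example_parse_diff_tree_entry():
    # Illustrative sketch (not part of the original script): parse a rename entry
    # as produced by `git diff-tree -M --raw`; the SHA1 values are shortened
    # placeholders.
    entry = ":100644 100644 a1b2c3d e4f5a6b R100\told_name.py\tnew_name.py"
    parsed = parseDiffTreeEntry(entry)
    # parsed["status"] == "R", parsed["status_score"] == "100",
    # parsed["src"] == "old_name.py", parsed["dst"] == "new_name.py"
    return parsed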
P4.Revision.__repr__ = logrepr
P4.Integration.__repr__ = logrepr
P4.DepotFile.__repr__ = logrepr
python3 = sys.version_info[0] >= 3
if sys.hexversion < 0x02070000 or (0x03000000 <= sys.hexversion < 0x03030000):
sys.exit("Python 2.7 or 3.3 or newer is required to run this program.")
reFetchMoveError = re.compile("Files are missing as a result of one or more move operations")
# Although this should work with Python 3, it doesn't currently handle Windows Perforce servers
# with filenames containing characters such as umlauts etc: åäö
class P4TException(Exception):
pass
class P4TLogicException(P4TException):
pass
class P4TConfigException(P4TException):
pass
CONFIG_FILE = 'transfer.yaml'
GENERAL_SECTION = 'general'
SOURCE_SECTION = 'source'
TARGET_SECTION = 'target'
LOGGER_NAME = "GitP4Transfer"
CHANGE_MAP_DESC = "Updated change_map_file"
# This is for writing to sample config file
yaml.preserve_quotes = True
DEFAULT_CONFIG = yaml.load(r"""
# counter_name: Unique counter on target server to use for recording source changes processed. No spaces.
# Name sensibly if you have multiple instances transferring into the same target p4 repository.
# The counter value represents the last transferred change number - script will start from next change.
# If not set, or 0 then transfer will start from first change.
counter_name: GitP4Transfer_counter
# instance_name: Name of the instance of GitP4Transfer - for emails etc. Spaces allowed.
instance_name: "Git LFS Transfer from XYZ"
# For notification - if smtp not available - expects a pre-configured nms FormMail script as a URL
# E.g. expects to post using 2 fields: subject, message
# Alternatively, use the following entries (suitable adjusted) to use Mailgun for notifications
# api: "<Mailgun API key"
# url: "https://api.mailgun.net/v3/<domain or sandbox>"
# mail_from: "Fred <fred@example.com>"
# mail_to:
# - "fred@example.com"
mail_form_url:
# The mail_* parameters must all be valid (non-blank) to receive email updates during processing.
# mail_to: One or more valid email addresses - comma separated for multiple values
# E.g. somebody@example.com,somebody-else@example.com
mail_to:
# mail_from: Email address of sender of emails, E.g. p4transfer@example.com
mail_from:
# mail_server: The SMTP server to connect to for email sending, E.g. smtpserver.example.com
mail_server:
# ===============================================================================
# Note that for any of the following parameters identified as (Integer) you can specify a
# valid python expression which evaluates to integer value, e.g.
# "24 * 60"
# "7 * 24 * 60"
# Such values should be quoted (in order to be treated as strings)
# -------------------------------------------------------------------------------
# sleep_on_error_interval (Integer): How long (in minutes) to sleep when error is encountered in the script
sleep_on_error_interval: 60
# poll_interval (Integer): How long (in minutes) to wait between polling source server for new changes
poll_interval: 60
# change_batch_size (Integer): changelists are processed in batches of this size
change_batch_size: 1000
# The following *_interval values result in reports, but only if mail_* values are specified
# report_interval (Integer): Interval (in minutes) between regular update emails being sent
report_interval: 30
# error_report_interval (Integer): Interval (in minutes) between error emails being sent e.g. connection error
# Usually some value less than report_interval. Useful if transfer being run with --repeat option.
error_report_interval: 15
# summary_report_interval (Integer): Interval (in minutes) between summary emails being sent e.g. changes processed
# Typically some value such as 1 week (10080 = 7 * 24 * 60). Useful if transfer being run with --repeat option.
summary_report_interval: "7 * 24 * 60"
# max_logfile_size (Integer): Max size of file to (in bytes) after which it should be rotated
# Typically some value such as 20MB = 20 * 1024 * 1024. Useful if transfer being run with --repeat option.
max_logfile_size: "20 * 1024 * 1024"
# change_description_format: The standard format for transferred changes.
# Keywords prefixed with $. Use \\n for newlines. Keywords allowed:
# $sourceDescription, $sourceChange, $sourceRepo, $sourceUser
change_description_format: "$sourceDescription\\n\\nTransferred from git://$sourceRepo@$sourceChange"
# superuser: Set to n if not a superuser (so can't update change times - can just transfer them).
superuser: "y"
source:
# git_repo: root directory for git repo
# This will be used to update the client workspace Root: field for target workspace
git_repo:
# ws_root: specific directory to use for workspace root, for example if you want to map the git repo to a subdirectory
# This will replace the git_repo value as the root. This should be a parent path of git_repo
# ws_root:
target:
# P4PORT to connect to, e.g. some-server:1666 - if this is on localhost and you just
# want to specify port number, then use quotes: "1666"
p4port:
# P4USER to use
p4user:
# P4CLIENT to use, e.g. p4-transfer-client
p4client:
# P4PASSWD for the user - valid password. If blank then no login performed.
# Recommended to make sure user is in a group with a long password timeout!
# Make sure your P4TICKETS file is correctly found in the environment
p4passwd:
# P4CHARSET to use, e.g. none, utf8, etc - leave blank for non-unicode p4d instance
p4charset:
# branch_maps: An array of git branches to migrate and where to.
# Note that other branches encountered will be given temp names under anon_branches_root
# Entries specify 'git_branch' and 'targ'. No wildcards.
branch_maps:
- git_branch: "master"
targ: "//git_import/master"
# user_map: A dictionary of Git users to map to Perforce usernames
# This will be modified using p4 change -f and requires superuser
user_map:
A User: "auser"
First McLastname: "fmclastname"
# import_anon_branches: Set this to 'y' to import anonymous branches - NOT YET FUNCTIONAL!!!
# Any other value means they will not be imported.
import_anon_branches: n
# anon_branches_root: A depot path used for anonymous git branches (names automatically generated).
# NOT YET FUNCTIONAL
# Such branches only contain files modified on git branch.
# Name of branch under this root is _anonNNNN with a unique ID.
# If this field is empty, then no anonymous branches will be created/imported.
#anon_branches_root: //git_import/temp_branches
anon_branches_root:
""")
def ensureDirectory(directory):
if not os.path.isdir(directory):
os.makedirs(directory)
def makeWritable(fpath):
"Make file writable"
os.chmod(fpath, stat.S_IWRITE + stat.S_IREAD)
def p4time(unixtime):
"Convert time to Perforce format time"
return time.strftime("%Y/%m/%d:%H:%M:%S", time.localtime(unixtime))
def printSampleConfig():
"Print defaults from above dictionary for saving as a base file"
print("")
print("# Save this output to a file to e.g. transfer.yaml and edit it for your configuration")
print("")
yaml.dump(DEFAULT_CONFIG, sys.stdout)
sys.stdout.flush()
def fmtsize(num):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
class PathQuoting:
"""From git-filter-repo.py - great for python2 - but needs conversion to bytes for python3"""
_unescape = {b'a': b'\a',
b'b': b'\b',
b'f': b'\f',
b'n': b'\n',
b'r': b'\r',
b't': b'\t',
b'v': b'\v',
b'"': b'"',
b'\\':b'\\'}
_unescape_re = re.compile(br'\\([a-z"\\]|[0-9]{3})')
_escape = [bytes([x]) for x in range(127)]+[
b'\\'+bytes(ord(c) for c in oct(x)[2:]) for x in range(127,256)]
_reverse = dict(map(reversed, _unescape.items()))
for x in _reverse:
_escape[ord(x)] = b'\\'+_reverse[x]
_special_chars = [len(x) > 1 for x in _escape]
@staticmethod
def unescape_sequence(orig):
seq = orig.group(1)
return PathQuoting._unescape[seq] if len(seq) == 1 else bytes([int(seq, 8)])
@staticmethod
def dequote(quoted_string):
if quoted_string and quoted_string.startswith('"'):
assert quoted_string.endswith('"')
# Python3 - convert to bytes for magic above
quoted_string = quoted_string.encode()
result = PathQuoting._unescape_re.sub(PathQuoting.unescape_sequence,
quoted_string[1:-1])
return result.decode()
return quoted_string
class GitFileChanges():
"Convenience class for file changes as part of a git commit"
def __init__(self, modes, shas, changeTypes, filenames) -> None:
self.modes = modes
self.shas = shas
self.changeTypes = changeTypes
self.filenames = filenames
class GitCommit():
"Convenience class for a git commit"
def __init__(self, commitID, name, email, description, date) -> None:
self.commitID = commitID
self.name = name
self.email = email
self.description = description
self.date = date
self.parents = []
self.fileChanges = []
self.branch = None
self.parentBranch = None
self.firstOnBranch = False
class GitInfo:
"Extract info about Git repo"
def __init__(self, logger) -> None:
self.logger = logger
self.anonBranchInd = 0
def read_pipe_lines(self, c):
self.logger.debug('Reading pipe: %s\n' % str(c))
expand = not isinstance(c, list)
p = subprocess.Popen(c, stdout=subprocess.PIPE, shell=expand)
pipe = p.stdout
val = [decode_text_stream(line) for line in pipe.readlines()]
if pipe.close() or p.wait():
raise Exception('Command failed: %s' % str(c))
return val
def getCommitDiffs(self, refs):
"Return array of commits in reverse order for processing, together with dict of commits"
# Setup the rev-list/diff-tree process and read info about file diffs
# Learned from git-filter-repo
cmd = ('git rev-list --first-parent --reverse {}'.format(' '.join(refs)) +
' | git diff-tree --stdin --always --root --format=%H%n%P%n%cn%n%ce%n%B%n"__END_OF_DESC__"%n%cd' +
' --date=iso-local -M -t -c --raw --combined-all-paths')
if self.logger:
self.logger.debug(cmd)
dtp = subproc.Popen(cmd, shell=True, bufsize=-1, stdout=subprocess.PIPE)
f = dtp.stdout
commitList = []
commits = {}
line = decode_text_stream(f.readline())
if not line:
return commitList, commits
cont = bool(line)
while cont:
commitID = decode_text_stream(line).rstrip()
parents = decode_text_stream(f.readline()).split()
name = decode_text_stream(f.readline()).rstrip()
email = decode_text_stream(f.readline()).rstrip()
desc = []
in_desc = True
while in_desc:
line = decode_text_stream(f.readline()).rstrip()
if line.startswith('__END_OF_DESC__'):
in_desc = False
elif line:
desc.append(line)
date = decode_text_stream(f.readline()).rstrip()
# We expect a blank line next; if we get a non-blank line then
# this commit modified no files and we need to move on to the next.
# If there is no line, we've reached end-of-input.
line = decode_text_stream(f.readline())
if not line:
cont = False
line = line.rstrip()
# If we haven't reached end of input, and we got a blank line meaning
# a commit that has modified files, then get the file changes associated
# with this commit.
fileChanges = []
if cont and not line:
cont = False
for line in f:
line = decode_text_stream(line)
if not line.startswith(':'):
cont = True
break
n = 1 + max(1, len(parents))
assert line.startswith(':'*(n-1))
relevant = line[n-1:-1]
splits = relevant.split(None, n)
modes = splits[0:n]
splits = splits[n].split(None, n)
shas = splits[0:n]
splits = splits[n].split('\t')
change_types = splits[0]
filenames = [PathQuoting.dequote(x) for x in splits[1:]]
fileChanges.append(GitFileChanges(modes, shas, change_types, filenames))
commits[commitID] = GitCommit(commitID, name, email, '\n'.join(desc), date)
commits[commitID].parents = parents
commits[commitID].fileChanges = fileChanges
commitList.append(commitID)
# Close the output, ensure rev-list|diff-tree pipeline completed successfully
dtp.stdout.close()
if dtp.wait():
raise SystemExit(("Error: rev-list|diff-tree pipeline failed; see above.")) # pragma: no cover
return commitList, commits
def getFileChanges(self, commit):
"Return file changes for a commit which is a merge - thus itself against its first parent"
cmd = ('git diff-tree -r {} {}'.format(commit.commitID, commit.parents[0]))
if self.logger:
self.logger.debug(cmd)
dtp = subproc.Popen(cmd, shell=True, bufsize=-1, stdout=subprocess.PIPE)
f = dtp.stdout
fileChanges = []
for line in f:
line = decode_text_stream(line)
if not line.startswith(':'):
continue
n = 2 # 1 + max(1, len(commit.parents))
assert line.startswith(':'*(n-1))
relevant = line[n-1:-1]
splits = relevant.split(None, n)
modes = splits[0:n]
splits = splits[n].split(None, n)
shas = splits[0:n]
splits = splits[n].split('\t')
change_types = splits[0]
filenames = [PathQuoting.dequote(x) for x in splits[1:]]
fileChanges.append(GitFileChanges(modes, shas, change_types, filenames))
dtp.stdout.close()
if dtp.wait():
raise SystemExit(("Error: {} failed; see above.".format(cmd))) # pragma: no cover
return fileChanges
def getBranchCommits(self, branchRefs):
"Returns a list of commit ids on the referenced branches"
branchCommits = {}
for b in branchRefs:
branchCommits[b] = []
cmd = ('git rev-list --first-parent {}'.format(b))
if self.logger:
self.logger.debug(cmd)
dtp = subproc.Popen(cmd, shell=True, bufsize=-1, stdout=subprocess.PIPE)
f = dtp.stdout
line = decode_text_stream(f.readline())
if not line:
return
cont = bool(line)
while cont:
commit = decode_text_stream(line).rstrip()
branchCommits[b].append(commit)
line = decode_text_stream(f.readline())
if not line:
break
dtp.stdout.close()
if dtp.wait():
raise SystemExit("Error: {} failed; see above.".format(cmd)) # pragma: no cover
return branchCommits
# def updateBranchInfo(self, branchRefs, commitList, commits):
# "Updates the branch details for every commit"
# branchCommits = self.getBranchCommits(branchRefs)
# for b in branchRefs:
# for id in branchCommits[b]:
# if not id in commitList:
# raise P4TException("Failed to find commit: %s" % id)
# commits[id].branch = b
# # Now update anonymous branches - in commit order (so parents first)
# for id in commitList:
# if not commits[id].branch:
# firstParent = commits[id].parents[0]
# assert(commits[firstParent].branch is not None)
# if not commits[firstParent].branch.startswith(ANON_BRANCH_PREFIX):
# self.anonBranchInd += 1
# anonBranch = "%s%04d" % (ANON_BRANCH_PREFIX, self.anonBranchInd)
# commits[id].branch = anonBranch
# commits[id].parentBranch = commits[firstParent].branch
# commits[id].firstOnBranch = True
# else:
# commits[id].branch = commits[firstParent].branch
class ChangeRevision:
"Represents a change - created from P4API supplied information and thus encoding"
def __init__(self, rev, change, n):
self.rev = rev
self.action = change['action'][n]
self.type = change['type'][n]
self.depotFile = change['depotFile'][n]
self.localFile = None
self.fileSize = 0
self.digest = ""
self.fixedLocalFile = None
def depotFileRev(self):
"Fully specify depot file with rev number"
return "%s#%s" % (self.depotFile, self.rev)
def localFileRev(self):
"Fully specify local file with rev number"
return "%s#%s" % (self.localFile, self.rev)
def setLocalFile(self, localFile):
self.localFile = localFile
localFile = localFile.replace("%40", "@")
localFile = localFile.replace("%23", "#")
localFile = localFile.replace("%2A", "*")
localFile = localFile.replace("%25", "%")
localFile = localFile.replace("/", os.sep)
self.fixedLocalFile = localFile
def __repr__(self):
return 'rev={rev} action={action} type={type} size={size} digest={digest} depotFile={depotfile}' .format(
rev=self.rev,
action=self.action,
type=self.type,
size=self.fileSize,
digest=self.digest,
depotfile=self.depotFile,
)
class P4Base(object):
"Processes a config"
section = None
P4PORT = None
P4CLIENT = None
P4CHARSET = None
P4USER = None
P4PASSWD = None
counter = 0
clientLogged = 0
def __init__(self, section, options, p4id):
self.section = section
self.options = options
self.logger = logging.getLogger(LOGGER_NAME)
self.p4id = p4id
self.p4 = None
self.client_logged = 0
def __str__(self):
return '[section = {} P4PORT = {} P4CLIENT = {} P4USER = {} P4PASSWD = {} P4CHARSET = {}]'.format(
self.section,
self.P4PORT,
self.P4CLIENT,
self.P4USER,
self.P4PASSWD,
self.P4CHARSET,
)
def connect(self, progname):
self.p4 = P4.P4()
self.p4.port = self.P4PORT
self.p4.client = self.P4CLIENT
self.p4.user = self.P4USER
self.p4.prog = progname
self.p4.exception_level = P4.P4.RAISE_ERROR
self.p4.connect()
if self.P4CHARSET is not None:
self.p4.charset = self.P4CHARSET
if self.P4PASSWD is not None:
self.p4.password = self.P4PASSWD
self.p4.run_login()
def p4cmd(self, *args, **kwargs):
"Execute p4 cmd while logging arguments and results"
self.logger.debug(self.p4id, args)
output = self.p4.run(args, **kwargs)
self.logger.debug(self.p4id, output)
self.checkWarnings()
return output
def disconnect(self):
if self.p4:
self.p4.disconnect()
def checkWarnings(self):
if self.p4 and self.p4.warnings:
self.logger.warning('warning result: {}'.format(str(self.p4.warnings)))
# def resetWorkspace(self):
# self.p4cmd('sync', '//%s/...#none' % self.p4.P4CLIENT)
def createClientWorkspace(self):
"""Create or adjust client workspace for target
"""
clientspec = self.p4.fetch_client(self.p4.client)
logOnce(self.logger, "orig %s:%s:%s" % (self.p4id, self.p4.client, pprint.pformat(clientspec)))
self.root = self.source.ws_root or self.source.git_repo
clientspec._root = self.root
clientspec["Options"] = clientspec["Options"].replace("normdir", "rmdir")
clientspec["Options"] = clientspec["Options"].replace("noallwrite", "allwrite")
clientspec["LineEnd"] = "unix"
clientView = []
v = self.options.branch_maps[0] # Start with first one - assume to be equivalent of master
line = "%s/... //%s/..." % (v['targ'], self.p4.client)
clientView.append(line)
for exclude in [self.source.getRelativeGitPath(trailing_slash=True) + '.git/...']:
line = "-%s/%s //%s/%s" % (v['targ'], exclude, self.p4.client, exclude)
clientView.append(line)
clientspec._view = clientView
self.clientmap = P4.Map(clientView)
self.clientspec = clientspec
self.p4.save_client(clientspec)
logOnce(self.logger, "updated %s:%s:%s" % (self.p4id, self.p4.client, pprint.pformat(clientspec)))
self.p4.cwd = self.source.git_repo
ctr = P4.Map('//"'+clientspec._client+'/..." "' + clientspec._root + '/..."')
self.localmap = P4.Map.join(self.clientmap, ctr)
self.depotmap = self.localmap.reverse()
def updateClientWorkspace(self, branch):
""" Adjust client workspace for new branch"""
clientspec = self.p4.fetch_client(self.p4.client)
logOnce(self.logger, "orig %s:%s:%s" % (self.p4id, self.p4.client, pprint.pformat(clientspec)))
clientView = []
if anonymousBranch(branch):
targ = "%s/%s" % (self.options.anon_branches_root, branch)
else:
for v in self.options.branch_maps:
if v['git_branch'] == branch:
targ = v['targ']
break
line = "%s/... //%s/..." % (targ, self.p4.client)
clientView.append(line)
for exclude in [self.source.getRelativeGitPath(trailing_slash=True) + '.git/...']:
line = "-%s/%s //%s/%s" % (targ, exclude, self.p4.client, exclude)
clientView.append(line)
clientspec._view = clientView
self.clientmap = P4.Map(clientView)
self.clientspec = clientspec
self.p4.save_client(clientspec)
logOnce(self.logger, "updated %s:%s:%s" % (self.p4id, self.p4.client, pprint.pformat(clientspec)))
self.logger.debug("Updated client view for branch: %s" % branch)
ctr = P4.Map('//"'+clientspec._client+'/..." "' + clientspec._root + '/..."')
self.localmap = P4.Map.join(self.clientmap, ctr)
self.depotmap = self.localmap.reverse()
def getBranchMap(self, origBranch, newBranch):
"""Create a mapping between original and new branches"""
src = ""
targ = ""
if anonymousBranch(origBranch):
src = "%s/%s" % (self.options.anon_branches_root, origBranch)
else:
for v in self.options.branch_maps:
if v['git_branch'] == origBranch:
src = v['targ']
if anonymousBranch(newBranch):
targ = "%s/%s" % (self.options.anon_branches_root, newBranch)
else:
for v in self.options.branch_maps:
if v['git_branch'] == newBranch:
targ = v['targ']
line = "%s/... %s/..." % (src, targ)
self.logger.debug("Map: %s" % line)
branchMap = P4.Map(line)
return branchMap
tempBranch = "p4_exportBranch"
class GitSource(P4Base):
"Functionality for reading from source Perforce repository"
def __init__(self, section, options):
super(GitSource, self).__init__(section, options, 'src')
self.gitinfo = GitInfo(self.logger)
def run_cmd(self, cmd, dir=".", get_output=True, timeout=2*60*60, stop_on_error=True):
"Run cmd logging input and output"
output = ""
try:
self.logger.debug("Running: %s" % cmd)
if get_output:
p = subprocess.Popen(cmd, cwd=dir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, shell=True)
if python3:
output, _ = p.communicate(timeout=timeout)
else:
output, _ = p.communicate()
# rc = p.returncode
self.logger.debug("Output:\n%s" % output)
else:
result = subprocess.run(cmd, shell=True, check=True, capture_output=True)
self.logger.debug('Result: %s' % str(result))
except subprocess.CalledProcessError as e:
self.logger.debug("Output: %s" % e.output)
if stop_on_error:
msg = 'Failed run_cmd: %d %s' % (e.returncode, str(e))
self.logger.debug(msg)
raise e
except Exception as e:
self.logger.debug("Output: %s" % output)
if stop_on_error:
msg = 'Failed run_cmd: %s' % str(e)
self.logger.debug(msg)
raise e
return output
def missingCommits(self, counter):
# self.gather_commits()
branchRefs = [t['git_branch'] for t in self.options.branch_maps]
self.gitinfo = GitInfo(self.logger)
commitList, commits = self.gitinfo.getCommitDiffs(branchRefs)
try:
ind = commitList.index(counter)
commitList = commitList[ind+1:]
except ValueError:
pass
# self.gitinfo.updateBranchInfo(branchRefs, commitList, commits)
self.logger.debug("commits: %s" % ' '.join(commitList))
maxChanges = 0
if self.options.change_batch_size:
maxChanges = self.options.change_batch_size
if self.options.maximum and self.options.maximum < maxChanges:
maxChanges = self.options.maximum
if maxChanges > 0:
commitList = commitList[:maxChanges]
self.logger.debug('processing %d commits' % len(commitList))
self.commitList = commitList
self.commits = commits
return commitList, commits
def fileModified(self, filename):
"Returns true if git thinks file has changed on disk"
args = ['git', 'status', '-z', filename]
result = self.run_cmd(' '.join(args))
return len(result) > 0
def checkoutCommit(self, commitID):
"""Expects change number as a string, and returns list of filerevs"""
args = ['git', 'switch', '-C', tempBranch, commitID]
self.run_cmd(' '.join(args), get_output=False)
def getRelativeGitPath(self, trailing_slash=False):
# Determine relative path if the destination is in a subdirectory
relative_path = ''
if self.ws_root:
relative_path = os.path.relpath(self.git_repo, self.ws_root)
if trailing_slash:
relative_path += os.sep
return relative_path
class P4Target(P4Base):
"Functionality for transferring changes to target Perforce repository"
def __init__(self, section, options, source):
super(P4Target, self).__init__(section, options, 'targ')
self.source = source
self.filesToIgnore = []
self.currentBranch = ""
def formatChangeDescription(self, **kwargs):
"""Format using specified format options - see call in replicateCommit"""
format = self.options.change_description_format
format = format.replace("\\n", "\n")
t = Template(format)
result = t.safe_substitute(**kwargs)
return result
def ignoreFile(self, fname):
"Returns True if file is to be ignored"
if not self.options.re_ignore_files:
return False
for exp in self.options.re_ignore_files:
if exp.search(fname):
return True
return False
def p4_integrate(self, src, dest):
self.p4cmd("integrate", "-Dt", wildcard_encode(src), wildcard_encode(dest))
# def p4_sync(f, *options):
# p4_system(["sync"] + list(options) + [wildcard_encode(f)])
def p4_add(self, f):
# forcibly add file names with wildcards
if wildcard_present(f):
self.p4cmd("add", "-f", f)
else:
self.p4cmd("add", f)
def p4_delete(self, f):
self.p4cmd("delete", wildcard_encode(f))
def p4_edit(self, f, *options):
self.p4cmd("edit", options, wildcard_encode(f))
def p4_revert(self, f):
self.p4cmd("revert", wildcard_encode(f))
def p4_reopen(self, type, f):
self.p4cmd("reopen", "-t", type, wildcard_encode(f))
# def p4_reopen_in_change(changelist, files):
# cmd = ["reopen", "-c", str(changelist)] + files
# p4_system(cmd)
def p4_move(self, src, dest):
self.p4cmd("move", "-k", wildcard_encode(src), wildcard_encode(dest))
def replicateCommit(self, commit):
"""This is the heart of it all. Replicate a single commit/change"""
self.filesToIgnore = []
# Branch processing currently removed for now.
# if self.currentBranch == "":
# self.currentBranch = commit.branch
# if self.currentBranch != commit.branch:
# self.updateClientWorkspace(commit.branch)
# if commit.firstOnBranch:
# self.p4cmd('sync', '-k')
# fileChanges = commit.fileChanges
# if len(commit.parents) > 1:
# # merge commit
# parentBranch = self.source.commits[commit.parents[1]].branch
# branchMap = self.getBranchMap(parentBranch, commit.branch)
# else:
# branchMap = self.getBranchMap(commit.parentBranch, commit.branch)
# if len(fileChanges) == 0:
# # Do a git diff-tree to make sure we detect files changed on the target branch.
# fileChanges = self.source.gitinfo.getFileChanges(commit)
# for fc in fileChanges:
# self.logger.debug("fileChange: %s %s" % (fc.changeTypes, fc.filenames[0]))
# if fc.changeTypes == 'A':
# self.p4cmd('rec', '-af', fc.filenames[0])
# elif fc.changeTypes == 'M' or fc.changeTypes == 'MM':
# # Translate target depot to source via client map and branch map
# depotFile = self.depotmap.translate(os.path.join(self.source.git_repo, fc.filenames[0]))
# src = branchMap.translate(depotFile, 0)
# self.p4cmd('sync', '-k', fc.filenames[0])
# self.p4cmd('integrate', src, fc.filenames[0])
# self.p4cmd('resolve', '-at')
# # After whatever p4 has done to the file contents we ensure it is as per git
# if self.source.fileModified(fc.filenames[0]):
# self.p4cmd('edit', fc.filenames[0])
# args = ['git', 'restore', fc.filenames[0]]
# self.source.run_cmd(' '.join(args))
# elif fc.changeTypes == 'D':
# self.p4cmd('rec', '-d', fc.filenames[0])
# else: # Better safe than sorry! Various known actions not yet implemented
# raise P4TLogicException('Action not yet implemented: %s', fc.changeTypes)
# self.currentBranch = commit.branch
# else:
self.p4cmd('sync', '-k')
fileChanges = commit.fileChanges
if not fileChanges or (0 < len([f for f in fileChanges if f.changeTypes == 'MM'])):
# Do a git diff-tree to make sure we detect files changed on the target branch rather than just dirs
fileChanges = self.source.gitinfo.getFileChanges(commit)
if not commit.parents:
for fc in fileChanges:
self.logger.debug("fileChange: %s %s" % (fc.changeTypes, fc.filenames[0]))
if fc.filenames[0]:
filename = PathQuoting.dequote(fc.filenames[0])
if fc.changeTypes == 'A':
self.p4cmd('rec', '-af', filename)
elif fc.changeTypes == 'M':
self.p4cmd('rec', '-e', filename)
elif fc.changeTypes == 'D':
self.p4cmd('rec', '-d', filename)
else: # Better safe than sorry! Various known actions not yet implemented
raise P4TLogicException('Action not yet implemented: %s', fc.changeTypes)
else:
diff = self.source.gitinfo.read_pipe_lines("git diff-tree -r %s \"%s^\" \"%s\"" % (
"-M", commit.commitID, commit.commitID))
filesToAdd = set()
filesToChangeType = set()
filesToDelete = set()
editedFiles = set()
pureRenameCopy = set()
symlinks = set()
filesToChangeExecBit = {}
all_files = list()
for line in diff:
diff = parseDiffTreeEntry(line)
modifier = diff['status']
path = diff['src']
all_files.append(path)
if modifier == "M":
self.p4_edit(path)
if isModeExecChanged(diff['src_mode'], diff['dst_mode']):
filesToChangeExecBit[path] = diff['dst_mode']
editedFiles.add(path)
elif modifier == "A":
filesToAdd.add(path)
filesToChangeExecBit[path] = diff['dst_mode']
if path in filesToDelete:
filesToDelete.remove(path)
dst_mode = int(diff['dst_mode'], 8)
if dst_mode == 0o120000:
symlinks.add(path)
elif modifier == "D":
filesToDelete.add(path)
if path in filesToAdd:
filesToAdd.remove(path)
elif modifier == "C":
src, dest = diff['src'], diff['dst']
all_files.append(dest)
self.p4_integrate(src, dest)
pureRenameCopy.add(dest)
if diff['src_sha1'] != diff['dst_sha1']:
self.p4_edit(dest)
pureRenameCopy.discard(dest)
if isModeExecChanged(diff['src_mode'], diff['dst_mode']):
self.p4_edit(dest)
pureRenameCopy.discard(dest)
filesToChangeExecBit[dest] = diff['dst_mode']
if self.isWindows:
# turn off read-only attribute
os.chmod(dest, stat.S_IWRITE)
os.unlink(dest)
editedFiles.add(dest)
elif modifier == "R":
src, dest = diff['src'], diff['dst']
all_files.append(dest)
self.p4_edit(src, "-k") # src must be open before move but may not exist
self.p4_move(src, dest) # opens for (move/delete, move/add)
if isModeExecChanged(diff['src_mode'], diff['dst_mode']):
filesToChangeExecBit[dest] = diff['dst_mode']
editedFiles.add(dest)
elif modifier == "T":
filesToChangeType.add(path)
else:
raise Exception("unknown modifier %s for %s" % (modifier, path))
for f in filesToChangeType:
self.p4_edit(f, "-t", "auto")
for f in filesToAdd:
self.p4_add(f)
for f in filesToDelete:
self.p4_revert(f)
self.p4_delete(f)
# Set/clear executable bits
# TODO
# for f in filesToChangeExecBit.keys():
# mode = filesToChangeExecBit[f]
# setP4ExecBit(f, mode)
openedFiles = self.p4cmd('opened')
lenOpenedFiles = len(openedFiles)
if lenOpenedFiles > 0:
self.logger.debug("Opened files: %d" % lenOpenedFiles)
# self.fixFileTypes(fileRevs, openedFiles)
description = self.formatChangeDescription(
sourceDescription=commit.description,
sourceChange=commit.commitID, sourcePort='git_repo',
sourceUser=commit.name, sourceEmail=commit.email)
newChangeId = 0
result = None
try:
# Debug for larger changelists
if lenOpenedFiles > 1000:
self.logger.debug("About to fetch change")
chg = self.p4.fetch_change()
chg['Description'] = description
if lenOpenedFiles > 1000:
self.logger.debug("About to submit")
result = self.p4.save_submit(chg)
a = -1
while 'submittedChange' not in result[a]:
a -= 1
newChangeId = result[a]['submittedChange']
if lenOpenedFiles > 1000:
self.logger.debug("submitted")
self.logger.debug(self.p4id, result)
self.checkWarnings()
except P4.P4Exception as e:
raise e
if newChangeId:
self.logger.info("source = {} : target = {}".format(commit.commitID, newChangeId))
description = self.formatChangeDescription(
sourceDescription=commit.description,
sourceChange=commit.commitID, sourcePort='git_repo',
sourceUser=commit.name, sourceEmail=commit.email)
self.updateChange(newChangeId=newChangeId, description=description, user=commit.name, date=commit.date)
else:
self.logger.error("failed to replicate change {}".format(commit))
return newChangeId
def updateChange(self, newChangeId, description, user, date):
# need to update the user and time stamp - but only if a superuser
        if self.options.superuser != "y":
return
newChange = self.p4.fetch_change(newChangeId)
newChange._description = description
newChange._date = to_perforce_time(date)
# change the username of the commit if we have a mapping between the name of the committer's name and p4
if user in self.options.user_map:
newChange._user = self.options.user_map[user]
self.p4.save_change(newChange, '-f')
def getCounter(self):
"Returns value of counter"
result = self.p4cmd('counter', self.options.counter_name)
if result and 'counter' in result[0]:
return result[0]['value']
return ''
def setCounter(self, value):
"Set's the counter to specified value"
self.p4cmd('counter', self.options.counter_name, str(value))
def valid_datetime_type(arg_datetime_str):
"""custom argparse type for user datetime values given from the command line"""
try:
return datetime.strptime(arg_datetime_str, "%Y/%m/%d %H:%M")
except ValueError:
msg = "Given Datetime ({0}) not valid! Expected format, 'YYYY/MM/DD HH:mm'!".format(arg_datetime_str)
raise argparse.ArgumentTypeError(msg)
def to_perforce_time(git_time_str):
git_time = datetime.strptime(git_time_str, "%Y-%m-%d %H:%M:%S %z")
git_time_utc = git_time.astimezone(pytz.UTC)
return git_time_utc.strftime('%Y/%m/%d %H:%M:%S')
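# Illustrative example (not part of the original script): to_perforce_time() turns a
# git author date such as "2021-03-01 14:30:00 +0100" into the UTC-based Perforce form,
# e.g. to_perforce_time("2021-03-01 14:30:00 +0100") == "2021/03/01 13:30:00".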
class GitP4Transfer(object):
"Main transfer class"
def __init__(self, *args):
desc = textwrap.dedent(__doc__)
parser = argparse.ArgumentParser(
description=desc,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="Copyright (C) 2021 Robert Cowham, Perforce Software Ltd"
)
parser.add_argument('-c', '--config', default=CONFIG_FILE, help="Default is " + CONFIG_FILE)
parser.add_argument('-n', '--notransfer', action='store_true',
help="Validate config file and setup source/target workspaces but don't transfer anything")
parser.add_argument('-m', '--maximum', default=None, type=int, help="Maximum number of changes to transfer")
parser.add_argument('-r', '--repeat', action='store_true',
help="Repeat transfer in a loop - for continuous transfer as background task")
parser.add_argument('-s', '--stoponerror', action='store_true', help="Stop on any error even if --repeat has been specified")
parser.add_argument('--sample-config', action='store_true', help="Print an example config file and exit")
parser.add_argument('--end-datetime', type=valid_datetime_type, default=None,
help="Time to stop transfers, format: 'YYYY/MM/DD HH:mm' - useful"
" for automation runs during quiet periods e.g. run overnight but stop first thing in the morning")
self.options = parser.parse_args(list(args))
if self.options.sample_config:
printSampleConfig()
return
self.logger = logutils.getLogger(LOGGER_NAME)
self.previous_target_change_counter = 0 # Current value
def getOption(self, section, option_name, default=None):
result = default
try:
if section == GENERAL_SECTION:
result = self.config[option_name]
else:
result = self.config[section][option_name]
except Exception:
pass
return result
def getIntOption(self, section, option_name, default=None):
result = default
val = self.getOption(section, option_name, default)
if isinstance(val, int):
return val
if val:
try:
result = int(eval(val))
except Exception:
pass
return result
def readConfig(self):
self.config = {}
try:
with open(self.options.config) as f:
                self.config = yaml.safe_load(f)
except Exception as e:
raise P4TConfigException('Could not read config file %s: %s' % (self.options.config, str(e)))
errors = []
self.options.counter_name = self.getOption(GENERAL_SECTION, "counter_name")
if not self.options.counter_name:
errors.append("Option counter_name must be specified")
self.options.instance_name = self.getOption(GENERAL_SECTION, "instance_name", self.options.counter_name)
self.options.mail_form_url = self.getOption(GENERAL_SECTION, "mail_form_url")
self.options.mail_to = self.getOption(GENERAL_SECTION, "mail_to")
self.options.mail_from = self.getOption(GENERAL_SECTION, "mail_from")
self.options.mail_server = self.getOption(GENERAL_SECTION, "mail_server")
self.options.sleep_on_error_interval = self.getIntOption(GENERAL_SECTION, "sleep_on_error_interval", 60)
self.options.poll_interval = self.getIntOption(GENERAL_SECTION, "poll_interval", 60)
self.options.change_batch_size = self.getIntOption(GENERAL_SECTION, "change_batch_size", 1000)
self.options.report_interval = self.getIntOption(GENERAL_SECTION, "report_interval", 30)
self.options.error_report_interval = self.getIntOption(GENERAL_SECTION, "error_report_interval", 30)
self.options.summary_report_interval = self.getIntOption(GENERAL_SECTION, "summary_report_interval", 10080)
self.options.max_logfile_size = self.getIntOption(GENERAL_SECTION, "max_logfile_size", 20 * 1024 * 1024)
self.options.change_description_format = self.getOption(
GENERAL_SECTION, "change_description_format",
"$sourceDescription\n\nTransferred from git://$sourceRepo@$sourceChange")
self.options.superuser = self.getOption(GENERAL_SECTION, "superuser", "y")
self.options.branch_maps = self.getOption(GENERAL_SECTION, "branch_maps")
if not self.options.branch_maps:
errors.append("Option branch_maps must not be empty")
self.options.user_map = self.getOption(GENERAL_SECTION, "user_map")
self.options.anon_branches_root = self.getOption(GENERAL_SECTION, "anon_branches_root")
self.options.import_anon_branches = self.getOption(GENERAL_SECTION, "import_anon_branches", "n")
self.options.ignore_files = self.getOption(GENERAL_SECTION, "ignore_files")
self.options.re_ignore_files = []
if self.options.ignore_files:
for exp in self.options.ignore_files:
try:
self.options.re_ignore_files.append(re.compile(exp))
except Exception as e:
errors.append("Failed to parse ignore_files: %s" % str(e))
if errors:
raise P4TConfigException("\n".join(errors))
self.source = GitSource(SOURCE_SECTION, self.options)
self.target = P4Target(TARGET_SECTION, self.options, self.source)
self.readOption('git_repo', self.source)
self.readOption('ws_root', self.source, optional=True)
self.readP4Section(self.target)
def readP4Section(self, p4config):
if p4config.section in self.config:
self.readOptions(p4config)
else:
raise P4TConfigException('Config file needs section %s' % p4config.section)
def readOptions(self, p4config):
self.readOption('P4CLIENT', p4config)
self.readOption('P4USER', p4config)
self.readOption('P4PORT', p4config)
self.readOption('P4PASSWD', p4config, optional=True)
self.readOption('P4CHARSET', p4config, optional=True)
def readOption(self, option, p4config, optional=False):
lcOption = option.lower()
if lcOption in self.config[p4config.section]:
p4config.__dict__[option] = self.config[p4config.section][lcOption]
elif not optional:
raise P4TConfigException('Required option %s not found in section %s' % (option, p4config.section))
def revertOpenedFiles(self):
"Clear out any opened files from previous errors - hoping they are transient - except for change_map"
with self.target.p4.at_exception_level(P4.P4.RAISE_NONE):
self.target.p4cmd('revert', "-k", "//%s/..." % self.target.P4CLIENT)
def replicate_commits(self):
"Perform a replication loop"
os.chdir(self.source.git_repo)
self.target.connect('target replicate')
self.target.createClientWorkspace()
commitIDs, commits = self.source.missingCommits(self.target.getCounter())
if self.options.notransfer:
self.logger.info("Would transfer %d commits - stopping due to --notransfer" % len(commitIDs))
return 0
self.logger.info("Transferring %d changes" % len(commitIDs))
changesTransferred = 0
if len(commitIDs) > 0:
self.save_previous_target_change_counter()
self.checkRotateLogFile()
self.revertOpenedFiles()
for id in commitIDs:
if self.endDatetimeExceeded(): # Bail early
self.logger.info("Transfer stopped due to --end-datetime being exceeded")
return changesTransferred
msg = 'Processing commit: {}'.format(id)
self.logger.info(msg)
commit = commits[id]
self.source.checkoutCommit(id)
self.target.replicateCommit(commit)
self.target.setCounter(id)
changesTransferred += 1
self.target.disconnect()
return changesTransferred
def log_exception(self, e):
"Log exceptions appropriately"
etext = str(e)
if re.search("WSAETIMEDOUT", etext, re.MULTILINE) or re.search("WSAECONNREFUSED", etext, re.MULTILINE):
self.logger.error(etext)
else:
self.logger.exception(e)
def save_previous_target_change_counter(self):
"Save the latest change transferred to the target"
chg = self.target.p4cmd('changes', '-m1', '-ssubmitted', '//{client}/...'.format(client=self.target.P4CLIENT))
if chg:
self.previous_target_change_counter = int(chg[0]['change']) + 1
def send_summary_email(self, time_last_summary_sent, change_last_summary_sent):
"Send an email summarising changes transferred"
time_str = p4time(time_last_summary_sent)
self.target.connect('target replicate')
# Combine changes reported by time or since last changelist transferred
changes = self.target.p4cmd('changes', '-l', '//{client}/...@{rev},#head'.format(
client=self.target.P4CLIENT, rev=time_str))
chgnums = [chg['change'] for chg in changes]
counter_changes = self.target.p4cmd('changes', '-l', '//{client}/...@{rev},#head'.format(
client=self.target.P4CLIENT, rev=change_last_summary_sent))
for chg in counter_changes:
if chg['change'] not in chgnums:
changes.append(chg)
changes.reverse()
lines = []
lines.append(["Date", "Time", "Changelist", "File Revisions", "Size (bytes)", "Size"])
total_changes = 0
total_rev_count = 0
total_file_sizes = 0
for chg in changes:
sizes = self.target.p4cmd('sizes', '-s', '//%s/...@%s,%s' % (self.target.P4CLIENT,
chg['change'], chg['change']))
lines.append([time.strftime("%Y/%m/%d", time.localtime(int(chg['time']))),
time.strftime("%H:%M:%S", time.localtime(int(chg['time']))),
chg['change'], sizes[0]['fileCount'], sizes[0]['fileSize'],
fmtsize(int(sizes[0]['fileSize']))])
total_changes += 1
total_rev_count += int(sizes[0]['fileCount'])
total_file_sizes += int(sizes[0]['fileSize'])
lines.append([])
lines.append(['Totals', '', str(total_changes), str(total_rev_count), str(total_file_sizes), fmtsize(total_file_sizes)])
report = "Changes transferred since %s\n%s" % (
time_str, "\n".join(["\t".join(line) for line in lines]))
self.logger.debug("Transfer summary report:\n%s" % report)
self.logger.info("Sending Transfer summary report")
self.logger.notify("Transfer summary report", report, include_output=False)
self.save_previous_target_change_counter()
self.target.disconnect()
def validateConfig(self):
"Performs appropriate validation of config values - primarily streams"
pass
def setupReplicate(self):
"Read config file and setup - raises exceptions if invalid"
self.readConfig()
self.target.connect('target replicate')
self.validateConfig()
self.logger.debug("connected to source and target")
self.target.createClientWorkspace()
def writeLogHeader(self):
"Write header info to log"
logOnce(self.logger, VERSION)
logOnce(self.logger, "Python ver: 0x%08x, OS: %s" % (sys.hexversion, sys.platform))
logOnce(self.logger, "P4Python ver: %s" % (P4.P4.identify()))
logOnce(self.logger, "Options: ", self.options)
logOnce(self.logger, "Reading config file")
def rotateLogFile(self):
"Rotate existing log file"
self.logger.info("Rotating logfile")
logutils.resetLogger(LOGGER_NAME)
global alreadyLogged
alreadyLogged = {}
self.writeLogHeader()
def checkRotateLogFile(self):
"Rotate log file if greater than limit"
try:
fname = logutils.getCurrentLogFileName(LOGGER_NAME)
fsize = os.path.getsize(fname)
if fsize > self.options.max_logfile_size:
self.logger.info("Rotating logfile since greater than max_logfile_size: %d" % fsize)
self.rotateLogFile()
except Exception as e:
self.log_exception(e)
def endDatetimeExceeded(self):
"""Determine if we should stop due to this being set"""
if not self.options.end_datetime:
return False
present = datetime.now()
return present > self.options.end_datetime
def replicate(self):
"""Central method that performs the replication between server1 and server2"""
if self.options.sample_config:
return 0
try:
self.writeLogHeader()
self.setupReplicate()
except Exception as e:
self.log_exception(e)
logging.shutdown()
return 1
self.options.config = os.path.realpath(self.options.config)
time_last_summary_sent = time.time()
change_last_summary_sent = 0
self.logger.debug("Time last summary sent: %s" % p4time(time_last_summary_sent))
time_last_error_occurred = 0
error_encountered = False # Flag to indicate error encountered which may require reporting
error_notified = False
finished = False
num_changes = 0
while not finished:
try:
self.readConfig() # Read every time to allow user to change them
self.logger.setReportingOptions(
instance_name=self.options.instance_name,
mail_form_url=self.options.mail_form_url, mail_to=self.options.mail_to,
mail_from=self.options.mail_from, mail_server=self.options.mail_server,
report_interval=self.options.report_interval)
logOnce(self.logger, self.source.options)
logOnce(self.logger, self.target.options)
self.source.disconnect()
self.target.disconnect()
num_changes = self.replicate_commits()
if self.options.notransfer:
finished = True
if num_changes > 0:
self.logger.info("Transferred %d changes successfully" % num_changes)
if change_last_summary_sent == 0:
change_last_summary_sent = self.previous_target_change_counter
if self.options.change_batch_size and num_changes >= self.options.change_batch_size:
self.logger.info("Finished processing batch of %d changes" % self.options.change_batch_size)
self.rotateLogFile()
elif not self.options.repeat:
finished = True
else:
if self.endDatetimeExceeded():
finished = True
self.logger.info("Stopping due to --end-datetime parameter being exceeded")
if error_encountered:
self.logger.info("Logging - reset error interval")
self.logger.notify("Cleared error", "Previous error has now been cleared")
error_encountered = False
error_notified = False
if time.time() - time_last_summary_sent > self.options.summary_report_interval * 60:
time_last_summary_sent = time.time()
self.send_summary_email(time_last_summary_sent, change_last_summary_sent)
                    self.logger.info("Sleeping for %d minutes" % self.options.poll_interval)
                    time.sleep(self.options.poll_interval * 60)
except P4TException as e:
self.log_exception(e)
self.logger.notify("Error", "Logic Exception encountered - stopping")
logging.shutdown()
return 1
except Exception as e:
self.log_exception(e)
if self.options.stoponerror:
self.logger.notify("Error", "Exception encountered and --stoponerror specified")
logging.shutdown()
return 1
else:
# Decide whether to report an error
if not error_encountered:
error_encountered = True
time_last_error_occurred = time.time()
elif not error_notified:
if time.time() - time_last_error_occurred > self.options.error_report_interval * 60:
error_notified = True
self.logger.info("Logging - Notifying recurring error")
self.logger.notify("Recurring error", "Multiple errors seen")
self.logger.info("Sleeping on error for %d minutes" % self.options.sleep_on_error_interval)
time.sleep(self.options.sleep_on_error_interval * 60)
self.logger.notify("Changes transferred", "Completed successfully")
logging.shutdown()
return 0
if __name__ == '__main__':
result = 0
try:
prog = GitP4Transfer(*sys.argv[1:])
result = prog.replicate()
except Exception as e:
print(str(e))
result = 1
sys.exit(result)
|
# -*- coding: utf-8 -*-
'''The file contains functions for working with the database'''
import base64
import time
from os.path import isfile
import sqlalchemy as sa
from aiohttp_session.cookie_storage import EncryptedCookieStorage
from envparse import env
# Reading settings file
if isfile('.env'):
env.read_envfile('.env')
# Database connection parameters obtained from .env
def get_dsn():
'''DB connection string
:return:
'''
return f"dbname={env.str('PG_DATABASE')} user={env.str('PG_USERNAME')} " \
f"password={env.str('PG_PASSWORD')} host={env.str('PG_SERVER')}"
def get_sekret_key():
'''SECRET_KEY for the session
:return:
'''
return EncryptedCookieStorage(base64.urlsafe_b64decode(env.str('SECRET_KEY')))
def get_timestamp_str():
    '''Timestamp string of the current time, formatted for the database
:return:
'''
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
metadata = sa.MetaData()
# Table user
tb_user = sa.Table(
'user',
metadata,
sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),
sa.Column('email', sa.String(255)),
sa.Column('password', sa.String(255)),
sa.Column('name', sa.String(255)),
sa.Column('surname', sa.String(255)),
sa.Column('create_at', sa.TIMESTAMP),
sa.Column('delete_at', sa.TIMESTAMP))
# Table user_rule
tb_user_rule = sa.Table(
'user_rule',
metadata,
sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),
    sa.Column('rule', None, sa.ForeignKey('rule.id')),
    sa.Column('user', None, sa.ForeignKey('user.id')))
# Table rule
tb_rule = sa.Table(
'rule',
metadata,
sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),
sa.Column('rule', sa.String(255)),
sa.Column('comment', sa.String(255)))
async def get_user_by_email(engine, email):
    '''Look up a user by email address
    :param engine: DB connection
    :param email: user email
    :return: user data dict, or None if no such user exists
'''
async with engine.acquire() as conn:
async for row in conn.execute(tb_user.select().where(tb_user.c.email == email)):
return {
'id': row[0],
'email': row[1],
'password': row[2],
'name': row[3],
'surname': row[4],
'create_at': row[5],
'delete_at': row[6]
}
async def get_user_rules(engine, user_id):
'''Obtaining user rights by id
:param engine: DB connection
:param user_id: user id
:return: user rights list
'''
async with engine.acquire() as conn:
rules = []
join = sa.join(tb_rule, tb_user_rule, tb_rule.c.id == tb_user_rule.c.rule)
async for row in conn.execute(
tb_rule.select().select_from(join).where(tb_user_rule.c.user == user_id)):
rules.append(row[1])
return rules
async def get_user_info(engine, user_id):
'''Getting user data by id
:param engine: DB connection
:param user_id: user id
:return: user information
'''
async with engine.acquire() as conn:
async for row in conn.execute(tb_user.select().where(tb_user.c.id == user_id)):
return {
'id': row[0],
'email': row[1],
'password': row[2],
'name': row[3],
'surname': row[4],
'rules': await get_user_rules(engine=engine, user_id=user_id)
}
async def get_users(engine, admin):
'''Retrieving user data
:param engine: DB connection
:param admin: Request data for admin user
:return: a list of users
'''
async with engine.acquire() as conn:
users = []
where = '' if admin else 'WHERE u.delete_at is null'
async for row in await conn.execute(
f'''SELECT u.id, u.email, u.password, u.name, u.surname, u.delete_at,
ARRAY(
SELECT r.rule
FROM "user_rule" as ur
LEFT JOIN "rule" as r on ur.rule = r.id
WHERE ur.user = u.id
) as "rules"
FROM "user" as u
{where}
ORDER BY u.id;'''):
# If the data is requested not by the Admin, then we do not show the admins
if not admin and 'admin' in row[6]:
continue
users.append({
'id': row[0],
'email': row[1],
'password': row[2],
'name': row[3],
'surname': row[4],
'delete': row[5] is not None,
'rules': row[6]
})
return users
async def get_rules(engine):
'''Obtaining rights data
:param engine: DB connection
:return: list of rights
'''
async with engine.acquire() as conn:
rules = {}
async for row in conn.execute(tb_rule.select()):
# {'admin': 0}
rules[row[1]] = row[0]
return rules
async def set_rules_for_user(engine, user_id, data):
'''Setting / changing user rights
:param engine: DB connection
:param user_id: user id
:param data: data for setting
:return:
'''
rules = await get_rules(engine)
user_rules = await get_user_rules(engine, user_id)
for rule, rule_id in rules.items():
# The user already has the current role and from the form flew to True
# if rule in user_rules and data.get(rule, False) is True:
# The user does not have the current role and from the form flew to False
# if rule not in user_rules and data.get(rule, False) is False:
# The user has a role, but False has arrived from the form - delete
if rule in user_rules and data.get(rule, False) is False:
async with engine.acquire() as conn:
await conn.execute(
tb_user_rule.delete(None)
.where(tb_user_rule.c.user == user_id)
.where(tb_user_rule.c.rule == rule_id))
# The user does not have roles, but True has arrived from the form - add
if rule not in user_rules and data.get(rule, False) is True:
async with engine.acquire() as conn:
await conn.execute(tb_user_rule.insert(None).values(user=user_id, rule=rule_id))
async def set_delete_at_for_user(engine, user_id, restore=False):
    '''Soft-delete a user by id (or restore them when restore=True)
    :param engine: DB connection
    :param user_id: id of the user to be deleted or restored
    :param restore: if True, clear delete_at instead of setting it
    :return:
'''
timestamp = 'null' if restore else f"'{get_timestamp_str()}'"
async with engine.acquire() as conn:
await conn.execute(f'''UPDATE "user" SET delete_at={timestamp} WHERE id={user_id};''')
async def create_user(engine, data):
'''User creation
:param engine: DB connection
:param data: new user data
:return:
'''
async with engine.acquire() as conn:
user = await get_user_by_email(engine=engine, email=data['email'])
if user is not None:
raise Warning('A user with this email already exists.')
user_id = await conn.scalar(
tb_user.insert(None).values(
email=data['email'],
password=data['password'],
name=data['name'],
surname=data['surname'],
create_at=get_timestamp_str()))
await set_rules_for_user(engine=engine, user_id=user_id, data=data)
async def update_user(engine, data):
'''User data update
:param engine: DB connection
:param data: user data to update
:return:
'''
async with engine.acquire() as conn:
# Check that the email matches the current one, or that it is unique in the database
user = await get_user_by_email(engine=engine, email=data['email'])
if user is not None and int(user['id']) != int(data['id']):
raise Warning('A user with this email already exists')
await conn.execute(
sa.update(tb_user)
.values({
'email': data['email'],
'password': data['password'],
'name': data['name'],
'surname': data['surname']
})
.where(tb_user.c.id == int(data['id'])))
await set_rules_for_user(engine=engine, user_id=int(data['id']), data=data)
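# --- Usage sketch (illustrative addition, not part of the original module) ---
# The helpers above expect an async engine exposing acquire(); the sketch below
# assumes aiopg.sa as the driver (the module itself does not say which one is used),
# and the connection keywords mirror the .env variables read by get_dsn().
if __name__ == '__main__':
    import asyncio
    from aiopg.sa import create_engine

    async def _demo():
        async with create_engine(
                user=env.str('PG_USERNAME'), password=env.str('PG_PASSWORD'),
                host=env.str('PG_SERVER'), dbname=env.str('PG_DATABASE')) as engine:
            # Create a user and read it back by email.
            await create_user(engine, {'email': 'demo@example.com', 'password': 'secret',
                                       'name': 'Demo', 'surname': 'User'})
            print(await get_user_by_email(engine, 'demo@example.com'))

    asyncio.run(_demo())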
|
import copy
from collections import OrderedDict, namedtuple
from typing import Dict, List
from influxdb.resultset import ResultSet
from influxpy.client import client_wrapper
from influxpy.compiler import InfluxCompiler
from influxpy.aggregates import BaseAggregate
from influxpy.fields import BaseDuration
from influxpy.query import InfluxQuery
class InfluxSeries(namedtuple('InfluxResult', ['points', 'tags'])):
def __eq__(self, other: 'InfluxSeries'):
return self.points == other.points and self.tags == other.tags
class InfluxQuerySet(object):
"""Represent a lazy database lookup."""
def __init__(self):
self.query = InfluxQuery()
self.compiler = None # type: InfluxCompiler
self.model = None
    def _fetch_results(self):  # yields InfluxSeries instances
influx_query = self.compiler.compile(self.query)
result_set = client_wrapper.query(influx_query) # type: ResultSet
results = []
series = result_set.raw.get('series', [])
for serie in series:
columns = serie['columns']
tags = serie.get('tags')
points = []
for value in serie['values']:
point = {}
for col_index, col in enumerate(columns):
point[col] = value[col_index]
points.append(point)
yield InfluxSeries(points=points, tags=tags)
def __iter__(self):
return iter(self._fetch_results())
def iql_query(self) -> str:
"""Returns the Influx query"""
return self.compiler.compile(self.query)
def using(self, database_alias: str) -> 'InfluxQuerySet':
clone = self.clone()
clone.query.database = database_alias
return clone
def filter(self, **kwargs) -> 'InfluxQuerySet':
"""
Return a new QuerySet instance with the args ANDed to the existing
set.
"""
clone = self.clone()
ordered_kwargs = OrderedDict(sorted(kwargs.items(), key=lambda t: t[0]))
clone.query.filters.update(ordered_kwargs)
return clone
def all(self) -> 'InfluxQuerySet':
"""
        Returns an unfiltered copy of the query set.
:return:
"""
clone = self.clone()
return clone
def group_by(self, *args: str) -> 'InfluxQuerySet':
"""
Returns a query set with additional group bys.
:param args:
:return:
"""
clone = self.clone()
clone.query.group_by = clone.query.group_by + list(args)
return clone
def into(self, destination) -> 'InfluxQuerySet':
"""
        Returns a query set whose results are written into the given destination measurement.
        :param destination: target measurement for the query results
:return:
"""
clone = self.clone()
clone.query.destination = destination
return clone
def resolution(self, resolution: BaseDuration) -> 'InfluxQuerySet':
"""
Returns a query set with time group by set to resolution.
:param resolution:
:return:
"""
clone = self.clone()
clone.query.resolution = resolution
return clone
def fill(self, value) -> 'InfluxQuerySet':
"""
Return a query set where time frames with no data will be filled with value.
:param value:
"""
clone = self.clone()
clone.query.fill = value
return clone
def annotate(self, *args: BaseAggregate, **kwargs: BaseAggregate) -> 'InfluxQuerySet':
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
clone = self.clone()
for annotation in args:
clone.query.annotations.append(annotation)
for name, annotation in kwargs.items():
annotation.name = name
clone.query.annotations.append(annotation)
return clone
def use_downsampled(self) -> 'InfluxQuerySet':
"""
        Returns a query set that is allowed to use downsampled (pre-aggregated) measurements.
:return:
"""
clone = self.clone()
clone.query.can_use_aggregated_measurement = True
return clone
def clone(self) -> 'InfluxQuerySet':
clone = InfluxQuerySet()
clone.query = copy.deepcopy(self.query)
clone.model = self.model
clone.compiler = self.compiler
return clone
def contribute_to_class(self, model, name):
self.model = model
self.compiler = InfluxCompiler(model)
setattr(model, name, self)
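# --- Usage sketch (illustrative addition, not part of the original module) ---
# Shows how the chainable API above composes a query. The tag value 'web-01',
# the group-by key 'host' and the helper function below are made up for
# illustration; a real query set is normally obtained from a model attribute
# after contribute_to_class() has attached a compiler to it.
def _example_query(qs: 'InfluxQuerySet') -> str:
    # Each call clones the query set, so `qs` itself is left untouched.
    composed = (qs.filter(host='web-01')
                  .group_by('host')
                  .fill(0)
                  .use_downsampled())
    # iql_query() compiles the accumulated InfluxQuery into an InfluxQL string.
    return composed.iql_query()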
|
"""
Entradas
P -> int -> p
Q -> int -> q
"""
p , q = map ( int , input ( "Digite 2 valores:" ). split ())
si (p ** 3 + q ** 4 - 2 * p ** 2) > 680 :
print ( "Los valores" + str ( p ), "y" , str ( q ), "satisfacen la expresion" )
otra cosa :
print ( "Los valores" + str ( p ), "y" , str ( q ), "no satisfacen la expresion" )
|
N=int(input())
V=list(map(int,input().split()))
print(V.count(max(V)))
|
#Solving a nonlinear equation with the bisection method
import numpy as np #module for numerical computation
import matplotlib.pyplot as plt #module for data visualization
#The equation we want to solve
def func_f(x):
    return x**2.0 -2.0
#Bisection method (function of the equation, left end of the search interval, right end of the search interval, error tolerance, maximum number of iterations)
def bisection(func_f, x_min, x_max, error=1e-10, max_loop=100):
    #Display the initial values
    num_calc = 0 #iteration counter
    print("{:3d}: {:.15f} <= x <= {:.15f}".format(num_calc, x_min, x_max))
    #Check that the condition of the intermediate value theorem is satisfied
    if(0.0 < func_f(x_min)*func_f(x_max)):
        print("error: Section definition is invalid (0.0 < func_f(x_min)*func_f(x_max)).")
        quit()
    #Keep iterating
    while(True):
        #Compute a new midpoint
        x_mid = (x_max +x_min)/2.0
        #Update the search interval
        if (0.0 < func_f(x_mid)*func_f(x_max)): #when the midpoint and the right end have the same sign
            x_max = x_mid #update the right end
        else: #when the midpoint and the left end have the same sign
            x_min = x_mid #update the left end
        #Display the result
        num_calc += 1 #count the iterations
        print("{:3d}: {:.15f} <= x <= {:.15f}".format(num_calc, x_min, x_max))
        #Stop once the interval width is within the error tolerance or the iteration limit is reached
        if((x_max-x_min <= error) or max_loop <= num_calc):
            break
    #The solution finally obtained
    print("x = {:.15f}".format(x_mid))
    return x_mid
#Visualization (function of the equation, left end of the graph, right end of the graph, solution of the equation)
def visualization(func_f, x_min, x_max, x_solved):
    plt.xlabel("$x$") #name of the x axis
    plt.ylabel("$f(x)$") #name of the y axis
    plt.grid() #show the dotted grid
    plt.axhline(0, color='#000000') #the line f(x)=0
    #The function
    exact_x = np.arange(x_min,x_max, (x_max-x_min)/500.0)
    exact_y = func_f(exact_x)
    plt.plot(exact_x,exact_y, label="$f(x)$", color='#ff0000') #plot the function as a line graph
    plt.scatter(x_solved,0.0) #plot the numerical solution as a point
    plt.text(x_solved,0.0, "$x$ = {:.9f}".format(x_solved), va='bottom', color='#0000ff')
    plt.show() #show the graph
#Main execution block
if (__name__ == '__main__'):
    #Compute the solution of the nonlinear equation with the bisection method
    solution = bisection(func_f, -2.0, -1.0)
    #Visualize the result
    visualization(func_f, solution-1.0, solution+1.0, solution)
|
"""
Test lldb data formatter subsystem.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class StdListDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers to break at for the different tests.
self.line = line_number('main.cpp', '// Set break point at this line.')
self.optional_line = line_number(
'main.cpp', '// Optional break point at this line.')
self.final_line = line_number(
'main.cpp', '// Set final break point at this line.')
@add_test_categories(["libstdcxx"])
@expectedFailureAll(bugnumber="llvm.org/pr50861", compiler="gcc")
def test_with_run_command(self):
"""Test that that file and class static variables display correctly."""
self.build()
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=-1)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synth clear', check=False)
self.runCmd(
"settings set target.max-children-count 256",
check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
self.runCmd("frame variable numbers_list --show-types")
self.runCmd("type format add -f hex int")
self.expect("frame variable numbers_list --raw", matching=False,
substrs=['size=0',
'{}'])
self.expect(
"frame variable &numbers_list._M_impl._M_node --raw",
matching=False,
substrs=[
'size=0',
'{}'])
self.expect("frame variable numbers_list",
substrs=['size=0',
'{}'])
self.expect("p numbers_list",
substrs=['size=0',
'{}'])
self.runCmd("n")
self.expect("frame variable numbers_list",
substrs=['size=1',
'[0] = ',
'0x12345678'])
self.runCmd("n")
self.runCmd("n")
self.runCmd("n")
self.expect("frame variable numbers_list",
substrs=['size=4',
'[0] = ',
'0x12345678',
'[1] =',
'0x11223344',
'[2] =',
'0xbeeffeed',
'[3] =',
'0x00abba00'])
self.runCmd("n")
self.runCmd("n")
self.expect("frame variable numbers_list",
substrs=['size=6',
'[0] = ',
'0x12345678',
'0x11223344',
'0xbeeffeed',
'0x00abba00',
'[4] =',
'0x0abcdef0',
'[5] =',
'0x0cab0cab'])
self.expect("p numbers_list",
substrs=['size=6',
'[0] = ',
'0x12345678',
'0x11223344',
'0xbeeffeed',
'0x00abba00',
'[4] =',
'0x0abcdef0',
'[5] =',
'0x0cab0cab'])
# check access-by-index
self.expect("frame variable numbers_list[0]",
substrs=['0x12345678'])
self.expect("frame variable numbers_list[1]",
substrs=['0x11223344'])
# but check that expression does not rely on us
self.expect("expression numbers_list[0]", matching=False, error=True,
substrs=['0x12345678'])
# check that MightHaveChildren() gets it right
self.assertTrue(
self.frame().FindVariable("numbers_list").MightHaveChildren(),
"numbers_list.MightHaveChildren() says False for non empty!")
self.runCmd("n")
self.expect("frame variable numbers_list",
substrs=['size=0',
'{}'])
self.runCmd("n")
self.runCmd("n")
self.runCmd("n")
self.runCmd("n")
self.expect("frame variable numbers_list",
substrs=['size=4',
'[0] = ', '1',
'[1] = ', '2',
'[2] = ', '3',
'[3] = ', '4'])
self.runCmd("type format delete int")
self.runCmd("n")
self.expect("frame variable text_list",
substrs=['size=0',
'{}'])
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.final_line, num_expected_locations=-1)
self.runCmd("c", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
self.expect("frame variable text_list",
substrs=['size=4',
'[0]', 'goofy',
'[1]', 'is',
'[2]', 'smart',
'[3]', '!!!'])
self.expect("p text_list",
substrs=['size=4',
'\"goofy\"',
'\"is\"',
'\"smart\"',
'\"!!!\"'])
# check access-by-index
self.expect("frame variable text_list[0]",
substrs=['goofy'])
self.expect("frame variable text_list[3]",
substrs=['!!!'])
# but check that expression does not rely on us
self.expect("expression text_list[0]", matching=False, error=True,
substrs=['goofy'])
# check that MightHaveChildren() gets it right
self.assertTrue(
self.frame().FindVariable("text_list").MightHaveChildren(),
"text_list.MightHaveChildren() says False for non empty!")
|
"""
WSGI config for kimchi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kimchi.settings")
application = get_wsgi_application()
|
from PIL import Image
def get_image_dimension(path) -> (int, int):
image = Image.open(path)
return image.size
def get_ratio_max_size(iwidth, iheight, swidth, sheight) -> (int, int):
KOEF = 1
new_iw = iwidth
new_ih = iheight
swidth_new = swidth * KOEF
sheight_new = sheight * KOEF
height_k = iheight/sheight_new
width_k = iwidth/swidth_new
if max(height_k, width_k) > 1:
if height_k > width_k:
new_iw /= height_k
new_ih /= height_k
else:
new_iw /= width_k
new_ih /= width_k
print('Appropriate size:', new_iw, new_ih)
return new_iw, new_ih
def set_widget_stylesheet(widget, styles: str = ""):
"""Hack that should be eliminated further"""
widget.setStyleSheet(f"border-image: url() 0 0 0 0 stretch stretch; " + styles)
class DisplayCoords:
THRESHOLD = 0.1
@staticmethod
def center(widget_w, widget_h, wind_w, wind_h):
return round(wind_w/2-widget_w/2), round(wind_h/2-widget_h/2), widget_w, widget_h
@classmethod
def leftup(cls, widget_w, widget_h, wind_w, wind_h):
return (wind_w - widget_w) * cls.THRESHOLD, (wind_h - widget_h) * cls.THRESHOLD, widget_w, widget_h
@classmethod
def leftdown(cls, widget_w, widget_h, wind_w, wind_h):
return (wind_w - widget_w) * cls.THRESHOLD, (wind_h - widget_h) * (1 - cls.THRESHOLD), widget_w, widget_h
@classmethod
def rightup(cls, widget_w, widget_h, wind_w, wind_h):
return (wind_w - widget_w) * (1 - cls.THRESHOLD), (wind_h - widget_h) * cls.THRESHOLD, widget_w, widget_h
@classmethod
def rightdown(cls, widget_w, widget_h, wind_w, wind_h):
return (wind_w - widget_w) * (1 - cls.THRESHOLD), (wind_h - widget_h) * (1 - cls.THRESHOLD), widget_w, widget_h
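# --- Usage sketch (illustrative addition, not part of the original module) ---
# Fits a hypothetical 4000x3000 image into a 1920x1080 window and centres it;
# the sizes are made-up values, and the widget the real code positions is omitted.
if __name__ == '__main__':
    new_w, new_h = get_ratio_max_size(4000, 3000, 1920, 1080)
    print(DisplayCoords.center(new_w, new_h, 1920, 1080))  # e.g. (240, 0, 1440.0, 1080.0)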
|
import os
import numpy as np
import torch
from typing import Optional, Type
from ..config.base_config import BaseConfig
from ..dataset.base_dataset import KoiDataset
from ..model.base_model import GenerativeModel
import random
from torch.utils.tensorboard import SummaryWriter
class Trainer:
"""
Abstract base class for training any generative model implemented by koi.
For now, we implement validation as holdout set (i.e. a single fold in cross-validation)
"""
def __init__(self, model: Type[GenerativeModel], train: KoiDataset, val: Optional[KoiDataset],
test: Optional[KoiDataset],
config: BaseConfig):
self.config = config
if torch.cuda.is_available() and not self.config.torch_device == 'cpu':
torch.cuda.manual_seed(config.seed)
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
        # seed everything here as well, regardless of the CUDA branch above
torch.manual_seed(config.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.cuda.manual_seed_all(config.seed)
np.random.seed(config.seed)
random.seed(config.seed)
        # Set the splits here so they do not need to be passed in as parameters.
        train.split = 'train'
        if val is not None:
            val.split = 'val'
        if test is not None:
            test.split = 'test'
self.train = train
self.val = val
self.test = test
self.model = model(config).to(self.device)
print("Current device: ", self.device)
print(config.__dict__)
print(model.__dict__)
self.writer = dict()
import time
t = str(int(time.time()))
self.writer['train'] = SummaryWriter(os.path.join(self.config.logs_folder, "train/{0}".format(t)))
self.writer['val'] = SummaryWriter(os.path.join(self.config.logs_folder, "val/{0}".format(t)))
self.writer['test'] = SummaryWriter(os.path.join(self.config.logs_folder, "test/{0}".format(t)))
def _log(self, tag, epoch, **kwargs):
for k, v in kwargs.items():
self.writer[tag].add_scalar(k, v, epoch)
def run_training(self, **kwargs):
r"""Training procedure.
This method is meant to execute all the training phase. Once the method ends, the
model should be ready to be used to perform approximate density estimation or sampling.
"""
self._run_training(**kwargs)
for k, v in self.writer.items():
v.flush()
v.close()
def _run_training(self, **kwargs):
r"""Training procedure.
This method is meant to execute all the training phase. Once the method ends, the
model should be ready to be used to perform approximate density estimation or sampling.
"""
raise NotImplementedError()
def reset_config(self):
pass
def init_data(self):
self.train.create_data_loaders()
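# --- Subclassing sketch (illustrative addition, not part of the original module) ---
# Trainer is abstract: _run_training() must be provided by a concrete trainer.
# The loop below is a minimal, made-up shape of such an implementation; 'epochs'
# is an assumed config attribute, and a real trainer would compute an actual loss.
class _SketchTrainer(Trainer):
    def _run_training(self, **kwargs):
        self.init_data()  # builds the training data loaders
        for epoch in range(getattr(self.config, 'epochs', 1)):
            epoch_loss = 0.0  # placeholder metric; a real trainer computes a model loss
            self._log('train', epoch, loss=epoch_loss)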
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
QasmSimulator Integration Tests
"""
# pylint: disable=no-member
import copy
from ddt import ddt
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.circuit.library import QuantumVolume, QFT
from qiskit.compiler import transpile
from test.terra.backends.simulator_test_case import (
SimulatorTestCase, supported_methods)
@ddt
class TestChunkSimulators(SimulatorTestCase):
"""QasmSimulator Multi-chunk tests."""
OPTIONS = {
"seed_simulator": 271828,
"max_parallel_threads": 1
}
@supported_methods(['statevector', 'density_matrix'])
def test_chunk_QuantumVolume(self, method, device):
"""Test multi-chunk with quantum volume"""
opts = {
"blocking_enable": True,
"blocking_qubits": 2
}
backend = self.backend(method=method, device=device, **opts)
backend_no_chunk = self.backend(method=method, device=device)
shots = 100
num_qubits = 4
depth = 10
circuit = transpile(QuantumVolume(num_qubits, depth, seed=0),
backend=backend,
optimization_level=0)
circuit.measure_all()
result = backend.run(circuit, shots=shots, memory=True).result()
counts = result.get_counts(circuit)
result_no_chunk = backend_no_chunk.run(circuit, shots=shots, memory=True).result()
counts_no_chunk = result_no_chunk.get_counts(circuit)
self.assertEqual(counts_no_chunk, counts)
@supported_methods(['statevector', 'density_matrix'])
def test_chunk_QuantumVolumeWithFusion(self, method, device):
"""Test multi-chunk with fused quantum volume"""
opts_no_chunk = {
"fusion_enable": True,
"fusion_threshold": 5,
}
opts_chunk = copy.copy(opts_no_chunk)
opts_chunk["blocking_enable"] = True
opts_chunk["blocking_qubits"] = 4
backend = self.backend(
method=method, device=device, **opts_chunk)
backend_no_chunk = self.backend(
method=method, device=device, **opts_no_chunk)
shots = 100
num_qubits = 8
depth = 10
circuit = transpile(QuantumVolume(num_qubits, depth, seed=0),
backend=backend, optimization_level=0)
circuit.measure_all()
result = backend.run(circuit, shots=shots, memory=True).result()
counts = result.get_counts(circuit)
result_no_chunk = backend_no_chunk.run(circuit, shots=shots, memory=True).result()
counts_no_chunk = result_no_chunk.get_counts(circuit)
self.assertEqual(counts_no_chunk, counts)
@supported_methods(['statevector', 'density_matrix'])
def test_chunk_QFTWithFusion(self, method, device):
"""Test multi-chunk with fused QFT (testing multi-chunk diagonal matrix)"""
opts_no_chunk = {
"fusion_enable": True,
"fusion_threshold": 5,
}
opts_chunk = copy.copy(opts_no_chunk)
opts_chunk["blocking_enable"] = True
opts_chunk["blocking_qubits"] = 4
backend = self.backend(
method=method, device=device, **opts_chunk)
backend_no_chunk = self.backend(
method=method, device=device, **opts_no_chunk)
shots = 100
num_qubits = 8
circuit = transpile(QFT(num_qubits), backend=backend,
optimization_level=0)
circuit.measure_all()
result = backend.run(circuit, shots=shots, memory=True).result()
counts = result.get_counts(circuit)
result_no_chunk = backend_no_chunk.run(circuit, shots=shots, memory=True).result()
counts_no_chunk = result_no_chunk.get_counts(circuit)
self.assertEqual(counts_no_chunk, counts)
@supported_methods(['statevector', 'density_matrix'])
def test_chunk_pauli(self, method, device):
"""Test multi-chunk pauli gate"""
opts_no_chunk = {"fusion_enable": False}
opts_chunk = copy.copy(opts_no_chunk)
opts_chunk["blocking_enable"] = True
opts_chunk["blocking_qubits"] = 3
backend = self.backend(
method=method, device=device, **opts_chunk)
backend_no_chunk = self.backend(
method=method, device=device, **opts_no_chunk)
shots = 100
qr = QuantumRegister(5)
cr = ClassicalRegister(5)
regs = (qr, cr)
circuit = QuantumCircuit(*regs)
circuit.h(qr[0])
circuit.h(qr[1])
circuit.h(qr[2])
circuit.h(qr[3])
circuit.h(qr[4])
circuit.pauli('YXZYX',qr)
circuit.measure_all()
result = backend.run(circuit, shots=shots, memory=True).result()
counts = result.get_counts(circuit)
result_no_chunk = backend_no_chunk.run(circuit, shots=shots, memory=True).result()
counts_no_chunk = result_no_chunk.get_counts(circuit)
self.assertEqual(counts_no_chunk, counts)
|
# Copyright (C) tkornuta, IBM Corporation 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Tomasz Kornuta"
import torch
from ptp.components.component import Component
from ptp.data_types.data_definition import DataDefinition
class BOWEncoder(Component):
"""
Simple Bag-of-word type encoder that encodes the sentence (in the form of a list of encoded words) into a vector.
.. warning::
        BoW transformation is irreversible, thus decode-related methods in fact return original inputs.
"""
def __init__(self, name, config):
"""
        Initializes the bag-of-words encoder by creating a dictionary mapping ALL words from training, validation and test sets into unique indices.
:param name: Component name (read from configuration file).
:type name: str
:param config: Dictionary of parameters (read from configuration ``.yaml`` file).
:type config: :py:class:`ptp.configuration.ConfigInterface`
"""
# Call constructors of parent classes.
Component.__init__(self, name, BOWEncoder, config)
# Default name mappings for all encoders.
self.key_inputs = self.stream_keys["inputs"]
self.key_outputs = self.stream_keys["outputs"]
# Retrieve bow size from global variables.
self.bow_size = self.globals["bow_size"]
def input_data_definitions(self):
"""
Function returns a dictionary with definitions of input data that are required by the component.
:return: dictionary containing input data definitions (each of type :py:class:`ptp.utils.DataDefinition`).
"""
return {
self.key_inputs: DataDefinition([-1, -1, self.bow_size], [list, list, torch.Tensor], "Batch of sentences, each represented as a list of vectors [BATCH_SIZE] x [SEQ_LENGTH] x [ITEM_SIZE] (agnostic to item size)")
}
def output_data_definitions(self):
"""
Function returns a dictionary with definitions of output data produced by the component.
:return: dictionary containing output data definitions (each of type :py:class:`ptp.utils.DataDefinition`).
"""
return {
self.key_outputs: DataDefinition([-1, self.bow_size], [torch.Tensor], "Batch of sentences, each represented as a single vector [BATCH_SIZE x ITEM_SIZE] (agnostic to item size)")
}
def __call__(self, data_dict):
"""
Encodes batch, or, in fact, only one field of batch ("inputs").
Stores result in "outputs" field of data_dict.
:param data_dict: :py:class:`ptp.utils.DataDict` object containing (among others):
- "inputs": expected input containing list of (list of tokens) [BATCH SIZE] x [SEQ_LEN] x [ITEM_SIZE]
- "outputs": added output tensor with encoded words [BATCH_SIZE x ITEM_SIZE]
"""
# Get inputs to be encoded.
inputs = data_dict[self.key_inputs]
outputs_list = []
# Process samples one by one.
for sample in inputs:
# Encode sample
output = self.encode_sample(sample)
# Add to list, unsqueezing the batch dimension.
outputs_list.append( output.unsqueeze(0) )
# Concatenate output tensors.
outputs = torch.cat(outputs_list, dim=0)
# Add result to the data dict.
data_dict.extend({self.key_outputs: outputs})
def encode_sample(self, list_of_tokens):
"""
Generates a bag-of-words vector of length `bow_size`.
:param list_of_tokens: List of tokens [SEQ_LENGTH] x [ITEM_SIZE]
:return: torch.LongTensor [ITEM_SIZE]
"""
# Create output. Clone the first token so the summation does not modify the input in-place.
output = list_of_tokens[0].clone()
# "Adds" tokens.
for token in list_of_tokens[1:]:
output += token
return output
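# A minimal, standalone sketch (not part of the original component) of what the
# summation in encode_sample() does: if every token is a one-hot vector of
# length bow_size, the sentence encoding is simply the element-wise sum, i.e.
# a vector of word counts.
if __name__ == '__main__':
    bow_size = 5
    # hypothetical sentence of three tokens, already one-hot encoded
    tokens = [torch.eye(bow_size)[i] for i in (0, 2, 2)]
    encoded = tokens[0].clone()
    for token in tokens[1:]:
        encoded += token
    print(encoded)  # tensor([1., 0., 2., 0., 0.]) - token 2 occurred twice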
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import libcst as cst
from libcst import parse_module
from libcst._batched_visitor import BatchableCSTVisitor
from libcst._visitors import CSTVisitor
from libcst.metadata import (
CodeRange,
MetadataWrapper,
PositionProvider,
WhitespaceInclusivePositionProvider,
)
from libcst.metadata.position_provider import (
PositionProvidingCodegenState,
WhitespaceInclusivePositionProvidingCodegenState,
)
from libcst.testing.utils import UnitTest
def position(
state: WhitespaceInclusivePositionProvidingCodegenState,
) -> Tuple[int, int]:
return state.line, state.column
class PositionProviderTest(UnitTest):
def test_visitor_provider(self) -> None:
"""
Sets 2 metadata entries for every node:
SimpleProvider -> 1
DependentProvider -> 2
"""
test = self
class DependentVisitor(CSTVisitor):
METADATA_DEPENDENCIES = (PositionProvider,)
def visit_Pass(self, node: cst.Pass) -> None:
test.assertEqual(
self.get_metadata(PositionProvider, node), CodeRange((1, 0), (1, 4))
)
wrapper = MetadataWrapper(parse_module("pass"))
wrapper.visit(DependentVisitor())
def test_equal_range(self) -> None:
test = self
expected_range = CodeRange((1, 4), (1, 6))
class EqualPositionVisitor(CSTVisitor):
METADATA_DEPENDENCIES = (PositionProvider,)
def visit_Equal(self, node: cst.Equal) -> None:
test.assertEqual(
self.get_metadata(PositionProvider, node), expected_range
)
def visit_NotEqual(self, node: cst.NotEqual) -> None:
test.assertEqual(
self.get_metadata(PositionProvider, node), expected_range
)
MetadataWrapper(parse_module("var == 1")).visit(EqualPositionVisitor())
MetadataWrapper(parse_module("var != 1")).visit(EqualPositionVisitor())
def test_batchable_provider(self) -> None:
test = self
class ABatchable(BatchableCSTVisitor):
METADATA_DEPENDENCIES = (PositionProvider,)
def visit_Pass(self, node: cst.Pass) -> None:
test.assertEqual(
self.get_metadata(PositionProvider, node), CodeRange((1, 0), (1, 4))
)
wrapper = MetadataWrapper(parse_module("pass"))
wrapper.visit_batched([ABatchable()])
class PositionProvidingCodegenStateTest(UnitTest):
def test_codegen_initial_position(self) -> None:
state = WhitespaceInclusivePositionProvidingCodegenState(
" " * 4, "\n", WhitespaceInclusivePositionProvider()
)
self.assertEqual(position(state), (1, 0))
def test_codegen_add_token(self) -> None:
state = WhitespaceInclusivePositionProvidingCodegenState(
" " * 4, "\n", WhitespaceInclusivePositionProvider()
)
state.add_token("1234")
self.assertEqual(position(state), (1, 4))
def test_codegen_add_tokens(self) -> None:
state = WhitespaceInclusivePositionProvidingCodegenState(
" " * 4, "\n", WhitespaceInclusivePositionProvider()
)
state.add_token("1234\n1234")
self.assertEqual(position(state), (2, 4))
def test_codegen_add_newline(self) -> None:
state = WhitespaceInclusivePositionProvidingCodegenState(
" " * 4, "\n", WhitespaceInclusivePositionProvider()
)
state.add_token("\n")
self.assertEqual(position(state), (2, 0))
def test_codegen_add_indent_tokens(self) -> None:
state = WhitespaceInclusivePositionProvidingCodegenState(
" " * 4, "\n", WhitespaceInclusivePositionProvider()
)
state.increase_indent(state.default_indent)
state.add_indent_tokens()
self.assertEqual(position(state), (1, 4))
def test_codegen_decrease_indent(self) -> None:
state = WhitespaceInclusivePositionProvidingCodegenState(
" " * 4, "\n", WhitespaceInclusivePositionProvider()
)
state.increase_indent(state.default_indent)
state.increase_indent(state.default_indent)
state.increase_indent(state.default_indent)
state.decrease_indent()
state.add_indent_tokens()
self.assertEqual(position(state), (1, 8))
def test_whitespace_inclusive_position(self) -> None:
# create a dummy node
node = cst.Pass()
# simulate codegen behavior for the dummy node
# generates the code " pass "
state = WhitespaceInclusivePositionProvidingCodegenState(
" " * 4, "\n", WhitespaceInclusivePositionProvider()
)
state.before_codegen(node)
state.add_token(" ")
with state.record_syntactic_position(node):
state.add_token("pass")
state.add_token(" ")
state.after_codegen(node)
# check whitespace is correctly recorded
self.assertEqual(state.provider._computed[node], CodeRange((1, 0), (1, 6)))
def test_position(self) -> None:
# create a dummy node
node = cst.Pass()
# simulate codegen behavior for the dummy node
# generates the code " pass "
state = PositionProvidingCodegenState(" " * 4, "\n", PositionProvider())
state.before_codegen(node)
state.add_token(" ")
with state.record_syntactic_position(node):
state.add_token("pass")
state.add_token(" ")
state.after_codegen(node)
# check syntactic position ignores whitespace
self.assertEqual(state.provider._computed[node], CodeRange((1, 1), (1, 5)))
|
# Copyright (c) 2016, 2018, 2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2017 Derek Gustafson <degustaf@gmail.com>
# Copyright (c) 2018 Ioana Tagirta <ioana.tagirta@gmail.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2020-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import sys
import astroid
PY39 = sys.version_info >= (3, 9)
def _collections_transform():
return astroid.parse(
"""
class defaultdict(dict):
default_factory = None
def __missing__(self, key): pass
def __getitem__(self, key): return default_factory
"""
+ _deque_mock()
+ _ordered_dict_mock()
)
def _deque_mock():
base_deque_class = """
class deque(object):
maxlen = 0
def __init__(self, iterable=None, maxlen=None):
self.iterable = iterable or []
def append(self, x): pass
def appendleft(self, x): pass
def clear(self): pass
def count(self, x): return 0
def extend(self, iterable): pass
def extendleft(self, iterable): pass
def pop(self): return self.iterable[0]
def popleft(self): return self.iterable[0]
def remove(self, value): pass
def reverse(self): return reversed(self.iterable)
def rotate(self, n=1): return self
def __iter__(self): return self
def __reversed__(self): return self.iterable[::-1]
def __getitem__(self, index): return self.iterable[index]
def __setitem__(self, index, value): pass
def __delitem__(self, index): pass
def __bool__(self): return bool(self.iterable)
def __nonzero__(self): return bool(self.iterable)
def __contains__(self, o): return o in self.iterable
def __len__(self): return len(self.iterable)
def __copy__(self): return deque(self.iterable)
def copy(self): return deque(self.iterable)
def index(self, x, start=0, end=0): return 0
def insert(self, x, i): pass
def __add__(self, other): pass
def __iadd__(self, other): pass
def __mul__(self, other): pass
def __imul__(self, other): pass
def __rmul__(self, other): pass"""
if PY39:
base_deque_class += """
@classmethod
def __class_getitem__(cls, item): return cls"""
return base_deque_class
def _ordered_dict_mock():
base_ordered_dict_class = """
class OrderedDict(dict):
def __reversed__(self): return self[::-1]
def move_to_end(self, key, last=False): pass"""
if PY39:
base_ordered_dict_class += """
@classmethod
def __class_getitem__(cls, item): return cls"""
return base_ordered_dict_class
astroid.register_module_extender(astroid.MANAGER, "collections", _collections_transform)
def _looks_like_subscriptable(node: astroid.nodes.ClassDef) -> bool:
"""
Returns True if the node corresponds to a ClassDef of the collections.abc module that
supports subscripting.
:param node: ClassDef node
"""
if node.qname().startswith("_collections") or node.qname().startswith(
"collections"
):
try:
node.getattr("__class_getitem__")
return True
except astroid.AttributeInferenceError:
pass
return False
CLASS_GET_ITEM_TEMPLATE = """
@classmethod
def __class_getitem__(cls, item):
return cls
"""
def easy_class_getitem_inference(node, context=None):
# Here __class_getitem__ exists but is quite a mess to infer thus
# put an easy inference tip
func_to_add = astroid.extract_node(CLASS_GET_ITEM_TEMPLATE)
node.locals["__class_getitem__"] = [func_to_add]
if PY39:
# Starting with Python 3.9 some objects of the collections module are subscriptable
# thanks to the __class_getitem__ method, but the way it is implemented in
# _collections_abc makes it difficult to infer. (We would have to handle AssignName inference in the
# getitem method of the ClassDef class.) Instead we put here a mock of the __class_getitem__ method.
astroid.MANAGER.register_transform(
astroid.nodes.ClassDef, easy_class_getitem_inference, _looks_like_subscriptable
)
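# A minimal, standalone sketch (not part of astroid itself) showing the brain in
# action: importing this module registers the extender above, after which
# inferring `collections.deque` resolves against the mocked class definitions.
if __name__ == "__main__":
    node = astroid.extract_node("import collections; collections.deque")
    for inferred in node.infer():
        print(inferred)  # e.g. a ClassDef for 'deque' carrying the mocked methods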
|
from src.inc.lang_detection_utils import *
from nltk import pos_tag, ne_chunk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tree import Tree
from nltk import FreqDist
import itertools
class TopicSelector:
def __init__(self, text: str, min_freq: int = 3, lang: str = "auto", n_common_words: int = 20) -> None:
self.text = text
self.min_freq = min_freq
self.n_common_words = n_common_words
self.lang = detect_lang(self.text) if lang == 'auto' else lang
self.named_entities = None
self.common_words = None
self.keywords = None
def get_keywords(self) -> set:
return self.keywords if self.keywords else self._set_keywords()
def get_named_entities(self) -> list:
return self.named_entities if self.named_entities else self._set_named_entities()
def get_common_words(self) -> list:
return self.common_words if self.common_words else self._set_common_words()
def _set_keywords(self) -> set:
self.keywords = set(self.get_named_entities())
return self.keywords
def _get_stopwords(self) -> set:
return set(stopwords.words("english"))
def _filter_stopwords(self, text: str) -> list:
words = word_tokenize(text, language=self.lang)
stop_words = self._get_stopwords()
filtered_words = [
word for word in words if word.casefold() not in stop_words]
return filtered_words
def _lemmatize_words(self, text: str) -> list:
lemmatizer = WordNetLemmatizer()
filtered_words = self._filter_stopwords(text)
lemmatized_words = [lemmatizer.lemmatize(
word) for word in filtered_words]
return lemmatized_words
def _tag_words(self, text: str) -> list:
tagged_words = pos_tag(self._lemmatize_words(text))
return tagged_words
def _set_named_entities(self) -> list:
tagged_words = self._tag_words(self.text)
tree = ne_chunk(tagged_words, binary=True)
named_entities = []
current_chunk = []
for i in tree:
if isinstance(i, Tree):
current_chunk.append(
" ".join([token for token, pos in i.leaves()]))
if current_chunk:
named_entity = " ".join(current_chunk)
if named_entity not in named_entities:
named_entities.append(named_entity)
current_chunk = []
else:
continue
self.named_entities = named_entities
return self.named_entities
# No longer used in keyword extraction for summarization
# Will be used for definitions
def _set_common_words(self) -> list:
freq_dist = FreqDist(self._lemmatize_words(self.text))
self.common_words = [x[0] for x in freq_dist.most_common(
self.n_common_words) if x[1] >= self.min_freq and len(x[0]) > 1]
return self.common_words
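# A minimal usage sketch (illustrative only). It assumes the NLTK data packages
# used above (e.g. 'punkt', 'stopwords', 'wordnet', 'averaged_perceptron_tagger',
# 'maxent_ne_chunker', 'words') are already downloaded, and passes an explicit
# language so detect_lang() is not needed.
if __name__ == "__main__":
    sample_text = (
        "Ada Lovelace worked with Charles Babbage on the Analytical Engine. "
        "Ada Lovelace is often described as the first computer programmer."
    )
    selector = TopicSelector(sample_text, min_freq=2, lang="english")
    print(selector.get_named_entities())  # e.g. ['Ada Lovelace', 'Charles Babbage', ...]
    print(selector.get_common_words())    # e.g. ['Ada', 'Lovelace']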
|
###############################################################################
# Author: Jayden Lee
# Date: 27/06/19
# Purpose: Simple Data Dictionary.
###############################################################################
myDog = {"Name": "Oliver", "Age": 7, "Colour": "Mixed", "Disposition": "Cute"}
print(myDog["Name"] + ": " + str(myDog["Age"]))
|
# -*- coding: utf-8 -*-
"""
Instructor Demo: Dicts.
This script showcases basic operations of Python Dicts.
"""
# Initialize a dictionary containing top traders for each month in 2019
top_traders_2019 = {
"january" : "Karen",
"february" : "Harold",
"march" : "Sam"
}
print()
print(f"Dictionary: {top_traders_2019}")
print()
# Initialize a dictionary
trading_pnl = {
"title": "Trading Log",
"03-18-2019": -224,
"03-19-2019": 352,
"03-20-2019": 252,
"03-21-2019": 354,
"03-22-2019": -544,
"03-23-2019": -650,
"03-24-2019": 56,
"03-25-2019": 123,
"03-26-2019": -43,
"03-27-2019": 254,
"03-28-2019": 325,
"03-29-2019": -123,
"03-30-2019": 47,
"03-31-2019": 321,
"04-01-2019": 123,
"04-02-2019": 133,
"04-03-2019": -151,
"04-04-2019": 613,
"04-05-2019": 232,
"04-06-2019": -311
}
# Print out dictionary; the initial print() serves as spacing after the command line input
print()
print(f"Dictionary: {trading_pnl}")
print()
# Print out specific value of a key
print(f"03-31-2019: {trading_pnl['03-31-2019']}")
print()
# Add a new key-value pair
trading_pnl["04-07-2019"] = 413
print(trading_pnl)
print()
# Modify a key value
trading_pnl["04-07-2019"] = 542
print(trading_pnl)
print()
# Delete a key-value pair
del trading_pnl["04-07-2019"]
print(trading_pnl)
print()
# Check if key exists
if "04-03-2019" in trading_pnl:
print("Yes, '04-03-2019' is one of the keys in the trading_pnl dictionary")
print()
# Print out dict keys via a for loop
for key in trading_pnl:
print(f"Key: {key}")
print()
# Print out dict values
for key in trading_pnl:
print(f"Value: {trading_pnl[key]}")
print()
# Print out dict key-value pairs
for key, value in trading_pnl.items():
print(f"Key: {key} Value: {value}")
print()
|
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from unittest.mock import MagicMock
import pytz
class DummyDagRun:
def __init__(self) -> None:
self.start_date = pytz.utc.localize(datetime.utcnow())
self.conf = None
self.run_id = "run_id"
@staticmethod
def get_task_instances():
return []
def get_task_instance(self, _):
return self
from datetime import datetime
TASK_ID_SEPARATOR = '.'
class DummyDag:
def __init__(self, dag_id, task_id):
self.dag_id = dag_id
self.task_id = task_id
self.try_number = 0
self.is_subdag = False
self.execution_date = '2017-05-21T00:00:00'
self.dag_run_id = 'dag_run_id'
self.owner = ['owner1', 'owner2']
self.email = ['email1@test.com']
self.task = MagicMock(name='task', owner=self.owner, email=self.email)
self.context = {
'dag': self,
'task': self,
'ti': self,
'ts': datetime.now().timestamp(),
'dag_run': DummyDagRun(),
}
def get_dagrun(self):
return self.context['dag_run']
@staticmethod
def xcom_push(key, value):
return key, value
|
# -*- coding: utf-8 -*-
import sys
import os
import random
import numpy
import scipy
import ivenv
from geo import *
## how to use Suehiro's geo package
## tools/geo/geo.py
# constructors
# VECTOR()
# VECTOR(vec=[1,2,3])
# I = MATRIX()
# MATRIX(mat=[[1,2,3],[4,5,6],[7,8,9]])
# FRAME(mat=m,vec=v)
# def Rx(th):
# return MATRIX(angle=th, axis=XAXIS)
# def Ry(th):
# return MATRIX(angle=th, axis=YAXIS)
# def Rz(th):
# return MATRIX(angle=th, axis=ZAXIS)
def unzip(x):
return zip(*x)
XAXIS=VECTOR(1,0,0)
YAXIS=VECTOR(0,1,0)
ZAXIS=VECTOR(0,0,1)
# norms & samplings
def mat2quat(m):
tr = m[0][0] + m[1][1] + m[2][2] + 1.0
if (tr >= 1.0):
s = 0.5 / sqrt(tr)
w = 0.25 / s
x = (m[1][2] - m[2][1]) * s
y = (m[2][0] - m[0][2]) * s
z = (m[0][1] - m[1][0]) * s
return [w, x, y, z]
else:
if (m[1][1] > m[2][2]):
_max = m[1][1]
else:
_max = m[2][2]
if (_max < m[0][0]):
s = sqrt(m[0][0] - (m[1][1] + m[2][2]) + 1.0)
x = s * 0.5
s = 0.5 / s;
y = (m[0][1] + m[1][0]) * s
z = (m[2][0] + m[0][2]) * s
w = (m[1][2] + m[2][1]) * s
return [w, x, y, z]
elif (_max == m[1][1]):
s = sqrt(m[1][1] - (m[2][2] + m[0][0]) + 1.0)
y = s * 0.5
s = 0.5 / s;
x = (m[0][1] + m[1][0]) * s
z = (m[1][2] + m[2][1]) * s
w = (m[2][0] + m[0][2]) * s
return [w, x, y, z]
else:
s = sqrt(m[2][2] - (m[0][0] + m[1][1]) + 1.0)
z = s * 0.5
s = 0.5 / s;
x = (m[2][0] + m[0][2]) * s
y = (m[1][2] + m[2][1]) * s
w = (m[0][1] + m[1][0]) * s
return [w, x, y, z]
def quat2mat(q):
qw,qx,qy,qz = q
sx = qx * qx
sy = qy * qy
sz = qz * qz
cx = qy * qz
cy = qz * qx
cz = qx * qy
wx = qw * qx
wy = qw * qy
wz = qw * qz
return MATRIX(mat=[[1.0-2.0*(sy+sz), 2.0*(cz+wz), 2.0*(cy-wy)],
[2.0*(cz-wz),1.0-2.0*(sx+sz),2.0*(cx+wx)],
[2.0*(cy+wy),2.0*(cx-wx),1.0-2.0*(sx+sy)]])
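# A small worked example (comment only; MATRIX and sqrt come from the geo
# package imported above):
#   mat2quat([[1, 0, 0], [0, 1, 0], [0, 0, 1]])  ->  [1.0, 0.0, 0.0, 0.0]
#   quat2mat([1.0, 0.0, 0.0, 0.0])               ->  identity MATRIX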
def distquat(q1, q2):
l = q1[0]*q2[0] + q1[1]*q2[1] + q1[2]*q2[2] + q1[3]*q2[3]
return 1.0 - abs(l)
def distSO3(m1, m2):
return distquat(mat2quat(m1), mat2quat(m2))
def distRn(v1, v2):
return numpy.linalg.norm(v1-v2)
# scipy.minkowski_distance(v1, v2, 2)
def distSE3(frm1, frm2, wt=1.0, wr=1.8):
v1 = frm1.vec
m1 = frm1.mat
v2 = frm2.vec
m2 = frm2.mat
return wt * distRn(v1, v2) + wr * distSO3(m1, m2)
def weighted_L1dist(v1, v2, w): # weighted L1 distance
return reduce(lambda y,x: x[0]*abs(x[1]-x[2])+y, zip(w,v1,v2), 0.0)
# def normalize(v):
# return v / linalg.norm(v)
def sampleUniformEuler():
theta = 2*pi*random.random() - pi
phi = acos(1-2*random.random()) + pi/2
if random.random() < 0.5:
if phi < pi:
phi = phi + pi
else:
phi = phi - pi
ita = 2*pi*random.random() - pi
return [theta,phi,ita]
def sampleBoxR3(minx,maxx,miny,maxy,minz,maxz):
return [random.uniform(minx, maxx),
random.uniform(miny, maxy),
random.uniform(minz, maxz)]
def sampleSE3():
r = sampleUniformEuler()
p = sampleBoxR3(WSMINX,WSMAXX,WSMINY,WSMAXY,WSMINZ,WSMAXZ)
return FRAME(xyzabc=p+r)
def sampleSE3_with_z_constraint(R):
theta = 2*pi*random.random() - pi
m = MATRIX(angle=theta, axis=ZAXIS)
p = sampleBoxR3(WSMINX,WSMAXX,WSMINY,WSMAXY,WSMINZ,WSMAXZ)
return FRAME(mat=R*m, vec=p)
##
##
def parse_joints_flag(flag):
if flag == 'rarm':
use_waist = False
arm = 'right'
elif flag == 'torso_rarm':
use_waist = True
arm = 'right'
elif flag == 'larm':
use_waist = False
arm = 'left'
elif flag == 'torso_larm':
use_waist = True
arm = 'left'
else:
warn('joints %s is not supported'%flag)
return None
return arm, use_waist
##
## For fast nearest neighbor search
## not yet used
##
class DynamicKdTrees:
def __init__(self):
self.cache = []
self.trees = []
def query(self, point):
mind = 1000000.0 # using squared distance is faster
for p in self.cache:
d = minkowski_distance(p, point)
if d < mind:
mind = d
nnp = p
print 'nearest in list: ', nnp
d, idx = self.trees[0].query(point)
if d < mind:
mind = d
nnp = self.trees[0].data[idx]
print 'nearest in tree: ', nnp
return mind, nnp
def insert(self, point):
if len(self.cache) < 20:
self.cache.append(point)
else:
if self.trees == []:
print 'create a new tree'
self.trees.append(cKDTree(self.cache))
else:
print 'extend the existing tree'
aa = self.trees[0].data.tolist()
aa.extend(self.cache)
self.trees[0] = cKDTree(aa)
# for i in length(self.trees):
# if tree.n <
##
## Simple primitive collision tests
##
#
# Only two primitive tests are implemented.
# 1, test between two line segments (equivalent to capsules)
# 2, test between a line segment (capsule) and AABB surface (not volume)
#
class CBody:
def __init__(self):
pass
class CCapsule(CBody):
def __init__(self, pos=VECTOR(), axis=VECTOR(), radius=60.0):
self.pos = pos
self.axis = axis
self.radius = radius
class CAABB(CBody):
def __init__(self, bounds):
self.bounds = bounds
class CollisionChecker:
def __init__(self):
pass
class SimpleCollisionChecker(CollisionChecker):
def isCollisionFree(self, obj1, obj2):
# collision test functions
# return True if there is no collision
if obj1.__class__ == CCapsule:
if obj2.__class__ == CCapsule:
return self.testTwoCapsules(obj1, obj2)
elif obj2.__class__ == CAABB:
return self.testCapsuleAABB(obj1, obj2)
else:
raise CCError('undefined primitive test pair')
else:
raise CCError('undefined primitive test pair')
def testTwoCapsules(self, c1, c2):
d = self.distanceLineSegments(c1.pos, c1.axis, c2.pos, c2.axis)
return (c1.radius + c2.radius) < d
def testCapsuleAABB(self, c, b):
u = c.axis
p0 = c.pos
minx,maxx,miny,maxy,minz,maxz = b.bounds
r = c.radius
epsilon = 1e-3
if abs(u[0]) > epsilon:
tc = (minx-p0[0])/u[0]
if 0 <= tc and tc <= 1:
pc = p0 + tc*u
if (miny-r <= pc[1] and pc[1] <= maxy+r
and minz-r <= pc[2] and pc[2] <= maxz+r):
return False
tc = (maxx-p0[0])/u[0]
if 0 <= tc and tc <= 1:
pc = p0 + tc*u
if (miny-r <= pc[1] and pc[1] <= maxy+r
and minz-r <= pc[2] and pc[2] <= maxz+r):
return False
else:
if abs(p0[0] - minx) < r or abs(p0[0] - maxx) < r:
return False
if abs(u[1]) > epsilon:
tc = (miny-p0[1])/u[1]
if 0 <= tc and tc <= 1:
pc = p0 + tc*u
if (minx-r <= pc[0] and pc[0] <= maxx+r
and minz-r <= pc[2] and pc[2] <= maxz+r):
return False
tc = (maxy-p0[1])/u[1]
if 0 <= tc and tc <= 1:
pc = p0 + tc*u
if (minx-r <= pc[0] and pc[0] <= maxx+r
and minz-r <= pc[2] and pc[2] <= maxz+r):
return False
else:
if abs(p0[1] - miny) < r or abs(p0[1] - maxy) < r:
return False
if abs(u[2]) > epsilon:
tc = (minz-p0[2])/u[2]
if 0 <= tc and tc <= 1:
pc = p0 + tc*u
if (minx-r <= pc[0] and pc[0] <= maxx+r
and miny-r <= pc[1] and pc[1] <= maxy+r):
return False
tc = (maxz-p0[2])/u[2]
if 0 <= tc and tc <= 1:
pc = p0 + tc*u
if (minx-r <= pc[0] and pc[0] <= maxx+r
and miny-r <= pc[1] and pc[1] <= maxy+r):
return False
else:
if abs(p0[2] - minz) < r or abs(p0[2] - maxz) < r:
return False
return True
def distanceLineSegments(self, p0, u, q0, v):
w0 = p0 - q0
p1 = p0+u
q1 = q0+v
a = u.dot(u)
b = u.dot(v)
c = v.dot(v)
d = u.dot(w0)
e = v.dot(w0)
if a*c - b*b > 1e-6:
sc = (b*e - c*d)/(a*c - b*b)
tc = (a*e - b*d)/(a*c - b*b)
else:
sc = 0
tc = d / b
pc = p0 + sc*u
qc = q0 + tc*v
if 0 <= sc and sc <= 1 and 0 <= tc <= 1:
return (pc - qc).abs() # distance between 2 lines
else:
if sc < 0:
s0 = 0
t0 = e/c
if 0 <= t0 and t0 <= 1:
return (p0-(q0+t0*v)).abs()
else:
return min((p0-q0).abs(), (p0-q1).abs())
if sc > 1:
s0 = 1
t0 = (e+b)/c
if 0 <= t0 and t0 <= 1:
return (p1-(q0+t0*v)).abs()
else:
return min((p1-q0).abs(), (p1-q1).abs())
if tc < 0:
t0 = 0
s0 = - (d/a)
if 0 <= s0 and s0 <= 1:
return ((p0+s0*u)-q0).abs()
else:
return min((p0-q0).abs(), (p1-q0).abs())
if tc > 1:
t0 = 1
s0 = (b-d)/a
if 0 <= s0 and s0 <= 1:
return ((p0+s0*u)-q1).abs()
else:
return min((p0-q1).abs(), (p1-q1).abs())
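# A small worked example (comment only; VECTOR comes from the geo package
# imported above): two parallel capsules of radius 60 whose axes are 200 apart
# do not collide, so the checker reports them as collision free:
#   SimpleCollisionChecker().isCollisionFree(
#       CCapsule(VECTOR(0, 0, 0), VECTOR(0, 0, 100)),
#       CCapsule(VECTOR(200, 0, 0), VECTOR(0, 0, 100)))  ->  True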
class CCError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
# pts = []
# for i in range(200):
# pts.append(array([random.uniform(S1limit[0], S1limit[1]),
# random.uniform(S2limit[0], S2limit[1]),
# random.uniform(S3limit[0], S3limit[1]),
# random.uniform(E1limit[0], E1limit[1]),
# random.uniform(E2limit[0], E2limit[1]),
# random.uniform(W1limit[0], W1limit[1]),
# random.uniform(W2limit[0], W2limit[1])]))
# tree = CachedKdTrees()
# for i in range(100):
# tree.insert([random.uniform(S1limit[0], S1limit[1]),
# random.uniform(S2limit[0], S2limit[1]),
# random.uniform(S3limit[0], S3limit[1]),
# random.uniform(E1limit[0], E1limit[1]),
# random.uniform(E2limit[0], E2limit[1]),
# random.uniform(W1limit[0], W1limit[1]),
# random.uniform(W2limit[0], W2limit[1])])
#tree.query([zeros(7)])
def rad2deg_trajectory(traj):
def rad2deg_config(q):
return map(rad2deg, q)
path,tms = unzip(traj)
return zip(map(rad2deg_config, path), tms)
###
colors = {
'clear' : '\033[0m',
'black' : '\033[30m',
'red' : '\033[31m',
'green' : '\033[32m',
'yellow' : '\033[33m',
'blue' : '\033[34m',
'purple' : '\033[35m',
'cyan' : '\033[36m',
'white' : '\033[37m'
}
def colored_print(msg, color):
print '%s%s%s'%(colors[color], msg, colors['clear'])
def warn(msg):
print '%s%s%s'%(colors['red'], msg, colors['clear'])
class RecognitionFailure(Exception):
pass
class IKFailure(Exception):
pass
class PlanningFailure(Exception):
pass
|
# Copyright 2022 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants and functions to processing applicant data and define conventions"""
# dataframe
GROUP_RETURNERS = "returners"
GROUP_NEWCOMERS = "newcomers"
# Release team sub team names
TEAM_BUGTRIAGE = "Bug Triage"
TEAM_CISIGNAL = "CI Signal"
TEAM_COMMUNICATIONS = "Communications"
TEAM_RELEASE_NOTES = "Release Notes"
TEAM_DOCS = "Documentation"
TEAM_ENHANCEMENTS = "Enhancements"
RELEASE_TEAM_TEAMS = [TEAM_BUGTRIAGE, TEAM_CISIGNAL,
TEAM_COMMUNICATIONS, TEAM_RELEASE_NOTES, TEAM_DOCS, TEAM_ENHANCEMENTS]
# Folders
APPLICANTS_FOLDER = "applicants"
PLOT_FOLDER = "plots"
# Themes
THEME_MARPLOTLIB = 'ggplot'
def get_applicants_file(team_name, group):
"""function to define how to format markdown file names"""
return f"./{APPLICANTS_FOLDER}/{team_name}-{group}.md"
def get_plot_file(filename):
"""function to define how to format plot file names"""
return f"./{PLOT_FOLDER}/{filename}.png"
company_keywords = [
"student",
"liquid reply",
"vmware",
"microsoft",
"red hat",
"institute",
"cisco",
"ibm",
"apple",
"suse",
"google",
"independent",
"deloitte",
"adeste"
]
company_aliases = {
"redhat": "red hat",
"freelancer": "independent",
"independant": "independent"
}
timezone_aliases = {
"gmt": "london gmt+0", "paris": "london gmt+0", "london": "london gmt+0",
"middle europe": "central europe gmt+1", "cet": "central europe gmt+1",
"+ 1": "central europe gmt+1", "central time": "central europe gmt+1",
"central european time": "central europe gmt+1", "berlin":
"central europe gmt+1", "+1": "central europe gmt+1",
"ist": "india gmt+5", "+5": "india gmt+5", "+ 5": "india gmt+5",
"india": "india gmt+5", "indian": "india gmt+5", "+ 6": "india gmt+5",
"pst": "us pacific gmt-8", "pdt": "us pacific gmt-8",
"pacific": "us pacific gmt-8", "pacific time": "us pacific gmt-8",
"edt": "us east gmt-5", "eastern time": "us east gmt-5",
"us east": "us east gmt-5", "est": "us east gmt-5",
"+4": "iran gmt+4",
"+2": "east europe gmt+2", "eastern europe": "east europe gmt+2",
"eastern standard time": "east europe gmt+2",
"+3": "arabia gmt+3",
"+9": "japan gmt+9", "jst": "japan gmt+9",
"+8": "china gmt+8", "shanghai": "china gmt+8",
"utc": "london gmt+0"
}
pronouns = ["he/they", "he/him", "she/her", "she/they",
"they/them", "ze", "neopronouns", "other"]
# Schema variables reference column headers of the applicant excel file TODO: dynamic schema
RELEASE_VERSION = "1.24"
# General applicant infos
SCHEMA_EMAIL = "Email Address"
SCHEMA_NAME = "Name"
SCHEMA_PRONOUNS = "To help address everyone correctly, please share your pronouns if you're comfortable doing so. You can more about pronoun sharing here https://www.mypronouns.org/sharing"
SCHEMA_SLACK = "Slack Handle"
SCHEMA_GITHUB = "Github Handle"
SCHEMA_AFFILIATION = "Company Affiliation / Employer"
SCHEMA_PREVIOUSLY_SERVED = "Have you previously served on a Kubernetes Release Team?"
# Returners infos
SCHEMA_RETURNERS_PREVIOUS_ROLES = "Which release team roles have you served in?"
SCHEMA_RETURNERS_PREVIOUS_RELEASE_AND_ROLE = "Please tell us which release team(s) you were previously on and what role you held (i.e. Lead or Shadow)"
SCHEMA_RETURNERS_INTERESTED_IN_ROLES = f"What release team roles are you interested in for {RELEASE_VERSION}?"
SCHEMA_RETURNERS_CAN_VOLUNTEER_FOR_UP_COMING_CYCLES = "Can you volunteer for 1.25 or 1.26?"
SCHEMA_RETURNERS_TIMEZONE = "What time zone are you normally in?"
SCHEMA_RETURNERS_GOALS = "Goals"
SCHEMA_RETURNERS_CONTRIBUTION_PLANS = "Contribution Plans"
SCHEMA_RETURNERS_INTERESTED_IN_STABLE_ROSTER = "Are you interested in joining a release team stable roster"
# Newcomers
SCHEMA_NEWCOMERS_INTERESTED_IN_ROLES = "Which release roles are you interested in?"
SCHEMA_NEWCOMERS_READ_HANDBOOK = "Have you read the role handbook associated with that role?"
SCHEMA_NEWCOMERS_WHY_INTERESTED = "Why are you interested in that role(s)?"
SCHEMA_NEWCOMERS_HANDBOOK_QUESTIONS = "Do you have other feedback or questions about the handbook?"
SCHEMA_NEWCOMERS_TIMESTIMATE = "How much time do you estimate you can commit to the Release Team a week? "
SCHEMA_NEWCOMERS_ATTEND_RELEASE_TEAM_MEETINGS = "Will you be able to attend Release Team meetings? "
SCHEMA_NEWCOMERS_ATTEND_BURNDOWN_MEETINGS = "Will you be able to attend Burndown meetings?"
SCHEMA_NEWCOMERS_SCHEDULED_CONFLICTS = "Do you have schedule conflicts?"
SCHEMA_NEWCOMERS_VOLUNTEER_UPCOMING_RELEASE = f"{SCHEMA_RETURNERS_CAN_VOLUNTEER_FOR_UP_COMING_CYCLES}.1"
SCHEMA_NEWCOMERS_TIMEZONE = f"{SCHEMA_RETURNERS_TIMEZONE}.1"
SCHEMA_NEWCOMERS_EXPERIENCE_CONTRIBUTING = "What is your experience contributing?"
SCHEMA_NEWCOMERS_SIGNED_CLA = "Have you signed the CLA?"
SCHEMA_NEWCOMERS_K8S_ORG_MEMBER = "Are you a Kubernetes Org Member?"
SCHEMA_NEWCOMERS_PRIOR_RELEASE_TEAMS = "Prior Release Teams"
SCHEMA_NEWCOMERS_RELEVANT_EXPERIENCE = "Relevant Experience"
SCHEMA_NEWCOMERS_GOALS = f"{SCHEMA_RETURNERS_GOALS}.1"
SCHEMA_NEWCOMERS_CONTRIBUTION_PLANS = f"{SCHEMA_RETURNERS_CONTRIBUTION_PLANS}.1"
SCHEMA_NEWCOMERS_COMMENTS = "Comments"
SCHEMA_NEWCOMERS_APPLIED_PREVIOUSLY = "Have you applied to any previous Kubernetes release teams?"
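# A minimal, standalone sketch (hypothetical helpers, not used elsewhere in this
# module) of how the alias tables above can normalise free-text applicant answers.
if __name__ == "__main__":
    def normalize_company(raw: str) -> str:
        value = raw.strip().lower()
        return company_aliases.get(value, value)

    def normalize_timezone(raw: str) -> str:
        value = raw.strip().lower()
        for keyword, canonical in timezone_aliases.items():
            if keyword in value:
                return canonical
        return value

    print(normalize_company("RedHat"))        # -> "red hat"
    print(normalize_timezone("CET, Berlin"))  # -> "central europe gmt+1"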
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
# ported to python 3.x by Dragneel1234
from __future__ import print_function
try:
from urllib2 import urlopen, Request, unquote, HTTPError
from urllib import urlencode
except ImportError:
from urllib.request import urlopen, Request
from urllib.parse import urlencode, unquote
from urllib.error import HTTPError
import re
import sys
import os
import os.path
import argparse
def info_extractor(url):
_VALID_URL = b'(?:https://)?(?:www\.)?watchcartoononline\.io/([^/]+)'
# normalise to bytes so both str (command line) and bytes (scraped) URLs work with the bytes pattern
url = url.encode("utf-8") if isinstance(url, str) else url
#checks if url is valid
if re.match(_VALID_URL, url) is not None:
#sets user_agent so watchcartoononline doesn't cause issues
user_agent = 'Mozilla/5.0 (Windows NT 5.1; rv:10.0.1) Gecko/20100101 Firefox/10.0.1'
headers = { 'User-Agent' : user_agent }
print("[watchcartoononline-dl] Downloading webpage")
request = Request(url.decode(),headers=headers)
webpage = urlopen(request).read()
print("[watchcartoononline-dl] Finding video")
video_url = re.search(b'<iframe [^>]*src="https://www.watchcartoononline.io/inc/(.+?)>', webpage).group()
video_url = re.search(b'src="(.+?)"', video_url).group(1).replace(b' ',b'%20')
# "clicks" the "Click Here to Watch Free" button to so it can access the actual video file url
#print("[watchcartoononline-dl] Clicking stupid 'Watch Free' button"
params = urlencode({'fuck_you':'','confirm':'Click Here to Watch Free!!'})
print("[watchcartoononline-dl] Getting video URL")
request = Request(video_url.decode("utf-8"),params.encode(),headers=headers)
video_webpage = urlopen(request).read()
#scrapes the actual file url
final_url = re.findall(b'file: "(.+?)"', video_webpage)
#throws error if list is blank
if not final_url:
print("ERROR: Video not found")
else:
return final_url[-1]
else:
print("ERROR: URL was invalid, please use a valid URL from www.watchcartoononline.com")
def episodes_extractor(episode_list, ep_range, directory):
_VALID_URL = r'(?:https://)?(?:www\.)?watchcartoononline\.io/anime/([^/]+)'
#check if url is valid
if re.match(_VALID_URL, episode_list) is not None:
#sets user_agent so watchcartoononline doesn't cause issues
user_agent = 'Mozilla/5.0 (Windows NT 5.1; rv:10.0.1) Gecko/20100101 Firefox/10.0.1'
headers = { 'User-Agent' : user_agent }
print("[watchcartoononline-dl] Downloading webpage")
request = Request(episode_list, headers=headers)
webpage = urlopen(request).read()
print("[watchcartoononline-dl] Finding episode(s)")
#remove the end of the html, to avoid matching episodes in the 'recenly added' bar
indexOfRecenly = webpage.find(b"Recenly")
truncated = ""
if indexOfRecenly != -1:
truncated = webpage[:indexOfRecenly]
else:
print("WARNING: couldn't find 'Recenly Added' section in page, maybe the site layout has changed?")
#todo: improve this regex to work for more stuff
page_urls = re.findall(b'https://www.watchcartoononline.io/[a-zA-Z0-9-]+episode-[0-9]{1,4}[a-zA-Z0-9-]+', truncated)[::-1]
# print the list of URLs we are about to download
if len(ep_range) > 0:
start = ep_range[0]
end = ep_range[1]
if end < start:
print("ERROR: Please specify a valid episode range, end is before start")
sys.exit(0)
try:
start_idx = [i for i, s in enumerate(page_urls) if str(start).encode() in s][0]
end_idx = [i for i, s in enumerate(page_urls) if str(end).encode() in s][0]
except IndexError:
print("ERROR: Please specify a valid episode range, this range is out of range of the episodes")
sys.exit(0)
page_urls = page_urls[start_idx:end_idx+1]
print("[watchcartoononline-dl] Downloading episodes {} thru {}".format(start, end))
print("URLs found:")
for url in page_urls:
print(url.decode("utf-8"))
#run original script on each episode URL we found
for url in page_urls:
print("[watchcartoononline-dl] Downloading "+ url.decode("utf-8"))
doAnEpisode(url, directory)
else:
print("ERROR: URL was invalid, please use a valid URL from www.watchcartoononline.com")
def downloader(fileurl, file_name):
try:
#opens the video file url
u = urlopen(fileurl)
except HTTPError as he:
print("HTTPError! code:"+str(he.code))
return
#gets metadata
meta = u.info()
file_size = int(u.info()["Content-Length"])
file_type = u.info()["Content-Type"]
#before downloading, check if file already exists and is the expected size
if os.path.isfile(file_name) and os.path.getsize(file_name) == file_size:
print("[watchcartoononline-dl] file already exists and is the correct size, skipping...")
return
try:
#writes new file with the filename provided
f = open(file_name, 'wb')
except IOError as e:
print(e)
print("ERROR: That directory doesn't exist, please create the folder or specify a valid folder.")
sys.exit(0)
print("[watchcartoononline-dl] Filetype: %s" %(file_type))
print("[watchcartoononline-dl] Destination: %s" %(file_name))
file_size_dl = 0
block_size = 8192
#Download loop
while True:
buffer = u.read(block_size)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"[download] %s of %s [%3.2f%%]" % (convertSize(file_size_dl), convertSize(file_size), file_size_dl * 100. / file_size)
sys.stdout.write((" " * (int(os.environ.get("COLUMNS") or 80)-2)) + "\r")
sys.stdout.write(status)
sys.stdout.flush()
#Download done. Close file stream
f.close()
sys.stdout.write(os.linesep)
sys.stdout.flush()
def convertSize(n, format='%(value).1f %(symbol)s', symbols='iec'):
"""
Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
"""
SYMBOLS = {
'customary' : ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'),
'customary_ext' : ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
'zetta', 'iotta'),
'iec' : ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
'iec_ext' : ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
'zebi', 'yobi'),
}
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = SYMBOLS[symbols]
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i+1)*10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
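# A couple of worked examples for convertSize() (values rounded by the format string):
#   convertSize(10000)                       -> '9.8 Ki'
#   convertSize(10000, symbols='customary')  -> '9.8 K'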
def doAnEpisode(url, directory):
#url = sys.argv[1]
final_url = info_extractor(url)
if final_url is None:
print("ERROR: unable to extract video url from " + url.decode("utf-8"))
else:
final_url = final_url.decode("utf-8") # the scraped file url is bytes; decode it before building the file name
name = final_url.replace('%20',' ').split('/')[-1]
name = name[:name.find('?')] # remove trailing URL arguments
name = unquote(name)
if directory is None:
directory = os.getcwd()
downloader(final_url, os.path.join(directory, name))
if __name__ == '__main__':
# Setup command line args, url, range, and directory
parser = argparse.ArgumentParser(prog='watch-dl.py', usage='%(prog)s [URL] [-h] [-r START_EPISODE_NUMBER END_EPISODE_NUMBER] [-d DIRECTORY]')
# parser = argparse.ArgumentParser()
parser.add_argument('url', nargs=1)
parser.add_argument('-r', '--range', nargs=2, type=int, help='Range of episodes to download.')
parser.add_argument('-d', '--directory', nargs=1, help='Directory to download episodes to, if not provided will download to current working directory.')
parsed = parser.parse_args()
# NOTE: args with '-' have it replaced with '_'
print('Result:', vars(parsed))
try:
url = parsed.url[0]
ep_range = parsed.range if parsed.range else []
dir = parsed.directory[0] if parsed.directory else ""
if "/anime/" in url: #argument looks like an episode-list page
print("[watchcartoononline-dl] looks like a list of episodes (season?), extracting episode page URLs...")
episodes_extractor(url, ep_range, dir)
else: #episode should be a video page
doAnEpisode(url, dir)
#throws error message for keyboard interrupt eg: ctrl+c
except KeyboardInterrupt:
print("\nERROR: Interrupted by user")
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
# Library to extract EXIF information in digital camera image files
#
# To use this library call with:
# f=open(path_name, 'rb')
# tags=EXIF.process_file(f)
# tags will now be a dictionary mapping names of EXIF tags to their
# values in the file named by path_name. You can process the tags
# as you wish. In particular, you can iterate through all the tags with:
# for tag in tags.keys():
# if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'Filename',
# 'EXIF MakerNote'):
# print "Key: %s, value %s" % (tag, tags[tag])
# (This code uses the if statement to avoid printing out a few of the
# tags that tend to be long or boring.)
#
# The tags dictionary will include keys for all of the usual EXIF
# tags, and will also include keys for Makernotes used by some
# cameras, for which we have a good specification.
#
# Contains code from "exifdump.py" originally written by Thierry Bousch
# <bousch@topo.math.u-psud.fr> and released into the public domain.
#
# Updated and turned into general-purpose library by Gene Cash
# <email gcash at cfl.rr.com>
#
# This copyright license is intended to be similar to the FreeBSD license.
#
# Copyright 2002 Gene Cash All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY GENE CASH ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This means you may do anything you want with this code, except claim you
# wrote it. Also, if it breaks you get to keep both pieces.
#
# Patch Contributors:
# * Simon J. Gerraty <sjg@crufty.net>
# s2n fix & orientation decode
# * John T. Riedl <riedl@cs.umn.edu>
# Added support for newer Nikon type 3 Makernote format for D70 and some
# other Nikon cameras.
# * Joerg Schaefer <schaeferj@gmx.net>
# Fixed subtle bug when faking an EXIF header, which affected maker notes
# using relative offsets, and a fix for Nikon D100.
#
# 21-AUG-99 TB Last update by Thierry Bousch to his code.
# 17-JAN-02 CEC Discovered code on web.
# Commented everything.
# Made small code improvements.
# Reformatted for readability.
# 19-JAN-02 CEC Added ability to read TIFFs and JFIF-format JPEGs.
# Added ability to extract JPEG formatted thumbnail.
# Added ability to read GPS IFD (not tested).
# Converted IFD data structure to dictionaries indexed by
# tag name.
# Factored into library returning dictionary of IFDs plus
# thumbnail, if any.
# 20-JAN-02 CEC Added MakerNote processing logic.
# Added Olympus MakerNote.
# Converted data structure to single-level dictionary, avoiding
# tag name collisions by prefixing with IFD name. This makes
# it much easier to use.
# 23-JAN-02 CEC Trimmed nulls from end of string values.
# 25-JAN-02 CEC Discovered JPEG thumbnail in Olympus TIFF MakerNote.
# 26-JAN-02 CEC Added ability to extract TIFF thumbnails.
# Added Nikon, Fujifilm, Casio MakerNotes.
# 30-NOV-03 CEC Fixed problem with canon_decode_tag() not creating an
# IFD_Tag() object.
# 15-FEB-04 CEC Finally fixed bit shift warning by converting Y to 0L.
#
# field type descriptions as (length, abbreviation, full name) tuples
FIELD_TYPES=(
(0, 'X', 'Proprietary'), # no such type
(1, 'B', 'Byte'),
(1, 'A', 'ASCII'),
(2, 'S', 'Short'),
(4, 'L', 'Long'),
(8, 'R', 'Ratio'),
(1, 'SB', 'Signed Byte'),
(1, 'U', 'Undefined'),
(2, 'SS', 'Signed Short'),
(4, 'SL', 'Signed Long'),
(8, 'SR', 'Signed Ratio')
)
# dictionary of main EXIF tag names
# first element of tuple is tag name, optional second element is
# another dictionary giving names to values
EXIF_TAGS={
0x0100: ('ImageWidth', ),
0x0101: ('ImageLength', ),
0x0102: ('BitsPerSample', ),
0x0103: ('Compression',
{1: 'Uncompressed TIFF',
6: 'JPEG Compressed'}),
0x0106: ('PhotometricInterpretation', ),
0x010A: ('FillOrder', ),
0x010D: ('DocumentName', ),
0x010E: ('ImageDescription', ),
0x010F: ('Make', ),
0x0110: ('Model', ),
0x0111: ('StripOffsets', ),
0x0112: ('Orientation',
{1: 'Horizontal (normal)',
2: 'Mirrored horizontal',
3: 'Rotated 180',
4: 'Mirrored vertical',
5: 'Mirrored horizontal then rotated 90 CCW',
6: 'Rotated 90 CW',
7: 'Mirrored horizontal then rotated 90 CW',
8: 'Rotated 90 CCW'}),
0x0115: ('SamplesPerPixel', ),
0x0116: ('RowsPerStrip', ),
0x0117: ('StripByteCounts', ),
0x011A: ('XResolution', ),
0x011B: ('YResolution', ),
0x011C: ('PlanarConfiguration', ),
0x0128: ('ResolutionUnit',
{1: 'Not Absolute',
2: 'Pixels/Inch',
3: 'Pixels/Centimeter'}),
0x012D: ('TransferFunction', ),
0x0131: ('Software', ),
0x0132: ('DateTime', ),
0x013B: ('Artist', ),
0x013E: ('WhitePoint', ),
0x013F: ('PrimaryChromaticities', ),
0x0156: ('TransferRange', ),
0x0200: ('JPEGProc', ),
0x0201: ('JPEGInterchangeFormat', ),
0x0202: ('JPEGInterchangeFormatLength', ),
0x0211: ('YCbCrCoefficients', ),
0x0212: ('YCbCrSubSampling', ),
0x0213: ('YCbCrPositioning', ),
0x0214: ('ReferenceBlackWhite', ),
0x828D: ('CFARepeatPatternDim', ),
0x828E: ('CFAPattern', ),
0x828F: ('BatteryLevel', ),
0x8298: ('Copyright', ),
0x829A: ('ExposureTime', ),
0x829D: ('FNumber', ),
0x83BB: ('IPTC/NAA', ),
0x8769: ('ExifOffset', ),
0x8773: ('InterColorProfile', ),
0x8822: ('ExposureProgram',
{0: 'Unidentified',
1: 'Manual',
2: 'Program Normal',
3: 'Aperture Priority',
4: 'Shutter Priority',
5: 'Program Creative',
6: 'Program Action',
7: 'Portrait Mode',
8: 'Landscape Mode'}),
0x8824: ('SpectralSensitivity', ),
0x8825: ('GPSInfo', ),
0x8827: ('ISOSpeedRatings', ),
0x8828: ('OECF', ),
# print as string
0x9000: ('ExifVersion', lambda x: ''.join(map(chr, x))),
0x9003: ('DateTimeOriginal', ),
0x9004: ('DateTimeDigitized', ),
0x9101: ('ComponentsConfiguration',
{0: '',
1: 'Y',
2: 'Cb',
3: 'Cr',
4: 'Red',
5: 'Green',
6: 'Blue'}),
0x9102: ('CompressedBitsPerPixel', ),
0x9201: ('ShutterSpeedValue', ),
0x9202: ('ApertureValue', ),
0x9203: ('BrightnessValue', ),
0x9204: ('ExposureBiasValue', ),
0x9205: ('MaxApertureValue', ),
0x9206: ('SubjectDistance', ),
0x9207: ('MeteringMode',
{0: 'Unidentified',
1: 'Average',
2: 'CenterWeightedAverage',
3: 'Spot',
4: 'MultiSpot'}),
0x9208: ('LightSource',
{0: 'Unknown',
1: 'Daylight',
2: 'Fluorescent',
3: 'Tungsten',
10: 'Flash',
17: 'Standard Light A',
18: 'Standard Light B',
19: 'Standard Light C',
20: 'D55',
21: 'D65',
22: 'D75',
255: 'Other'}),
0x9209: ('Flash', {0: 'No',
1: 'Fired',
5: 'Fired (?)', # no return sensed
7: 'Fired (!)', # return sensed
9: 'Fill Fired',
13: 'Fill Fired (?)',
15: 'Fill Fired (!)',
16: 'Off',
24: 'Auto Off',
25: 'Auto Fired',
29: 'Auto Fired (?)',
31: 'Auto Fired (!)',
32: 'Not Available'}),
0x920A: ('FocalLength', ),
0x927C: ('MakerNote', ),
# print as string
0x9286: ('UserComment', lambda x: ''.join(map(chr, x))),
0x9290: ('SubSecTime', ),
0x9291: ('SubSecTimeOriginal', ),
0x9292: ('SubSecTimeDigitized', ),
# print as string
0xA000: ('FlashPixVersion', lambda x: ''.join(map(chr, x))),
0xA001: ('ColorSpace', ),
0xA002: ('ExifImageWidth', ),
0xA003: ('ExifImageLength', ),
0xA005: ('InteroperabilityOffset', ),
0xA20B: ('FlashEnergy', ), # 0x920B in TIFF/EP
0xA20C: ('SpatialFrequencyResponse', ), # 0x920C - -
0xA20E: ('FocalPlaneXResolution', ), # 0x920E - -
0xA20F: ('FocalPlaneYResolution', ), # 0x920F - -
0xA210: ('FocalPlaneResolutionUnit', ), # 0x9210 - -
0xA214: ('SubjectLocation', ), # 0x9214 - -
0xA215: ('ExposureIndex', ), # 0x9215 - -
0xA217: ('SensingMethod', ), # 0x9217 - -
0xA300: ('FileSource',
{3: 'Digital Camera'}),
0xA301: ('SceneType',
{1: 'Directly Photographed'}),
0xA302: ('CVAPattern',),
}
# interoperability tags
INTR_TAGS={
0x0001: ('InteroperabilityIndex', ),
0x0002: ('InteroperabilityVersion', ),
0x1000: ('RelatedImageFileFormat', ),
0x1001: ('RelatedImageWidth', ),
0x1002: ('RelatedImageLength', ),
}
# GPS tags (not used yet, haven't seen camera with GPS)
GPS_TAGS={
0x0000: ('GPSVersionID', ),
0x0001: ('GPSLatitudeRef', ),
0x0002: ('GPSLatitude', ),
0x0003: ('GPSLongitudeRef', ),
0x0004: ('GPSLongitude', ),
0x0005: ('GPSAltitudeRef', ),
0x0006: ('GPSAltitude', ),
0x0007: ('GPSTimeStamp', ),
0x0008: ('GPSSatellites', ),
0x0009: ('GPSStatus', ),
0x000A: ('GPSMeasureMode', ),
0x000B: ('GPSDOP', ),
0x000C: ('GPSSpeedRef', ),
0x000D: ('GPSSpeed', ),
0x000E: ('GPSTrackRef', ),
0x000F: ('GPSTrack', ),
0x0010: ('GPSImgDirectionRef', ),
0x0011: ('GPSImgDirection', ),
0x0012: ('GPSMapDatum', ),
0x0013: ('GPSDestLatitudeRef', ),
0x0014: ('GPSDestLatitude', ),
0x0015: ('GPSDestLongitudeRef', ),
0x0016: ('GPSDestLongitude', ),
0x0017: ('GPSDestBearingRef', ),
0x0018: ('GPSDestBearing', ),
0x0019: ('GPSDestDistanceRef', ),
0x001A: ('GPSDestDistance', )
}
# Nikon E99x MakerNote Tags
# http://members.tripod.com/~tawba/990exif.htm
MAKERNOTE_NIKON_NEWER_TAGS={
0x0002: ('ISOSetting', ),
0x0003: ('ColorMode', ),
0x0004: ('Quality', ),
0x0005: ('Whitebalance', ),
0x0006: ('ImageSharpening', ),
0x0007: ('FocusMode', ),
0x0008: ('FlashSetting', ),
0x0009: ('AutoFlashMode', ),
0x000B: ('WhiteBalanceBias', ),
0x000C: ('WhiteBalanceRBCoeff', ),
0x000F: ('ISOSelection', ),
0x0012: ('FlashCompensation', ),
0x0013: ('ISOSpeedRequested', ),
0x0016: ('PhotoCornerCoordinates', ),
0x0018: ('FlashBracketCompensationApplied', ),
0x0019: ('AEBracketCompensationApplied', ),
0x0080: ('ImageAdjustment', ),
0x0081: ('ToneCompensation', ),
0x0082: ('AuxiliaryLens', ),
0x0083: ('LensType', ),
0x0084: ('LensMinMaxFocalMaxAperture', ),
0x0085: ('ManualFocusDistance', ),
0x0086: ('DigitalZoomFactor', ),
0x0088: ('AFFocusPosition',
{0x0000: 'Center',
0x0100: 'Top',
0x0200: 'Bottom',
0x0300: 'Left',
0x0400: 'Right'}),
0x0089: ('BracketingMode',
{0x00: 'Single frame, no bracketing',
0x01: 'Continuous, no bracketing',
0x02: 'Timer, no bracketing',
0x10: 'Single frame, exposure bracketing',
0x11: 'Continuous, exposure bracketing',
0x12: 'Timer, exposure bracketing',
0x40: 'Single frame, white balance bracketing',
0x41: 'Continuous, white balance bracketing',
0x42: 'Timer, white balance bracketing'}),
0x008D: ('ColorMode', ),
0x008F: ('SceneMode?', ),
0x0090: ('LightingType', ),
0x0092: ('HueAdjustment', ),
0x0094: ('Saturation',
{-3: 'B&W',
-2: '-2',
-1: '-1',
0: '0',
1: '1',
2: '2'}),
0x0095: ('NoiseReduction', ),
0x00A7: ('TotalShutterReleases', ),
0x00A9: ('ImageOptimization', ),
0x00AA: ('Saturation', ),
0x00AB: ('DigitalVariProgram', ),
0x0010: ('DataDump', )
}
MAKERNOTE_NIKON_OLDER_TAGS={
0x0003: ('Quality',
{1: 'VGA Basic',
2: 'VGA Normal',
3: 'VGA Fine',
4: 'SXGA Basic',
5: 'SXGA Normal',
6: 'SXGA Fine'}),
0x0004: ('ColorMode',
{1: 'Color',
2: 'Monochrome'}),
0x0005: ('ImageAdjustment',
{0: 'Normal',
1: 'Bright+',
2: 'Bright-',
3: 'Contrast+',
4: 'Contrast-'}),
0x0006: ('CCDSpeed',
{0: 'ISO 80',
2: 'ISO 160',
4: 'ISO 320',
5: 'ISO 100'}),
0x0007: ('WhiteBalance',
{0: 'Auto',
1: 'Preset',
2: 'Daylight',
3: 'Incandescent',
4: 'Fluorescent',
5: 'Cloudy',
6: 'Speed Light'})
}
# decode Olympus SpecialMode tag in MakerNote
def olympus_special_mode(v):
a={
0: 'Normal',
1: 'Unknown',
2: 'Fast',
3: 'Panorama'}
b={
0: 'Non-panoramic',
1: 'Left to right',
2: 'Right to left',
3: 'Bottom to top',
4: 'Top to bottom'}
return '%s - sequence %d - %s' % (a[v[0]], v[1], b[v[2]])
MAKERNOTE_OLYMPUS_TAGS={
# ah HAH! those sneeeeeaky bastids! this is how they get past the fact
# that a JPEG thumbnail is not allowed in an uncompressed TIFF file
0x0100: ('JPEGThumbnail', ),
0x0200: ('SpecialMode', olympus_special_mode),
0x0201: ('JPEGQual',
{1: 'SQ',
2: 'HQ',
3: 'SHQ'}),
0x0202: ('Macro',
{0: 'Normal',
1: 'Macro'}),
0x0204: ('DigitalZoom', ),
0x0207: ('SoftwareRelease', ),
0x0208: ('PictureInfo', ),
# print as string
0x0209: ('CameraID', lambda x: ''.join(map(chr, x))),
0x0F00: ('DataDump', )
}
MAKERNOTE_CASIO_TAGS={
0x0001: ('RecordingMode',
{1: 'Single Shutter',
2: 'Panorama',
3: 'Night Scene',
4: 'Portrait',
5: 'Landscape'}),
0x0002: ('Quality',
{1: 'Economy',
2: 'Normal',
3: 'Fine'}),
0x0003: ('FocusingMode',
{2: 'Macro',
3: 'Auto Focus',
4: 'Manual Focus',
5: 'Infinity'}),
0x0004: ('FlashMode',
{1: 'Auto',
2: 'On',
3: 'Off',
4: 'Red Eye Reduction'}),
0x0005: ('FlashIntensity',
{11: 'Weak',
13: 'Normal',
15: 'Strong'}),
0x0006: ('Object Distance', ),
0x0007: ('WhiteBalance',
{1: 'Auto',
2: 'Tungsten',
3: 'Daylight',
4: 'Fluorescent',
5: 'Shade',
129: 'Manual'}),
0x000B: ('Sharpness',
{0: 'Normal',
1: 'Soft',
2: 'Hard'}),
0x000C: ('Contrast',
{0: 'Normal',
1: 'Low',
2: 'High'}),
0x000D: ('Saturation',
{0: 'Normal',
1: 'Low',
2: 'High'}),
0x0014: ('CCDSpeed',
{64: 'Normal',
80: 'Normal',
100: 'High',
125: '+1.0',
244: '+3.0',
250: '+2.0',})
}
MAKERNOTE_FUJIFILM_TAGS={
0x0000: ('NoteVersion', lambda x: ''.join(map(chr, x))),
0x1000: ('Quality', ),
0x1001: ('Sharpness',
{1: 'Soft',
2: 'Soft',
3: 'Normal',
4: 'Hard',
5: 'Hard'}),
0x1002: ('WhiteBalance',
{0: 'Auto',
256: 'Daylight',
512: 'Cloudy',
768: 'DaylightColor-Fluorescent',
769: 'DaywhiteColor-Fluorescent',
770: 'White-Fluorescent',
1024: 'Incandescent',
3840: 'Custom'}),
0x1003: ('Color',
{0: 'Normal',
256: 'High',
512: 'Low'}),
0x1004: ('Tone',
{0: 'Normal',
256: 'High',
512: 'Low'}),
0x1010: ('FlashMode',
{0: 'Auto',
1: 'On',
2: 'Off',
3: 'Red Eye Reduction'}),
0x1011: ('FlashStrength', ),
0x1020: ('Macro',
{0: 'Off',
1: 'On'}),
0x1021: ('FocusMode',
{0: 'Auto',
1: 'Manual'}),
0x1030: ('SlowSync',
{0: 'Off',
1: 'On'}),
0x1031: ('PictureMode',
{0: 'Auto',
1: 'Portrait',
2: 'Landscape',
4: 'Sports',
5: 'Night',
6: 'Program AE',
256: 'Aperture Priority AE',
512: 'Shutter Priority AE',
768: 'Manual Exposure'}),
0x1100: ('MotorOrBracket',
{0: 'Off',
1: 'On'}),
0x1300: ('BlurWarning',
{0: 'Off',
1: 'On'}),
0x1301: ('FocusWarning',
{0: 'Off',
1: 'On'}),
0x1302: ('AEWarning',
{0: 'Off',
1: 'On'})
}
MAKERNOTE_CANON_TAGS={
0x0006: ('ImageType', ),
0x0007: ('FirmwareVersion', ),
0x0008: ('ImageNumber', ),
0x0009: ('OwnerName', )
}
# see http://www.burren.cx/david/canon.html by David Burren
# this is in element offset, name, optional value dictionary format
MAKERNOTE_CANON_TAG_0x001={
1: ('Macromode',
{1: 'Macro',
2: 'Normal'}),
2: ('SelfTimer', ),
3: ('Quality',
{2: 'Normal',
3: 'Fine',
5: 'Superfine'}),
4: ('FlashMode',
{0: 'Flash Not Fired',
1: 'Auto',
2: 'On',
3: 'Red-Eye Reduction',
4: 'Slow Synchro',
5: 'Auto + Red-Eye Reduction',
6: 'On + Red-Eye Reduction',
16: 'external flash'}),
5: ('ContinuousDriveMode',
{0: 'Single Or Timer',
1: 'Continuous'}),
7: ('FocusMode',
{0: 'One-Shot',
1: 'AI Servo',
2: 'AI Focus',
3: 'MF',
4: 'Single',
5: 'Continuous',
6: 'MF'}),
10: ('ImageSize',
{0: 'Large',
1: 'Medium',
2: 'Small'}),
11: ('EasyShootingMode',
{0: 'Full Auto',
1: 'Manual',
2: 'Landscape',
3: 'Fast Shutter',
4: 'Slow Shutter',
5: 'Night',
6: 'B&W',
7: 'Sepia',
8: 'Portrait',
9: 'Sports',
10: 'Macro/Close-Up',
11: 'Pan Focus'}),
12: ('DigitalZoom',
{0: 'None',
1: '2x',
2: '4x'}),
13: ('Contrast',
{0xFFFF: 'Low',
0: 'Normal',
1: 'High'}),
14: ('Saturation',
{0xFFFF: 'Low',
0: 'Normal',
1: 'High'}),
15: ('Sharpness',
{0xFFFF: 'Low',
0: 'Normal',
1: 'High'}),
16: ('ISO',
{0: 'See ISOSpeedRatings Tag',
15: 'Auto',
16: '50',
17: '100',
18: '200',
19: '400'}),
17: ('MeteringMode',
{3: 'Evaluative',
4: 'Partial',
5: 'Center-weighted'}),
18: ('FocusType',
{0: 'Manual',
1: 'Auto',
3: 'Close-Up (Macro)',
8: 'Locked (Pan Mode)'}),
19: ('AFPointSelected',
{0x3000: 'None (MF)',
0x3001: 'Auto-Selected',
0x3002: 'Right',
0x3003: 'Center',
0x3004: 'Left'}),
20: ('ExposureMode',
{0: 'Easy Shooting',
1: 'Program',
2: 'Tv-priority',
3: 'Av-priority',
4: 'Manual',
5: 'A-DEP'}),
23: ('LongFocalLengthOfLensInFocalUnits', ),
24: ('ShortFocalLengthOfLensInFocalUnits', ),
25: ('FocalUnitsPerMM', ),
28: ('FlashActivity',
{0: 'Did Not Fire',
1: 'Fired'}),
29: ('FlashDetails',
{14: 'External E-TTL',
13: 'Internal Flash',
11: 'FP Sync Used',
7: '2nd("Rear")-Curtain Sync Used',
4: 'FP Sync Enabled'}),
32: ('FocusMode',
{0: 'Single',
1: 'Continuous'})
}
MAKERNOTE_CANON_TAG_0x004={
7: ('WhiteBalance',
{0: 'Auto',
1: 'Sunny',
2: 'Cloudy',
3: 'Tungsten',
4: 'Fluorescent',
5: 'Flash',
6: 'Custom'}),
9: ('SequenceNumber', ),
14: ('AFPointUsed', ),
15: ('FlashBias',
{0XFFC0: '-2 EV',
0XFFCC: '-1.67 EV',
0XFFD0: '-1.50 EV',
0XFFD4: '-1.33 EV',
0XFFE0: '-1 EV',
0XFFEC: '-0.67 EV',
0XFFF0: '-0.50 EV',
0XFFF4: '-0.33 EV',
0X0000: '0 EV',
0X000C: '0.33 EV',
0X0010: '0.50 EV',
0X0014: '0.67 EV',
0X0020: '1 EV',
0X002C: '1.33 EV',
0X0030: '1.50 EV',
0X0034: '1.67 EV',
0X0040: '2 EV'}),
19: ('SubjectDistance', )
}
# extract multibyte integer in Motorola format (big endian)
def s2n_motorola(str):
x=0
for c in str:
x=(x << 8) | ord(c)
return x
# extract multibyte integer in Intel format (little endian)
def s2n_intel(str):
x=0
y=0
for c in str:
x=x | (ord(c) << y)
y=y+8
return x
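# Worked example of the two byte orders for the same two-byte string '\x01\x02':
#   s2n_motorola('\x01\x02') -> 0x0102 == 258 (most significant byte first)
#   s2n_intel('\x01\x02')    -> 0x0201 == 513 (least significant byte first)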
# ratio object that reduces itself to lowest terms for printing
def gcd(a, b):
if b == 0:
return a
else:
return gcd(b, a % b)
class Ratio:
def __init__(self, num, den):
self.num=num
self.den=den
def __repr__(self):
self.reduce()
if self.den == 1:
return str(self.num)
return '%d/%d' % (self.num, self.den)
def reduce(self):
div=gcd(self.num, self.den)
if div > 1:
self.num=self.num/div
self.den=self.den/div
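# Added example (a sketch): repr(Ratio(72, 12)) gives '6' and repr(Ratio(2, 8))
# gives '1/4', because __repr__ reduces the fraction with gcd before formatting.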
# for ease of dealing with tags
class IFD_Tag:
def __init__(self, printable, tag, field_type, values, field_offset,
field_length):
# printable version of data
self.printable=printable
# tag ID number
self.tag=tag
# field type as index into FIELD_TYPES
self.field_type=field_type
# offset of start of field in bytes from beginning of IFD
self.field_offset=field_offset
# length of data field in bytes
self.field_length=field_length
# either a string or array of data items
self.values=values
def __str__(self):
return self.printable
def __repr__(self):
return '(0x%04X) %s=%s @ %d' % (self.tag,
FIELD_TYPES[self.field_type][2],
self.printable,
self.field_offset)
# class that handles an EXIF header
class EXIF_header:
def __init__(self, file, endian, offset, fake_exif, debug=0):
self.file=file
self.endian=endian
self.offset=offset
self.fake_exif=fake_exif
self.debug=debug
self.tags={}
# convert slice to integer, based on sign and endian flags
# usually this offset is assumed to be relative to the beginning of the
# start of the EXIF information. For some cameras that use relative tags,
# this offset may be relative to some other starting point.
def s2n(self, offset, length, signed=0):
self.file.seek(self.offset+offset)
slice=self.file.read(length)
if self.endian == 'I':
val=s2n_intel(slice)
else:
val=s2n_motorola(slice)
# Sign extension ?
if signed:
msb=1 << (8*length-1)
if val & msb:
val=val-(msb << 1)
return val
# convert offset to string
def n2s(self, offset, length):
s=''
for i in range(length):
if self.endian == 'I':
s=s+chr(offset & 0xFF)
else:
s=chr(offset & 0xFF)+s
offset=offset >> 8
return s
# return first IFD
def first_IFD(self):
return self.s2n(4, 4)
# return pointer to next IFD
def next_IFD(self, ifd):
entries=self.s2n(ifd, 2)
return self.s2n(ifd+2+12*entries, 4)
# return list of IFDs in header
def list_IFDs(self):
i=self.first_IFD()
a=[]
while i:
a.append(i)
i=self.next_IFD(i)
return a
# return list of entries in this IFD
def dump_IFD(self, ifd, ifd_name, dict=EXIF_TAGS, relative=0):
entries=self.s2n(ifd, 2)
for i in range(entries):
# entry is index of start of this IFD in the file
entry=ifd+2+12*i
tag=self.s2n(entry, 2)
# get tag name. We do it early to make debugging easier
tag_entry=dict.get(tag)
if tag_entry:
tag_name=tag_entry[0]
else:
tag_name='Tag 0x%04X' % tag
field_type=self.s2n(entry+2, 2)
if not 0 < field_type < len(FIELD_TYPES):
# unknown field type
raise ValueError('unknown type %d in tag 0x%04X' % (field_type, tag))
typelen=FIELD_TYPES[field_type][0]
count=self.s2n(entry+4, 4)
offset=entry+8
if count*typelen > 4:
# offset is not the value; it's a pointer to the value
# if relative we set things up so s2n will seek to the right
# place when it adds self.offset. Note that this 'relative'
# is for the Nikon type 3 makernote. Other cameras may use
# other relative offsets, which would have to be computed here
# slightly differently.
if relative:
tmp_offset=self.s2n(offset, 4)
offset=tmp_offset+ifd-self.offset+4
if self.fake_exif:
offset=offset+18
else:
offset=self.s2n(offset, 4)
field_offset=offset
if field_type == 2:
# special case: null-terminated ASCII string
if count != 0:
self.file.seek(self.offset+offset)
values=self.file.read(count)
values=values.strip().replace('\x00','')
else:
values=''
else:
values=[]
signed=(field_type in [6, 8, 9, 10])
for j in range(count):
if field_type in (5, 10):
# a ratio
value_j=Ratio(self.s2n(offset, 4, signed),
self.s2n(offset+4, 4, signed))
else:
value_j=self.s2n(offset, typelen, signed)
values.append(value_j)
offset=offset+typelen
# now "values" is either a string or an array
if count == 1 and field_type != 2:
printable=str(values[0])
else:
printable=str(values)
# compute printable version of values
if tag_entry:
if len(tag_entry) != 1:
# optional 2nd tag element is present
if callable(tag_entry[1]):
# call mapping function
printable=tag_entry[1](values)
else:
printable=''
for i in values:
# use lookup table for this tag
printable+=tag_entry[1].get(i, repr(i))
self.tags[ifd_name+' '+tag_name]=IFD_Tag(printable, tag,
field_type,
values, field_offset,
count*typelen)
if self.debug:
print(' debug: %s: %s' % (tag_name,
repr(self.tags[ifd_name+' '+tag_name])))
# extract uncompressed TIFF thumbnail (like pulling teeth)
# we take advantage of the pre-existing layout in the thumbnail IFD as
# much as possible
def extract_TIFF_thumbnail(self, thumb_ifd):
entries=self.s2n(thumb_ifd, 2)
# this is header plus offset to IFD ...
if self.endian == 'M':
tiff='MM\x00*\x00\x00\x00\x08'
else:
tiff='II*\x00\x08\x00\x00\x00'
# ... plus thumbnail IFD data plus a null "next IFD" pointer
self.file.seek(self.offset+thumb_ifd)
tiff+=self.file.read(entries*12+2)+'\x00\x00\x00\x00'
# fix up large value offset pointers into data area
for i in range(entries):
entry=thumb_ifd+2+12*i
tag=self.s2n(entry, 2)
field_type=self.s2n(entry+2, 2)
typelen=FIELD_TYPES[field_type][0]
count=self.s2n(entry+4, 4)
oldoff=self.s2n(entry+8, 4)
# start of the 4-byte pointer area in entry
ptr=i*12+18
# remember strip offsets location
if tag == 0x0111:
strip_off=ptr
strip_len=count*typelen
# is it in the data area?
if count*typelen > 4:
# update offset pointer (nasty "strings are immutable" crap)
# should be able to say "tiff[ptr:ptr+4]=newoff"
newoff=len(tiff)
tiff=tiff[:ptr]+self.n2s(newoff, 4)+tiff[ptr+4:]
# remember strip offsets location
if tag == 0x0111:
strip_off=newoff
strip_len=4
# get original data and store it
self.file.seek(self.offset+oldoff)
tiff+=self.file.read(count*typelen)
# add pixel strips and update strip offset info
old_offsets=self.tags['Thumbnail StripOffsets'].values
old_counts=self.tags['Thumbnail StripByteCounts'].values
for i in range(len(old_offsets)):
# update offset pointer (more nasty "strings are immutable" crap)
offset=self.n2s(len(tiff), strip_len)
tiff=tiff[:strip_off]+offset+tiff[strip_off+strip_len:]
strip_off+=strip_len
# add pixel strip to end
self.file.seek(self.offset+old_offsets[i])
tiff+=self.file.read(old_counts[i])
self.tags['TIFFThumbnail']=tiff
# decode all the camera-specific MakerNote formats
# Note is the data that comprises this MakerNote. The MakerNote will
# likely have pointers in it that point to other parts of the file. We'll
# use self.offset as the starting point for most of those pointers, since
# they are relative to the beginning of the file.
#
# If the MakerNote is in a newer format, it may use relative addressing
# within the MakerNote. In that case we'll use relative addresses for the
# pointers.
#
# As an aside: it's not just to be annoying that the manufacturers use
# relative offsets. It's so that if the makernote has to be moved by the
# picture software all of the offsets don't have to be adjusted. Overall,
# this is probably the right strategy for makernotes, though the spec is
# ambiguous. (The spec does not appear to imagine that makernotes would
# follow EXIF format internally. Once they did, it's ambiguous whether
# the offsets should be from the header at the start of all the EXIF info,
# or from the header at the start of the makernote.)
def decode_maker_note(self):
note=self.tags['EXIF MakerNote']
make=self.tags['Image Make'].printable
model=self.tags['Image Model'].printable
# Nikon
# The maker note usually starts with the word Nikon, followed by the
# type of the makernote (1 or 2, as a short). If the word Nikon is
# not at the start of the makernote, it's probably type 2, since some
# cameras work that way.
if make in ('NIKON', 'NIKON CORPORATION'):
if note.values[0:7] == [78, 105, 107, 111, 110, 00, 0o1]:
if self.debug:
print("Looks like a type 1 Nikon MakerNote.")
self.dump_IFD(note.field_offset+8, 'MakerNote',
dict=MAKERNOTE_NIKON_OLDER_TAGS)
elif note.values[0:7] == [78, 105, 107, 111, 110, 00, 0o2]:
if self.debug:
print("Looks like a labeled type 2 Nikon MakerNote")
if note.values[12:14] != [0, 42] and note.values[12:14] != [42, 0]:
raise ValueError("Missing marker tag '42' in MakerNote.")
# skip the Makernote label and the TIFF header
self.dump_IFD(note.field_offset+10+8, 'MakerNote',
dict=MAKERNOTE_NIKON_NEWER_TAGS, relative=1)
else:
# E99x or D1
if self.debug:
print("Looks like an unlabeled type 2 Nikon MakerNote")
self.dump_IFD(note.field_offset, 'MakerNote',
dict=MAKERNOTE_NIKON_NEWER_TAGS)
return
# Olympus
if make[:7] == 'OLYMPUS':
self.dump_IFD(note.field_offset+8, 'MakerNote',
dict=MAKERNOTE_OLYMPUS_TAGS)
return
# Casio
if make == 'Casio':
self.dump_IFD(note.field_offset, 'MakerNote',
dict=MAKERNOTE_CASIO_TAGS)
return
# Fujifilm
if make == 'FUJIFILM':
# bug: everything else is "Motorola" endian, but the MakerNote
# is "Intel" endian
endian=self.endian
self.endian='I'
# bug: IFD offsets are from beginning of MakerNote, not
# beginning of file header
offset=self.offset
self.offset+=note.field_offset
# process note with bogus values (note is actually at offset 12)
self.dump_IFD(12, 'MakerNote', dict=MAKERNOTE_FUJIFILM_TAGS)
# reset to correct values
self.endian=endian
self.offset=offset
return
# Canon
if make == 'Canon':
self.dump_IFD(note.field_offset, 'MakerNote',
dict=MAKERNOTE_CANON_TAGS)
for i in (('MakerNote Tag 0x0001', MAKERNOTE_CANON_TAG_0x001),
('MakerNote Tag 0x0004', MAKERNOTE_CANON_TAG_0x004)):
self.canon_decode_tag(self.tags[i[0]].values, i[1])
return
# decode Canon MakerNote tag based on offset within tag
# see http://www.burren.cx/david/canon.html by David Burren
def canon_decode_tag(self, value, dict):
for i in range(1, len(value)):
x=dict.get(i, ('Unknown', ))
if self.debug:
print(i, x)
name=x[0]
if len(x) > 1:
val=x[1].get(value[i], 'Unknown')
else:
val=value[i]
# it's not a real IFD Tag but we fake one to make everybody
# happy. this will have a "proprietary" type
self.tags['MakerNote '+name]=IFD_Tag(str(val), None, 0, None,
None, None)
# process an image file (expects an open file object)
# this is the function that has to deal with all the arbitrary nasty bits
# of the EXIF standard
def process_file(file, debug=0):
# determine whether it's a JPEG or TIFF
data=file.read(12)
if data[0:4] in ['II*\x00', 'MM\x00*']:
# it's a TIFF file
file.seek(0)
endian=file.read(1)
file.read(1)
offset=0
elif data[0:2] == '\xFF\xD8':
# it's a JPEG file
# skip JFIF style header(s)
fake_exif=0
while data[2] == '\xFF' and data[6:10] in ('JFIF', 'JFXX', 'OLYM'):
length=ord(data[4])*256+ord(data[5])
file.read(length-8)
# fake an EXIF beginning of file
data='\xFF\x00'+file.read(10)
fake_exif=1
if data[2] == '\xFF' and data[6:10] == 'Exif':
# detected EXIF header
offset=file.tell()
endian=file.read(1)
else:
# no EXIF information
return {}
else:
# file format not recognized
return {}
# deal with the EXIF info we found
if debug:
print({'I': 'Intel', 'M': 'Motorola'}[endian], 'format')
hdr=EXIF_header(file, endian, offset, fake_exif, debug)
ifd_list=hdr.list_IFDs()
ctr=0
for i in ifd_list:
if ctr == 0:
IFD_name='Image'
elif ctr == 1:
IFD_name='Thumbnail'
thumb_ifd=i
else:
IFD_name='IFD %d' % ctr
if debug:
print(' IFD %d (%s) at offset %d:' % (ctr, IFD_name, i))
hdr.dump_IFD(i, IFD_name)
# EXIF IFD
exif_off=hdr.tags.get(IFD_name+' ExifOffset')
if exif_off:
if debug:
print(' EXIF SubIFD at offset %d:' % exif_off.values[0])
hdr.dump_IFD(exif_off.values[0], 'EXIF')
# Interoperability IFD contained in EXIF IFD
intr_off=hdr.tags.get('EXIF SubIFD InteroperabilityOffset')
if intr_off:
if debug:
print(' EXIF Interoperability SubSubIFD at offset %d:' \
% intr_off.values[0])
hdr.dump_IFD(intr_off.values[0], 'EXIF Interoperability',
dict=INTR_TAGS)
# GPS IFD
gps_off=hdr.tags.get(IFD_name+' GPSInfo')
if gps_off:
if debug:
print(' GPS SubIFD at offset %d:' % gps_off.values[0])
hdr.dump_IFD(gps_off.values[0], 'GPS', dict=GPS_TAGS)
ctr+=1
# extract uncompressed TIFF thumbnail
thumb=hdr.tags.get('Thumbnail Compression')
if thumb and thumb.printable == 'Uncompressed TIFF':
hdr.extract_TIFF_thumbnail(thumb_ifd)
# JPEG thumbnail (thankfully the JPEG data is stored as a unit)
thumb_off=hdr.tags.get('Thumbnail JPEGInterchangeFormat')
if thumb_off:
file.seek(offset+thumb_off.values[0])
size=hdr.tags['Thumbnail JPEGInterchangeFormatLength'].values[0]
hdr.tags['JPEGThumbnail']=file.read(size)
# deal with MakerNote contained in EXIF IFD
if 'EXIF MakerNote' in hdr.tags:
hdr.decode_maker_note()
# Sometimes in a TIFF file, a JPEG thumbnail is hidden in the MakerNote
# since it's not allowed in a uncompressed TIFF IFD
if 'JPEGThumbnail' not in hdr.tags:
thumb_off=hdr.tags.get('MakerNote JPEGThumbnail')
if thumb_off:
file.seek(offset+thumb_off.values[0])
hdr.tags['JPEGThumbnail']=file.read(thumb_off.field_length)
return hdr.tags
# library test/debug function (dump given files)
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print('Usage: %s files...\n' % sys.argv[0])
sys.exit(0)
for filename in sys.argv[1:]:
try:
file=open(filename, 'rb')
except:
print(filename, 'unreadable')
print()
continue
print(filename+':')
# data=process_file(file, 1) # with debug info
data=process_file(file)
if not data:
print('No EXIF information found')
continue
x=list(data.keys())
x.sort()
for i in x:
if i in ('JPEGThumbnail', 'TIFFThumbnail'):
continue
try:
print(' %s (%s): %s' % \
(i, FIELD_TYPES[data[i].field_type][2], data[i].printable))
except:
print('error', i, '"', data[i], '"')
if 'JPEGThumbnail' in data:
print('File has JPEG thumbnail')
print()
|
import pandas as pd
from gensim import models
from janome.tokenizer import Tokenizer
from janome.analyzer import Analyzer
from janome.charfilter import *
from janome.tokenfilter import *
import neologdn
import re
def split_into_words(text, tokenizer):
# tokens = tokenizer.tokenize(text)
normalized_text = neologdn.normalize(text)
normalized_text = re.sub(r'[!-/:-@[-`{-~]', r' ', normalized_text)
tokens = [token for token in tokenizer.analyze(normalized_text)]
ret = []
for idx in range(len(tokens)):
        token = tokens[idx]
        # compute the part-of-speech fields up front so the last-token branch can use them
        parts = token.part_of_speech.split(',')
# print(token)
if idx+1 == len(tokens):
if parts[0] == '名詞' and parts[1] != '接尾' and parts[1] != '副詞可能':
ret.append(token.base_form)
elif parts[0] == '名詞': continue
else:
ret.append(token.base_form)
break
post_token = tokens[idx+1]
        post_parts = post_token.part_of_speech.split(',')
if parts[0] == '名詞':
if parts[1] == '一般' and post_parts[0] == '名詞' and post_parts[1] == '接尾':
ret.append(token.base_form + post_token.base_form)
elif parts[1] == '一般':
ret.append(token.base_form)
elif parts[1] == '接尾': continue
elif parts[1] == '副詞可能': continue
else:
ret.append(token.base_form)
else:
ret.append(token.base_form)
return ret
if __name__ == '__main__':
tokenizer = Tokenizer(mmap=True)
char_filters = [UnicodeNormalizeCharFilter()]
token_filters = [POSStopFilter(['記号','助詞']), LowerCaseFilter()]
analyzer = Analyzer(char_filters, tokenizer, token_filters)
model_path = './models/doc2vec.model'
delim = '_'
programs = pd.read_pickle('data/example/programs.pkl')
p_text = {'prog_%d'%key: programs[key]['text'] for key in programs.keys()}
creatives = pd.read_pickle('data/example/creatives.pkl')
c_text = {'crea_%d'%key: (creatives[key]['text'], creatives[key]['creative_category']) for key in creatives.keys()}
p_id = list(p_text.keys())[2]
print(p_id)
# print(split_into_words(p_text[p_id], analyzer))
print(p_text[p_id])
# print(p_text[p_id][1])
print()
print()
print()
model = models.Doc2Vec.load(model_path)
syms = model.docvecs.most_similar(p_id, topn=15)
# print(model.docvecs[0])
for s_id in syms:
prefix = s_id[0].split(delim)[0]
if prefix == 'prog':
continue
# print(s_id[1])
# print(p_text[s_id[0]])
# print(p_text[s_id[0]][1])
else:
print(s_id[1])
print(c_text[s_id[0]][0])
print(c_text[s_id[0]][1])
# print(split_into_words(c_text[s_id[0]], analyzer))
print()
|
import sys
from cx_Freeze import setup, Executable
import os
import shutil
from distutils.dir_util import copy_tree
def abs_path_maker(path):
return os.path.join(os.getcwd(), path)
if os.path.exists(abs_path_maker("build")):
print("deleting old builds...", end="")
shutil.rmtree(abs_path_maker("build"))
print("done")
base = None
if sys.platform == 'win32':
    base = 'Win32GUI'
exe = Executable(script = "main.py", base= base)
setup(name = "photo_slider",
version = '0.1',
description = 'photo slider',
options={"build_exe":{"includes": [], "excludes":["PyQt4", "PyQt5", "scipy", "numpy", "tkinter", "win32com", "distutils"], "packages":[]}},
executables = [exe])
shutil.copytree(abs_path_maker("files"), abs_path_maker("build\\exe.win-amd64-3.8\\files"))
pygame_example_path = abs_path_maker("build\\exe.win-amd64-3.8\\lib\\pygame\\examples")
print("deleting" + pygame_example_path + "...", end="")
shutil.rmtree(pygame_example_path)
print("done")
copy_from = abs_path_maker("build\\exe.win-amd64-3.8")
copy_to = abs_path_maker("build")
print("copying" + copy_from + " -> " + copy_to)
copy_tree(copy_from, copy_to)
shutil.rmtree(abs_path_maker("build\\exe.win-amd64-3.8"))
print("build complete")
|
from flask import Flask, jsonify, abort
import json
from random import randint
from operator import itemgetter
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import func
app = Flask(__name__, instance_relative_config=True)
app.config.from_pyfile('config.py', silent=True)
db = SQLAlchemy(app)
from models import MemberOfParliament
def send_api_response(data_dict):
response = jsonify(data_dict)
response.headers['Access-Control-Allow-Origin'] = "*"
return response
@app.route('/')
def route():
links = [
'<a href="/get_member/male/">/get_member/male/</a>',
'<a href="/get_member/female/">/get_member/female/</a>',
'<a href="/hot/pamela-tshwete/">/hot/pamela-tshwete/</a>',
'<a href="/not/pamela-tshwete/">/not/pamela-tshwete/</a>',
'<a href="/ranking/">/ranking/</a>'
]
return "<br>".join(links)
@app.route('/get_member/<gender>/')
def get_member(gender):
"""
Return the details of a randomly selected member of parliament.
"""
if not gender.lower() in ["male", "female"]:
abort(400)
gender_key = "M"
if gender.lower() == "female":
gender_key = "F"
mp = MemberOfParliament.query.filter_by(gender=gender_key).order_by(func.random()).first()
return send_api_response(mp.as_dict())
@app.route('/hot/<mp_key>/')
def hot(mp_key):
"""
Increment the score for an MP.
"""
try:
mp = MemberOfParliament.query.filter_by(key=mp_key).first()
mp.score += 1
except AttributeError:
abort(404)
db.session.add(mp)
db.session.commit()
return send_api_response(mp.as_dict())
@app.route('/not/<mp_key>/')
def not_hot(mp_key):
"""
Decrement the score for an MP.
"""
try:
mp = MemberOfParliament.query.filter_by(key=mp_key).first()
mp.score -= 1
except AttributeError:
abort(404)
db.session.add(mp)
db.session.commit()
return send_api_response(mp.as_dict())
@app.route('/ranking/')
def ranking():
"""
    Return the 10 highest-ranked MPs of each gender.
"""
top_males = []
top_females = []
males = MemberOfParliament.query.filter_by(gender="M").order_by(MemberOfParliament.score.desc()).limit(10).all()
for mp in males:
top_males.append(mp.as_dict())
females = MemberOfParliament.query.filter_by(gender="F").order_by(MemberOfParliament.score.desc()).limit(10).all()
for mp in females:
top_females.append(mp.as_dict())
return send_api_response({"male": top_males, "female": top_females})
|
from django.contrib.auth.decorators import login_required
login_decorator = login_required(login_url='/', redirect_field_name=None)
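# Added usage sketch (the view name below is hypothetical):
#
# @login_decorator
# def dashboard(request):
#     ...  # reachable only by authenticated users; anonymous users are redirected to '/'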
|
import os
import pystac
from pystac.layout import BestPracticesLayoutStrategy
from pystac.utils import (is_absolute_href, make_relative_href)
from shapely.geometry import shape, mapping
from stactools.core.copy import (move_asset_file_to_item, move_assets as
do_move_assets)
def merge_items(source_item,
target_item,
move_assets=False,
ignore_conflicts=False):
"""Merges the assets from source_item into target_item.
The geometry and bounding box of the items will also be merged.
Args:
source_item (pystac.Item): The Item that will be merged into target_item.
This item is not mutated in this operation.
target_item (pystac.Item): The target item that will be merged into.
This item will be mutated in this operation.
move_assets (bool): If true, move the asset files alongside the target item.
ignore_conflicts (bool): If True, assets with the same keys will not be merged,
and asset files that would be moved to overwrite an existing file
will not be moved. If False, either of these situations will throw an error.
"""
target_item_href = target_item.get_self_href()
for key, asset in source_item.assets.items():
if key in target_item.assets:
if ignore_conflicts:
continue
else:
raise Exception(
'Target item {} already has asset with key {}, '
'cannot merge asset in from {}'.format(
target_item, key, source_item))
else:
if move_assets:
asset_href = asset.get_absolute_href()
new_asset_href = move_asset_file_to_item(
target_item, asset_href, ignore_conflicts=ignore_conflicts)
else:
asset_href = asset.get_absolute_href()
if not is_absolute_href(asset.href):
asset_href = make_relative_href(asset_href,
target_item_href)
new_asset_href = asset_href
new_asset = asset.clone()
new_asset.href = new_asset_href
target_item.add_asset(key, new_asset)
source_geom = shape(source_item.geometry)
target_geom = shape(target_item.geometry)
union_geom = source_geom.union(target_geom).buffer(0)
target_item.geometry = mapping(union_geom)
target_item.bbox = list(union_geom.bounds)
def merge_all_items(source_catalog,
target_catalog,
move_assets=False,
ignore_conflicts=False):
"""Merge all items from source_catalog into target_catalog.
Calls merge_items on any items that have the same ID between the two catalogs.
    Any items that don't exist in the target_catalog will be added to the target_catalog.
If the target_catalog is a Collection, it will be set as the collection of any
new items.
Args:
        source_catalog (Catalog or Collection): The catalog or collection that items
will be drawn from to merge into the target catalog.
This catalog is not mutated in this operation.
        target_catalog (Catalog or Collection): The target catalog that will be merged into.
            This catalog will be mutated in this operation.
move_assets (bool): If true, move the asset files alongside the target item.
ignore_conflicts (bool): If True, assets with the same keys will not be merged,
and asset files that would be moved to overwrite an existing file
will not be moved. If False, either of these situations will throw an error.
Returns:
        Catalog or Collection: The target_catalog
"""
source_items = source_catalog.get_all_items()
ids_to_items = {item.id: item for item in source_items}
for item in target_catalog.get_all_items():
source_item = ids_to_items.get(item.id)
if source_item is not None:
merge_items(source_item,
item,
move_assets=move_assets,
ignore_conflicts=ignore_conflicts)
del ids_to_items[item.id]
# Process source items that did not match existing target items
layout_strategy = BestPracticesLayoutStrategy()
parent_dir = os.path.dirname(target_catalog.get_self_href())
for item in ids_to_items.values():
item_copy = item.clone()
item_copy.set_self_href(
layout_strategy.get_item_href(item_copy, parent_dir))
target_catalog.add_item(item_copy)
if isinstance(target_catalog, pystac.Collection):
item_copy.set_collection(target_catalog)
else:
item_copy.set_collection(None)
if move_assets:
do_move_assets(item_copy, copy=False)
if target_catalog.STAC_OBJECT_TYPE == pystac.STACObjectType.COLLECTION:
target_catalog.update_extent_from_items()
return target_catalog
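# Added usage sketch (file paths are hypothetical):
#
# source = pystac.Catalog.from_file('source/catalog.json')
# target = pystac.Catalog.from_file('target/catalog.json')
# merge_all_items(source, target, move_assets=False, ignore_conflicts=True)
# target.save(catalog_type=pystac.CatalogType.SELF_CONTAINED)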
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
import warnings
def total_seconds(td): # pragma: no cover
return td.total_seconds()
def is_timestamp(value):
if type(value) == bool:
return False
try:
float(value)
return True
except Exception:
return False
# Python 2.7 / 3.0+ definitions for isstr function.
try: # pragma: no cover
basestring
def isstr(s):
return isinstance(s, basestring) # noqa: F821
except NameError: # pragma: no cover
def isstr(s):
return isinstance(s, str)
class list_to_iter_shim(list):
""" A temporary shim for functions that currently return a list but that will, after a
    deprecation period, return an iterator.
"""
def __init__(self, iterable=(), **kwargs):
""" Equivalent to list(iterable). warn_text will be emitted on all non-iterator operations.
"""
self._warn_text = (
kwargs.pop("warn_text", None)
or "this object will be converted to an iterator in a future release"
)
self._iter_count = 0
list.__init__(self, iterable, **kwargs)
def _warn(self):
warnings.warn(self._warn_text, DeprecationWarning)
def __iter__(self):
self._iter_count += 1
if self._iter_count > 1:
self._warn()
return list.__iter__(self)
def _wrap_method(name):
list_func = getattr(list, name)
def wrapper(self, *args, **kwargs):
self._warn()
return list_func(self, *args, **kwargs)
return wrapper
__contains__ = _wrap_method("__contains__")
__add__ = _wrap_method("__add__")
__mul__ = _wrap_method("__mul__")
__getitem__ = _wrap_method("__getitem__")
# Ideally, we would throw warnings from __len__, but list(x) calls len(x)
index = _wrap_method("index")
count = _wrap_method("count")
__setitem__ = _wrap_method("__setitem__")
__delitem__ = _wrap_method("__delitem__")
append = _wrap_method("append")
if sys.version_info.major >= 3: # pragma: no cover
clear = _wrap_method("clear")
copy = _wrap_method("copy")
extend = _wrap_method("extend")
__iadd__ = _wrap_method("__iadd__")
__imul__ = _wrap_method("__imul__")
insert = _wrap_method("insert")
pop = _wrap_method("pop")
remove = _wrap_method("remove")
reverse = _wrap_method("reverse")
sort = _wrap_method("sort")
del _wrap_method
__all__ = ["total_seconds", "is_timestamp", "isstr", "list_to_iter_shim"]
|
'''
Author: Raphael Nascimento
ID: Nask
Purpose: Take formatted text from a .txt file and rewrite it in .docx format
'''
import docx
class Docx(object):
def __init__(self, caminho, diretorio, content):
self.caminho = caminho
self.diretorio = diretorio
self.content = content
self.lista = []
    def contLines(self):  # iterate over the file to count its lines
arq = open(self.caminho, 'r')
linhas = len(arq.readlines())
arq.close()
return linhas
def read(self):
arquivo = open(self.caminho, 'r')
linhas = self.contLines()
for i in range(linhas):
self.lista.append(str(arquivo.readline()))
arquivo.close()
def docx(self):
documento = docx.Document()
style = documento.styles['Normal']
font = style.font
font.name = 'Arial'
font.size = docx.shared.Pt(11)
documento.add_heading(self.content['title'], 0)
for i in self.lista:
if 7 >= len(i) > 0:
documento.add_heading(i, level=0)
elif 10 >= len(i) > 0:
documento.add_heading(i, level=1)
elif 20 >= len(i) > 0:
documento.add_heading(i, level=2)
elif 35 >= len(i) > 0:
documento.add_heading(i, level=3)
else:
documento.add_paragraph(i)
documento.save(self.diretorio + '/' + self.content['title'] + '.docx')
def chamadas(self):
self.read()
self.docx()
|
#!/usr/bin/env python
import os
import json
import torch
import numpy as np
import queue
import pprint
import random
import argparse
import importlib
import threading
import traceback
from tqdm import tqdm
from utils import stdout_to_tqdm
from config import system_configs
from nnet.py_factory import NetworkFactory
from torch.multiprocessing import Process, Queue, Pool
from db.datasets import datasets
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def parse_args():
parser = argparse.ArgumentParser(description="Train CenterNet")
parser.add_argument("cfg_file", help="config file", type=str)
parser.add_argument("--iter", dest="start_iter",
help="train at iteration i",
default=0, type=int)
parser.add_argument("--threads", dest="threads", default=4, type=int)
#args = parser.parse_args()
args, unparsed = parser.parse_known_args()
return args
def prefetch_data(db, queue, sample_data, data_aug):
ind = 0
print("start prefetching data...")
np.random.seed(os.getpid())
while True:
try:
data, ind = sample_data(db, ind, data_aug=data_aug)
queue.put(data)
except Exception as e:
traceback.print_exc()
raise e
def pin_memory(data_queue, pinned_data_queue, sema):
while True:
data = data_queue.get()
data["xs"] = [x.pin_memory() for x in data["xs"]]
data["ys"] = [y.pin_memory() for y in data["ys"]]
pinned_data_queue.put(data)
if sema.acquire(blocking=False):
return
def init_parallel_jobs(dbs, queue, fn, data_aug):
tasks = [Process(target=prefetch_data, args=(db, queue, fn, data_aug)) for db in dbs]
for task in tasks:
task.daemon = True
task.start()
return tasks
def train(training_dbs, validation_db, start_iter=0):
learning_rate = system_configs.learning_rate
max_iteration = system_configs.max_iter
pretrained_model = system_configs.pretrain
snapshot = system_configs.snapshot
val_iter = system_configs.val_iter
display = system_configs.display
decay_rate = system_configs.decay_rate
stepsize = system_configs.stepsize
# getting the size of each database
training_size = len(training_dbs[0].db_inds)
validation_size = len(validation_db.db_inds)
# queues storing data for training
training_queue = Queue(system_configs.prefetch_size)
validation_queue = Queue(5)
# queues storing pinned data for training
pinned_training_queue = queue.Queue(system_configs.prefetch_size)
pinned_validation_queue = queue.Queue(5)
# load data sampling function
data_file = "sample.{}".format(training_dbs[0].data)
sample_data = importlib.import_module(data_file).sample_data
# allocating resources for parallel reading
training_tasks = init_parallel_jobs(training_dbs, training_queue, sample_data, True)
if val_iter:
validation_tasks = init_parallel_jobs([validation_db], validation_queue, sample_data, False)
training_pin_semaphore = threading.Semaphore()
validation_pin_semaphore = threading.Semaphore()
training_pin_semaphore.acquire()
validation_pin_semaphore.acquire()
training_pin_args = (training_queue, pinned_training_queue, training_pin_semaphore)
training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
training_pin_thread.daemon = True
training_pin_thread.start()
validation_pin_args = (validation_queue, pinned_validation_queue, validation_pin_semaphore)
validation_pin_thread = threading.Thread(target=pin_memory, args=validation_pin_args)
validation_pin_thread.daemon = True
validation_pin_thread.start()
print("building model...")
nnet = NetworkFactory(training_dbs[0])
if pretrained_model is not None:
if not os.path.exists(pretrained_model):
raise ValueError("pretrained model does not exist")
print("loading from pretrained model")
nnet.load_pretrained_params(pretrained_model)
if start_iter:
learning_rate /= (decay_rate ** (start_iter // stepsize))
nnet.load_params(start_iter)
nnet.set_lr(learning_rate)
print("training starts from iteration {} with learning_rate {}".format(start_iter + 1, learning_rate))
else:
nnet.set_lr(learning_rate)
print("training start...")
nnet.cuda()
nnet.train_mode()
with stdout_to_tqdm() as save_stdout:
for iteration in tqdm(range(start_iter + 1, max_iteration + 1), file=save_stdout, ncols=80):
training = pinned_training_queue.get(block=True)
training_loss, focal_loss, pull_loss, push_loss, regr_loss = nnet.train(**training)
#training_loss, focal_loss, pull_loss, push_loss, regr_loss, cls_loss = nnet.train(**training)
if display and iteration % display == 0:
print("training loss at iteration {}: {}".format(iteration, training_loss.item()))
print("focal loss at iteration {}: {}".format(iteration, focal_loss.item()))
print("pull loss at iteration {}: {}".format(iteration, pull_loss.item()))
print("push loss at iteration {}: {}".format(iteration, push_loss.item()))
print("regr loss at iteration {}: {}".format(iteration, regr_loss.item()))
#print("cls loss at iteration {}: {}\n".format(iteration, cls_loss.item()))
del training_loss, focal_loss, pull_loss, push_loss, regr_loss#, cls_loss
if val_iter and validation_db.db_inds.size and iteration % val_iter == 0:
nnet.eval_mode()
validation = pinned_validation_queue.get(block=True)
validation_loss = nnet.validate(**validation)
print("validation loss at iteration {}: {}".format(iteration, validation_loss.item()))
nnet.train_mode()
if iteration % snapshot == 0:
nnet.save_params(iteration)
if iteration % stepsize == 0:
learning_rate /= decay_rate
nnet.set_lr(learning_rate)
# sending signal to kill the thread
training_pin_semaphore.release()
validation_pin_semaphore.release()
# terminating data fetching processes
for training_task in training_tasks:
training_task.terminate()
    if val_iter:
        for validation_task in validation_tasks:
            validation_task.terminate()
if __name__ == "__main__":
args = parse_args()
cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
with open(cfg_file, "r") as f:
configs = json.load(f)
configs["system"]["snapshot_name"] = args.cfg_file
system_configs.update_config(configs["system"])
train_split = system_configs.train_split
val_split = system_configs.val_split
print("loading all datasets...")
dataset = system_configs.dataset
# threads = max(torch.cuda.device_count() * 2, 4)
threads = args.threads
print("using {} threads".format(threads))
training_dbs = [datasets[dataset](configs["db"], train_split) for _ in range(threads)]
validation_db = datasets[dataset](configs["db"], val_split)
print("system config...")
pprint.pprint(system_configs.full)
print("db config...")
pprint.pprint(training_dbs[0].configs)
print("len of db: {}".format(len(training_dbs[0].db_inds)))
train(training_dbs, validation_db, args.start_iter)
|
#!/usr/bin/env python
# MIT License
# (c) baltasar 2017
from google.appengine.ext import ndb
class Story(ndb.Model):
added = ndb.DateProperty(auto_now_add=True)
user = ndb.StringProperty(required=True, indexed=True)
title = ndb.StringProperty(required=True, indexed=True)
subtitle = ndb.StringProperty(required=True, indexed=True)
summary = ndb.TextProperty()
@ndb.transactional
def update(story):
"""Updates a story.
:param story: The story to update.
:return: The key of the story.
"""
return story.put()
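# Added usage sketch (field values are hypothetical):
#
# story = Story(user='someone', title='A title', subtitle='A subtitle', summary='...')
# key = update(story)   # story.put() runs inside an ndb transaction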
|
# Copyright (c) 2005-2007 The Regents of The University of Michigan
# Copyright (c) 2011 Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Rick Strong
from m5.SimObject import SimObject
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from SimpleMemory import *
class MemoryMode(Enum): vals = ['invalid', 'atomic', 'timing',
'atomic_noncaching']
class System(MemObject):
type = 'System'
cxx_header = "sim/system.hh"
system_port = MasterPort("System port")
# Override the clock from the ClockedObject which looks at the
# parent clock by default. The 1 GHz default system clock serves
# as a start for the modules that rely on the parent to provide
# the clock.
clock = '1GHz'
@classmethod
def export_method_cxx_predecls(cls, code):
code('#include "sim/system.hh"')
@classmethod
def export_methods(cls, code):
code('''
Enums::MemoryMode getMemoryMode() const;
void setMemoryMode(Enums::MemoryMode mode);
''')
memories = VectorParam.AbstractMemory(Self.all,
"All memories in the system")
mem_mode = Param.MemoryMode('atomic', "The mode the memory system is in")
# The memory ranges are to be populated when creating the system
# such that these can be passed from the I/O subsystem through an
# I/O bridge or cache
mem_ranges = VectorParam.AddrRange([], "Ranges that constitute main memory")
work_item_id = Param.Int(-1, "specific work item id")
num_work_ids = Param.Int(16, "Number of distinct work item types")
work_begin_cpu_id_exit = Param.Int(-1,
"work started on specific id, now exit simulation")
work_begin_ckpt_count = Param.Counter(0,
"create checkpoint when work items begin count value is reached")
work_begin_exit_count = Param.Counter(0,
"exit simulation when work items begin count value is reached")
work_end_ckpt_count = Param.Counter(0,
"create checkpoint when work items end count value is reached")
work_end_exit_count = Param.Counter(0,
"exit simulation when work items end count value is reached")
work_cpus_ckpt_count = Param.Counter(0,
"create checkpoint when active cpu count value is reached")
init_param = Param.UInt64(0, "numerical value to pass into simulator")
boot_osflags = Param.String("a", "boot flags to pass to the kernel")
kernel = Param.String("", "file that contains the kernel code")
readfile = Param.String("", "file to read startup script from")
symbolfile = Param.String("", "file to get the symbols from")
load_addr_mask = Param.UInt64(0xffffffffff,
"Address to mask loading binaries with");
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from setuptools.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from setuptools.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from setuptools.extern.pyparsing import Literal as L # noqa
from ._compat import string_types
from .specifiers import Specifier, InvalidSpecifier
__all__ = [
"InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
"Marker", "default_environment",
]
class InvalidMarker(ValueError):
"""
An invalid marker was found, users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
A name was attempted to be used that does not exist inside of the
environment.
"""
class Node(object):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def __repr__(self):
return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
def serialize(self):
raise NotImplementedError
class Variable(Node):
def serialize(self):
return str(self)
class Value(Node):
def serialize(self):
return '"{0}"'.format(self)
class Op(Node):
def serialize(self):
return str(self)
VARIABLE = (
L("implementation_version") |
L("platform_python_implementation") |
L("implementation_name") |
L("python_full_version") |
L("platform_release") |
L("platform_version") |
L("platform_machine") |
L("platform_system") |
L("python_version") |
L("sys_platform") |
L("os_name") |
L("os.name") | # PEP-345
L("sys.platform") | # PEP-345
L("platform.version") | # PEP-345
L("platform.machine") | # PEP-345
L("platform.python_implementation") | # PEP-345
L("python_implementation") | # undocumented setuptools legacy
L("extra")
)
ALIASES = {
'os.name': 'os_name',
'sys.platform': 'sys_platform',
'platform.version': 'platform_version',
'platform.machine': 'platform_machine',
'platform.python_implementation': 'platform_python_implementation',
'python_implementation': 'platform_python_implementation'
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
L("===") |
L("==") |
L(">=") |
L("<=") |
L("!=") |
L("~=") |
L(">") |
L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker(marker, first=True):
assert isinstance(marker, (list, tuple, string_types))
# Sometimes we have a structure like [[...]] which is a single item list
# where the single item is itself it's own list. In that case we want skip
# the rest of this function so that we don't get extraneous () on the
# outside.
if (isinstance(marker, list) and len(marker) == 1 and
isinstance(marker[0], (list, tuple))):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _eval_op(lhs, op, rhs):
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs)
oper = _operators.get(op.serialize())
if oper is None:
raise UndefinedComparison(
"Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
)
return oper(lhs, rhs)
_undefined = object()
def _get_env(environment, name):
value = environment.get(name, _undefined)
if value is _undefined:
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
groups = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, string_types))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value)
rhs_value = rhs.value
else:
lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info):
version = '{0.major}.{0.minor}.{0.micro}'.format(info)
kind = info.releaselevel
if kind != 'final':
version += kind[0] + str(info.serial)
return version
def default_environment():
if hasattr(sys, 'implementation'):
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
else:
iver = '0'
implementation_name = ''
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": platform.python_version()[:3],
"sys_platform": sys.platform,
}
class Marker(object):
def __init__(self, marker):
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
marker, marker[e.loc:e.loc + 8])
raise InvalidMarker(err_str)
def __str__(self):
return _format_marker(self._markers)
def __repr__(self):
return "<Marker({0!r})>".format(str(self))
def evaluate(self, environment=None):
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
if environment is not None:
current_environment.update(environment)
return _evaluate_markers(self._markers, current_environment)
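# Added usage sketch:
#
# marker = Marker("python_version >= '2.7' and os_name == 'posix'")
# marker.evaluate()                    # evaluated against the running interpreter
# marker.evaluate({'os_name': 'nt'})   # override part of the environment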
|
name = "BackpackTF"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 22 10:27:03 2018
@author: demiliu
"""
from peakaboo.data_smoothing import earth_smooth_matrix
import numpy as np
import matplotlib.pyplot as plt
def twodcontourplot(tadata_nm, tadata_timedelay, tadata_z_corr):
"""
make contour plot
Args:
tadata_nm: wavelength array
tadata_timedelay: time delay array
tadata_z_corr: matrix of z values
"""
timedelayi, nmi = np.meshgrid(tadata_timedelay, tadata_nm)
# find the maximum and minimum
# these are used for color bar
z_min = np.amin(np.amin(tadata_z_corr, axis=1))
z_max = np.amax(np.amax(tadata_z_corr, axis=1))
return [nmi, timedelayi, z_min, z_max]
def smoothing(nm, time, z):
"""Reduce noise in data, then visualize data before and
    after smoothing in a contour plot.
Args:
nm: wavelength array, numpy array
time: time array, numpy array
        z: data matrix, numpy array
Returns:
z_smooth: data after reducing noise, numpy array
"""
# smoothing data
z_smooth = earth_smooth_matrix(nm, z)
# check data shape doesn't change
assert np.shape(z_smooth) == np.shape(z), \
'ShapeError'
# contour plot of original data BEFORE smoothing
original_contour = twodcontourplot(nm, time, z)
nm_contour, time_contour, min_contour, max_contour = original_contour[
0], original_contour[1], original_contour[2], original_contour[3]
fig, (ax1, ax2) = plt.subplots(1, 2, dpi=100)
ax1.set_title('Raw data', fontsize=20, fontweight='bold')
ax1.set_xlabel('Wavelength (nm)', fontsize=20, fontweight='bold')
ax1.set_ylabel('Time delay (ps)', fontsize=20, fontweight='bold')
plt.xlabel('Wavelength (nm)', fontsize=20, fontweight='bold')
ax1.pcolormesh(
nm_contour,
time_contour,
z,
cmap='PiYG',
vmin=min_contour /
2.5,
vmax=max_contour /
10)
# contour plot of data AFTER smoothing
smooth_contour = twodcontourplot(nm, time, z_smooth)
nm_contour, time_contour, min_contour, max_contour = smooth_contour[
0], smooth_contour[1], smooth_contour[2], smooth_contour[3]
ax2.set_title('Smooth data', fontsize=20, fontweight='bold')
ax2.pcolormesh(
nm_contour,
time_contour,
z_smooth,
cmap='PiYG',
vmin=min_contour / 2.5,
vmax=max_contour / 10)
plt.tight_layout(pad=0.25, h_pad=None, w_pad=None, rect=None)
plt.show()
return z_smooth
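# Added minimal usage sketch with synthetic data (assumes the peakaboo
# dependencies are installed):
#
# nm = np.linspace(400, 800, 200)           # wavelength axis
# time = np.linspace(0, 10, 50)             # time-delay axis
# z = np.random.rand(len(nm), len(time))    # noisy data matrix
# z_smooth = smoothing(nm, time, z)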
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dy5nd(hc5vgbflj!f6xefqq*q8d(0a*ulr@y(bcmubgvepsy%^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'SlideShow',
'social_django',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR / 'media'
AUTHENTICATION_BACKENDS = (
'social_core.backends.google.GoogleOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/auth/login/google-oauth2/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
SOCIAL_AUTH_URL_NAMESPACE = 'social'
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = config('SOCIAL_AUTH_GOOGLE_OAUTH2_KEY')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = config('SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET')
|
from changes.config import db
from changes.models import SnapshotImage, SnapshotStatus
from changes.testutils import APITestCase
class SnapshotImageDetailsTest(APITestCase):
def test_simple(self):
project = self.create_project()
snapshot = self.create_snapshot(project)
plan = self.create_plan(project)
image = self.create_snapshot_image(snapshot, plan)
path = '/api/0/snapshotimages/{0}/'.format(image.id)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['id'] == image.id.hex
class UpdateSnapshotImageTest(APITestCase):
def setUp(self):
super(UpdateSnapshotImageTest, self).setUp()
self.project = self.create_project()
self.snapshot = self.create_snapshot(self.project)
self.plan = self.create_plan(self.project)
self.image = self.create_snapshot_image(self.snapshot, self.plan)
self.path = '/api/0/snapshotimages/{0}/'.format(self.image.id)
def test_simple(self):
for status in ('active', 'failed', 'invalidated'):
resp = self.client.post(self.path, data={
'status': status,
})
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['id'] == self.image.id.hex
assert data['status']['id'] == status
db.session.expire(self.image)
image = SnapshotImage.query.get(self.image.id)
assert image.status == SnapshotStatus[status]
def test_invalid_status(self):
resp = self.client.post(self.path, data={
'status': 'invalid_status',
})
assert resp.status_code == 400
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
# https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
# https://cloud.google.com/bigquery/docs/reference/v2/jobs#resource
# https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/list
import re
import sys
import csv
import pprint
import uuid
import json
from time import sleep
from io import StringIO, BytesIO
from datetime import datetime, timedelta
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaIoBaseUpload
from starthinker.config import BUFFER_SCALE
from starthinker.util import flag_last
from starthinker.util.project import project
from starthinker.util.google_api import API_BigQuery, API_Retry
from starthinker.util.csv import row_header_sanitize
BIGQUERY_BUFFERMAX = 4294967296
BIGQUERY_CHUNKSIZE = int(200 * 1024000 * BUFFER_SCALE) # 200 MB * scale in config.py
BIGQUERY_BUFFERSIZE = min(BIGQUERY_CHUNKSIZE * 4, BIGQUERY_BUFFERMAX) # 1 GB * scale in config.py
def bigquery_date(value):
return value.strftime('%Y%m%d')
def query_parameters(query, parameters):
'''
Replace variables in a query string with values.
CAUTION: Possible SQL injection, please check up stream.
query = "SELECT * FROM {project}.{dataset}.Some_Table"
parameters = {'project': 'Test_Project', 'dataset':'Test_dataset'}
  print(query_parameters(query, parameters))
'''
if not parameters:
return query
elif isinstance(parameters, dict):
return query.format(**parameters)
else:
while '[PARAMETER]' in query:
try:
parameter = parameters.pop(0)
except IndexError:
raise IndexError('BigQuery: Missing PARAMETER values for this query.')
if isinstance(parameter, list) or isinstance(parameter, tuple): parameter = ', '.join([str(p) for p in parameter])
query = query.replace('[PARAMETER]', parameter, 1)
if project.verbose: print('QUERY:', query)
return query
def job_wait(auth, job):
if job:
if project.verbose: print('BIGQUERY JOB WAIT:', job['jobReference']['jobId'])
request = API_BigQuery(auth).jobs().get(
projectId=job['jobReference']['projectId'],
jobId=job['jobReference']['jobId']
)
while True:
sleep(5)
if project.verbose: print('.', end='')
sys.stdout.flush()
result = API_Retry(request)
if 'errorResult' in result['status']:
errors = ' '.join([e['message'] for e in result['status']['errors']])
raise Exception('BigQuery Job Error: %s' % errors)
elif result['status']['state'] == 'DONE':
if project.verbose: print('JOB COMPLETE:', result['id'])
break
def datasets_create(auth, project_id, dataset_id):
body = {
"description":dataset_id,
"datasetReference": {
"projectId":project_id,
"datasetId":dataset_id,
},
"location":"US",
"friendlyName":dataset_id,
}
API_BigQuery(auth).datasets().insert(projectId=project_id, body=body).execute()
# roles = READER, WRITER, OWNER
def datasets_access(auth, project_id, dataset_id, role='READER', emails=[], groups=[], views=[]):
if emails or groups or views:
access = API_BigQuery(auth).datasets().get(projectId=project_id, datasetId=dataset_id).execute()["access"]
# if emails
for email in emails:
access.append({
"userByEmail":email,
"role":role,
})
# if groups
for group in groups:
access.append({
"groupByEmail":group,
"role":role,
})
for view in views:
access.append({
"view": {
"projectId": project_id,
"datasetId": view['dataset'],
"tableId": view['view']
}
})
API_BigQuery(auth).datasets().patch(projectId=project_id, datasetId=dataset_id, body={'access':access}).execute()
# TODO terwilleger: Remove project_id
def run_query(auth, project_id, query, legacy=True):
body={
'configuration': {
'query': {
'useLegacySql': legacy,
'query':query
}
}
}
job_wait(auth, API_BigQuery(auth).jobs().insert(projectId=project_id, body=body).execute())
def query_to_table(auth, project_id, dataset_id, table_id, query, disposition='WRITE_TRUNCATE', legacy=True, billing_project_id=None, target_project_id=None):
target_project_id = target_project_id or project_id
if not billing_project_id:
billing_project_id = project_id
body={
'configuration': {
'query': {
'useLegacySql': legacy,
'query':query,
'destinationTable': {
'projectId':target_project_id,
'datasetId':dataset_id,
'tableId':table_id
},
'createDisposition':'CREATE_IF_NEEDED',
'writeDisposition':disposition,
'allowLargeResults':True
},
}
}
job_wait(auth, API_BigQuery(auth).jobs().insert(projectId=billing_project_id, body=body).execute())
def query_to_view(auth, project_id, dataset_id, view_id, query, legacy=True, replace=False):
body={
'tableReference': {
'projectId':project_id,
'datasetId':dataset_id,
'tableId':view_id,
},
'view': {
'query':query,
'useLegacySql':legacy
}
}
response = API_BigQuery(auth).tables().insert(projectId=project_id, datasetId=dataset_id, body=body).execute()
if response is None and replace:
return API_BigQuery(auth).tables().update(projectId=project_id,datasetId=dataset_id, tableId=view_id, body=body).execute()
# structure = CSV, NEWLINE_DELIMITED_JSON
# disposition = WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY
def storage_to_table(auth, project_id, dataset_id, table_id, path, schema=[], skip_rows=1, structure='CSV', disposition='WRITE_TRUNCATE', wait=True):
if project.verbose: print('BIGQUERY STORAGE TO TABLE: ', project_id, dataset_id, table_id)
body = {
'configuration': {
'load': {
'destinationTable': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': table_id,
},
'sourceFormat': 'NEWLINE_DELIMITED_JSON',
'writeDisposition': disposition,
'autodetect': True,
'allowJaggedRows': True,
'allowQuotedNewlines':True,
'ignoreUnknownValues':True,
'sourceUris': [
'gs://%s' % path.replace(':', '/'),
],
}
}
}
if schema:
body['configuration']['load']['schema'] = { 'fields':schema }
body['configuration']['load']['autodetect'] = False
if structure == 'CSV':
body['configuration']['load']['sourceFormat'] = 'CSV'
body['configuration']['load']['skipLeadingRows'] = skip_rows
job = API_BigQuery(auth).jobs().insert(projectId=project_id, body=body).execute()
if wait:
try: job_wait(auth, job)
except Exception as e: print('BIGQUERY SKIPPING: %s, %s' % (path, str(e)))
else:
return job
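# Note on the path argument above: it is expected in 'bucket:object' form and rewritten to a gs:// URI,
# e.g. (illustrative) 'my_bucket:reports/data.csv' becomes 'gs://my_bucket/reports/data.csv'.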
def rows_to_table(auth, project_id, dataset_id, table_id, rows, schema=[], skip_rows=1, disposition='WRITE_TRUNCATE', wait=True):
if project.verbose: print('BIGQUERY ROWS TO TABLE: ', project_id, dataset_id, table_id)
buffer_data = StringIO()
writer = csv.writer(buffer_data, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
has_rows = False
if rows == []:
if project.verbose: print('BigQuery Zero Rows')
return io_to_table(auth, project_id, dataset_id, table_id, buffer_data, 'CSV', schema, skip_rows, disposition, wait)
for is_last, row in flag_last(rows):
# write row to csv buffer
writer.writerow(row)
# write the buffer in chunks
if is_last or buffer_data.tell() + 1 > BIGQUERY_BUFFERSIZE:
if project.verbose: print('BigQuery Buffer Size', buffer_data.tell())
buffer_data.seek(0) # reset for read
io_to_table(auth, project_id, dataset_id, table_id, buffer_data, 'CSV', schema, skip_rows, disposition)
# reset buffer for next loop, be sure to do an append to the table
buffer_data.seek(0) #reset for write
buffer_data.truncate() # reset for write ( yes its needed for EOF marker )
disposition = 'WRITE_APPEND' # append all remaining records
skip_rows = 0
has_rows = True
# if no rows, clear table to simulate empty write
if not has_rows:
if project.verbose: print('BigQuery Zero Rows')
return io_to_table(auth, project_id, dataset_id, table_id, buffer_data, 'CSV', schema, skip_rows, disposition, wait)
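# Buffering behaviour of rows_to_table above, for reference: rows are written to an in-memory CSV buffer
# and flushed to io_to_table whenever the buffer exceeds BIGQUERY_BUFFERSIZE or the last row is reached.
# The first flush uses the caller's disposition (WRITE_TRUNCATE by default); later flushes switch to
# WRITE_APPEND with skip_rows=0 so subsequent chunks append to the same table.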
def json_to_table(auth, project_id, dataset_id, table_id, json_data, schema=None, disposition='WRITE_TRUNCATE', wait=True):
if project.verbose: print('BIGQUERY JSON TO TABLE: ', project_id, dataset_id, table_id)
buffer_data = StringIO()
has_rows = False
for is_last, record in flag_last(json_data):
# check if json is already string encoded, and write to buffer
buffer_data.write(record if isinstance(record, str) else json.dumps(record))
# write the buffer in chunks
if is_last or buffer_data.tell() + 1 > BIGQUERY_BUFFERSIZE:
if project.verbose: print('BigQuery Buffer Size', buffer_data.tell())
buffer_data.seek(0) # reset for read
io_to_table(auth, project_id, dataset_id, table_id, buffer_data, 'NEWLINE_DELIMITED_JSON', schema, 0, disposition)
# reset buffer for next loop, be sure to do an append to the table
buffer_data.seek(0) #reset for write
buffer_data.truncate() # reset for write ( yes its needed for EOF marker )
disposition = 'WRITE_APPEND' # append all remaining records
has_rows = True
# if not end append newline, for newline delimited json
else:
buffer_data.write('\n')
# if no rows, clear table to simulate empty write
if not has_rows:
if project.verbose: print('BigQuery Zero Rows')
    return io_to_table(auth, project_id, dataset_id, table_id, buffer_data, 'NEWLINE_DELIMITED_JSON', schema, 0, disposition, wait)
# NEWLINE_DELIMITED_JSON, CSV
def io_to_table(auth, project_id, dataset_id, table_id, data, source_format='CSV', schema=None, skip_rows=0, disposition='WRITE_TRUNCATE', wait=True):
# if data exists, write data to table
data.seek(0, 2)
if data.tell() > 0:
data.seek(0)
media = MediaIoBaseUpload(
BytesIO(data.read().encode('utf8')),
mimetype='application/octet-stream',
resumable=True,
chunksize=BIGQUERY_CHUNKSIZE
)
body = {
'configuration': {
'load': {
'destinationTable': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': table_id,
},
'sourceFormat': source_format,
'writeDisposition': disposition,
'autodetect': True,
'allowJaggedRows': True,
'allowQuotedNewlines':True,
'ignoreUnknownValues': True,
}
}
}
if schema:
body['configuration']['load']['schema'] = { 'fields':schema }
body['configuration']['load']['autodetect'] = False
if source_format == 'CSV':
body['configuration']['load']['skipLeadingRows'] = skip_rows
job = API_BigQuery(auth).jobs().insert(projectId=project.id, body=body, media_body=media).execute(run=False)
execution = job.execute()
response = None
while response is None:
status, response = job.next_chunk()
if project.verbose and status: print("Uploaded %d%%." % int(status.progress() * 100))
if project.verbose: print("Uploaded 100%")
    if wait: job_wait(auth, execution)  # reuse the already executed job rather than re-submitting the insert
    else: return execution
  # if there is no data and the disposition is WRITE_TRUNCATE, create/clear the table to simulate an empty write
elif disposition == 'WRITE_TRUNCATE':
if project.verbose: print("BIGQUERY: No data, clearing table.")
body = {
"tableReference": {
"projectId": project_id,
"datasetId": dataset_id,
"tableId": table_id
},
"schema": {
"fields": schema
}
}
# change project_id to be project.id, better yet project.cloud_id from JSON
API_BigQuery(auth).tables().insert(projectId=project.id, datasetId=dataset_id, body=body).execute()
def incremental_rows_to_table(auth, project_id, dataset_id, table_id, rows, schema=[], skip_rows=1, disposition='WRITE_APPEND', billing_project_id=None):
if project.verbose: print('BIGQUERY INCREMENTAL ROWS TO TABLE: ', project_id, dataset_id, table_id)
#load the data in rows to BQ into a temp table
table_id_temp = table_id + str(uuid.uuid4()).replace('-','_')
rows_to_table(auth, project_id, dataset_id, table_id_temp, rows, schema, skip_rows, disposition)
try:
#query the temp table to find the max and min date
start_date = _get_min_date_from_table(auth, project_id, dataset_id, table_id_temp, billing_project_id=billing_project_id)
end_date = _get_max_date_from_table(auth, project_id, dataset_id, table_id_temp, billing_project_id=billing_project_id)
#check if master table exists: if not create it, if so clear old data
if not table_exists(auth, project_id, dataset_id, table_id):
table_create(auth, project_id, dataset_id, table_id)
else:
_clear_data_in_date_range_from_table(auth, project_id, dataset_id, table_id, start_date, end_date, billing_project_id=billing_project_id)
#append temp table to master
query = ('SELECT * FROM `'
+ project_id + '.' + dataset_id + '.' + table_id_temp + '` ')
query_to_table(auth, project_id, dataset_id, table_id, query, disposition, False, billing_project_id=billing_project_id)
#delete temp table
drop_table(auth, project_id, dataset_id, table_id_temp, billing_project_id=billing_project_id)
  except:
    # delete temp table, then surface the original error so the caller knows the load failed
    drop_table(auth, project_id, dataset_id, table_id_temp, billing_project_id=billing_project_id)
    raise
def table_create(auth, project_id, dataset_id, table_id, is_time_partition=False):
body = {
"tableReference": {
"projectId": project_id,
"tableId": table_id,
"datasetId": dataset_id,
}
}
if is_time_partition:
body['timePartitioning'] = {
"type": "DAY"
}
API_BigQuery(auth).tables().insert(projectId=project_id, datasetId=dataset_id, body=body).execute()
def table_get(auth, project_id, dataset_id, table_id):
return API_BigQuery(auth).tables().get(projectId=project_id, datasetId=dataset_id, tableId=table_id).execute()
def table_exists(auth, project_id, dataset_id, table_id):
try:
table_get(auth, project_id, dataset_id, table_id)
return True
except HttpError as e:
if e.resp.status != 404: raise
return False
def table_copy(auth, from_project, from_dataset, from_table, to_project, to_dataset, to_table):
body = {
"copy": {
"sourceTable": {
"projectId": from_project,
"datasetId": from_dataset,
"tableId": from_table
},
"destinationTable": {
"projectId": to_project,
"datasetId": to_dataset,
"tableId": to_table
}
}
}
job_wait(auth, API_BigQuery(auth).jobs().insert(projectId=project.id, body=body).execute())
def table_to_rows(auth, project_id, dataset_id, table_id, fields=None, row_start=0, row_max=None):
if project.verbose: print('BIGQUERY ROWS:', project_id, dataset_id, table_id)
schema = table_to_schema(auth, project_id, dataset_id, table_id)
converter = None
for row in API_BigQuery(auth, iterate=True).tabledata().list(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
selectedFields=fields,
startIndex=row_start,
maxResults=row_max,
).execute():
if converter is None:
converter = _build_converter_array(schema, fields, len(row.get('f')))
yield [converter[i](next(iter(r.values()))) for i, r in enumerate(row['f'])] # may break if we attempt nested reads
def table_to_schema(auth, project_id, dataset_id, table_id):
if project.verbose: print('TABLE SCHEMA:', project_id, dataset_id, table_id)
return API_BigQuery(auth).tables().get(projectId=project_id, datasetId=dataset_id, tableId=table_id).execute()['schema']
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query
def query_to_rows(auth, project_id, dataset_id, query, row_max=None, legacy=True):
# Create the query
body = {
"kind": "bigquery#queryRequest",
"query": query,
"timeoutMs": 10000,
"dryRun": False,
"useQueryCache": True,
"useLegacySql": legacy
}
if row_max: body['maxResults'] = row_max
if dataset_id:
body['defaultDataset'] = {
"projectId": project_id,
"datasetId": dataset_id
}
# wait for query to complete
response = API_BigQuery(auth).jobs().query(projectId=project_id, body=body).execute()
while not response['jobComplete']:
sleep(5)
response = API_BigQuery(auth).jobs().getQueryResults(projectId=project_id, jobId=response['jobReference']['jobId']).execute(iterate=False)
# fetch query results
row_count = 0
while 'rows' in response:
converters = _build_converter_array(response.get('schema', None), None, len(response['rows'][0].get('f')))
for row in response['rows']:
yield [converters[i](next(iter(r.values()))) for i, r in enumerate(row['f'])] # may break if we attempt nested reads
row_count += 1
    if 'pageToken' in response:
      response = API_BigQuery(auth).jobs().getQueryResults(projectId=project_id, jobId=response['jobReference']['jobId'], pageToken=response['pageToken']).execute(iterate=False)
elif row_count < int(response['totalRows']):
response = API_BigQuery(auth).jobs().getQueryResults(projectId=project_id, jobId=response['jobReference']['jobId'], startIndex=row_count).execute(iterate=False)
else:
break
def make_schema(header):
return [{
'name': name,
'type': 'STRING',
'mode': 'NULLABLE'
} for name in row_header_sanitize(header)]
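# Illustrative example (assuming row_header_sanitize passes already-clean names through unchanged):
#   make_schema(['campaign_id', 'spend'])
#   -> [{'name': 'campaign_id', 'type': 'STRING', 'mode': 'NULLABLE'},
#       {'name': 'spend', 'type': 'STRING', 'mode': 'NULLABLE'}]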
def get_schema(rows, header=True, infer_type=True):
'''
  CAUTION: Memory heavy. This function buffers the entire iterator in memory and returns the buffered rows alongside the inferred schema.
  RECOMMEND: Define the schema yourself, it will also ensure data integrity downstream.
'''
schema = []
row_buffer = []
# everything else defaults to STRING
type_to_bq = {int:'INTEGER', bool:'BOOLEAN', float:'FLOAT'} if infer_type else {} # empty lookup defaults to STRING below
# first non null value determines type
non_null_column = set()
first = True
ct_columns = 0
for row in rows:
# buffer the iterator to be returned with schema
row += [None] * (ct_columns - len(row))
row_buffer.append(row)
# define schema field names and set defaults ( if no header enumerate fields )
if first:
ct_columns = len(row)
for index, value in enumerate(row_header_sanitize(row)):
schema.append({ "name":value if header else 'Field_%d' % index, "type":"STRING" })
# then determine type of each column
if not first and header:
for index, value in enumerate(row):
# if null, set only mode
if value is None or value == '':
schema[index]['mode'] = 'NULLABLE'
else:
column_type = type_to_bq.get(type(value), 'STRING')
# if type is set, check to make sure its consistent
if index in non_null_column:
# change type only if its inconsistent
if column_type != schema[index]['type']:
# mixed integers and floats default to floats
if column_type in ('INTEGER', 'FLOAT') and schema[index]['type'] in ('INTEGER', 'FLOAT'):
schema[index]['type'] = 'FLOAT'
# any strings are always strings
else:
schema[index]['type'] = 'STRING'
# if first non null value, then just set type
else:
schema[index]['type'] = column_type
non_null_column.add(index)
# no longer first row
first = False
return row_buffer, schema
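# Illustrative example (again assuming row_header_sanitize passes clean names through unchanged):
#   rows, schema = get_schema([['id', 'price'], [1, 2.5], [2, 3.0]])
#   # rows   -> the buffered input rows, header row included
#   # schema -> [{'name': 'id', 'type': 'INTEGER'}, {'name': 'price', 'type': 'FLOAT'}]
#   # ('mode': 'NULLABLE' is only added to columns in which a null/empty value was seen)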
def _int_from_json(value):
if value:
return int(value)
else:
return value
def _float_from_json(value):
if value:
return float(value)
else:
return value
_JSON_CONVERTERS = {
'INTEGER': _int_from_json,
'INT64': _int_from_json,
'FLOAT': _float_from_json,
'FLOAT64': _float_from_json,
'BOOLEAN': lambda v: v,
'BOOL': lambda v: v,
'STRING': lambda v: v, #no conversion needed, adapt others as needed
'BYTES': lambda v: v,
'TIMESTAMP': lambda v: v,
'DATETIME': lambda v: v,
'DATE': lambda v: v,
'TIME': lambda v: v,
'RECORD': lambda v: v,
}
def _build_converter_array(schema, fields, col_count):
converters = []
if schema:
for field in schema['fields']:
      if fields is None or field['name'] in fields:
converters.append(_JSON_CONVERTERS[field['type']])
else:
    # No schema, so simply return each value as a string
converters = [lambda v: v] * col_count
#print(converters)
return converters
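# Illustrative mapping performed above: a schema of
#   [{'name': 'id', 'type': 'INTEGER'}, {'name': 'name', 'type': 'STRING'}]
# yields [_int_from_json, <identity>], so each raw cell value returned by the API is cast
# according to its column type before being yielded by table_to_rows / query_to_rows.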
def drop_table(auth, project_id, dataset_id, table_id, billing_project_id=None):
if not billing_project_id:
billing_project_id = project_id
query = ('DROP TABLE `'
+ project_id + '.' + dataset_id + '.' + table_id + '` ')
body = {
"kind": "bigquery#queryRequest",
'query': query,
'defaultDataset': {
'datasetId' : dataset_id,
},
'useLegacySql': False,
}
job_wait(auth, API_BigQuery(auth).jobs().query(projectId=billing_project_id, body=body).execute())
def _get_max_date_from_table(auth, project_id, dataset_id, table_id, billing_project_id=None):
if not billing_project_id:
billing_project_id = project_id
query = ('SELECT MAX(Report_Day) FROM `'
+ project_id + '.' + dataset_id + '.' + table_id + '` ')
body = {
"kind": "bigquery#queryRequest",
'query': query,
'defaultDataset': {
'datasetId' : dataset_id,
},
'useLegacySql': False,
}
job = API_BigQuery(auth).jobs().query(projectId=billing_project_id, body=body).execute()
return job['rows'][0]['f'][0]['v']
def _get_min_date_from_table(auth, project_id, dataset_id, table_id, billing_project_id=None):
if not billing_project_id:
billing_project_id = project_id
query = ('SELECT MIN(Report_Day) FROM `'
+ project_id + '.' + dataset_id + '.' + table_id + '` ')
body = {
"kind": "bigquery#queryRequest",
'query': query,
'defaultDataset': {
'datasetId' : dataset_id,
},
'useLegacySql': False,
}
job = API_BigQuery(auth).jobs().query(projectId=billing_project_id, body=body).execute()
return job['rows'][0]['f'][0]['v']
def execute_statement(auth, project_id, dataset_id, statement, billing_project_id=None, use_legacy_sql=False):
if not billing_project_id:
billing_project_id = project_id
body = {
"kind": "bigquery#queryRequest",
'query': statement,
'defaultDataset': {
'datasetId' : dataset_id,
},
'useLegacySql': use_legacy_sql,
}
job_wait(auth, API_BigQuery(auth).jobs().query(projectId=billing_project_id, body=body).execute())
#start and end date must be in format YYYY-MM-DD
def _clear_data_in_date_range_from_table(auth, project_id, dataset_id, table_id, start_date, end_date, billing_project_id=None):
if not billing_project_id:
billing_project_id = project_id
query = ('DELETE FROM `'
+ project_id + '.' + dataset_id + '.' + table_id + '` '
           + 'WHERE Report_Day >= "' + start_date + '" AND Report_Day <= "' + end_date + '"'
)
body = {
"kind": "bigquery#queryRequest",
'query': query,
'defaultDataset': {
'datasetId' : dataset_id,
},
'useLegacySql': False,
}
job_wait(auth, API_BigQuery(auth).jobs().query(projectId=billing_project_id, body=body).execute())
|
'''
@copyright: 2022 - Symas Corporation
'''
# config attribute names
ATTRIBUTES = 'attributes'
USER_OU = 'users'
ROLE_OU = 'roles'
PERM_OU = 'perms'
SUFFIX = 'suffix'
DIT = 'dit'
UID = 'uid'
PROP_OC_NAME = 'ftProperties'
OU = 'ou'
INTERNAL_ID = 'ftid'
CN = 'cn'
SN = 'sn'
DN = 'dn'
CONSTRAINT = 'ftCstr'
DESC = 'description'
PROPS = 'ftProps'
SUCCESS = 0
OBJECT_ALREADY_EXISTS = 68
NOT_FOUND = 32
NO_SUCH_ATTRIBUTE = 16
NOT_ALLOWED_ON_NONLEAF = 66
CONFIG_BOOTSTRAP_FAILED = 126
ROLE_ALREADY_ACTIVATED_ERROR = 2001
ROLE_NOT_ACTIVATED_ERROR = 2002
USER_SEARCH_FAILED = 1000
USER_READ_FAILED = 1001
USER_ADD_FAILED = 1002
USER_UPDATE_FAILED = 1003
USER_DELETE_FAILED = 1004
USER_NOT_FOUND = 1005
USER_ID_NULL = 1006
USER_NULL = 1008
USER_PW_INVLD = 1013
USER_PW_CHK_FAILED = 1014
USER_SESS_NULL = 1030
URLE_NULL = 2003
URLE_ASSIGN_FAILED = 2004
URLE_DEASSIGN_FAILED = 2005
URLE_ACTIVATE_FAILED = 2006
URLE_DEACTIVE_FAILED = 2007
URLE_ASSIGN_EXIST = 2008
URLE_ASSIGN_NOT_EXIST = 2009
URLE_SEARCH_FAILED = 2010
URLE_ALREADY_ACTIVE = 2011
URLE_NOT_ACTIVE = 2022
ACTV_FAILED_DAY = 2050
ACTV_FAILED_DATE = 2051
ACTV_FAILED_TIME = 2052
ACTV_FAILED_TIMEOUT = 2053
ACTV_FAILED_LOCK = 2054
PERM_SEARCH_FAILED = 3000
PERM_READ_OP_FAILED = 3001
PERM_READ_OBJ_FAILED = 3002
PERM_ADD_FAILED = 3003
PERM_UPDATE_FAILED = 3004
PERM_DELETE_FAILED = 3005
PERM_NULL = 3008
PERM_OPERATION_NULL = 3009
PERM_OBJECT_NULL = 3010
PERM_OBJECT_NM_NULL = 3027
PERM_OPERATION_NM_NULL = 3026
PERM_GRANT_FAILED = 3012
PERM_REVOKE_FAILED = 3024
PERM_OP_NOT_FOUND = 3006
PERM_OBJ_NOT_FOUND = 3007
PERM_DUPLICATE = 3011
PERM_ROLE_NOT_EXIST = 3016
PERM_ROLE_SEARCH_FAILED = 3019
PERM_NOT_EXIST = 3029
PERM_OBJECT_DELETE_FAILED_NONLEAF = 3050
ROLE_SEARCH_FAILED = 5000
ROLE_READ_FAILED = 5001
ROLE_ADD_FAILED = 5002
ROLE_UPDATE_FAILED = 5003
ROLE_DELETE_FAILED = 5004
ROLE_NM_NULL = 5005
ROLE_NOT_FOUND = 5006
ROLE_NULL = 5007
ROLE_USER_ASSIGN_FAILED = 5008
ROLE_USER_DEASSIGN_FAILED = 5009
ROLE_LST_NULL = 5010
ROLE_OCCUPANT_SEARCH_FAILED = 5011
ROLE_REMOVE_OCCUPANT_FAILED = 5012
CNTR_CREATE_FAILED = 6001
CNTR_DELETE_FAILED = 6002
CNTR_NAME_NULL = 6003
CNTR_NAME_INVLD = 6004
CNTR_PARENT_NULL = 6005
CNTR_PARENT_INVLD = 6006
CNTR_NOT_FOUND = 6007
CNTR_ALREADY_EXISTS = 6008
SUFX_CREATE_FAILED = 6010
SUFX_DELETE_FAILED = 6011
SUFX_NAME_NULL = 6012
SUFX_ALREADY_EXISTS = 6016
SUFX_NOT_EXIST = 6017
|
#!/usr/bin/env python
import numpy as np
from scipy.spatial import Delaunay
from . import pg_utilities
from . import imports_and_exports
"""
.. module:: generate_shapes
:synopsis: Contains code to generate placental shapes for generic placental models.
:synopsis:Contains code to generate placental shapes for generic placental models \n
(i.e. from literature measures without specific data from an individual
"""
def equispaced_data_in_ellipsoid(n, volume, thickness, ellipticity):
"""
:Function name: **equispaced_data_in_ellipsoid**
Generates equally spaced data points in an ellipsoid.
:inputs:
- n: Number of data points which we aim to generate
- volume: Volume of ellipsoid
- thickness: Placental thickness (z-dimension)
- ellipticity: Ratio of y to x axis dimensions
    :return:
- Edata: A nx3 array of datapoints, with each point being defined by its x-,y-, and z- coordinates
A way you might want to use me is:
>>> n = 100
>>> volume = 10
>>> thickness = 3
>>> ellipticity = 1.1
>>> equispaced_data_in_ellipsoid(n, volume, thickness, ellipticity)
This will return 100 data points in an ellipse with z-axis thickness 3, volume 10, and with the y-axis dimension 1.1 times the x-axis dimension.
"""
data_spacing = (volume / n) ** (1.0 / 3.0)
print('Generating data ' + str(data_spacing) + ' apart')
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
    # Aiming to generate seed points that fill a cuboid encompassing the placental volume then remove seed points that
# are external to the ellipsoid
num_data = 0 # zero the total number of data points
# Calculate the number of points that should lie in each dimension in a cube
nd_x = np.floor(2.0 * (x_radius + data_spacing) / data_spacing)
nd_y = np.floor(2.0 * (y_radius + data_spacing) / data_spacing)
nd_z = np.floor(2.0 * (z_radius + data_spacing) / data_spacing)
nd_x = int(nd_x)
nd_y = int(nd_y)
nd_z = int(nd_z)
# Set up edge node coordinates
x_coord = np.linspace(-x_radius - data_spacing / 2.0, x_radius + data_spacing / 2.0, nd_x)
y_coord = np.linspace(-y_radius - data_spacing / 2.0, y_radius + data_spacing / 2.0, nd_y)
z_coord = np.linspace(-z_radius - data_spacing / 2.0, z_radius + data_spacing / 2.0, nd_z)
    # Use these vectors to form a uniformly spaced grid
data_coords = np.vstack(np.meshgrid(x_coord, y_coord, z_coord)).reshape(3, -1).T
# Store nodes that lie within ellipsoid
datapoints = np.zeros((nd_x * nd_y * nd_z, 3))
for i in range(len(data_coords)): # Loop through grid
coord_check = pg_utilities.check_in_ellipsoid(data_coords[i][0], data_coords[i][1], data_coords[i][2], x_radius,
y_radius, z_radius)
if coord_check is True: # Has to be strictly in the ellipsoid
datapoints[num_data, :] = data_coords[i, :] # add to data array
num_data = num_data + 1
datapoints.resize(num_data, 3,refcheck=False) # resize data array to correct size
print('Data points within ellipsoid allocated. Total = ' + str(len(datapoints)))
return datapoints
def uniform_data_on_ellipsoid(n, volume, thickness, ellipticity, random_seed):
"""
:Function name: **uniform_data_on_ellipsoid**
    Generates uniformly distributed (random, subject to a minimum spacing) data points on the positive z-surface of an ellipsoid
:inputs:
- n: number of data points which we aim to generate
- volume: volume of ellipsoid
- thickness: placental thickness (z-dimension)
    - ellipticity: ratio of y to x axis dimensions
    - random_seed: seed for the random number generator, so point sets are repeatable
:return:
- chorion_data: A nx3 array of datapoints, with each point being defined by its x-,y-, and z- coordinates
A way you might want to use me is:
>>> n = 100
>>> volume = 10
>>> thickness = 3
>>> ellipticity = 1.1
    >>> random_seed = 0
    >>> uniform_data_on_ellipsoid(n, volume, thickness, ellipticity, random_seed)
This will return 100 data points on the positive z-surface ellipse with z-axis thickness 3, volume 10,
and with the y-axis dimension 1.1 times the x-axis dimension.
"""
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
area_estimate = np.pi * x_radius * y_radius
data_spacing = 0.85 * np.sqrt(area_estimate / n)
chorion_data = np.zeros((n, 3))
np.random.seed(random_seed)
generated_seed = 0
acceptable_attempts = n * 1000 # try not to have too many failures
attempts = 0
while generated_seed < n and attempts < acceptable_attempts:
# generate random x-y coordinates between negative and positive radii
new_x = np.random.uniform(-x_radius, x_radius)
new_y = np.random.uniform(-y_radius, y_radius)
# check if new coordinate is on the ellipse
        if ((new_x / x_radius) ** 2 + (new_y / y_radius) ** 2) < 1:  # inside the ellipse footprint, so it projects onto the surface
if generated_seed == 0:
generated_seed = generated_seed + 1
new_z = pg_utilities.z_from_xy(new_x, new_y, x_radius, y_radius, z_radius)
chorion_data[generated_seed - 1][:] = [new_x, new_y, new_z]
else:
reject = False
for j in range(0, generated_seed + 1):
distance = (chorion_data[j - 1][0] - new_x) ** 2 + (chorion_data[j - 1][1] - new_y) ** 2
distance = np.sqrt(distance)
if distance <= data_spacing:
reject = True
break
if reject is False:
generated_seed = generated_seed + 1
new_z = pg_utilities.z_from_xy(new_x, new_y, x_radius, y_radius, z_radius)
chorion_data[generated_seed - 1][:] = [new_x, new_y, new_z]
attempts = attempts + 1
chorion_data.resize(generated_seed, 3) # resize data array to correct size
print('Data points on ellipsoid allocated. Total = ' + str(len(chorion_data)) )
return chorion_data
def gen_rect_cover_ellipsoid(volume, thickness, ellipticity, x_spacing, y_spacing, z_spacing):
# Generates equally spaced data nodes and elements and constructs a rectangular 'mesh' that covers the space that is
# made up of an ellipsoidal placenta
# volume=volume of ellipsoid
# thickness = placental thickness (z-dimension)
# ellipticity = ratio of y to x axis dimensions
# X,Y,Z spacing is the number of elements required in each of the x, y z directions
# Calculate the dimensions of the ellipsoid
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
# z height of ellipsoid is 2* zradius
    # We want the number of nodes to cover the height and have the prescribed spacing
nnod_x = int(np.ceil(x_radius * 2.0 / x_spacing)) + 1
x_width = x_spacing * (nnod_x - 1)
nnod_y = int(np.ceil(y_radius * 2.0 / y_spacing)) + 1
y_width = y_spacing * (nnod_y - 1)
nnod_z = int(np.ceil(z_radius * 2.0 / z_spacing)) + 1
z_width = z_spacing * (nnod_z - 1)
node_loc = gen_rectangular_node(x_width, y_width, z_width, nnod_x, nnod_y, nnod_z)
# Generating the element connectivity of each cube element, 8 nodes for each 3D cube element
elems = cube_mesh_connectivity(nnod_x, nnod_y, nnod_z)
return {'nodes': node_loc, 'elems': elems, 'total_nodes': nnod_x * nnod_y * nnod_z,
'total_elems': (nnod_x - 1) * (nnod_y - 1) * (nnod_z - 1)}
def gen_ellip_mesh_tet(volume, thickness, ellipticity, n):
""" Generates ellipsoid tetrahedral mesh for 3D problems
Inputs:
- volume: volume of placental ellipsoid
- thickness: placental thickness (z-dimension)
- ellipticity: ratio of y to x axis dimensions
- n: number of datapoints generated to create the mesh
Returns:
- nodes: nodes location of mesh
- elems: element connectivity of mesh (tetrahedral element)
- node_array: array of nodes
- element_array: array of elements
"""
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
nodeSpacing = (n / (2 * x_radius * 2 * y_radius * 2 * z_radius)) ** (1. / 3)
nnod_x = 2 * x_radius * nodeSpacing
nnod_y = 2 * y_radius * nodeSpacing
nnod_z = 2 * z_radius * nodeSpacing
nodes = gen_rectangular_node(x_radius * 2, y_radius * 2, z_radius * 2, nnod_x, nnod_y, nnod_z)
# nodes inside the ellipsoid
ellipsoid_node = np.zeros((len(nodes), 3))
count = 0
for nnode in range(0, len(nodes)):
coord_point = nodes[nnode][0:3]
inside = pg_utilities.check_in_on_ellipsoid(coord_point[0], coord_point[1], coord_point[2], x_radius, y_radius,
z_radius)
if inside:
ellipsoid_node[count, :] = coord_point[:]
count = count + 1
ellipsoid_node.resize(count, 3,refcheck=False)
xyList = ellipsoid_node[:, [0, 1]]
xyListUnique = np.vstack({tuple(row) for row in xyList})
# looking for z_coordinate of surface nodes
for xyColumn in xyListUnique:
xyNodes = np.where(np.all(xyList == xyColumn, axis=1))[0]
if len(xyNodes) > 1:
x_coord = ellipsoid_node[xyNodes[0], 0]
y_coord = ellipsoid_node[xyNodes[0], 1]
ellipsoid_node[xyNodes[len(xyNodes) - 1], 2] = pg_utilities.z_from_xy(x_coord, y_coord, x_radius, y_radius,
z_radius)
ellipsoid_node[xyNodes[0], 2] = -1 * (
pg_utilities.z_from_xy(x_coord, y_coord, x_radius, y_radius, z_radius))
# generate tetrahedral mesh
pyMesh = Delaunay(ellipsoid_node)
# Build arrays to pass into openCMISS conversion:
node_loc = pyMesh.points
temp_elems = pyMesh.simplices
# CHECK ELEMENTS FOR 0 VOLUME:
min_vol = 0.00001
index = 0
indexArr = []
for element in temp_elems:
x_coor = []
y_coor = []
z_coor = []
for node in element:
x_coor.append(node_loc[node][0])
y_coor.append(node_loc[node][1])
z_coor.append(node_loc[node][2])
vmat = np.vstack((x_coor, y_coor, z_coor, [1.0, 1.0, 1.0, 1.0])) # matrix of coor of element
elem_volume = (1 / 6.0) * abs(np.linalg.det(vmat)) # volume of each tetrahedral element
# if volume is not zero
if elem_volume > min_vol:
indexArr.append(index)
index = index + 1
# update arrays without 0 volume elements, to pass into openCMISS
elems = temp_elems[indexArr, :]
for i in range(len(elems)):
elems[i] = [x + 1 for x in elems[i]]
element_array = range(1, len(elems) + 1)
node_array = range(1, len(node_loc) + 1)
return {'nodes': node_loc, 'elems': elems, 'element_array': element_array, 'node_array': node_array,
'nodeSpacing': nodeSpacing}
def gen_rectangular_node(x_width, y_width, z_width, nnod_x, nnod_y, nnod_z):
# Create linspaces for x y and z coordinates
x = np.linspace(-x_width / 2.0, x_width / 2.0, int(nnod_x)) # linspace for x axis
y = np.linspace(-y_width / 2.0, y_width / 2.0, int(nnod_y)) # linspace for y axis
z = np.linspace(-z_width / 2.0, z_width / 2.0, int(nnod_z)) # linspace for z axis
node_loc_temp = np.vstack(np.meshgrid(y, z, x)).reshape(3, -1).T # generate nodes for rectangular mesh
node_loc = np.zeros((len(node_loc_temp), 3))
for i in range(0, len(node_loc)):
node_loc[i][0] = node_loc_temp[i][2]
node_loc[i][1] = node_loc_temp[i][0]
node_loc[i][2] = node_loc_temp[i][1]
return node_loc
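# Node ordering note: the nodes returned above vary fastest in x, then y, then z, which matches the
# numbering assumed by cube_mesh_connectivity and cube_mesh_connectivity_quadratic below.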
def gen_rectangular_mesh2(nel_x, nel_y, nel_z, xdim, ydim, zdim, element_type):
    # generates a rectangular mesh of defined dimensions using either linear or quadratic elements
if element_type == 1: # linear element
nnod_x = int(nel_x + 1)
nnod_y = int(nel_y + 1)
nnod_z = int(nel_z + 1)
elif element_type == 2: # quadratic element
nnod_x = int((nel_x * 2) + 1)
nnod_y = int((nel_y * 2) + 1)
nnod_z = int((nel_z * 2) + 1)
node = gen_rectangular_node(xdim, ydim, zdim, nnod_x, nnod_y, nnod_z) # getting nodes
if element_type == 1: # linear element
elems = cube_mesh_connectivity(nnod_x, nnod_y, nnod_z) # getting elem connectivity
elif element_type == 2: # quadratic element
elems = cube_mesh_connectivity_quadratic(nel_x, nel_y, nel_z, nnod_x, nnod_y,
nnod_z) # getting element connectivity
element_array = range(1, len(elems) + 1)
node_array = range(1, len(node) + 1)
if element_type == 2:
surfacenodes = identify_surface_node_quad(nel_x, nel_y, nel_z)
else:
print("This element type has no implemented surface node definition")
surfacenodes = 0
return {'nodes': node, 'elems': elems, 'element_array': element_array,
'node_array': node_array, 'surface_nodes': surfacenodes}
def gen_3d_ellipsoid(nel_x, nel_y, nel_z, volume, thickness, ellipticity, element_type):
""" Generates ellipsoid placental mesh to solve 3D problems (note this is not a quality structured mesh)
Inputs:
    - nel_x, nel_y, nel_z: number of elements in the x, y, z axes; the more elements, the rounder the mesh
- volume: volume of placental ellipsoid
- thickness: placental thickness (z-dimension)
- ellipticity: ratio of y to x axis dimensions
Returns:
- placental_node_coor: nodes location of mesh
    - placental_el_con: element connectivity of mesh (hexahedral elements)
- node_array: array of nodes
- element_array: array of elements
"""
# creating cube between -1 and 1 with n number of element
# cubelength=2
if element_type == 1: # linear element
nnod_x = int(nel_x + 1)
nnod_y = int(nel_y + 1)
nnod_z = int(nel_z + 1)
elif element_type == 2: # quadratic element
nnod_x = int((nel_x * 2) + 1)
nnod_y = int((nel_y * 2) + 1)
nnod_z = int((nel_z * 2) + 1)
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
cube_node = gen_rectangular_node(2 * x_radius, 2 * y_radius, 2 * z_radius, nnod_x, nnod_y, nnod_z)
if element_type == 1: # linear element
cube_elems = cube_mesh_connectivity(nnod_x, nnod_y, nnod_z) # getting elem connectivity
elif element_type == 2: # quadratic element
cube_elems = cube_mesh_connectivity_quadratic(nel_x, nel_y, nel_z, nnod_x, nnod_y,
nnod_z) # getting element connectivity
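    # Map each node of the rectangular block onto the ellipsoid. The expressions below are a standard
    # smooth cube-to-sphere style mapping, e.g. x' = x*sqrt(1 - y^2/(2b^2) - z^2/(2c^2) + y^2*z^2/(3*b^2*c^2))
    # (and its cyclic permutations), with semi-axes a, b, c = x_radius, y_radius, z_radius.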
ellipsoid_coor = np.zeros((len(cube_node), 3))
for ii in range(0, len(cube_node)):
ellipsoid_coor[ii, 0] = cube_node[ii, 0] * np.sqrt(1.0 - cube_node[ii, 1] ** 2 / (2.0 * y_radius ** 2) -
cube_node[ii, 2] ** 2 / (2.0 * z_radius ** 2) + cube_node[
ii, 1] ** 2 *
cube_node[ii, 2] ** 2 / (
3.0 * y_radius ** 2 * z_radius ** 2)) # for x_coor
ellipsoid_coor[ii, 1] = cube_node[ii, 1] * np.sqrt(1.0 - cube_node[ii, 0] ** 2 / (2.0 * x_radius ** 2) -
cube_node[ii, 2] ** 2 / (2.0 * z_radius ** 2) + cube_node[
ii, 0] ** 2 * cube_node[ii, 2] ** 2
/ (3.0 * x_radius ** 2 * z_radius ** 2)) # for y_coor
ellipsoid_coor[ii, 2] = cube_node[ii, 2] * np.sqrt(1.0 - cube_node[ii, 1] ** 2 / (2.0 * y_radius ** 2) -
cube_node[ii, 0] ** 2 / (2.0 * x_radius ** 2) + cube_node[
ii, 1] ** 2 * cube_node[ii, 0] ** 2
/ (3.0 * y_radius ** 2 * x_radius ** 2)) # for z_coor
element_array = range(1, len(cube_elems) + 1)
node_array = range(1, len(ellipsoid_coor) + 1)
if element_type == 2:
surfacenodes = identify_surface_node_quad(nel_x, nel_y, nel_z)
else:
print("This element type has no implemented surface node definition")
surfacenodes = 0
return {'placental_node_coor': ellipsoid_coor, 'placental_el_con': cube_elems, 'element_array': element_array,
'node_array': node_array, 'surface_nodes': surfacenodes}
def cube_mesh_connectivity(nnod_x, nnod_y, nnod_z):
"""Generates element connectivity in cube mesh
Inputs:
- nnod_x:number of node in x axis
- nnod_y:number of node in y axis
- nnod_z:number of node in z axis
Outputs:
- elems: array of element connectivity
"""
num_elems = (nnod_x - 1) * (nnod_y - 1) * (nnod_z - 1)
elems = np.zeros((num_elems, 9),
dtype=int) # this stores first element number and then the nodes of each mesh element
element_number = 0
ne = 0
# loop through elements
for k in range(1, nnod_z):
for j in range(1, nnod_y):
for i in range(1, nnod_x):
elems[ne][0] = ne # store element number
elems[ne][1] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) # lowest coordinates
elems[ne][2] = elems[ne][1] + 1 # add one in x
elems[ne][3] = elems[ne][1] + nnod_x # go through x and find first in y
elems[ne][4] = elems[ne][3] + 1 # add one in y
elems[ne][5] = elems[ne][1] + nnod_x * nnod_y # same as 1 -4 but at higher z -coord
elems[ne][6] = elems[ne][2] + nnod_x * nnod_y
elems[ne][7] = elems[ne][3] + nnod_x * nnod_y
elems[ne][8] = elems[ne][4] + nnod_x * nnod_y
ne = ne + 1
return elems
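# Worked example: for a single-element mesh (nnod_x = nnod_y = nnod_z = 2) the loops above produce
# elems = [[0, 0, 1, 2, 3, 4, 5, 6, 7]], i.e. the element number followed by its eight corner nodes,
# numbered with x varying fastest, then y, then z.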
def cube_mesh_connectivity_quadratic(nel_x, nel_y, nel_z, nnod_x, nnod_y, nnod_z):
"""Generates element connectivity in quadratic cube mesh
Inputs:
- nnod_x:number of node in x axis
- nnod_y:number of node in y axis
- nnod_z:number of node in z axis
Outputs:
- elems: array of element connectivity in quadratic
"""
num_elems = nel_x * nel_y * nel_z
elems = np.zeros((num_elems, 28), dtype=int)
element_number = 0
ne = 0
# Got the element
for k in range(1, nnod_z, 2):
for j in range(1, nnod_y, 2):
for i in range(1, nnod_x, 2):
# 1st layer
elems[ne][0] = ne
elems[ne][1] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) # 1st node
elems[ne][2] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) + 1 # right subsequent node
elems[ne][3] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) + 2 # right subsequent node
elems[ne][4] = elems[ne][1] + nnod_x # 1st node in another y layer
elems[ne][5] = elems[ne][1] + nnod_x + 1 # right subsequent node
elems[ne][6] = elems[ne][1] + nnod_x + 2 # right subsequent node
elems[ne][7] = elems[ne][1] + 2 * (nnod_x) # 1st node in another y layer
elems[ne][8] = elems[ne][1] + 2 * (nnod_x) + 1 # right subsequent node
elems[ne][9] = elems[ne][1] + 2 * (nnod_x) + 2 # right subsequent node
# 2nd layer
elems[ne][10] = elems[ne][1] + nnod_x * nnod_y # same in one z layer
elems[ne][11] = elems[ne][2] + nnod_x * nnod_y
elems[ne][12] = elems[ne][3] + nnod_x * nnod_y
elems[ne][13] = elems[ne][4] + nnod_x * nnod_y
elems[ne][14] = elems[ne][5] + nnod_x * nnod_y
elems[ne][15] = elems[ne][6] + nnod_x * nnod_y
elems[ne][16] = elems[ne][7] + nnod_x * nnod_y
elems[ne][17] = elems[ne][8] + nnod_x * nnod_y
elems[ne][18] = elems[ne][9] + nnod_x * nnod_y
                # third layer
elems[ne][19] = elems[ne][1] + nnod_x * nnod_y * 2 # same in another z layer
elems[ne][20] = elems[ne][2] + nnod_x * nnod_y * 2
elems[ne][21] = elems[ne][3] + nnod_x * nnod_y * 2
elems[ne][22] = elems[ne][4] + nnod_x * nnod_y * 2
elems[ne][23] = elems[ne][5] + nnod_x * nnod_y * 2
elems[ne][24] = elems[ne][6] + nnod_x * nnod_y * 2
elems[ne][25] = elems[ne][7] + nnod_x * nnod_y * 2
elems[ne][26] = elems[ne][8] + nnod_x * nnod_y * 2
elems[ne][27] = elems[ne][9] + nnod_x * nnod_y * 2
ne = ne + 1
return elems
def identify_surface_node_quad(nel_x, nel_y, nel_z):
"""Generates collection of nodes that are on the surface of in quadratic placental mesh
Inputs:
- nel_x:number of elem in x axis
- nel_y:number of elem in y axis
- nel_z:number of elem in z axis
Outputs:
- surfacenode: collection of nodes on the surface of placental mesh
"""
nnod_x = int((nel_x * 2) + 1) # number of nodes in x axis
nnod_y = int((nel_y * 2) + 1) # number of nodes in y axis
nnod_z = int((nel_z * 2) + 1) # number of nodes in z axis
# For left and right surface
    sIEN = np.zeros((9, nel_y * nel_z), dtype=int)  # to store surface individual element nodes (sIEN)
e = 0
for k in range(1, nnod_x * nnod_y * (nnod_z - 1), (nnod_x * nnod_y) * 2): # go up
for j in range(1, nnod_x * (nnod_y - 1), 2 * nnod_x): # go left
sIEN[0, e] = j + (k - 1) # 1st node
sIEN[1, e] = sIEN[0, e] + (nnod_x) * (nnod_y) # 2nd node
sIEN[2, e] = sIEN[1, e] + (nnod_x) * (nnod_y) # 3rd node
sIEN[3, e] = sIEN[0, e] + nnod_x # 4th node
sIEN[4, e] = sIEN[1, e] + nnod_x # 5th node
sIEN[5, e] = sIEN[2, e] + nnod_x # 6th node
sIEN[6, e] = sIEN[3, e] + nnod_x # 7th node
sIEN[7, e] = sIEN[4, e] + nnod_x # 8th node
sIEN[8, e] = sIEN[5, e] + nnod_x # 9th node
e = e + 1
left = np.unique(sIEN) # collection of nodes of left surface
right = np.unique(sIEN.T + (nnod_x - 1)) # collection of nodes on right surface
# For front and back surface
sIEN = np.zeros((9, nel_x * nel_z), dtype=int)
e = 0
for k in range(1, nnod_x * nnod_y * (nnod_z - 2), (nnod_x * nnod_y) * 2): # go up
for i in range(1, nnod_x - 1, 2): # go right
sIEN[0, e] = i + (k - 1)
sIEN[1, e] = sIEN[0, e] + 1
sIEN[2, e] = sIEN[0, e] + 2
sIEN[3, e] = sIEN[0, e] + (nnod_x * nnod_y)
sIEN[4, e] = sIEN[3, e] + 1
sIEN[5, e] = sIEN[3, e] + 2
sIEN[6, e] = sIEN[3, e] + (nnod_x * nnod_y)
sIEN[7, e] = sIEN[6, e] + 1
sIEN[8, e] = sIEN[6, e] + 2
e = e + 1
front = np.unique(sIEN) # collection of nodes on front surface
back = np.unique(sIEN.T + (nnod_x * (nnod_y - 1))) # collection of nodes on back surface
# For top and bottom surface
sIEN = np.zeros((9, nel_x * nel_y), dtype=int)
e = 0
for j in range(1, nnod_x * (nnod_y - 1), nnod_x * 2): # go up
for i in range(1, nnod_x - 1, 2): # go back
sIEN[0, e] = i + (j - 1)
sIEN[1, e] = sIEN[0, e] + 1
sIEN[2, e] = sIEN[0, e] + 2
sIEN[3, e] = sIEN[0, e] + nnod_x
sIEN[4, e] = sIEN[3, e] + 1
sIEN[5, e] = sIEN[3, e] + 2
sIEN[6, e] = sIEN[3, e] + nnod_x
sIEN[7, e] = sIEN[6, e] + 1
sIEN[8, e] = sIEN[6, e] + 2
e = e + 1
bottom = np.unique(sIEN) # collection of nodes on bottom surface
top = np.unique(sIEN.T + (nnod_x * nnod_y) * (nnod_z - 1)) # collection of nodes on top surface
surfacenode = np.hstack((front, back, left, right, bottom, top))
surfacenode = np.unique(surfacenode) # collection of surface nodes from all surface
return surfacenode
def identify_node_from_coord(nodes, filename):
    # Reads a list of x,y,z coordinates from a file and, for each coordinate, identifies the closest
    # mesh node (assumed intent; the original implementation appended the y-coordinate twice and
    # returned from inside the search loop, so the nearest-node search is completed here).
    xyz = open(filename, 'r')
    xyz_coor = xyz.readlines()  # readlines
    xyz.close()
    xyzList = []
    for i in range(len(xyz_coor)):
        fields = xyz_coor[i].split()
        targetpoint = []
        targetpoint.append(float(fields[0]))  # x coor
        targetpoint.append(float(fields[1]))  # y coor
        targetpoint.append(float(fields[2]))  # z coor
        xyzList.append(targetpoint)
    node_list = np.zeros(len(xyzList), dtype=int)
    for i in range(0, len(xyzList)):
        mindist = 100000
        for j in range(0, len(nodes)):
            distance = np.sqrt((xyzList[i][0] - nodes[j][0]) ** 2 + (xyzList[i][1] - nodes[j][1]) ** 2 +
                               (xyzList[i][2] - nodes[j][2]) ** 2)
            if distance < mindist:
                mindist = distance
                node_list[i] = j
    return node_list
def identify_vessel_node(ellipsoid_coor, surfacenode, stem_file, sa_radius, dv_radius, volume,thickness, ellipticity):
"""Generates array of spiral artery nodes and decidual vein nodes. Spiral artery nodes are mapped with stem villi.
Inputs:
- ellipsoid_coor:coordinate of nodes of placental mesh
- surfacenode:array of surface nodes
- stem_file:txt file that described stem villi locations
Outputs:
- spiral_array: array of spiral artery nodes
    - decidual_array: array of decidual vein nodes
- vesselnode: array of both spiral and decidual nodes
- surfnode_ex_vessel: array of surface node excluding vessel nodes
"""
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
xyList = np.zeros((len(surfacenode), 4))
count = 0
for i in range(0, len(surfacenode)): # taking only x and y coordinates
        if ellipsoid_coor[surfacenode[i] - 1, 3] < 0:  # take upper surface nodes only, as this is where vessels reside
# location from upper surface nodes only
xyList[count, 0] = ellipsoid_coor[surfacenode[i] - 1, 0] #node number
xyList[count, 1] = ellipsoid_coor[surfacenode[i] - 1, 1] #x-coordinate
xyList[count, 2] = ellipsoid_coor[surfacenode[i] - 1, 2] #y-coordinate
xyList[count, 3] = ellipsoid_coor[surfacenode[i] - 1, 3] #z-coordinate
count = count + 1
xyList = xyList[0:count, :]
surfnode_ex_vessel = np.copy(surfacenode)
vesselnode_temp = np.vstack({tuple(row) for row in xyList}) #nodes that might be vessels
# reading in the stem vessel to map the spiral artery location
stem_xy = open(stem_file, 'r')
stem_coor = stem_xy.readlines() # readlines
stem_xyList = imports_and_exports.import_stemxy(stem_file)['stem_xy']
print('Total stem read = '+ str(len(stem_xyList)))
vessel_mapped_stem = stem_xyList # this is the x,y location where we want to put spiral artery
    spiral_array = np.zeros((len(xyList)), dtype=int)  # store the node number of spiral artery
decidual_array = np.zeros((len(xyList)), dtype=int) # store the node number of decidual vein
check = ellipsoid_coor[:, 0:2]
np.random.seed(0)
sa_nodes = 0
dv_nodes = 0
for i in range(0, len(vessel_mapped_stem)): # for each blood vessel,Cycle through to find closest nodes
closest_node = 0
for nodeX in vesselnode_temp:
distance=np.sqrt((vessel_mapped_stem[i][0] - nodeX[1]) ** 2 + (
vessel_mapped_stem[i][1] - nodeX[2]) ** 2 ) # distance from the nodes
if(distance < sa_radius):
#print('SA Node', int(nodeX[0]),nodeX[1],nodeX[2],vessel_mapped_stem[i][0],vessel_mapped_stem[i][1])
arterynode = nodeX[0]
A = np.where(vesselnode_temp == arterynode)
vesselnode_temp = np.delete(vesselnode_temp, A[0], axis=0)
A2 = np.where(surfnode_ex_vessel == int(arterynode))
surfnode_ex_vessel = np.delete(surfnode_ex_vessel, A2)
spiral_array[sa_nodes] = arterynode
sa_nodes = sa_nodes +1
#print(closest_node[0])
#arterynode = closest_node[0]
#A = np.where(vesselnode_temp == arterynode)
#vesselnode_temp = np.delete(vesselnode_temp, A[0], axis=0)
#A2 = np.where(surfnode_ex_vessel == int(arterynode))
#surfnode_ex_vessel = np.delete(surfnode_ex_vessel, A2)
#spiral_array[i] = arterynode
#sa_nodes = sa_nodes +1
    # Doing decidual veins after arteries to make sure we don't take up any spots that arteries would otherwise have been assigned
for i in range(0, len(vessel_mapped_stem)): #need same number of arteries as veins
V = np.random.choice(len(vesselnode_temp)) # choosing random , won't repeat arteries as they are already
vessel_location = vesselnode_temp[V]
for nodeX in vesselnode_temp:
distance=np.sqrt((vessel_location[1] - nodeX[1]) ** 2 + (
vessel_location[2] - nodeX[2]) ** 2 ) # distance from the nodes
dv_from_centre = np.sqrt(nodeX[1] ** 2 + nodeX[2] ** 2 )
if(distance < dv_radius and dv_from_centre < 0.9*x_radius):
#print('DV Node', int(nodeX[0]))
veinnode = nodeX[0]
V = np.where(vesselnode_temp == veinnode)
vesselnode_temp = np.delete(vesselnode_temp, V[0], axis=0)
V2 = np.where(surfnode_ex_vessel == int(veinnode))
surfnode_ex_vessel = np.delete(surfnode_ex_vessel, V2)
decidual_array[dv_nodes] = veinnode
dv_nodes = dv_nodes +1
#veinnode = vesselnode_temp[V][0]
#vesselnode_temp = np.delete(vesselnode_temp, V, axis=0)
#V2 = np.where(surfnode_ex_vessel == int(veinnode))
#surfnode_ex_vessel = np.delete(surfnode_ex_vessel, V2)
#decidual_array[i] = veinnode
#dv_nodes = dv_nodes+1
spiral_array = np.resize(spiral_array,sa_nodes)
print('SAs found = ' + str(sa_nodes))
decidual_array = np.resize(decidual_array, dv_nodes)
#print('dec',decidual_array)
return {'spiral_array': spiral_array, 'decidual_array': decidual_array, 'surfnode_ex_vessel': surfnode_ex_vessel,
'num_sa': len(stem_xyList)}
def identify_vessel_node_test_mesh(ellipsoid_coor, surfacenode,volume, thickness, ellipticity):
"""Generates array of spiral artery nodes and decidual vein nodes. Spiral artery nodes are mapped with stem villi.
Inputs:
- ellipsoid_coor:coordinate of nodes of placental mesh
- surfacenode:array of surface nodes
- stem_file:txt file that described stem villi locations
Outputs:
- spiral_array: array of spiral artery nodes
    - decidual_array: array of decidual vein nodes
- vesselnode: array of both spiral and decidual nodes
- surfnode_ex_vessel: array of surface node excluding vessel nodes
"""
sa_radius = 3.7 / 2.0
dv_radius = sa_radius
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
xyList = np.zeros((len(surfacenode), 4))
count = 0
for i in range(0, len(surfacenode)): # taking only x and y coordinates
        if ellipsoid_coor[surfacenode[i] - 1, 3] > 0:  # take upper surface nodes only, as this is where vessels reside
# location from upper surface nodes only
xyList[count, 0] = ellipsoid_coor[surfacenode[i] - 1, 0] # node number
xyList[count, 1] = ellipsoid_coor[surfacenode[i] - 1, 1] # x-coordinate
xyList[count, 2] = ellipsoid_coor[surfacenode[i] - 1, 2] # y-coordinate
xyList[count, 3] = ellipsoid_coor[surfacenode[i] - 1, 3] # z-coordinate
count = count + 1
xyList = xyList[0:count, :]
surfnode_ex_vessel = np.copy(surfacenode)
vesselnode_temp = np.vstack({tuple(row) for row in xyList}) # nodes that might be vessels
# reading in the stem vessel to map the spiral artery location
vessel_mapped_stem = [-9.822741e+00, 1.550285e+01]
vessel_mapped_stem_v = [1.155144e+01, 1.435972e+01]
    spiral_array = np.zeros((len(xyList)), dtype=int)  # store the node number of spiral artery
decidual_array = np.zeros((len(xyList)), dtype=int) # store the node number of decidual vein
check = ellipsoid_coor[:, 0:2]
np.random.seed(0)
sa_nodes = 0
dv_nodes = 0
for i in range(0, len(vessel_mapped_stem)): # for each blood vessel,Cycle through to find closest nodes
for nodeX in vesselnode_temp:
distance = np.sqrt((vessel_mapped_stem[0] - nodeX[1]) ** 2 + (
vessel_mapped_stem[1] - nodeX[2]) ** 2) # distance from the nodes
if (distance < sa_radius):
#print('SA Node', int(nodeX[0]))
arterynode = nodeX[0]
A = np.where(vesselnode_temp == arterynode)
vesselnode_temp = np.delete(vesselnode_temp, A[0], axis=0)
A2 = np.where(surfnode_ex_vessel == int(arterynode))
surfnode_ex_vessel = np.delete(surfnode_ex_vessel, A2)
spiral_array[sa_nodes] = arterynode
sa_nodes = sa_nodes + 1
    # Doing decidual veins after arteries to make sure we don't take up any spots that arteries would otherwise have been assigned
for i in range(0, len(vessel_mapped_stem_v)): # need same number of arteries as veins
for nodeX in vesselnode_temp:
distance = np.sqrt((vessel_mapped_stem_v[0] - nodeX[1]) ** 2 + (
vessel_mapped_stem_v[1] - nodeX[2]) ** 2) # distance from the nodes
if (distance < dv_radius):
#print('DV Node', int(nodeX[0]))
veinnode = nodeX[0]
V = np.where(vesselnode_temp == veinnode)
vesselnode_temp = np.delete(vesselnode_temp, V[0], axis=0)
V2 = np.where(surfnode_ex_vessel == int(veinnode))
surfnode_ex_vessel = np.delete(surfnode_ex_vessel, V2)
decidual_array[dv_nodes] = veinnode
dv_nodes = dv_nodes + 1
spiral_array = np.resize(spiral_array, sa_nodes)
decidual_array = np.resize(decidual_array, dv_nodes)
#print('dec', decidual_array)
return {'spiral_array': spiral_array, 'decidual_array': decidual_array, 'surfnode_ex_vessel': surfnode_ex_vessel}
def gen_3d_ellipsoid_structured(size_el, volume, thickness, ellipticity, squareSizeRatio, circle_prop, el_type, debug):
""" Generates a structured ellipsoid mesh to solve 3D problems. The aim is for a quality computational mesh that
has as regular elements as possible, within the constraints of typical dimensions of ellipsoids representing the
volume of the placenta. This code is derived from an openCMISS example written by Chris Bradley, which is used to
simulate fluid structure interactions in a cylinder. Note that this hasn't been tested on linear elements
Inputs:
- size_el: approximate dimension of an element in each axis that we are aiming for
- volume: volume of placental ellipsoid
- thickness: placental thickness (z-dimension)
- ellipticity: ratio of y to x axis dimension
- squareSizeRatio: ratio of square in mesh cross-section to radius
    - circle_prop: proportion of ellipse in x-y that is made up by 'plate' of nodes and elements
    - el_type: element type (1 = linear, 2 = quadratic)
    - debug (True or False) allows you to print certain statements to screen
Returns:
- placental_node_coor: nodes location of mesh
    - placental_el_con: element connectivity of mesh (hexahedral elements)
- node_array: array of nodes
- element_array: array of elements
"""
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
ellipsoidRadius_z = radii['z_radius']
ellipsoidRadius_x = radii['x_radius']
ellipsoidRadius_y = radii['y_radius']
if (debug):
        print('Solving a model with x-radius: ' + str(ellipsoidRadius_x) + ' y-radius: ' + str(
            ellipsoidRadius_y) + ' z-radius: ' + str(ellipsoidRadius_z))
    nel_x = int(np.floor((ellipsoidRadius_x * 2) / size_el))  # number of elems needed in x axis in mesh
    nel_y = int(np.floor((ellipsoidRadius_y * 2) / size_el))  # number of elems needed in y axis in mesh (need to
    # implement having different x,y element numbers)
    nel_z = int(np.floor((ellipsoidRadius_z * 2) / size_el))  # number of elems needed in z axis in mesh
# total number of elements in x,y are number in square plus 2* number in arm
# If square takes up half the radius need even numbers in arm and square at one third of total each
# If square takes up squareSizeRatio of the total, then need the square part to be multiplied by that proportion
total_square_arm = 2.0 * nel_x / 3.0
numberOfSquareElements = int(np.floor(squareSizeRatio * total_square_arm))
numberOfArmElements = int(np.floor((1 - squareSizeRatio) * total_square_arm))
# In future for cross-sections that deviate a lot from circular will need different number of elements in square
# and arm in x- and y-
numberOfZElements = nel_z
if (el_type == 1): # linear
numberOfNodesXi = 2
elif (el_type == 2): # quadratic
numberOfNodesXi = 3
numberOfLocalNodes = numberOfNodesXi * numberOfNodesXi * numberOfNodesXi
numberOfLocalInterfaceNodes = numberOfNodesXi * numberOfNodesXi
localNodeIdx000 = 0
localNodeIdx100 = numberOfNodesXi - 1
localNodeIdx010 = numberOfNodesXi * (numberOfNodesXi - 1)
localNodeIdx110 = numberOfNodesXi * numberOfNodesXi - 1
localNodeIdx001 = numberOfNodesXi * numberOfNodesXi * (numberOfNodesXi - 1)
localNodeIdx101 = numberOfNodesXi - 1 + numberOfNodesXi * numberOfNodesXi * (numberOfNodesXi - 1)
localNodeIdx011 = numberOfNodesXi * (numberOfNodesXi - 1) + numberOfNodesXi * numberOfNodesXi * (
numberOfNodesXi - 1)
localNodeIdx111 = numberOfLocalNodes - 1
numberOfNodesPerBlock = numberOfSquareElements * (numberOfNodesXi - 1) * (
numberOfArmElements * (numberOfNodesXi - 1) + 1)
numberOfElementsPerBlock = numberOfSquareElements * numberOfArmElements
numberOfNodesPerLength = 4 * numberOfNodesPerBlock + \
(numberOfSquareElements * (numberOfNodesXi - 1) - 1) * (
numberOfSquareElements * (numberOfNodesXi - 1) - 1)
numberOfElementsPerLength = 4 * numberOfElementsPerBlock + numberOfSquareElements * numberOfSquareElements
numberOfNodes = numberOfNodesPerLength * (numberOfZElements * (numberOfNodesXi - 1) + 1)
numberOfElements = numberOfElementsPerLength * numberOfZElements
if debug:
print(' Mesh Parameters:')
print(' numberOfSquareElements: %d' % (numberOfSquareElements))
print(' numberOfArmElements: %d' % (numberOfArmElements))
print(' numberOfZElements: %d' % (numberOfZElements))
print(' numberOfNodesXi: %d' % (numberOfNodesXi))
print(' numberOfNodesPerBlock: %d' % (numberOfNodesPerBlock))
print(' numberOfElementPerBlock: %d' % (numberOfElementsPerBlock))
print(' numberOfNodesPerLength: %d' % (numberOfNodesPerLength))
print(' numberOfElementsPerLength: %d' % (numberOfElementsPerLength))
print(' numberOfNodes: %d' % (numberOfNodes))
print(' numberOfElements: %d' % (numberOfElements))
print(' numberOfLocalNodes: %d' % (numberOfLocalNodes))
elems = np.zeros((numberOfElements, numberOfLocalNodes+1), dtype='int32')
node_array = np.zeros((numberOfNodes,4))
nodelist = [0]*numberOfNodes
surface_nodes = [0] * numberOfNodes
num_surface_nodes = 0
for zElementIdx in range(1, max(numberOfZElements + 1, 2)):
# Handle the arm blocks first
previousBlock = 4
for blockIdx in range(1, 5): # generating arm blocks
# DEFINING NODES AND ELEMENTS WITH CONNECTIVITY
for yElementIdx in range(1, numberOfArmElements + 1):
for xElementIdx in range(1, numberOfSquareElements + 1):
localNodes = [0] * numberOfLocalNodes # Nodes local to this arm block
elementNumber = xElementIdx + (yElementIdx - 1) * numberOfSquareElements + (
blockIdx - 1) * numberOfSquareElements * numberOfArmElements + \
(zElementIdx - 1) * numberOfElementsPerLength
if (xElementIdx == 1):
localNodes[localNodeIdx000] = (
previousBlock - 1) * numberOfNodesPerBlock + numberOfSquareElements * (
numberOfNodesXi - 1) + \
(yElementIdx - 1) * (
numberOfNodesXi - 1) * numberOfSquareElements * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx100] = (blockIdx - 1) * numberOfNodesPerBlock + numberOfNodesXi - 1 + \
(yElementIdx - 1) * (
numberOfNodesXi - 1) * numberOfSquareElements * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
else:
localNodes[localNodeIdx000] = (blockIdx - 1) * numberOfNodesPerBlock + (xElementIdx - 2) * (
numberOfNodesXi - 1) + (numberOfNodesXi - 2) + 1 + \
(yElementIdx - 1) * (numberOfNodesXi - 1) * (
numberOfSquareElements * (numberOfNodesXi - 1)) + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx100] = localNodes[localNodeIdx000] + numberOfNodesXi - 1
localNodes[localNodeIdx010] = localNodes[localNodeIdx000] + numberOfSquareElements * (
numberOfNodesXi - 1) * (numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx100] + numberOfSquareElements * (
numberOfNodesXi - 1) * (numberOfNodesXi - 1)
localNodes[localNodeIdx001] = localNodes[localNodeIdx000] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx101] = localNodes[localNodeIdx100] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx011] = localNodes[localNodeIdx010] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx111] = localNodes[localNodeIdx110] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] + numberOfSquareElements * (numberOfNodesXi - 1)
localNodes[4] = localNodes[1] + numberOfSquareElements * (numberOfNodesXi - 1)
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] - 1
localNodes[9] = localNodes[0] + numberOfNodesPerLength
localNodes[10] = localNodes[1] + numberOfNodesPerLength
localNodes[11] = localNodes[2] + numberOfNodesPerLength
localNodes[12] = localNodes[3] + numberOfNodesPerLength
localNodes[13] = localNodes[4] + numberOfNodesPerLength
localNodes[14] = localNodes[5] + numberOfNodesPerLength
localNodes[15] = localNodes[6] + numberOfNodesPerLength
localNodes[16] = localNodes[7] + numberOfNodesPerLength
localNodes[17] = localNodes[8] + numberOfNodesPerLength
localNodes[19] = localNodes[10] + numberOfNodesPerLength
localNodes[21] = localNodes[12] + numberOfNodesPerLength
localNodes[22] = localNodes[13] + numberOfNodesPerLength
localNodes[23] = localNodes[14] + numberOfNodesPerLength
localNodes[25] = localNodes[16] + numberOfNodesPerLength
linearNodes = [localNodes[localNodeIdx000], localNodes[localNodeIdx100],
localNodes[localNodeIdx010], localNodes[localNodeIdx110], \
localNodes[localNodeIdx001], localNodes[localNodeIdx101],
localNodes[localNodeIdx011], localNodes[localNodeIdx111]]
if (debug):
print(' Element %8d; Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(elementNumber, linearNodes[0], linearNodes[1], linearNodes[2], linearNodes[3],
linearNodes[4], linearNodes[5], linearNodes[6], linearNodes[7]))
if (el_type == 2):
print(' Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[0], localNodes[1], localNodes[2], localNodes[3], localNodes[4],
localNodes[5], localNodes[6], localNodes[7], localNodes[8]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[9], localNodes[10], localNodes[11], localNodes[12], localNodes[13],
localNodes[14], localNodes[15], localNodes[16], localNodes[17]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[18], localNodes[19], localNodes[20], localNodes[21], localNodes[22],
localNodes[23], localNodes[24], localNodes[25], localNodes[26]))
if (el_type == 1):
elems[elementNumber-1][0] = elementNumber
elems[elementNumber-1][1:numberOfLocalNodes+1] = linearNodes
if (el_type == 2):
elems[elementNumber - 1][0] = elementNumber
elems[elementNumber - 1][1:numberOfLocalNodes + 1] = localNodes
previousBlock = blockIdx
# Handle the square block
if (numberOfSquareElements == 1):
elementNumber = elementNumber + 1
localNodes[localNodeIdx000] = 3 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 4 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 2 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx110] = numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] - 1
localNodes[4] = localNodes[localNodeIdx100] + 1
localNodes[5] = localNodes[localNodeIdx110] - 1
localNodes[7] = localNodes[localNodeIdx010] - 1
localNodes[localNodeIdx001] = localNodes[localNodeIdx000] + numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx101] = localNodes[localNodeIdx100] + numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx011] = localNodes[localNodeIdx010] + numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx111] = localNodes[localNodeIdx110] + numberOfNodesPerLength * (numberOfNodesXi - 1)
linearNodes = [localNodes[localNodeIdx000], localNodes[localNodeIdx100], localNodes[localNodeIdx010],
localNodes[localNodeIdx110], \
localNodes[localNodeIdx001], localNodes[localNodeIdx101], localNodes[localNodeIdx011],
localNodes[localNodeIdx111]]
if (el_type == 2):
localNodes[9] = localNodes[0] + numberOfNodesPerLength
localNodes[10] = localNodes[1] + numberOfNodesPerLength
localNodes[11] = localNodes[2] + numberOfNodesPerLength
localNodes[12] = localNodes[3] + numberOfNodesPerLength
localNodes[13] = localNodes[4] + numberOfNodesPerLength
localNodes[14] = localNodes[5] + numberOfNodesPerLength
localNodes[15] = localNodes[6] + numberOfNodesPerLength
localNodes[16] = localNodes[7] + numberOfNodesPerLength
localNodes[17] = localNodes[8] + numberOfNodesPerLength
localNodes[19] = localNodes[10] + numberOfNodesPerLength
localNodes[21] = localNodes[12] + numberOfNodesPerLength
localNodes[22] = localNodes[13] + numberOfNodesPerLength
localNodes[23] = localNodes[14] + numberOfNodesPerLength
localNodes[25] = localNodes[16] + numberOfNodesPerLength
if (debug):
print(' Element %8d; Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(elementNumber, linearNodes[0], linearNodes[1], linearNodes[2], linearNodes[3], linearNodes[4],
linearNodes[5], linearNodes[6], linearNodes[7]))
if (el_type == 2):
print(' Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[0], localNodes[1], localNodes[2], localNodes[3], localNodes[4], localNodes[5],
localNodes[6], localNodes[7], localNodes[8]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(
localNodes[9], localNodes[10], localNodes[11], localNodes[12], localNodes[13], localNodes[14],
localNodes[15], localNodes[16], localNodes[17]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[18], localNodes[19], localNodes[20], localNodes[21], localNodes[22],
localNodes[23], localNodes[24], localNodes[25], localNodes[26]))
if (el_type == 1):
elems[elementNumber - 1][0] = elementNumber
elems[elementNumber - 1][1:numberOfLocalNodes + 1] = linearNodes
if (el_type == 2):
elems[elementNumber - 1][0] = elementNumber
elems[elementNumber - 1][1:numberOfLocalNodes + 1] = localNodes
else:
for yElementIdx in range(1, numberOfSquareElements + 1):
for xElementIdx in range(1, numberOfSquareElements + 1):
localNodes = [0] * numberOfLocalNodes
elementNumber = 4 * numberOfElementsPerBlock + xElementIdx + (
yElementIdx - 1) * numberOfSquareElements + \
(zElementIdx - 1) * numberOfElementsPerLength
if (yElementIdx == 1):
if (xElementIdx == 1):
# Bottom-left
localNodes[localNodeIdx000] = 3 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 3 * numberOfNodesPerBlock + numberOfArmElements * (
numberOfNodesXi - 1) * \
numberOfSquareElements * (numberOfNodesXi - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 3 * numberOfNodesPerBlock - (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 2) + (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] - 1
localNodes[4] = localNodes[localNodeIdx110] - numberOfSquareElements * (
numberOfNodesXi - 1)
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] - 1
elif (xElementIdx == numberOfSquareElements):
# Bottom-right
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock - (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 4 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 1) - (numberOfNodesXi - 2) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = numberOfSquareElements * (numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx010] - numberOfSquareElements * (
numberOfNodesXi - 1) + 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[localNodeIdx110] - 1
localNodes[7] = localNodes[localNodeIdx010] + 1
else:
# Bottom
localNodes[localNodeIdx000] = 3 * numberOfNodesPerBlock + numberOfSquareElements * (
numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + (
xElementIdx - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = localNodes[localNodeIdx000] + (numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 2) + (xElementIdx - 1) * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx010] + (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx010] - numberOfSquareElements * (
numberOfNodesXi - 1) + 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] - 1
elif (yElementIdx == numberOfSquareElements):
if (xElementIdx == 1):
# Top-left
localNodes[localNodeIdx000] = 2 * numberOfNodesPerBlock + numberOfSquareElements * (
numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((numberOfSquareElements - 1) * (numberOfNodesXi - 1) - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 2 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = 2 * numberOfNodesPerBlock - (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] - 1
localNodes[4] = localNodes[1] + numberOfSquareElements * (numberOfNodesXi - 1) - 1
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] + 1
elif (xElementIdx == numberOfSquareElements):
# Top-right
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((numberOfSquareElements - 1) * (numberOfNodesXi - 1) - 1) + \
(numberOfSquareElements - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = numberOfSquareElements * (numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + \
(numberOfSquareElements - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = numberOfNodesPerBlock + numberOfSquareElements * (
numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx000] + numberOfSquareElements * (
numberOfNodesXi - 1) - 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[localNodeIdx110] - 1
localNodes[7] = localNodes[localNodeIdx010] - 1
else:
# Top
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((numberOfSquareElements - 1) * (numberOfNodesXi - 1) - 1) + \
(xElementIdx - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = localNodes[localNodeIdx000] + (numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 2 * numberOfNodesPerBlock - (xElementIdx - 1) * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx010] - (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx000] + numberOfSquareElements * (
numberOfNodesXi - 1) - 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx010] - 1
else:
if (xElementIdx == 1):
# Left
localNodes[localNodeIdx000] = 3 * numberOfNodesPerBlock - (yElementIdx - 1) * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((yElementIdx - 1) * (numberOfNodesXi - 1) - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = localNodes[localNodeIdx000] - (numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx100] + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] - 1
localNodes[4] = localNodes[localNodeIdx110] - numberOfSquareElements * (
numberOfNodesXi - 1)
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] - 1
elif (xElementIdx == numberOfSquareElements):
# Right
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((yElementIdx - 1) * (numberOfNodesXi - 1) - 1) + (
numberOfSquareElements - 1) * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = numberOfSquareElements * (
numberOfNodesXi - 1) * numberOfArmElements * (numberOfNodesXi - 1) + \
(yElementIdx - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = localNodes[localNodeIdx000] + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx100] + (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx010] - numberOfSquareElements * (
numberOfNodesXi - 1) + 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[localNodeIdx100] + 1
localNodes[7] = localNodes[localNodeIdx010] + 1
else:
# Middle
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((yElementIdx - 1) * (numberOfNodesXi - 1) - 1) + (
xElementIdx - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = localNodes[localNodeIdx000] + (numberOfNodesXi - 1)
localNodes[localNodeIdx010] = localNodes[localNodeIdx000] + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx010] + (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx000] + numberOfSquareElements * (
numberOfNodesXi - 1) - 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx010] + 1
localNodes[localNodeIdx001] = localNodes[localNodeIdx000] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx101] = localNodes[localNodeIdx100] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx011] = localNodes[localNodeIdx010] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx111] = localNodes[localNodeIdx110] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
linearNodes = [localNodes[localNodeIdx000], localNodes[localNodeIdx100],
localNodes[localNodeIdx010], localNodes[localNodeIdx110], \
localNodes[localNodeIdx001], localNodes[localNodeIdx101],
localNodes[localNodeIdx011], localNodes[localNodeIdx111]]
if (el_type == 2):
localNodes[9] = localNodes[0] + numberOfNodesPerLength
localNodes[10] = localNodes[1] + numberOfNodesPerLength
localNodes[11] = localNodes[2] + numberOfNodesPerLength
localNodes[12] = localNodes[3] + numberOfNodesPerLength
localNodes[13] = localNodes[4] + numberOfNodesPerLength
localNodes[14] = localNodes[5] + numberOfNodesPerLength
localNodes[15] = localNodes[6] + numberOfNodesPerLength
localNodes[16] = localNodes[7] + numberOfNodesPerLength
localNodes[17] = localNodes[8] + numberOfNodesPerLength
localNodes[19] = localNodes[10] + numberOfNodesPerLength
localNodes[21] = localNodes[12] + numberOfNodesPerLength
localNodes[22] = localNodes[13] + numberOfNodesPerLength
localNodes[23] = localNodes[14] + numberOfNodesPerLength
localNodes[25] = localNodes[16] + numberOfNodesPerLength
if (debug):
print(' Element %8d; Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(elementNumber, linearNodes[0], linearNodes[1], linearNodes[2], linearNodes[3],
linearNodes[4], linearNodes[5], linearNodes[6], linearNodes[7]))
if (el_type == 2):
print(' Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[0], localNodes[1], localNodes[2], localNodes[3], localNodes[4],
localNodes[5], localNodes[6], localNodes[7], localNodes[8]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[9], localNodes[10], localNodes[11], localNodes[12], localNodes[13],
localNodes[14], localNodes[15], localNodes[16], localNodes[17]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[18], localNodes[19], localNodes[20], localNodes[21], localNodes[22],
localNodes[23], localNodes[24], localNodes[25], localNodes[26]))
if (el_type == 1):
elems[elementNumber-1][0] = elementNumber
elems[elementNumber-1][1:numberOfLocalNodes+1] = linearNodes
if (el_type == 2):
elems[elementNumber - 1][0] = elementNumber
elems[elementNumber - 1][1:numberOfLocalNodes + 1] = localNodes
if (debug):
print(' Nodes:')
for zNodeIdx in range(1, numberOfZElements * (numberOfNodesXi - 1) + 2):
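        # Map the layer index to a normalised radius fraction: prop runs from circle_prop at the bottom and top
        # layers to 1 at the mid-plane, and zPosition is the corresponding height on the ellipsoid profile; the
        # first and last layers are then projected exactly onto the ellipsoid surface further below.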
prop = 1 - (1 - circle_prop) * abs(2.0 * (zNodeIdx - 1) / float(numberOfZElements * (numberOfNodesXi - 1)) -
1.0)
sign = np.sign(2.0 * (zNodeIdx - 1) / float(numberOfZElements * (numberOfNodesXi - 1)) - 1.0)
#This is the z height associated with this prop
zPosition = sign * ellipsoidRadius_z * np.sqrt(1 - prop ** 2)
#This is the radius of the ellipse that lies in the ellipsoid at this z-height
new_x_radius = ellipsoidRadius_x * np.sqrt(ellipsoidRadius_z ** 2.0 - zPosition ** 2.0) \
/ (ellipsoidRadius_z)
new_y_radius = ellipsoidRadius_y * np.sqrt(ellipsoidRadius_z ** 2.0 - zPosition ** 2.0) \
/ (ellipsoidRadius_z)
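        # The central square ('plate') cross-section is scaled to the local ellipse radii at this height, so the
        # plate shrinks towards the top and bottom of the ellipsoid along with the cross-section.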
angle_theta = np.arctan(new_y_radius / new_x_radius)
squareSize_x = squareSizeRatio * new_x_radius * np.cos(angle_theta)
squareSize_y = squareSizeRatio * new_y_radius * np.sin(angle_theta)
#squareSize_x = squareSizeRatio * ellipsoidRadius_x * np.cos(angle_theta)
#squareSize_y = squareSizeRatio * ellipsoidRadius_y * np.sin(angle_theta)
# Handle the arm blocks first
previousBlock = 4
for blockIdx in range(1, 5):
# print('Block which ' + str(blockIdx) + ' ' + str(zNodeIdx))
for yNodeIdx in range(1, numberOfArmElements * (numberOfNodesXi - 1) + 2):
for xNodeIdx in range(1, numberOfSquareElements * (numberOfNodesXi - 1) + 1):
nodeNumber = (blockIdx - 1) * numberOfNodesPerBlock + xNodeIdx + (
yNodeIdx - 1) * numberOfSquareElements * (numberOfNodesXi - 1) + \
(zNodeIdx - 1) * numberOfNodesPerLength
#nodeDomain = decomposition.NodeDomainGet(nodeNumber, 1)
if(nodeNumber > numberOfNodes + 1):
print(nodeNumber)
else:
if (yNodeIdx == numberOfArmElements * (numberOfNodesXi - 1) + 1):
# On the square
# print('On the square', xNodeIdx,yNodeIdx)
if (blockIdx == 1):
xPosition = squareSize_x - 2.0 * xNodeIdx * squareSize_x / (
numberOfSquareElements * (numberOfNodesXi - 1))
yPosition = squareSize_y
elif (blockIdx == 2):
xPosition = -squareSize_x
yPosition = squareSize_y - 2.0 * xNodeIdx * squareSize_y / (
numberOfSquareElements * (numberOfNodesXi - 1))
elif (blockIdx == 3):
xPosition = -squareSize_x + 2.0 * xNodeIdx * squareSize_x / (
numberOfSquareElements * (numberOfNodesXi - 1))
yPosition = -squareSize_y
elif (blockIdx == 4):
xPosition = squareSize_x
yPosition = -squareSize_y + 2.0 * xNodeIdx * squareSize_y / (
numberOfSquareElements * (numberOfNodesXi - 1))
else:
# In the arm
# Work out the r, theta position of each point equally spread on the block
if (blockIdx == 1):
start_theta = np.arctan(new_y_radius / new_x_radius)
end_theta = np.pi - start_theta
theta = start_theta + xNodeIdx * (end_theta - start_theta) / (numberOfSquareElements * (
numberOfNodesXi - 1))
# theta is the angle from the centre of the mesh to the surface of the ellipsoid
sq_x = squareSize_x - 2.0 * xNodeIdx * squareSize_x / (
numberOfSquareElements * (numberOfNodesXi - 1))
sq_y = squareSize_y
elif (blockIdx == 2):
start_theta = np.pi - np.arctan(new_y_radius / new_x_radius)
end_theta = np.pi + np.arctan(new_y_radius / new_x_radius)
theta = start_theta + xNodeIdx * (end_theta - start_theta) / (numberOfSquareElements * (
numberOfNodesXi - 1))
sq_x = -squareSize_x
sq_y = squareSize_y - 2.0 * xNodeIdx * squareSize_y / (
numberOfSquareElements * (numberOfNodesXi - 1))
elif (blockIdx == 3):
start_theta = np.pi + np.arctan(new_y_radius / new_x_radius)
end_theta = 2.0 * np.pi - np.arctan(new_y_radius / new_x_radius)
theta = start_theta + xNodeIdx * (end_theta - start_theta) / (numberOfSquareElements * (
numberOfNodesXi - 1))
sq_x = -squareSize_x + 2.0 * xNodeIdx * squareSize_x / (
numberOfSquareElements * (numberOfNodesXi - 1))
sq_y = -squareSize_y
elif (blockIdx == 4):
start_theta = 2.0 * np.pi - np.arctan(new_y_radius / new_x_radius)
end_theta = 2.0 * np.pi + np.arctan(new_y_radius / new_x_radius)
theta = start_theta + xNodeIdx * (end_theta - start_theta) / (numberOfSquareElements * (
numberOfNodesXi - 1))
sq_x = squareSize_x
sq_y = -squareSize_y + 2.0 * xNodeIdx * squareSize_y / (
numberOfSquareElements * (numberOfNodesXi - 1))
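                            # Radius of the ellipse boundary at angle theta (polar form r = a*b / sqrt(a^2*sin^2(theta)
                            # + b^2*cos^2(theta)) with a = new_x_radius, b = new_y_radius); each arm node is then a
                            # linear blend between this boundary point and the matching point on the inner square.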
armRadius = new_y_radius * new_x_radius / np.sqrt(
new_x_radius ** 2 * np.sin(theta) ** 2 + new_y_radius ** 2 * np.cos(theta) ** 2)
arm_x = armRadius * np.cos(theta)
arm_y = armRadius * np.sin(theta)
arm_no = (yNodeIdx - 1) / (numberOfArmElements * (numberOfNodesXi - 1) + 1.0)
xPosition = arm_x - arm_no * (arm_x - sq_x)
yPosition = arm_y - arm_no * (arm_y - sq_y)
if (zNodeIdx == 1):#project to top and bottom surface
zPosition = -ellipsoidRadius_z * np.sqrt(
1 - xPosition ** 2 / ellipsoidRadius_x ** 2 - yPosition ** 2 / ellipsoidRadius_y ** 2)
num_surface_nodes = num_surface_nodes + 1
surface_nodes[num_surface_nodes] = nodeNumber
elif (zNodeIdx == numberOfZElements * (numberOfNodesXi - 1) + 1): #project to top and bottom
# surface
zPosition = ellipsoidRadius_z * np.sqrt(
1 - xPosition ** 2 / ellipsoidRadius_x ** 2 - yPosition ** 2 / ellipsoidRadius_y ** 2)
num_surface_nodes = num_surface_nodes + 1
surface_nodes[num_surface_nodes] = nodeNumber
elif(yNodeIdx == 1): #outer ring
num_surface_nodes = num_surface_nodes + 1
surface_nodes[num_surface_nodes] = nodeNumber
nodelist[nodeNumber-1] = nodeNumber
node_array[nodeNumber-1][:]= [nodeNumber,xPosition,yPosition,zPosition]
if (debug):
print(' Node %d:' % (nodeNumber))
print(' Position = [ %.2f, %.2f, %.2f ]' % (
xPosition, yPosition, zPosition))
# Now handle square
for yNodeIdx in range(2, numberOfSquareElements * (numberOfNodesXi - 1) + 1):
for xNodeIdx in range(2, numberOfSquareElements * (numberOfNodesXi - 1) + 1):
nodeNumber = 4 * numberOfNodesPerBlock + (xNodeIdx - 1) + (yNodeIdx - 2) * (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) + \
(zNodeIdx - 1) * numberOfNodesPerLength
if (nodeNumber > numberOfNodes + 1):
print(nodeNumber)
else:
xPosition = squareSize_x - squareSize_x * (yNodeIdx - 1) / (
numberOfSquareElements * (numberOfNodesXi - 1)) * 2.0
yPosition = -squareSize_y + squareSize_y * (xNodeIdx - 1) / (
numberOfSquareElements * (numberOfNodesXi - 1)) * 2.0
if (zNodeIdx == 1):
zPosition = -ellipsoidRadius_z * np.sqrt(
1 - xPosition ** 2 / ellipsoidRadius_x ** 2 - yPosition ** 2 / ellipsoidRadius_y ** 2)
num_surface_nodes = num_surface_nodes + 1
surface_nodes[num_surface_nodes] = nodeNumber
elif (zNodeIdx == numberOfZElements * (numberOfNodesXi - 1) + 1):
zPosition = ellipsoidRadius_z * np.sqrt(
1 - xPosition ** 2 / ellipsoidRadius_x ** 2 - yPosition ** 2 / ellipsoidRadius_y ** 2)
num_surface_nodes = num_surface_nodes + 1
surface_nodes[num_surface_nodes] = nodeNumber
nodelist[nodeNumber - 1] = nodeNumber
node_array[nodeNumber - 1][:] = [nodeNumber, xPosition, yPosition, zPosition]
if (debug):
print(' Node %d:' % (nodeNumber))
print(' Position = [ %.2f, %.2f, %.2f ]' % (xPosition, yPosition, zPosition))
    # As we project the top and bottom rows to the surface of the ellipsoid, uneven nodal distribution can impact on
    # mesh quality, so we redistribute the nodes immediately underneath the surface to improve quality metrics.
second_rows = [2,numberOfZElements * (numberOfNodesXi - 1)]
for zNodeIdx in second_rows:
# Handle the arm blocks first
previousBlock = 4
for blockIdx in range(1, 5):
# print('Block which ' + str(blockIdx) + ' ' + str(zNodeIdx))
for yNodeIdx in range(1, numberOfArmElements * (numberOfNodesXi - 1) + 2):
for xNodeIdx in range(1, numberOfSquareElements * (numberOfNodesXi - 1) + 1):
nodeNumber = (blockIdx - 1) * numberOfNodesPerBlock + xNodeIdx + (
yNodeIdx - 1) * numberOfSquareElements * (numberOfNodesXi - 1) + \
(zNodeIdx - 1) * numberOfNodesPerLength
nodeNumber_above = (blockIdx - 1) * numberOfNodesPerBlock + xNodeIdx + (
yNodeIdx - 1) * numberOfSquareElements * (numberOfNodesXi - 1) + \
(zNodeIdx -1 - 1) * numberOfNodesPerLength
nodeNumber_below = (blockIdx - 1) * numberOfNodesPerBlock + xNodeIdx + (
yNodeIdx - 1) * numberOfSquareElements * (numberOfNodesXi - 1) + \
(zNodeIdx + 1 - 1) * numberOfNodesPerLength
# nodeDomain = decomposition.NodeDomainGet(nodeNumber, 1)
if (nodeNumber > numberOfNodes + 1):
print(nodeNumber)
else:
zPosition_above = node_array[nodeNumber_above - 1][3]
zPosition_below = node_array[nodeNumber_below - 1][3]
zPosition = (zPosition_above + zPosition_below)/2.0
node_array[nodeNumber - 1][3] = zPosition
# Now handle square
for yNodeIdx in range(2, numberOfSquareElements * (numberOfNodesXi - 1) + 1):
for xNodeIdx in range(2, numberOfSquareElements * (numberOfNodesXi - 1) + 1):
                nodeNumber = 4 * numberOfNodesPerBlock + (xNodeIdx - 1) + (yNodeIdx - 2) * (
                        numberOfSquareElements * (numberOfNodesXi - 1) - 1) + \
                        (zNodeIdx - 1) * numberOfNodesPerLength
                # Node numbers of the nodes directly above and below this square node in z
                nodeNumber_above = nodeNumber - numberOfNodesPerLength
                nodeNumber_below = nodeNumber + numberOfNodesPerLength
                if (nodeNumber > numberOfNodes + 1):
                    print(nodeNumber)
                else:
                    zPosition_above = node_array[nodeNumber_above - 1][3]
                    zPosition_below = node_array[nodeNumber_below - 1][3]
                    zPosition = (zPosition_above + zPosition_below) / 2.0
                    node_array[nodeNumber - 1][3] = zPosition
surface_nodes = np.unique(surface_nodes)
nzid = np.nonzero(surface_nodes)
surface_nodes = surface_nodes[nzid]
return{'nodes':node_array,'elems':elems,'surface_nodes':surface_nodes, 'node_list':nodelist}
def gen_half_ellipsoid_structured(size_el, volume, thickness, ellipticity, squareSizeRatio, circle_prop, el_type, debug):
""" Generates a structured ellipsoid mesh to solve 3D problems. The aim is for a quality computational mesh that
has as regular elements as possible, within the constraints of typical dimensions of ellipsoids representing the
volume of the placenta. This code is derived from an openCMISS example written by Chris Bradley, which is used to
simulate fluid structure interactions in a cylinder. Note that this hasn't been tested on linear elements
Inputs:
- size_el: approximate dimension of an element in each axis that we are aiming for
- volume: volume of placental ellipsoid
- thickness: placental thickness (z-dimension)
- ellipticity: ratio of y to x axis dimension
- squareSizeRatio: ratio of square in mesh cross-section to radius
- circle_prop: proportion of ellipse in x-y that is made up by 'plate' of nodes and elements
- debug (True or False) allows you to print certain statements to screen
Returns:
- placental_node_coor: nodes location of mesh
- placental_el_con: element connectivity of mesh (tetrahedral element)
- node_array: array of nodes
- element_array: array of elements
"""
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
ellipsoidRadius_z = radii['z_radius']
ellipsoidRadius_x = radii['x_radius']
ellipsoidRadius_y = radii['y_radius']
if (debug):
        print('Solving a model with x-radius: ' + str(ellipsoidRadius_x) + ' y-radius: ' + str(
            ellipsoidRadius_y) + ' z-radius: ' + str(ellipsoidRadius_z))
    nel_x = int(np.floor((ellipsoidRadius_x * 2) / size_el))  # number of elements needed in the x axis of the mesh
    nel_y = int(np.floor((ellipsoidRadius_y * 2) / size_el))  # number of elements needed in the y axis of the mesh
    # (different numbers of x and y elements are not yet implemented)
    nel_z = int(np.floor((ellipsoidRadius_z * 2) / size_el))  # number of elements needed in the z axis of the mesh
# total number of elements in x,y are number in square plus 2* number in arm
# If square takes up half the radius need even numbers in arm and square at one third of total each
# If square takes up squareSizeRatio of the total, then need the square part to be multiplied by that proportion
total_square_arm = 2.0 * nel_x / 3.0
numberOfSquareElements = int(np.floor(squareSizeRatio * total_square_arm))
numberOfArmElements = int(np.floor((1 - squareSizeRatio) * total_square_arm))
# In future for cross-sections that deviate a lot from circular will need different number of elements in square
# and arm in x- and y-
numberOfZElements = nel_z
if (el_type == 1): # linear
numberOfNodesXi = 2
elif (el_type == 2): # quadratic
numberOfNodesXi = 3
numberOfLocalNodes = numberOfNodesXi * numberOfNodesXi * numberOfNodesXi
numberOfLocalInterfaceNodes = numberOfNodesXi * numberOfNodesXi
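    # Local node indices of the eight element corners, with xi1 varying fastest, then xi2, then xi3
    # (e.g. localNodeIdx110 is the corner at xi1 = 1, xi2 = 1, xi3 = 0).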
localNodeIdx000 = 0
localNodeIdx100 = numberOfNodesXi - 1
localNodeIdx010 = numberOfNodesXi * (numberOfNodesXi - 1)
localNodeIdx110 = numberOfNodesXi * numberOfNodesXi - 1
localNodeIdx001 = numberOfNodesXi * numberOfNodesXi * (numberOfNodesXi - 1)
localNodeIdx101 = numberOfNodesXi - 1 + numberOfNodesXi * numberOfNodesXi * (numberOfNodesXi - 1)
localNodeIdx011 = numberOfNodesXi * (numberOfNodesXi - 1) + numberOfNodesXi * numberOfNodesXi * (
numberOfNodesXi - 1)
localNodeIdx111 = numberOfLocalNodes - 1
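    # Counts for a single cross-section: four arm blocks surround a central square grid of interior nodes;
    # total node and element counts multiply these per-cross-section counts by the number of z layers.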
numberOfNodesPerBlock = numberOfSquareElements * (numberOfNodesXi - 1) * (
numberOfArmElements * (numberOfNodesXi - 1) + 1)
numberOfElementsPerBlock = numberOfSquareElements * numberOfArmElements
numberOfNodesPerLength = 4 * numberOfNodesPerBlock + \
(numberOfSquareElements * (numberOfNodesXi - 1) - 1) * (
numberOfSquareElements * (numberOfNodesXi - 1) - 1)
numberOfElementsPerLength = 4 * numberOfElementsPerBlock + numberOfSquareElements * numberOfSquareElements
numberOfNodes = numberOfNodesPerLength * (numberOfZElements * (numberOfNodesXi - 1) + 1)
numberOfElements = numberOfElementsPerLength * numberOfZElements
if debug:
print(' Mesh Parameters:')
print(' numberOfSquareElements: %d' % (numberOfSquareElements))
print(' numberOfArmElements: %d' % (numberOfArmElements))
print(' numberOfZElements: %d' % (numberOfZElements))
print(' numberOfNodesXi: %d' % (numberOfNodesXi))
print(' numberOfNodesPerBlock: %d' % (numberOfNodesPerBlock))
print(' numberOfElementPerBlock: %d' % (numberOfElementsPerBlock))
print(' numberOfNodesPerLength: %d' % (numberOfNodesPerLength))
print(' numberOfElementsPerLength: %d' % (numberOfElementsPerLength))
print(' numberOfNodes: %d' % (numberOfNodes))
print(' numberOfElements: %d' % (numberOfElements))
print(' numberOfLocalNodes: %d' % (numberOfLocalNodes))
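    # Storage layout: elems holds the element number in column 0 followed by its local node numbers;
    # node_array holds the node number and its x, y, z coordinates; surface_nodes is oversized here and
    # trimmed to its unique non-zero entries before returning.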
elems = np.zeros((numberOfElements, numberOfLocalNodes+1), dtype='int32')
node_array = np.zeros((numberOfNodes,4))
nodelist = [0]*numberOfNodes
surface_nodes = [0] * numberOfNodes
num_surface_nodes = 0
elementNumber = 0
localNodes = [0] * numberOfLocalNodes
for zElementIdx in range(1, max(numberOfZElements + 1, 2)):
# Handle the arm blocks first
previousBlock = 4
for blockIdx in range(1, 5): # generating arm blocks
# DEFINING NODES AND ELEMENTS WITH CONNECTIVITY
for yElementIdx in range(1, numberOfArmElements + 1):
for xElementIdx in range(1, numberOfSquareElements + 1):
localNodes = [0] * numberOfLocalNodes # Nodes local to this arm block
elementNumber = xElementIdx + (yElementIdx - 1) * numberOfSquareElements + (
blockIdx - 1) * numberOfSquareElements * numberOfArmElements + \
(zElementIdx - 1) * numberOfElementsPerLength
if (xElementIdx == 1):
localNodes[localNodeIdx000] = (
previousBlock - 1) * numberOfNodesPerBlock + numberOfSquareElements * (
numberOfNodesXi - 1) + \
(yElementIdx - 1) * (
numberOfNodesXi - 1) * numberOfSquareElements * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx100] = (blockIdx - 1) * numberOfNodesPerBlock + numberOfNodesXi - 1 + \
(yElementIdx - 1) * (
numberOfNodesXi - 1) * numberOfSquareElements * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
else:
localNodes[localNodeIdx000] = (blockIdx - 1) * numberOfNodesPerBlock + (xElementIdx - 2) * (
numberOfNodesXi - 1) + (numberOfNodesXi - 2) + 1 + \
(yElementIdx - 1) * (numberOfNodesXi - 1) * (
numberOfSquareElements * (numberOfNodesXi - 1)) + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx100] = localNodes[localNodeIdx000] + numberOfNodesXi - 1
localNodes[localNodeIdx010] = localNodes[localNodeIdx000] + numberOfSquareElements * (
numberOfNodesXi - 1) * (numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx100] + numberOfSquareElements * (
numberOfNodesXi - 1) * (numberOfNodesXi - 1)
localNodes[localNodeIdx001] = localNodes[localNodeIdx000] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx101] = localNodes[localNodeIdx100] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx011] = localNodes[localNodeIdx010] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx111] = localNodes[localNodeIdx110] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] + numberOfSquareElements * (numberOfNodesXi - 1)
localNodes[4] = localNodes[1] + numberOfSquareElements * (numberOfNodesXi - 1)
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] - 1
localNodes[9] = localNodes[0] + numberOfNodesPerLength
localNodes[10] = localNodes[1] + numberOfNodesPerLength
localNodes[11] = localNodes[2] + numberOfNodesPerLength
localNodes[12] = localNodes[3] + numberOfNodesPerLength
localNodes[13] = localNodes[4] + numberOfNodesPerLength
localNodes[14] = localNodes[5] + numberOfNodesPerLength
localNodes[15] = localNodes[6] + numberOfNodesPerLength
localNodes[16] = localNodes[7] + numberOfNodesPerLength
localNodes[17] = localNodes[8] + numberOfNodesPerLength
localNodes[19] = localNodes[10] + numberOfNodesPerLength
localNodes[21] = localNodes[12] + numberOfNodesPerLength
localNodes[22] = localNodes[13] + numberOfNodesPerLength
localNodes[23] = localNodes[14] + numberOfNodesPerLength
localNodes[25] = localNodes[16] + numberOfNodesPerLength
linearNodes = [localNodes[localNodeIdx000], localNodes[localNodeIdx100],
localNodes[localNodeIdx010], localNodes[localNodeIdx110], \
localNodes[localNodeIdx001], localNodes[localNodeIdx101],
localNodes[localNodeIdx011], localNodes[localNodeIdx111]]
if (debug):
print(' Element %8d; Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(elementNumber, linearNodes[0], linearNodes[1], linearNodes[2], linearNodes[3],
linearNodes[4], linearNodes[5], linearNodes[6], linearNodes[7]))
if (el_type == 2):
print(' Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[0], localNodes[1], localNodes[2], localNodes[3], localNodes[4],
localNodes[5], localNodes[6], localNodes[7], localNodes[8]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[9], localNodes[10], localNodes[11], localNodes[12], localNodes[13],
localNodes[14], localNodes[15], localNodes[16], localNodes[17]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[18], localNodes[19], localNodes[20], localNodes[21], localNodes[22],
localNodes[23], localNodes[24], localNodes[25], localNodes[26]))
if (el_type == 1):
elems[elementNumber-1][0] = elementNumber
elems[elementNumber-1][1:numberOfLocalNodes+1] = linearNodes
if (el_type == 2):
elems[elementNumber - 1][0] = elementNumber
elems[elementNumber - 1][1:numberOfLocalNodes + 1] = localNodes
previousBlock = blockIdx
# Handle the square block
if (numberOfSquareElements == 1):
elementNumber = elementNumber + 1
localNodes[localNodeIdx000] = 3 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 4 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 2 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx110] = numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] - 1
localNodes[4] = localNodes[localNodeIdx100] + 1
localNodes[5] = localNodes[localNodeIdx110] - 1
localNodes[7] = localNodes[localNodeIdx010] - 1
localNodes[localNodeIdx001] = localNodes[localNodeIdx000] + numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx101] = localNodes[localNodeIdx100] + numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx011] = localNodes[localNodeIdx010] + numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx111] = localNodes[localNodeIdx110] + numberOfNodesPerLength * (numberOfNodesXi - 1)
linearNodes = [localNodes[localNodeIdx000], localNodes[localNodeIdx100], localNodes[localNodeIdx010],
localNodes[localNodeIdx110], \
localNodes[localNodeIdx001], localNodes[localNodeIdx101], localNodes[localNodeIdx011],
localNodes[localNodeIdx111]]
if (el_type == 2):
localNodes[9] = localNodes[0] + numberOfNodesPerLength
localNodes[10] = localNodes[1] + numberOfNodesPerLength
localNodes[11] = localNodes[2] + numberOfNodesPerLength
localNodes[12] = localNodes[3] + numberOfNodesPerLength
localNodes[13] = localNodes[4] + numberOfNodesPerLength
localNodes[14] = localNodes[5] + numberOfNodesPerLength
localNodes[15] = localNodes[6] + numberOfNodesPerLength
localNodes[16] = localNodes[7] + numberOfNodesPerLength
localNodes[17] = localNodes[8] + numberOfNodesPerLength
localNodes[19] = localNodes[10] + numberOfNodesPerLength
localNodes[21] = localNodes[12] + numberOfNodesPerLength
localNodes[22] = localNodes[13] + numberOfNodesPerLength
localNodes[23] = localNodes[14] + numberOfNodesPerLength
localNodes[25] = localNodes[16] + numberOfNodesPerLength
if (debug):
print(' Element %8d; Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(elementNumber, linearNodes[0], linearNodes[1], linearNodes[2], linearNodes[3], linearNodes[4],
linearNodes[5], linearNodes[6], linearNodes[7]))
if (el_type == 2):
print(' Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[0], localNodes[1], localNodes[2], localNodes[3], localNodes[4], localNodes[5],
localNodes[6], localNodes[7], localNodes[8]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(
localNodes[9], localNodes[10], localNodes[11], localNodes[12], localNodes[13], localNodes[14],
localNodes[15], localNodes[16], localNodes[17]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[18], localNodes[19], localNodes[20], localNodes[21], localNodes[22],
localNodes[23], localNodes[24], localNodes[25], localNodes[26]))
if (el_type == 1):
elems[elementNumber - 1][0] = elementNumber
elems[elementNumber - 1][1:numberOfLocalNodes + 1] = linearNodes
if (el_type == 2):
elems[elementNumber - 1][0] = elementNumber
elems[elementNumber - 1][1:numberOfLocalNodes + 1] = localNodes
else:
for yElementIdx in range(1, numberOfSquareElements + 1):
for xElementIdx in range(1, numberOfSquareElements + 1):
localNodes = [0] * numberOfLocalNodes
elementNumber = 4 * numberOfElementsPerBlock + xElementIdx + (
yElementIdx - 1) * numberOfSquareElements + \
(zElementIdx - 1) * numberOfElementsPerLength
if (yElementIdx == 1):
if (xElementIdx == 1):
# Bottom-left
localNodes[localNodeIdx000] = 3 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 3 * numberOfNodesPerBlock + numberOfArmElements * (
numberOfNodesXi - 1) * \
numberOfSquareElements * (numberOfNodesXi - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 3 * numberOfNodesPerBlock - (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 2) + (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] - 1
localNodes[4] = localNodes[localNodeIdx110] - numberOfSquareElements * (
numberOfNodesXi - 1)
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] - 1
elif (xElementIdx == numberOfSquareElements):
# Bottom-right
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock - (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 4 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 1) - (numberOfNodesXi - 2) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = numberOfSquareElements * (numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx010] - numberOfSquareElements * (
numberOfNodesXi - 1) + 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[localNodeIdx110] - 1
localNodes[7] = localNodes[localNodeIdx010] + 1
else:
# Bottom
localNodes[localNodeIdx000] = 3 * numberOfNodesPerBlock + numberOfSquareElements * (
numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + (
xElementIdx - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = localNodes[localNodeIdx000] + (numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 2) + (xElementIdx - 1) * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx010] + (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx010] - numberOfSquareElements * (
numberOfNodesXi - 1) + 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] - 1
elif (yElementIdx == numberOfSquareElements):
if (xElementIdx == 1):
# Top-left
localNodes[localNodeIdx000] = 2 * numberOfNodesPerBlock + numberOfSquareElements * (
numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((numberOfSquareElements - 1) * (numberOfNodesXi - 1) - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 2 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = 2 * numberOfNodesPerBlock - (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] - 1
localNodes[4] = localNodes[1] + numberOfSquareElements * (numberOfNodesXi - 1) - 1
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] + 1
elif (xElementIdx == numberOfSquareElements):
# Top-right
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((numberOfSquareElements - 1) * (numberOfNodesXi - 1) - 1) + \
(numberOfSquareElements - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = numberOfSquareElements * (numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + \
(numberOfSquareElements - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = numberOfNodesPerBlock + numberOfSquareElements * (
numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx000] + numberOfSquareElements * (
numberOfNodesXi - 1) - 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[localNodeIdx110] - 1
localNodes[7] = localNodes[localNodeIdx010] - 1
else:
# Top
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((numberOfSquareElements - 1) * (numberOfNodesXi - 1) - 1) + \
(xElementIdx - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = localNodes[localNodeIdx000] + (numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 2 * numberOfNodesPerBlock - (xElementIdx - 1) * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx010] - (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx000] + numberOfSquareElements * (
numberOfNodesXi - 1) - 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx010] - 1
else:
if (xElementIdx == 1):
# Left
localNodes[localNodeIdx000] = 3 * numberOfNodesPerBlock - (yElementIdx - 1) * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((yElementIdx - 1) * (numberOfNodesXi - 1) - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = localNodes[localNodeIdx000] - (numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx100] + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] - 1
localNodes[4] = localNodes[localNodeIdx110] - numberOfSquareElements * (
numberOfNodesXi - 1)
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] - 1
elif (xElementIdx == numberOfSquareElements):
# Right
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((yElementIdx - 1) * (numberOfNodesXi - 1) - 1) + (
numberOfSquareElements - 1) * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = numberOfSquareElements * (
numberOfNodesXi - 1) * numberOfArmElements * (numberOfNodesXi - 1) + \
(yElementIdx - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = localNodes[localNodeIdx000] + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx100] + (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx010] - numberOfSquareElements * (
numberOfNodesXi - 1) + 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[localNodeIdx100] + 1
localNodes[7] = localNodes[localNodeIdx010] + 1
else:
# Middle
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((yElementIdx - 1) * (numberOfNodesXi - 1) - 1) + (
xElementIdx - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = localNodes[localNodeIdx000] + (numberOfNodesXi - 1)
localNodes[localNodeIdx010] = localNodes[localNodeIdx000] + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx010] + (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx000] + numberOfSquareElements * (
numberOfNodesXi - 1) - 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx010] + 1
localNodes[localNodeIdx001] = localNodes[localNodeIdx000] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx101] = localNodes[localNodeIdx100] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx011] = localNodes[localNodeIdx010] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx111] = localNodes[localNodeIdx110] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
linearNodes = [localNodes[localNodeIdx000], localNodes[localNodeIdx100],
localNodes[localNodeIdx010], localNodes[localNodeIdx110], \
localNodes[localNodeIdx001], localNodes[localNodeIdx101],
localNodes[localNodeIdx011], localNodes[localNodeIdx111]]
if (el_type == 2):
localNodes[9] = localNodes[0] + numberOfNodesPerLength
localNodes[10] = localNodes[1] + numberOfNodesPerLength
localNodes[11] = localNodes[2] + numberOfNodesPerLength
localNodes[12] = localNodes[3] + numberOfNodesPerLength
localNodes[13] = localNodes[4] + numberOfNodesPerLength
localNodes[14] = localNodes[5] + numberOfNodesPerLength
localNodes[15] = localNodes[6] + numberOfNodesPerLength
localNodes[16] = localNodes[7] + numberOfNodesPerLength
localNodes[17] = localNodes[8] + numberOfNodesPerLength
localNodes[19] = localNodes[10] + numberOfNodesPerLength
localNodes[21] = localNodes[12] + numberOfNodesPerLength
localNodes[22] = localNodes[13] + numberOfNodesPerLength
localNodes[23] = localNodes[14] + numberOfNodesPerLength
localNodes[25] = localNodes[16] + numberOfNodesPerLength
if (debug):
print(' Element %8d; Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(elementNumber, linearNodes[0], linearNodes[1], linearNodes[2], linearNodes[3],
linearNodes[4], linearNodes[5], linearNodes[6], linearNodes[7]))
if (el_type == 2):
print(' Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[0], localNodes[1], localNodes[2], localNodes[3], localNodes[4],
localNodes[5], localNodes[6], localNodes[7], localNodes[8]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[9], localNodes[10], localNodes[11], localNodes[12], localNodes[13],
localNodes[14], localNodes[15], localNodes[16], localNodes[17]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[18], localNodes[19], localNodes[20], localNodes[21], localNodes[22],
localNodes[23], localNodes[24], localNodes[25], localNodes[26]))
if (el_type == 1):
elems[elementNumber-1][0] = elementNumber
elems[elementNumber-1][1:numberOfLocalNodes+1] = linearNodes
if (el_type == 2):
elems[elementNumber - 1][0] = elementNumber
elems[elementNumber - 1][1:numberOfLocalNodes + 1] = localNodes
if (debug):
print(' Nodes:')
k = 0.0
for zNodeIdx in range(1, numberOfZElements * (numberOfNodesXi - 1) + 2):
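        # For the half ellipsoid, k runs from 0 at the flat base (z = 0) to 1 at the top layer, so only the upper
        # half of the ellipsoid profile is meshed and only the top layer is later projected onto the surface.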
k = abs(((zNodeIdx-1)/(float(numberOfZElements * (numberOfNodesXi - 1)))))
prop = 1 - (1 - circle_prop) * k
sign = np.sign(k)
#This is the z height associated with this prop
zPosition = sign * ellipsoidRadius_z * np.sqrt(1 - prop ** 2)
#This is the radius of the ellipse that lies in the ellipsoid at this z-height
new_x_radius = ellipsoidRadius_x * np.sqrt(ellipsoidRadius_z ** 2.0 - zPosition ** 2.0) \
/ (ellipsoidRadius_z)
new_y_radius = ellipsoidRadius_y * np.sqrt(ellipsoidRadius_z ** 2.0 - zPosition ** 2.0) \
/ (ellipsoidRadius_z)
angle_theta = np.arctan(new_y_radius / new_x_radius)
squareSize_x = squareSizeRatio * new_x_radius * np.cos(angle_theta)
squareSize_y = squareSizeRatio * new_y_radius * np.sin(angle_theta)
#squareSize_x = squareSizeRatio * ellipsoidRadius_x * np.cos(angle_theta)
#squareSize_y = squareSizeRatio * ellipsoidRadius_y * np.sin(angle_theta)
# Handle the arm blocks first
previousBlock = 4
for blockIdx in range(1, 5):
# print('Block which ' + str(blockIdx) + ' ' + str(zNodeIdx))
for yNodeIdx in range(1, numberOfArmElements * (numberOfNodesXi - 1) + 2):
for xNodeIdx in range(1, numberOfSquareElements * (numberOfNodesXi - 1) + 1):
nodeNumber = (blockIdx - 1) * numberOfNodesPerBlock + xNodeIdx + (
yNodeIdx - 1) * numberOfSquareElements * (numberOfNodesXi - 1) + \
(zNodeIdx - 1) * numberOfNodesPerLength
#nodeDomain = decomposition.NodeDomainGet(nodeNumber, 1)
if(nodeNumber > numberOfNodes + 1):
print(nodeNumber)
else:
if (yNodeIdx == numberOfArmElements * (numberOfNodesXi - 1) + 1):
# On the square
# print('On the square', xNodeIdx,yNodeIdx)
if (blockIdx == 1):
xPosition = squareSize_x - 2.0 * xNodeIdx * squareSize_x / (
numberOfSquareElements * (numberOfNodesXi - 1))
yPosition = squareSize_y
elif (blockIdx == 2):
xPosition = -squareSize_x
yPosition = squareSize_y - 2.0 * xNodeIdx * squareSize_y / (
numberOfSquareElements * (numberOfNodesXi - 1))
elif (blockIdx == 3):
xPosition = -squareSize_x + 2.0 * xNodeIdx * squareSize_x / (
numberOfSquareElements * (numberOfNodesXi - 1))
yPosition = -squareSize_y
elif (blockIdx == 4):
xPosition = squareSize_x
yPosition = -squareSize_y + 2.0 * xNodeIdx * squareSize_y / (
numberOfSquareElements * (numberOfNodesXi - 1))
else:
# In the arm
# Work out the r, theta position of each point equally spread on the block
if (blockIdx == 1):
start_theta = np.arctan(new_y_radius / new_x_radius)
end_theta = np.pi - start_theta
theta = start_theta + xNodeIdx * (end_theta - start_theta) / (numberOfSquareElements * (
numberOfNodesXi - 1))
# theta is the angle from the centre of the mesh to the surface of the ellipsoid
sq_x = squareSize_x - 2.0 * xNodeIdx * squareSize_x / (
numberOfSquareElements * (numberOfNodesXi - 1))
sq_y = squareSize_y
elif (blockIdx == 2):
start_theta = np.pi - np.arctan(new_y_radius / new_x_radius)
end_theta = np.pi + np.arctan(new_y_radius / new_x_radius)
theta = start_theta + xNodeIdx * (end_theta - start_theta) / (numberOfSquareElements * (
numberOfNodesXi - 1))
sq_x = -squareSize_x
sq_y = squareSize_y - 2.0 * xNodeIdx * squareSize_y / (
numberOfSquareElements * (numberOfNodesXi - 1))
elif (blockIdx == 3):
start_theta = np.pi + np.arctan(new_y_radius / new_x_radius)
end_theta = 2.0 * np.pi - np.arctan(new_y_radius / new_x_radius)
theta = start_theta + xNodeIdx * (end_theta - start_theta) / (numberOfSquareElements * (
numberOfNodesXi - 1))
sq_x = -squareSize_x + 2.0 * xNodeIdx * squareSize_x / (
numberOfSquareElements * (numberOfNodesXi - 1))
sq_y = -squareSize_y
elif (blockIdx == 4):
start_theta = 2.0 * np.pi - np.arctan(new_y_radius / new_x_radius)
end_theta = 2.0 * np.pi + np.arctan(new_y_radius / new_x_radius)
theta = start_theta + xNodeIdx * (end_theta - start_theta) / (numberOfSquareElements * (
numberOfNodesXi - 1))
sq_x = squareSize_x
sq_y = -squareSize_y + 2.0 * xNodeIdx * squareSize_y / (
numberOfSquareElements * (numberOfNodesXi - 1))
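                            # Ellipse boundary radius at angle theta (polar form), blended linearly with the matching
                            # point on the inner square to place this arm node, as in the full-ellipsoid mesh above.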
armRadius = new_y_radius * new_x_radius / np.sqrt(
new_x_radius ** 2 * np.sin(theta) ** 2 + new_y_radius ** 2 * np.cos(theta) ** 2)
arm_x = armRadius * np.cos(theta)
arm_y = armRadius * np.sin(theta)
arm_no = (yNodeIdx - 1) / (numberOfArmElements * (numberOfNodesXi - 1) + 1.0)
xPosition = arm_x - arm_no * (arm_x - sq_x)
yPosition = arm_y - arm_no * (arm_y - sq_y)
if (zNodeIdx == numberOfZElements * (numberOfNodesXi - 1) + 1): #project to top and bottom
# surface
zPosition = ellipsoidRadius_z * np.sqrt(
1 - xPosition ** 2 / ellipsoidRadius_x ** 2 - yPosition ** 2 / ellipsoidRadius_y ** 2)
surface_nodes[num_surface_nodes] = nodeNumber
num_surface_nodes = num_surface_nodes + 1
elif(yNodeIdx == 1): #outer ring
surface_nodes[num_surface_nodes] = nodeNumber
num_surface_nodes = num_surface_nodes + 1
nodelist[nodeNumber-1] = nodeNumber
node_array[nodeNumber-1][:]= [nodeNumber,xPosition,yPosition,zPosition]
if (debug):
print(' Node %d:' % (nodeNumber))
print(' Position = [ %.2f, %.2f, %.2f ]' % (
xPosition, yPosition, zPosition))
# Now handle square
for yNodeIdx in range(2, numberOfSquareElements * (numberOfNodesXi - 1) + 1):
for xNodeIdx in range(2, numberOfSquareElements * (numberOfNodesXi - 1) + 1):
nodeNumber = 4 * numberOfNodesPerBlock + (xNodeIdx - 1) + (yNodeIdx - 2) * (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) + \
(zNodeIdx - 1) * numberOfNodesPerLength
if (nodeNumber > numberOfNodes + 1):
print(nodeNumber)
else:
xPosition = squareSize_x - squareSize_x * (yNodeIdx - 1) / (
numberOfSquareElements * (numberOfNodesXi - 1)) * 2.0
yPosition = -squareSize_y + squareSize_y * (xNodeIdx - 1) / (
numberOfSquareElements * (numberOfNodesXi - 1)) * 2.0
if (zNodeIdx == numberOfZElements * (numberOfNodesXi - 1) + 1):
zPosition = ellipsoidRadius_z * np.sqrt(
1 - xPosition ** 2 / ellipsoidRadius_x ** 2 - yPosition ** 2 / ellipsoidRadius_y ** 2)
                        surface_nodes[num_surface_nodes] = nodeNumber
                        num_surface_nodes = num_surface_nodes + 1
nodelist[nodeNumber - 1] = nodeNumber
node_array[nodeNumber - 1][:] = [nodeNumber, xPosition, yPosition, zPosition]
if (debug):
print(' Node %d:' % (nodeNumber))
print(' Position = [ %.2f, %.2f, %.2f ]' % (xPosition, yPosition, zPosition))
    # As we project the top and bottom rows to the surface of the ellipsoid, the uneven nodal
    # distribution can degrade mesh quality, so we redistribute the nodes immediately underneath the
    # surface to improve the quality metrics. In the half ellipsoid only the top ring is projected.
second_rows = [numberOfZElements * (numberOfNodesXi - 1)]
for zNodeIdx in second_rows:
# Handle the arm blocks first
previousBlock = 4
for blockIdx in range(1, 5):
# print('Block which ' + str(blockIdx) + ' ' + str(zNodeIdx))
for yNodeIdx in range(1, numberOfArmElements * (numberOfNodesXi - 1) + 2):
for xNodeIdx in range(1, numberOfSquareElements * (numberOfNodesXi - 1) + 1):
nodeNumber = (blockIdx - 1) * numberOfNodesPerBlock + xNodeIdx + (
yNodeIdx - 1) * numberOfSquareElements * (numberOfNodesXi - 1) + \
(zNodeIdx - 1) * numberOfNodesPerLength
nodeNumber_above = (blockIdx - 1) * numberOfNodesPerBlock + xNodeIdx + (
yNodeIdx - 1) * numberOfSquareElements * (numberOfNodesXi - 1) + \
(zNodeIdx -1 - 1) * numberOfNodesPerLength
nodeNumber_below = (blockIdx - 1) * numberOfNodesPerBlock + xNodeIdx + (
yNodeIdx - 1) * numberOfSquareElements * (numberOfNodesXi - 1) + \
(zNodeIdx + 1 - 1) * numberOfNodesPerLength
# nodeDomain = decomposition.NodeDomainGet(nodeNumber, 1)
if (nodeNumber > numberOfNodes + 1):
print(nodeNumber)
else:
zPosition_above = node_array[nodeNumber_above - 1][3]
zPosition_below = node_array[nodeNumber_below - 1][3]
zPosition = (zPosition_above + zPosition_below)/2.0
node_array[nodeNumber - 1][3] = zPosition
# Now handle square
for yNodeIdx in range(2, numberOfSquareElements * (numberOfNodesXi - 1) + 1):
for xNodeIdx in range(2, numberOfSquareElements * (numberOfNodesXi - 1) + 1):
nodeNumber = 4 * numberOfNodesPerBlock + (xNodeIdx - 1) + (yNodeIdx - 2) * (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) + \
(zNodeIdx - 1) * numberOfNodesPerLength
if (nodeNumber > numberOfNodes + 1):
print(nodeNumber)
                else:
                    # Recompute the indices of the corresponding nodes one layer above and
                    # below; in the global numbering they differ by numberOfNodesPerLength.
                    nodeNumber_above = nodeNumber - numberOfNodesPerLength
                    nodeNumber_below = nodeNumber + numberOfNodesPerLength
                    zPosition_above = node_array[nodeNumber_above - 1][3]
                    zPosition_below = node_array[nodeNumber_below - 1][3]
                    zPosition = (zPosition_above + zPosition_below) / 2.0
                    node_array[nodeNumber - 1][3] = zPosition
surface_nodes = np.unique(surface_nodes)
nzid = np.nonzero(surface_nodes)
surface_nodes = surface_nodes[nzid]
return{'nodes':node_array,'elems':elems,'surface_nodes':surface_nodes, 'node_list':nodelist}
|
import os
import psycopg2
from sys import exit
import cv2
import numpy as np
db_string = 'postgres://postgres:postgres2020!Incyt@172.17.250.12:5432/hashFiles'
#db_string = 'postgres://postgres:Guatemala1@localhost:5432/hashfiles'
sourceImages = '/home/incyt/servicio/uploads'
destinationVideo = '/home/incyt/servicio/uploads/videos_volcanes'
#temporalFolder ='/videosVolcanes/tmp'
#pathVideos = '/videosVolcanes/'
urlVideos = 'https://incyt.url.edu.gt/incyt/api/HashFiles/uploads/videos_volcanes/'
#docker run -it -v /videosVolcanes/tmp:/tmp/ -v /home/incyt/servicio/uploads:/uploads -v /home/incyt/servicio/uploads/videos:/videos -m 2g --cpus=1 --cpu-shares=50 linuxffmpeg
#disk utils
fps = 20
size = (640,480)
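# Note: cv2.VideoWriter may silently produce an unusable file if the frames written to it
# do not match `size`, so images are resized to `size` before being written below.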
def dateYesterday():
from datetime import date
from datetime import timedelta
today = date.today()
print("Today is: ", today)
# Yesterday date
yesterday = today - timedelta(days = 1)
print("yesterday was: ", yesterday)
return yesterday
def runYesterdayVideo():
y = str(dateYesterday())
print(y)
for x in range(0,24):
#print(x)
y0 = y + ' ' + str(x).zfill(2) + ':00:00'
y1 = y + ' ' + str(x).zfill(2) + ':59:00'
        # Use a parameterized query so the date strings are escaped by the driver.
        q = "select filename from filecatalog where fecha between %s and %s order by fecha asc"
        frame_array = []
        files = []
        print(q, (y0, y1))
        conn = psycopg2.connect(db_string)
        cursor = conn.cursor()
        cursor.execute(q, (y0, y1))
rows = cursor.fetchall()
for row in rows:
filename = row[0] + '.jpg'
files.append(sourceImages + '/' + filename)
conn.close()
print("number of images to process:" , len(files))
if (len(files) > 200):
archName = str(dateYesterday()).replace('-','') +'_' + str(x).zfill(2) + '.avi'
arch = destinationVideo + '/' + archName
print(arch)
            for i in range(len(files)):
                img = cv2.imread(files[i])
                if img is None:
                    # Skip files that could not be decoded
                    continue
                # Resize to the writer's frame size before appending
                frame_array.append(cv2.resize(img, size))
            out = cv2.VideoWriter(arch, cv2.VideoWriter_fourcc(*'DIVX'), fps, size)
for i in range(len(frame_array)):
out.write(frame_array[i])
out.release()
query = "insert into videos_volcanes (fecha,video,numFotos) values ('" + y0 +"', '" + urlVideos + archName + "','" + str(len(files)) +"')"
updatetData(query)
def updatetData(query):
print(query)
conn = psycopg2.connect(db_string)
cursor = conn.cursor()
    try:
        cursor.execute(query)
    except Exception as e:
        print('*************************** could not insert *********************************')
        print(e)
        print(query)
conn.commit()
conn.close()
#https://medium.com/@iKhushPatel/convert-video-to-images-images-to-video-using-opencv-python-db27a128a481
#void main()
if __name__ == '__main__':
print("starting")
runYesterdayVideo()
'''
create table videos_volcanes(
fecha date not null default CURRENT_TIMESTAMP,
video text not null unique,
numFotos numeric null
)
'''
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Mobile classification search space built around MobileNet V3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from typing import Any, List, NamedTuple, Optional, Sequence, Text, Tuple, Union
from tunas import basic_specs
from tunas import mobile_model_archive
from tunas import schema
from tunas import schema_io
from tunas import search_space_utils
# Reference models we compare against from the published literature.
MOBILENET_V2 = 'mobilenet_v2'
MNASNET_B1 = 'mnasnet_b1'
PROXYLESSNAS_MOBILE = 'proxylessnas_mobile'
MOBILENET_V3_LARGE = 'mobilenet_v3_large'
MOBILENET_MULTI_AVG = 'mobilenet_multi_avg'
MOBILENET_MULTI_MAX = 'mobilenet_multi_max'
# Key search spaces reported in our paper.
PROXYLESSNAS_SEARCH = 'proxylessnas_search'
PROXYLESSNAS_ENLARGED_SEARCH = 'proxylessnas_enlarged_search'
MOBILENET_V3_LIKE_SEARCH = 'mobilenet_v3_like_search'
ALL_SSDS = (
MOBILENET_V2,
MNASNET_B1,
PROXYLESSNAS_MOBILE,
MOBILENET_V3_LARGE,
PROXYLESSNAS_SEARCH,
PROXYLESSNAS_ENLARGED_SEARCH,
MOBILENET_V3_LIKE_SEARCH,
# Multi-hardware models from paper
# "Discovering Multi-Hardware Mobile Models via Architecture Search".
MOBILENET_MULTI_AVG,
MOBILENET_MULTI_MAX,
)
MOBILENET_V3_LIKE_SSDS = (
MOBILENET_V3_LARGE,
MOBILENET_V3_LIKE_SEARCH,
MOBILENET_MULTI_AVG,
MOBILENET_MULTI_MAX,
)
_IntOrIntPair = Union[int, Tuple[int, int]]
TunableKernelSize = Union[
int,
Tuple[int, int],
schema.OneOf[int],
schema.OneOf[Tuple[int, int]]]
@schema_io.register_namedtuple('mobile_search_space_v3.ActivationSpec')
class ActivationSpec(NamedTuple('ActivationSpec', [('name', Text)])):
"""Neural network activation function.
Attributes:
name: Name of the activation function to apply, like 'relu' or 'swish6'.
"""
pass
# List of supported activation functions.
RELU = ActivationSpec('relu')
RELU6 = ActivationSpec('relu6')
SWISH6 = ActivationSpec('swish6')
SIGMOID = ActivationSpec('sigmoid')
@schema_io.register_namedtuple('mobile_search_space_v3.ConvSpec')
class ConvSpec(
NamedTuple('ConvSpec',
[('kernel_size', TunableKernelSize),
('strides', _IntOrIntPair),
('use_batch_norm', bool)])):
"""2D convolution followed by an optional batch norm.
Attributes:
kernel_size: Kernel size.
strides: Output strides.
use_batch_norm: If true, we'll insert a batch norm op after the convolution.
"""
def __new__(cls,
kernel_size,
strides,
use_batch_norm = True):
return super(ConvSpec, cls).__new__(
cls, kernel_size, strides, use_batch_norm)
@schema_io.register_namedtuple('mobile_search_space_v3.SeparableConvSpec')
class SeparableConvSpec(
NamedTuple('SeparableConvSpec',
[('kernel_size', TunableKernelSize),
('strides', _IntOrIntPair),
('activation', ActivationSpec)])):
"""2D depthwise separable convolution followed by a batch norm.
Attributes:
kernel_size: Kernel size for the depthwise convolution.
strides: Strides to use for the depthwise convolution.
activation: Activation function to apply between the depthwise and pointwise
convolutions.
"""
def __new__(cls,
kernel_size,
strides,
activation = RELU):
return super(SeparableConvSpec, cls).__new__(
cls, kernel_size, strides, activation)
@schema_io.register_namedtuple('mobile_search_space_v3.DepthwiseBottleneckSpec')
class DepthwiseBottleneckSpec(
NamedTuple('DepthwiseBottleneckSpec',
[('kernel_size', TunableKernelSize),
('expansion_filters',
Union[schema.OneOf[int],
schema.OneOf[basic_specs.FilterMultiplier],
basic_specs.FilterMultiplier]),
('use_squeeze_and_excite', Union[bool, schema.OneOf[bool]]),
('strides', _IntOrIntPair),
('activation', ActivationSpec),
('se_inner_activation', ActivationSpec),
('se_gating_activation', ActivationSpec)])):
"""Inverted bottleneck: a depthwise convolution between two pointwise convs.
Attributes:
kernel_size: Kernel size to use for the depthwise convolution.
expansion_filters: Number of filters to use in the intermediate layers of
the network.
use_squeeze_and_excite: Set to true to add a squeeze-and-excite layer
immediately after the depthwise convolution.
strides: Strides to use for the depthwise convolution.
activation: Activation function to use between the depthwise and pointwise
convolutions.
se_inner_activation: Activation function to apply between the inner layers
of the squeeze-and-excite function. Used only when
use_squeeze_and_excite is true.
se_gating_activation: Activation function to apply to the output of the
squeeze-and-excite feed-forward network. Used only when
use_squeeze_and_excite is true.
"""
def __new__(
cls,
kernel_size,
expansion_filters,
use_squeeze_and_excite,
strides,
activation = RELU,
se_inner_activation = RELU,
se_gating_activation = SIGMOID):
return super(DepthwiseBottleneckSpec, cls).__new__(
cls, kernel_size, expansion_filters, use_squeeze_and_excite, strides,
activation, se_inner_activation, se_gating_activation)
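# Illustrative example (not taken from any published architecture): a 3x3 inverted
# bottleneck with a 6x relative expansion, squeeze-and-excite enabled and stride 1
# could be written as
#   DepthwiseBottleneckSpec(
#       kernel_size=3,
#       expansion_filters=basic_specs.FilterMultiplier(6.0),
#       use_squeeze_and_excite=True,
#       strides=1,
#       activation=RELU)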
# NOTE: There's a bug in gpylint that triggers an error if we try to
# use typing.NamedTuple with an empty argument list. We work around the problem
# by using collections.namedtuple instead.
@schema_io.register_namedtuple('mobile_search_space_v3.GlobalAveragePoolSpec')
class GlobalAveragePoolSpec(
collections.namedtuple('GlobalAveragePoolSpec', [])):
"""Global average pooling layer."""
pass
@schema_io.register_namedtuple('mobile_search_space_v3.ResidualSpec')
class ResidualSpec(NamedTuple('ResidualSpec', [('layer', Any)])):
"""Residual connection.
Attributes:
layer: The layer to apply the residual connection to. The input and output
shapes must match.
"""
pass
@schema_io.register_namedtuple('mobile_search_space_v3.DetectionEndpointSpec')
class DetectionEndpointSpec(
collections.namedtuple('DetectionEndpointSpec', [])):
"""Mark the position of an endpoint for object detection."""
pass
def _merge_strides(
lhs,
rhs
):
"""Merge two sets of strides.
Args:
lhs: A tuple (x, y) where each element is either an integer or None.
rhs: A tuple (x, y) where each element is either an integer or None.
Returns:
A merged tuple (x, y). For example:
* merge((1, 1), (None, None)) = (1, 1)
* merge((None, None), (None, None)) = None
* merge((1, 1), (2, 2)) triggers an error, since the two strides
are incompatible.
"""
if lhs[0] is not None and rhs[0] is not None and lhs[0] != rhs[0]:
raise ValueError('Stride mismatch: {} vs {}'.format(lhs, rhs))
if lhs[1] is not None and rhs[1] is not None and lhs[1] != rhs[1]:
raise ValueError('Stride mismatch: {} vs {}'.format(lhs, rhs))
x = lhs[0] if lhs[0] is not None else rhs[0]
y = lhs[1] if lhs[1] is not None else rhs[1]
return (x, y)
def get_strides(layer_spec):
"""Compute the output strides for a given layer."""
strides = (None, None)
if isinstance(layer_spec, ConvSpec):
strides = search_space_utils.normalize_strides(layer_spec.strides)
elif isinstance(layer_spec, SeparableConvSpec):
strides = search_space_utils.normalize_strides(layer_spec.strides)
elif isinstance(layer_spec, DepthwiseBottleneckSpec):
strides = search_space_utils.normalize_strides(layer_spec.strides)
elif isinstance(layer_spec, basic_specs.ZeroSpec):
strides = (1, 1)
elif isinstance(layer_spec, GlobalAveragePoolSpec):
# Cannot be determined statically.
strides = (None, None)
elif isinstance(layer_spec, ActivationSpec):
strides = (1, 1)
elif isinstance(layer_spec, ResidualSpec):
strides = get_strides(layer_spec.layer)
if strides != (1, 1):
raise ValueError('Residual layer must have stride 1: {}'.format(
layer_spec))
elif isinstance(layer_spec, schema.OneOf):
for choice in layer_spec.choices:
strides = _merge_strides(strides, get_strides(choice))
else:
raise ValueError('Unsupported layer_spec type: {}'.format(
type(layer_spec)))
return strides
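# For illustration (assuming search_space_utils.normalize_strides maps an integer s to
# the pair (s, s)), a stride-2 convolution resolves to output strides of (2, 2):
#   get_strides(ConvSpec(kernel_size=3, strides=2))  # -> (2, 2)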
def choose_filters(choices):
"""Choose one of the filters from the given choices."""
return schema.OneOf(choices, basic_specs.FILTERS_TAG)
def _proxylessnas_search_base(base_filters,
collapse_shared_ops = False):
"""Reproduction of ProxylessNAS search space with custom output filters."""
block = basic_specs.block
residual = ResidualSpec
global_avg_pool = GlobalAveragePoolSpec
def conv(kernel, s, bn=True):
return ConvSpec(
kernel_size=kernel,
strides=s,
use_batch_norm=bn)
def sepconv(s):
choices = []
for kernel_size in (3, 5, 7):
choices.append(
SeparableConvSpec(
kernel_size=kernel_size,
strides=s,
activation=RELU))
return schema.OneOf(choices, basic_specs.OP_TAG)
def bneck(s, skippable):
"""Construct a spec for an inverted bottleneck layer."""
possible_filter_multipliers = [3.0, 6.0]
possible_kernel_sizes = [3, 5, 7]
choices = []
if collapse_shared_ops:
kernel_size = schema.OneOf(possible_kernel_sizes, basic_specs.OP_TAG)
expansion_filters = schema.OneOf(
[basic_specs.FilterMultiplier(multiplier)
for multiplier in possible_filter_multipliers],
basic_specs.FILTERS_TAG)
choices.append(
DepthwiseBottleneckSpec(
kernel_size=kernel_size,
expansion_filters=expansion_filters,
use_squeeze_and_excite=False,
strides=s,
activation=RELU))
else:
for multiplier in possible_filter_multipliers:
for kernel_size in possible_kernel_sizes:
choices.append(
DepthwiseBottleneckSpec(
kernel_size=kernel_size,
expansion_filters=basic_specs.FilterMultiplier(multiplier),
use_squeeze_and_excite=False,
strides=s,
activation=RELU))
if skippable:
choices.append(basic_specs.ZeroSpec())
return schema.OneOf(choices, basic_specs.OP_TAG)
blocks = [
# Stem
block([
conv(kernel=3, s=2),
RELU,
], filters=base_filters[0]),
block([
# NOTE: The original MobileNet V2 paper used an inverted bottleneck
# layer with an expansion factor of 1 here. Under the definition used
# by the paper, an inverted bottleneck layer with an expansion factor
# of 1 was equivalent to a depthwise separable convolution, which is
# what we use. (Our definition of an inverted bottleneck layer with
# an expansion factor of 1 is slightly different from the one used in
# the MobileNet paper.)
sepconv(s=1),
DetectionEndpointSpec(),
], filters=base_filters[1]),
# Body
block([
bneck(s=2, skippable=False),
residual(bneck(s=1, skippable=True)),
residual(bneck(s=1, skippable=True)),
residual(bneck(s=1, skippable=True)),
DetectionEndpointSpec(),
], filters=base_filters[2]),
block([
bneck(s=2, skippable=False),
residual(bneck(s=1, skippable=True)),
residual(bneck(s=1, skippable=True)),
residual(bneck(s=1, skippable=True)),
DetectionEndpointSpec(),
], filters=base_filters[3]),
block([
bneck(s=2, skippable=False),
residual(bneck(s=1, skippable=True)),
residual(bneck(s=1, skippable=True)),
residual(bneck(s=1, skippable=True)),
], filters=base_filters[4]),
block([
bneck(s=1, skippable=False),
residual(bneck(s=1, skippable=True)),
residual(bneck(s=1, skippable=True)),
residual(bneck(s=1, skippable=True)),
DetectionEndpointSpec(),
], filters=base_filters[5]),
block([
bneck(s=2, skippable=False),
residual(bneck(s=1, skippable=True)),
residual(bneck(s=1, skippable=True)),
residual(bneck(s=1, skippable=True)),
DetectionEndpointSpec(),
], filters=base_filters[6]),
block([
bneck(s=1, skippable=False),
DetectionEndpointSpec(),
], filters=base_filters[7]),
# Head
block([
conv(kernel=1, s=1),
RELU,
global_avg_pool(),
], filters=base_filters[8]),
]
return basic_specs.ConvTowerSpec(blocks=blocks, filters_base=8)
def proxylessnas_search():
return _proxylessnas_search_base(
mobile_model_archive.PROXYLESSNAS_MOBILE_FILTERS,
collapse_shared_ops=True)
def proxylessnas_enlarged_search():
base_filters = (16, 16, 16, 32, 64, 128, 256, 512, 1024)
multipliers = (0.5, 0.625, 0.75, 1.0, 1.25, 1.5, 2.0)
model_spec = _proxylessnas_search_base(base_filters, collapse_shared_ops=True)
return search_space_utils.scale_conv_tower_spec(model_spec, multipliers)
def mobilenet_v2():
"""Specification for MobileNet V2 w/ relative expansion filters."""
model_spec = _proxylessnas_search_base(
mobile_model_archive.MOBILENET_V2_FILTERS)
model_spec = search_space_utils.prune_model_spec(
model_spec,
{basic_specs.OP_TAG: mobile_model_archive.MOBILENET_V2_OPERATIONS})
return model_spec
def mnasnet_b1():
model_spec = _proxylessnas_search_base(
mobile_model_archive.MNASNET_FILTERS)
model_spec = search_space_utils.prune_model_spec(
model_spec,
{basic_specs.OP_TAG: mobile_model_archive.MNASNET_OPERATIONS})
return model_spec
def proxylessnas_mobile():
model_spec = _proxylessnas_search_base(
mobile_model_archive.PROXYLESSNAS_MOBILE_FILTERS)
model_spec = search_space_utils.prune_model_spec(
model_spec,
{basic_specs.OP_TAG: mobile_model_archive.PROXYLESSNAS_MOBILE_OPERATIONS})
return model_spec
def _mobilenet_v3_large_base(
use_relative_filter_sizes):
"""Specification for MobileNet V3 - Large model."""
block = basic_specs.block
residual = ResidualSpec
global_avg_pool = GlobalAveragePoolSpec
def conv(kernel, s, bn=True):
return ConvSpec(
kernel_size=kernel,
strides=s,
use_batch_norm=bn)
def sepconv(kernel, s, act):
return SeparableConvSpec(
kernel_size=kernel,
strides=s,
activation=act)
def bneck(kernel, input_size, exp_size, se, s, act):
if use_relative_filter_sizes:
# The expanded filter size will be computed relative to the input filter
# size. Separate logic in the model builder code ensures that the expanded
# filter size will be an integer multiple of `model_spec.filters_base`.
filters = basic_specs.FilterMultiplier(exp_size / input_size)
else:
filters = exp_size
return DepthwiseBottleneckSpec(
kernel_size=kernel,
expansion_filters=choose_filters([filters]),
use_squeeze_and_excite=se,
strides=s,
activation=act)
blocks = [
# Stem
block([
conv(kernel=3, s=2),
SWISH6,
residual(sepconv(kernel=3, s=1, act=RELU)),
DetectionEndpointSpec(),
], filters=16),
# Body
block([
bneck(kernel=3, input_size=16, exp_size=64, se=False, s=2, act=RELU),
residual(bneck(kernel=3, input_size=24, exp_size=72, se=False, s=1,
act=RELU)),
DetectionEndpointSpec(),
], filters=24),
block([
bneck(kernel=5, input_size=24, exp_size=72, se=True, s=2, act=RELU),
residual(bneck(kernel=5, input_size=40, exp_size=120, se=True, s=1,
act=RELU)),
residual(bneck(kernel=5, input_size=40, exp_size=120, se=True, s=1,
act=RELU)),
DetectionEndpointSpec(),
], 40),
block([
bneck(kernel=3, input_size=40, exp_size=240, se=False, s=2,
act=SWISH6),
residual(bneck(kernel=3, input_size=80, exp_size=200, se=False, s=1,
act=SWISH6)),
residual(bneck(kernel=3, input_size=80, exp_size=184, se=False, s=1,
act=SWISH6)),
residual(bneck(kernel=3, input_size=80, exp_size=184, se=False, s=1,
act=SWISH6)),
], 80),
block([
bneck(kernel=3, input_size=80, exp_size=480, se=True, s=1,
act=SWISH6),
residual(bneck(kernel=3, input_size=112, exp_size=672, se=True, s=1,
act=SWISH6)),
DetectionEndpointSpec(),
], 112),
block([
bneck(kernel=5, input_size=112, exp_size=672, se=True, s=2,
act=SWISH6),
residual(bneck(kernel=5, input_size=160, exp_size=960, se=True, s=1,
act=SWISH6)),
residual(bneck(kernel=5, input_size=160, exp_size=960, se=True, s=1,
act=SWISH6)),
DetectionEndpointSpec(),
], 160),
# Head
block([
conv(kernel=1, s=1),
SWISH6,
global_avg_pool(),
], 960),
block([
conv(kernel=1, s=1, bn=False),
SWISH6,
], 1280),
]
return basic_specs.ConvTowerSpec(blocks=blocks, filters_base=8)
def mobilenet_v3_large():
"""Returns a reproduction of the MobileNetV3-Large model."""
return _mobilenet_v3_large_base(use_relative_filter_sizes=False)
def mobilenet_multi_avg():
"""Specification for MobileNet Multi-AVG model.
From the paper:
"Discovering Multi-Hardware Mobile Models via Architecture Search"
Returns:
A ConvTowerSpec namedtuple for the Mobilenet Multi-AVG model.
"""
block = basic_specs.block
residual = ResidualSpec
global_avg_pool = GlobalAveragePoolSpec
def conv(kernel, s, bn=True):
return ConvSpec(
kernel_size=kernel,
strides=s,
use_batch_norm=bn)
def bneck(kernel, exp_size, s):
return DepthwiseBottleneckSpec(
kernel_size=kernel,
expansion_filters=choose_filters([exp_size]),
use_squeeze_and_excite=False,
strides=s,
activation=RELU)
blocks = [
# Stem
block([
conv(kernel=3, s=2),
RELU,
DetectionEndpointSpec(),
], filters=32),
# Body
block([
bneck(kernel=3, exp_size=96, s=2),
residual(bneck(kernel=3, exp_size=64, s=1)),
DetectionEndpointSpec(),
], filters=32),
block([
bneck(kernel=5, exp_size=160, s=2),
residual(bneck(kernel=3, exp_size=192, s=1)),
residual(bneck(kernel=3, exp_size=128, s=1)),
residual(bneck(kernel=3, exp_size=192, s=1)),
DetectionEndpointSpec(),
], 64),
block([
bneck(kernel=5, exp_size=384, s=2),
residual(bneck(kernel=3, exp_size=384, s=1)),
residual(bneck(kernel=3, exp_size=384, s=1)),
residual(bneck(kernel=3, exp_size=384, s=1)),
], 128),
block([
bneck(kernel=3, exp_size=768, s=1),
residual(bneck(kernel=3, exp_size=640, s=1)),
DetectionEndpointSpec(),
], 160),
block([
bneck(kernel=3, exp_size=960, s=2),
residual(bneck(kernel=5, exp_size=768, s=1)),
residual(bneck(kernel=5, exp_size=768, s=1)),
residual(bneck(kernel=5, exp_size=768, s=1)),
DetectionEndpointSpec(),
], 192),
# Head
block([
conv(kernel=1, s=1),
RELU,
global_avg_pool(),
], 960),
block([
conv(kernel=1, s=1, bn=False),
RELU,
], 1280),
]
return basic_specs.ConvTowerSpec(blocks=blocks, filters_base=32)
def mobilenet_multi_max():
"""Specification for MobileNet Multi-MAX model.
From the paper:
"Discovering Multi-Hardware Mobile Models via Architecture Search"
Returns:
A ConvTowerSpec namedtuple for the Mobilenet Multi-MAX model.
"""
block = basic_specs.block
residual = ResidualSpec
global_avg_pool = GlobalAveragePoolSpec
def conv(kernel, s, bn=True):
return ConvSpec(
kernel_size=kernel,
strides=s,
use_batch_norm=bn)
def bneck(kernel, exp_size, s):
return DepthwiseBottleneckSpec(
kernel_size=kernel,
expansion_filters=choose_filters([exp_size]),
use_squeeze_and_excite=False,
strides=s,
activation=RELU)
blocks = [
# Stem
block([
conv(kernel=3, s=2),
RELU,
DetectionEndpointSpec(),
], filters=32),
# Body
block([
bneck(kernel=3, exp_size=96, s=2),
DetectionEndpointSpec(),
], filters=32),
block([
bneck(kernel=5, exp_size=192, s=2),
residual(bneck(kernel=3, exp_size=128, s=1)),
residual(bneck(kernel=3, exp_size=128, s=1)),
DetectionEndpointSpec(),
], 64),
block([
bneck(kernel=5, exp_size=384, s=2),
residual(bneck(kernel=3, exp_size=512, s=1)),
residual(bneck(kernel=3, exp_size=384, s=1)),
residual(bneck(kernel=3, exp_size=384, s=1)),
], 128),
block([
bneck(kernel=3, exp_size=768, s=1),
residual(bneck(kernel=3, exp_size=384, s=1)),
DetectionEndpointSpec(),
], 128),
block([
bneck(kernel=3, exp_size=768, s=2),
residual(bneck(kernel=5, exp_size=640, s=1)),
residual(bneck(kernel=3, exp_size=800, s=1)),
residual(bneck(kernel=5, exp_size=640, s=1)),
DetectionEndpointSpec(),
], 160),
# Head
block([
conv(kernel=1, s=1),
RELU,
global_avg_pool(),
], 960),
block([
conv(kernel=1, s=1, bn=False),
RELU,
], 1280),
]
return basic_specs.ConvTowerSpec(blocks=blocks, filters_base=32)
def _mobilenet_v3_large_search_base(
block_filters_multipliers,
expansion_multipliers,
search_squeeze_and_excite = False,
always_use_relu = False,
use_relative_expansion_filters = False,
base_filters=(16, 24, 40, 80, 112, 160, 960, 1280)
):
"""Experimental search space built around MobileNet V3 - Large model."""
swish6_or_relu = RELU if always_use_relu else SWISH6
def block(layers, filters):
all_filters = sorted({
search_space_utils.scale_filters(filters, multiplier, base=8)
for multiplier in block_filters_multipliers
})
return basic_specs.Block(layers=layers, filters=choose_filters(all_filters))
residual = ResidualSpec
global_avg_pool = GlobalAveragePoolSpec
def initial_conv(s, bn=True):
return ConvSpec(
kernel_size=schema.OneOf([3, 5], basic_specs.OP_TAG),
strides=s,
use_batch_norm=bn)
def sepconv(s, act):
return SeparableConvSpec(
kernel_size=schema.OneOf([3, 5, 7], basic_specs.OP_TAG),
strides=s,
activation=act)
def bneck(input_size, se, s, act):
"""Construct a DepthwiseBottleneckSpec namedtuple."""
if use_relative_expansion_filters:
expansion_filters = sorted({
basic_specs.FilterMultiplier(expansion)
for expansion in expansion_multipliers
})
else:
expansion_filters = sorted({
search_space_utils.scale_filters(input_size, expansion, base=8)
for expansion in expansion_multipliers
})
if search_squeeze_and_excite:
# Replace the default value of the argument 'se' with a OneOf node.
se = schema.OneOf([False, True], basic_specs.OP_TAG)
return DepthwiseBottleneckSpec(
kernel_size=schema.OneOf([3, 5, 7], basic_specs.OP_TAG),
expansion_filters=choose_filters(expansion_filters),
use_squeeze_and_excite=se,
strides=s,
activation=act)
def optional(layer):
return schema.OneOf([layer, basic_specs.ZeroSpec()], basic_specs.OP_TAG)
blocks = [
# Stem
block([
initial_conv(s=2),
swish6_or_relu,
residual(optional(sepconv(s=1, act=RELU))),
DetectionEndpointSpec(),
], filters=base_filters[0]),
# Body
block([
bneck(input_size=base_filters[0], se=False, s=2, act=RELU),
residual(optional(bneck(input_size=base_filters[1], se=False, s=1,
act=RELU))),
residual(optional(bneck(input_size=base_filters[1], se=False, s=1,
act=RELU))),
residual(optional(bneck(input_size=base_filters[1], se=False, s=1,
act=RELU))),
DetectionEndpointSpec(),
], filters=base_filters[1]),
block([
bneck(input_size=base_filters[1], se=True, s=2, act=RELU),
residual(optional(bneck(input_size=base_filters[2], se=True, s=1,
act=RELU))),
residual(optional(bneck(input_size=base_filters[2], se=True, s=1,
act=RELU))),
residual(optional(bneck(input_size=base_filters[2], se=True, s=1,
act=RELU))),
DetectionEndpointSpec(),
], base_filters[2]),
block([
bneck(input_size=base_filters[2], se=False, s=2, act=swish6_or_relu),
residual(optional(bneck(input_size=base_filters[3], se=False, s=1,
act=swish6_or_relu))),
residual(optional(bneck(input_size=base_filters[3], se=False, s=1,
act=swish6_or_relu))),
residual(optional(bneck(input_size=base_filters[3], se=False, s=1,
act=swish6_or_relu))),
], base_filters[3]),
block([
bneck(input_size=base_filters[3], se=True, s=1, act=swish6_or_relu),
residual(optional(bneck(input_size=base_filters[4], se=True, s=1,
act=swish6_or_relu))),
residual(optional(bneck(input_size=base_filters[4], se=True, s=1,
act=swish6_or_relu))),
residual(optional(bneck(input_size=base_filters[4], se=True, s=1,
act=swish6_or_relu))),
DetectionEndpointSpec(),
], base_filters[4]),
block([
bneck(input_size=base_filters[4], se=True, s=2, act=swish6_or_relu),
residual(optional(bneck(input_size=base_filters[5], se=True, s=1,
act=swish6_or_relu))),
residual(optional(bneck(input_size=base_filters[5], se=True, s=1,
act=swish6_or_relu))),
residual(optional(bneck(input_size=base_filters[5], se=True, s=1,
act=swish6_or_relu))),
DetectionEndpointSpec(),
], base_filters[5]),
# Head
block([
ConvSpec(kernel_size=1, strides=1, use_batch_norm=True),
swish6_or_relu,
global_avg_pool(),
], base_filters[6]),
block([
ConvSpec(kernel_size=1, strides=1, use_batch_norm=False),
swish6_or_relu,
], base_filters[7]),
]
return basic_specs.ConvTowerSpec(blocks=blocks, filters_base=8)
def mobilenet_v3_like_search():
"""Like exp11, but use base filter sizes which are increasing powers of 2."""
return _mobilenet_v3_large_search_base(
block_filters_multipliers=(0.5, 0.625, 0.75, 1.0, 1.25, 1.5, 2.0),
expansion_multipliers=(1.0, 2.0, 3.0, 4.0, 5.0, 6.0),
use_relative_expansion_filters=True,
search_squeeze_and_excite=True,
base_filters=(16, 16, 32, 64, 128, 256, 512, 1024))
def get_search_space_spec(ssd):
"""Returns the search space with the specified name."""
if ssd == MOBILENET_V2:
return mobilenet_v2()
elif ssd == MNASNET_B1:
return mnasnet_b1()
elif ssd == PROXYLESSNAS_MOBILE:
return proxylessnas_mobile()
elif ssd == MOBILENET_V3_LARGE:
return mobilenet_v3_large()
elif ssd == MOBILENET_MULTI_AVG:
return mobilenet_multi_avg()
elif ssd == MOBILENET_MULTI_MAX:
return mobilenet_multi_max()
elif ssd == PROXYLESSNAS_SEARCH:
return proxylessnas_search()
elif ssd == PROXYLESSNAS_ENLARGED_SEARCH:
return proxylessnas_enlarged_search()
elif ssd == MOBILENET_V3_LIKE_SEARCH:
return mobilenet_v3_like_search()
else:
    raise ValueError('Unsupported SSD: {}'.format(ssd))
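# Example usage (illustrative): build the MobileNetV3-Large reference spec and the
# corresponding searchable space; both calls return basic_specs.ConvTowerSpec namedtuples.
#   reference_spec = get_search_space_spec(MOBILENET_V3_LARGE)
#   search_spec = get_search_space_spec(MOBILENET_V3_LIKE_SEARCH)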
|
# Generated by gen_timm_models.py
import torch
import timm.models.vision_transformer
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
from .config import TimmConfig
class Model(BenchmarkModel):
task = COMPUTER_VISION.GENERATION
def __init__(self, device=None, jit=False, variant='vit_small_patch16_224', precision='float32'):
super().__init__()
self.device = device
self.jit = jit
self.model = timm.create_model(variant, pretrained=False, scriptable=True)
self.cfg = TimmConfig(model = self.model, device = device, precision = precision)
self.model.to(
device=self.device,
dtype=self.cfg.model_dtype
)
if device == 'cuda':
torch.cuda.empty_cache()
if jit:
self.model = torch.jit.script(self.model)
assert isinstance(self.model, torch.jit.ScriptModule)
def _gen_target(self, batch_size):
return torch.empty(
(batch_size,) + self.cfg.target_shape,
device=self.device, dtype=torch.long).random_(self.cfg.num_classes)
def _step_train(self):
self.cfg.optimizer.zero_grad()
output = self.model(self.cfg.example_inputs)
if isinstance(output, tuple):
output = output[0]
target = self._gen_target(output.shape[0])
self.cfg.loss(output, target).backward()
self.cfg.optimizer.step()
def _step_eval(self):
output = self.model(self.cfg.infer_example_inputs)
def get_module(self):
return self.model, (self.cfg.example_inputs,)
def train(self, niter=1):
self.model.train()
for _ in range(niter):
self._step_train()
# TODO: use pretrained model weights, assuming the pretrained model is in .data/ dir
def eval(self, niter=1):
self.model.eval()
with torch.no_grad():
for _ in range(niter):
self._step_eval()
if __name__ == "__main__":
for device in ['cpu', 'cuda']:
for jit in [False, True]:
print("Test config: device %s, JIT %s" % (device, jit))
            m = Model(device=device, jit=jit)
            module, example_inputs = m.get_module()
            module(*example_inputs)
            m.train()
            m.eval()
|
"""
WSGI config for chat_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chat_server.settings')
application = get_wsgi_application()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: gdalinfo testing
# Author: Even Rouault <even dot rouault @ mines-paris dot org>
#
###############################################################################
# Copyright (c) 2008-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
import os
import json
sys.path.append('../pymod')
from osgeo import gdal
import gdaltest
import test_cli_utilities
###############################################################################
# Simple test
def test_gdalinfo_1():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
(ret, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' ../gcore/data/byte.tif')
if not (err is None or err == ''):
gdaltest.post_reason('got error/warning')
print(err)
return 'fail'
if ret.find('Driver: GTiff/GeoTIFF') == -1:
return 'fail'
return 'success'
###############################################################################
# Test -checksum option
def test_gdalinfo_2():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -checksum ../gcore/data/byte.tif')
if ret.find('Checksum=4672') == -1:
return 'fail'
return 'success'
###############################################################################
# Test -nomd option
def test_gdalinfo_3():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' ../gcore/data/byte.tif')
if ret.find('Metadata') == -1:
return 'fail'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -nomd ../gcore/data/byte.tif')
if ret.find('Metadata') != -1:
return 'fail'
return 'success'
###############################################################################
# Test -noct option
def test_gdalinfo_4():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' ../gdrivers/data/bug407.gif')
if ret.find('0: 255,255,255,255') == -1:
return 'fail'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -noct ../gdrivers/data/bug407.gif')
if ret.find('0: 255,255,255,255') != -1:
return 'fail'
return 'success'
###############################################################################
# Test -stats option
def test_gdalinfo_5():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
try:
os.remove('../gcore/data/byte.tif.aux.xml')
except OSError:
pass
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' ../gcore/data/byte.tif')
if ret.find('STATISTICS_MINIMUM=74') != -1:
gdaltest.post_reason('got wrong minimum.')
print(ret)
return 'fail'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -stats ../gcore/data/byte.tif')
if ret.find('STATISTICS_MINIMUM=74') == -1:
gdaltest.post_reason('got wrong minimum (2).')
print(ret)
return 'fail'
    # os.remove will raise an exception if the file does not exist at this point.
os.remove('../gcore/data/byte.tif.aux.xml')
return 'success'
###############################################################################
# Test a dataset with overviews and RAT
def test_gdalinfo_6():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' ../gdrivers/data/int.img')
if ret.find('Overviews') == -1:
return 'fail'
if ret.find('GDALRasterAttributeTable') == -1:
return 'fail'
return 'success'
###############################################################################
# Test a dataset with GCPs
def test_gdalinfo_7():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' ../gcore/data/gcps.vrt')
if ret.find('GCP Projection =') == -1:
return 'fail'
if ret.find('PROJCS["NAD27 / UTM zone 11N"') == -1:
return 'fail'
if ret.find('(100,100) -> (446720,3745320,0)') == -1:
return 'fail'
    # Same but with -nogcp
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -nogcp ../gcore/data/gcps.vrt')
if ret.find('GCP Projection =') != -1:
return 'fail'
if ret.find('PROJCS["NAD27 / UTM zone 11N"') != -1:
return 'fail'
if ret.find('(100,100) -> (446720,3745320,0)') != -1:
return 'fail'
return 'success'
###############################################################################
# Test -hist option
def test_gdalinfo_8():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
try:
os.remove('../gcore/data/byte.tif.aux.xml')
except OSError:
pass
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' ../gcore/data/byte.tif')
if ret.find('0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 0 0 0 0 0 0 0 0 37 0 0 0 0 0 0 0 57 0 0 0 0 0 0 0 62 0 0 0 0 0 0 0 66 0 0 0 0 0 0 0 0 72 0 0 0 0 0 0 0 31 0 0 0 0 0 0 0 24 0 0 0 0 0 0 0 12 0 0 0 0 0 0 0 0 7 0 0 0 0 0 0 0 12 0 0 0 0 0 0 0 5 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1') != -1:
gdaltest.post_reason('did not expect histogram.')
print(ret)
return 'fail'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -hist ../gcore/data/byte.tif')
if ret.find('0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 0 0 0 0 0 0 0 0 37 0 0 0 0 0 0 0 57 0 0 0 0 0 0 0 62 0 0 0 0 0 0 0 66 0 0 0 0 0 0 0 0 72 0 0 0 0 0 0 0 31 0 0 0 0 0 0 0 24 0 0 0 0 0 0 0 12 0 0 0 0 0 0 0 0 7 0 0 0 0 0 0 0 12 0 0 0 0 0 0 0 5 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1') == -1:
gdaltest.post_reason('did not get expected histogram.')
print(ret)
return 'fail'
    # os.remove will raise an exception if the file does not exist at this point.
os.remove('../gcore/data/byte.tif.aux.xml')
return 'success'
###############################################################################
# Test -mdd option
def test_gdalinfo_9():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' ../gdrivers/data/fake_nsif.ntf')
if ret.find('BLOCKA=010000001000000000') != -1:
gdaltest.post_reason('got unexpected extra MD.')
print(ret)
return 'fail'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -mdd TRE ../gdrivers/data/fake_nsif.ntf')
if ret.find('BLOCKA=010000001000000000') == -1:
gdaltest.post_reason('did not get extra MD.')
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test -mm option
def test_gdalinfo_10():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' ../gcore/data/byte.tif')
if ret.find('Computed Min/Max=74.000,255.000') != -1:
return 'fail'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -mm ../gcore/data/byte.tif')
if ret.find('Computed Min/Max=74.000,255.000') == -1:
return 'fail'
return 'success'
###############################################################################
# Test gdalinfo --version
def test_gdalinfo_11():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' --version', check_memleak=False)
if ret.find(gdal.VersionInfo('--version')) != 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test gdalinfo --build
def test_gdalinfo_12():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' --build', check_memleak=False)
ret = ret.replace('\r\n', '\n')
if ret.find(gdal.VersionInfo('BUILD_INFO')) != 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test gdalinfo --license
def test_gdalinfo_13():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' --license', check_memleak=False)
ret = ret.replace('\r\n', '\n')
if ret.find(gdal.VersionInfo('LICENSE')) != 0:
print(ret)
print(gdal.VersionInfo('LICENSE'))
if gdaltest.is_travis_branch('mingw'):
return 'expected_fail'
return 'fail'
return 'success'
###############################################################################
# Test erroneous use of --config.
def test_gdalinfo_14():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
(out, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' --config', check_memleak=False)
if err.find('--config option given without a key and value argument') < 0:
print(err)
return 'fail'
return 'success'
###############################################################################
# Test erroneous use of --mempreload.
def test_gdalinfo_15():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
(out, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' --mempreload', check_memleak=False)
if err.find('--mempreload option given without directory path') < 0:
print(err)
return 'fail'
return 'success'
###############################################################################
# Test --mempreload
def test_gdalinfo_16():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
(ret, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' --debug on --mempreload ../gcore/data /vsimem/byte.tif', check_memleak=False)
if ret.find('Driver: GTiff/GeoTIFF') != 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test erroneous use of --debug.
def test_gdalinfo_17():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
(out, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' --debug', check_memleak=False)
if err.find('--debug option given without debug level') < 0:
print(err)
return 'fail'
return 'success'
###############################################################################
# Test erroneous use of --optfile.
def test_gdalinfo_18():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
(out, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' --optfile', check_memleak=False)
if err.find('--optfile option given without filename') < 0:
gdaltest.post_reason('fail')
print(err)
return 'fail'
(out, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' --optfile /foo/bar', check_memleak=False)
if err.find('Unable to open optfile') < 0:
gdaltest.post_reason('fail')
print(err)
return 'fail'
return 'success'
###############################################################################
# Test --optfile
def test_gdalinfo_19():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
f = open('tmp/optfile.txt', 'wt')
f.write('# comment\n')
f.write('../gcore/data/byte.tif\n')
f.close()
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' --optfile tmp/optfile.txt', check_memleak=False)
os.unlink('tmp/optfile.txt')
if ret.find('Driver: GTiff/GeoTIFF') != 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test --formats
def test_gdalinfo_20():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' --formats', check_memleak=False)
if ret.find('GTiff -raster- (rw+vs): GeoTIFF') < 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test erroneous use of --format.
def test_gdalinfo_21():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
(out, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' --format', check_memleak=False)
if err.find('--format option given without a format code') < 0:
gdaltest.post_reason('fail')
print(err)
return 'fail'
(out, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' --format foo_bar', check_memleak=False)
if err.find('--format option given with format') < 0:
gdaltest.post_reason('fail')
print(err)
return 'fail'
return 'success'
###############################################################################
# Test --format
def test_gdalinfo_22():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' --format GTiff', check_memleak=False)
expected_strings = [
'Short Name:',
'Long Name:',
'Extensions:',
'Mime Type:',
'Help Topic:',
'Supports: Create()',
'Supports: CreateCopy()',
'Supports: Virtual IO',
'Creation Datatypes',
'<CreationOptionList>']
for expected_string in expected_strings:
if ret.find(expected_string) < 0:
gdaltest.post_reason('did not find %s' % expected_string)
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test --help-general
def test_gdalinfo_23():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' --help-general', check_memleak=False)
if ret.find('Generic GDAL utility command options') < 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test --locale
def test_gdalinfo_24():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' --locale C ../gcore/data/byte.tif', check_memleak=False)
if ret.find('Driver: GTiff/GeoTIFF') != 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test -listmdd
def test_gdalinfo_25():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' ../gdrivers/data/byte_with_xmp.tif -listmdd', check_memleak=False)
if ret.find('Metadata domains:') < 0:
print(ret)
return 'fail'
if ret.find(' xml:XMP') < 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test -mdd all
def test_gdalinfo_26():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' ../gdrivers/data/byte_with_xmp.tif -mdd all', check_memleak=False)
if ret.find('Metadata (xml:XMP)') < 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test -oo
def test_gdalinfo_27():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' ../gdrivers/data/float64.asc -oo datatype=float64', check_memleak=False)
if ret.find('Type=Float64') < 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Simple -json test
def test_gdalinfo_28():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
(ret, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdalinfo_path() + ' -json ../gcore/data/byte.tif')
ret = json.loads(ret)
if not (err is None or err == ''):
gdaltest.post_reason('got error/warning')
print(err)
return 'fail'
if ret['driverShortName'] != 'GTiff':
return 'fail'
return 'success'
###############################################################################
# Test -json -checksum option
def test_gdalinfo_29():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json -checksum ../gcore/data/byte.tif')
ret = json.loads(ret)
if ret['bands'][0]['checksum'] != 4672:
return 'fail'
return 'success'
###############################################################################
# Test -json -nomd option
def test_gdalinfo_30():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json ../gcore/data/byte.tif')
ret = json.loads(ret)
if 'metadata' not in ret:
return 'fail'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json -nomd ../gcore/data/byte.tif')
ret = json.loads(ret)
if 'metadata' in ret:
return 'fail'
return 'success'
###############################################################################
# Test -json -noct option
def test_gdalinfo_31():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json ../gdrivers/data/bug407.gif')
ret = json.loads(ret)
if ret['bands'][0]['colorTable']['entries'][0] != [255, 255, 255, 255]:
return 'fail'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json -noct ../gdrivers/data/bug407.gif')
ret = json.loads(ret)
if 'colorTable' in ret['bands'][0]:
return 'fail'
return 'success'
###############################################################################
# Test -stats option
def test_gdalinfo_32():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
try:
os.remove('../gcore/data/byte.tif.aux.xml')
except OSError:
pass
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json ../gcore/data/byte.tif')
ret = json.loads(ret)
if '' in ret['bands'][0]['metadata']:
gdaltest.post_reason('got wrong minimum.')
print(ret)
return 'fail'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json -stats ../gcore/data/byte.tif')
ret = json.loads(ret)
if ret['bands'][0]['metadata']['']['STATISTICS_MINIMUM'] != '74':
gdaltest.post_reason('got wrong minimum (2).')
print(ret)
return 'fail'
    # os.remove will raise an exception if the file does not exist at this point.
os.remove('../gcore/data/byte.tif.aux.xml')
return 'success'
###############################################################################
# Test a dataset with overviews and RAT
def test_gdalinfo_33():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json ../gdrivers/data/int.img')
ret = json.loads(ret)
if 'overviews' not in ret['bands'][0]:
return 'fail'
if 'rat' not in ret:
return 'fail'
return 'success'
###############################################################################
# Test a dataset with GCPs
def test_gdalinfo_34():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json ../gcore/data/gcps.vrt')
ret = json.loads(ret)
if 'wkt' not in ret['gcps']['coordinateSystem']:
return 'fail'
if ret['gcps']['coordinateSystem']['wkt'].find('PROJCS["NAD27 / UTM zone 11N"') == -1:
return 'fail'
if ret['gcps']['gcpList'][0]['x'] != 440720.0:
return 'fail'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json -nogcp ../gcore/data/gcps.vrt')
ret = json.loads(ret)
if 'gcps' in ret:
return 'fail'
return 'success'
###############################################################################
# Test -hist option
def test_gdalinfo_35():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
try:
os.remove('../gcore/data/byte.tif.aux.xml')
except OSError:
pass
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json ../gcore/data/byte.tif')
ret = json.loads(ret)
if 'histogram' in ret['bands'][0]:
gdaltest.post_reason('did not expect histogram.')
print(ret)
return 'fail'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json -hist ../gcore/data/byte.tif')
ret = json.loads(ret)
if ret['bands'][0]['histogram']['buckets'] != [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 37, 0, 0, 0, 0, 0, 0, 0, 57, 0, 0, 0, 0, 0, 0, 0, 62, 0, 0, 0, 0, 0, 0, 0, 66, 0, 0, 0, 0, 0, 0, 0, 0, 72, 0, 0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1]:
gdaltest.post_reason('did not get expected histogram.')
print(ret)
return 'fail'
    # os.remove will raise an exception if the file does not exist at this point.
os.remove('../gcore/data/byte.tif.aux.xml')
return 'success'
###############################################################################
# Test -mdd option
def test_gdalinfo_36():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json ../gdrivers/data/fake_nsif.ntf')
ret = json.loads(ret)
if 'TRE' in ret['metadata']:
gdaltest.post_reason('got unexpected extra MD.')
print(ret)
return 'fail'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json -mdd TRE ../gdrivers/data/fake_nsif.ntf')
ret = json.loads(ret)
if ret['metadata']['TRE']['BLOCKA'].find('010000001000000000') == -1:
gdaltest.post_reason('did not get extra MD.')
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test -mm option
def test_gdalinfo_37():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json ../gcore/data/byte.tif')
ret = json.loads(ret)
if 'computedMin' in ret['bands'][0]:
return 'fail'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json -mm ../gcore/data/byte.tif')
ret = json.loads(ret)
if ret['bands'][0]['computedMin'] != 74.000:
return 'fail'
return 'success'
###############################################################################
# Test -listmdd
def test_gdalinfo_38():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json ../gdrivers/data/byte_with_xmp.tif -listmdd', check_memleak=False)
ret = json.loads(ret)
if 'metadataDomains' not in ret['metadata']:
print(ret)
return 'fail'
if ret['metadata']['metadataDomains'][0] != 'xml:XMP':
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test -mdd all
def test_gdalinfo_39():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json ../gdrivers/data/byte_with_xmp.tif -mdd all', check_memleak=False)
ret = json.loads(ret)
if 'xml:XMP' not in ret['metadata']:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test -json wgs84Extent
def test_gdalinfo_40():
if test_cli_utilities.get_gdalinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_gdalinfo_path() + ' -json ../gdrivers/data/small_world.tif')
ret = json.loads(ret)
if 'wgs84Extent' not in ret:
print(ret)
return 'fail'
if 'type' not in ret['wgs84Extent']:
print(ret)
return 'fail'
if ret['wgs84Extent']['type'] != 'Polygon':
print(ret)
return 'fail'
if 'coordinates' not in ret['wgs84Extent']:
print(ret)
return 'fail'
if ret['wgs84Extent']['coordinates'] != [[[-180.0, 90.0], [-180.0, -90.0], [180.0, -90.0], [180.0, 90.0], [-180.0, 90.0]]]:
print(ret)
return 'fail'
return 'success'
gdaltest_list = [
test_gdalinfo_1,
test_gdalinfo_2,
test_gdalinfo_3,
test_gdalinfo_4,
test_gdalinfo_5,
test_gdalinfo_6,
test_gdalinfo_7,
test_gdalinfo_8,
test_gdalinfo_9,
test_gdalinfo_10,
test_gdalinfo_11,
test_gdalinfo_12,
test_gdalinfo_13,
test_gdalinfo_14,
test_gdalinfo_15,
test_gdalinfo_16,
test_gdalinfo_17,
test_gdalinfo_18,
test_gdalinfo_19,
test_gdalinfo_20,
test_gdalinfo_21,
test_gdalinfo_22,
test_gdalinfo_23,
test_gdalinfo_24,
test_gdalinfo_25,
test_gdalinfo_26,
test_gdalinfo_27,
test_gdalinfo_28,
test_gdalinfo_29,
test_gdalinfo_30,
test_gdalinfo_31,
test_gdalinfo_32,
test_gdalinfo_33,
test_gdalinfo_34,
test_gdalinfo_35,
test_gdalinfo_36,
test_gdalinfo_37,
test_gdalinfo_38,
test_gdalinfo_39,
test_gdalinfo_40,
]
if __name__ == '__main__':
gdaltest.setup_run('test_gdalinfo')
gdaltest.run_tests(gdaltest_list)
gdaltest.summarize()
|
from .main import get_dictionary
from .main import add_pinyin
from .main import get_pinyin
from .main import do_not_parse_set
|
# ipop-project
# Copyright 2016, University of Florida
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import json
import time
import sys
from controller.framework.ControllerModule import ControllerModule
py_ver = sys.version_info[0]
# Check Python version and load appropriate urllib modules
if py_ver == 3:
import urllib.request as urllib2
else:
import urllib2
class OverlayVisualizer(ControllerModule):
def __init__(self, CFxHandle, paramDict, ModuleName):
super(OverlayVisualizer, self).__init__(CFxHandle, paramDict, ModuleName)
# Counter to keep track of time lapsed
self.interval_counter = 0
# Visualizer webservice URL
self.vis_address = "http://"+self.CMConfig["WebServiceAddress"]
# Datastructure to store Node network details
self.ipop_interface_details = {}
def initialize(self):
self.registerCBT('Logger', 'info', "{0} Loaded".format(self.ModuleName))
# Query VirtualNetwork Interface details from TincanInterface module
ipop_interfaces = self.CFxHandle.queryParam("TincanInterface", "Vnets")
# Create a dict of available net interfaces for collecting visualizer data
for interface_details in ipop_interfaces:
interface_name = interface_details["TapName"]
self.ipop_interface_details[interface_name] = {}
def processCBT(self, cbt):
msg = cbt.data
interface_name = msg.pop("interface_name")
# Check whether TapName exists in the internal table, if not create the entry
if interface_name not in self.ipop_interface_details.keys():
self.ipop_interface_details[interface_name] = {}
self.ipop_interface_details[interface_name].update(msg)
def timer_method(self):
# Increment the counter with every timer thread invocation
#self.interval_counter += 1
#if self.interval_counter % self.CMConfig["TopologyDataQueryInterval"] == 0:
for interface_name in self.ipop_interface_details.keys():
self.registerCBT("BaseTopologyManager", "GET_VISUALIZER_DATA", {"interface_name": interface_name})
#if self.interval_counter % self.CMConfig["WebServiceDataPostInterval"] == 0:
try:
# Iterate across the IPOP interface details table to send Node network details
for interface_name in self.ipop_interface_details.keys():
vis_req_msg = self.ipop_interface_details[interface_name]
if vis_req_msg:
vis_req_msg["node_name"] = self.CMConfig["NodeName"]
vis_req_msg["name"] = vis_req_msg["uid"]
vis_req_msg["uptime"] = int(time.time())
message = json.dumps(vis_req_msg).encode("utf8")
req = urllib2.Request(url=self.vis_address, data=message)
req.add_header("Content-Type", "application/json")
res = urllib2.urlopen(req)
# Check whether data has been successfully sent to the Visualizer
if res.getcode() != 200:
                        raise RuntimeError("Visualizer webservice returned HTTP status {0}".format(res.getcode()))
except Exception as err:
log = "Failed to send data to the IPOP Visualizer webservice({0}). Exception: {1}".\
format(self.vis_address, str(err))
self.registerCBT('Logger', 'error', log)
def terminate(self):
pass
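# A sketch of the JSON payload posted to the visualizer on each timer tick (field names
# other than node_name, name and uptime come from BaseTopologyManager data and are assumptions):
# {
#     "node_name": "<NodeName from config>",
#     "name": "<node uid>",
#     "uptime": 1600000000,
#     ...  # topology details merged in via processCBT()
# }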
|
# Advent of Code 2020, Day 7
with open("../input07", "r") as infile:
lines = infile.readlines()
# Parses a number (quantity of bags) and bag name
def parse_num_bag(s):
(n,bs) = s.split(" ",1)
bag = bs.split(" bag")[0]
return (int(n), bag)
# Parse a bag content specification
def bag_parse(line):
(bag,spec) = line.split(" bags contain ")
if spec.startswith("no"): return(bag,[])
nbs = spec.split(", ")
conts = [parse_num_bag(nb) for nb in nbs]
return(bag,conts)
# a dictionary mapping bag names to their contents
bags = {}
for line in lines:
(bag,conts) = bag_parse(line)
bags.update({bag : conts})
# Part 1
# dictionary of contents (as sets without quantity)
bcont = { }
for bag in bags:
conts = set([b for (n,b) in bags[bag]])
bcont.update({bag : conts})
# construct the set of bags recursively containing a shiny gold bag
sbags = set()
newbs = { "shiny gold" } # bags added on previous pass
more = True # have we added some bags in the previous pass?
while more:
more = False
nextbs = set() # bags added on this pass
    for bag in list(bcont.keys()):  # iterate over a copy so bcont.pop() below is safe
cont = bcont[bag]
if not(newbs.isdisjoint(cont)):
nextbs.add(bag)
bcont.pop(bag)
more = True
sbags.update(nextbs)
newbs = nextbs
print("Part 1: " + str(len(sbags)))
# Part 2
# Recursively count how many bags are contained in bag
def rec_cont(bag):
return(sum([n * (1 + rec_cont(b)) for (n,b) in bags[bag]]))
print("Part 2: " + str(rec_cont("shiny gold")))
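# Worked example (hypothetical rules, not from the puzzle input): if a shiny gold bag holds
# 2 dark red bags and each dark red bag holds 3 dark blue bags (which are empty), then
# rec_cont("shiny gold") = 2 * (1 + rec_cont("dark red")) = 2 * (1 + 3 * (1 + 0)) = 8.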
|
import io
from types import NoneType
import utils
from pprint import pprint
def typeMapFromFlatDict():
union: dict[str, set] = {}
for doc in utils.docs(progress_bar=True):
for key, val in doc.items():
union.setdefault(key, set())
union[key].add(type(val))
return union
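# A sketch of the returned union (hypothetical keys and values): every key seen across the
# documents maps to the set of Python types observed for it, e.g.
# {"title": {str}, "views": {int, NoneType}, "tags": {list, NoneType}}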
def flatTypeMapToPythonClassString(
typeMap: dict[str, set], name: str, indent: str = " ", table: str | None = None
):
def extractTypeString(type_):
if type_ is NoneType:
return "None"
else:
return type_.__name__
def extractSATypeString(types: set):
types = types - {NoneType}
if (
(str in types)
or (list in types)
or (tuple in types)
or (set in types)
or (dict in types)
):
return "sa.UnicodeText"
if float in types:
return "sa.Float"
if int in types:
return "sa.Integer"
buffer = io.StringIO()
if table is not None:
buffer.write(f"@mapper_registry.mapped\n")
buffer.write(f"@dataclass\n")
buffer.write(f"class {name}:\n{indent}")
if table is not None:
buffer.write("__table__ = sa.Table(\n")
buffer.write(f'{indent*2}"{table}",\n')
buffer.write(f"{indent*2}mapper_registry.metadata,\n")
buffer.write(
f'{indent*2}sa.Column("_id", sa.Integer, autoincrement=True, primary_key=True),\n'
)
buffer.write(f'{indent*2}sa.Column("_created", sa.DateTime, server_default=func.now()),\n')
buffer.write(f'{indent*2}sa.Column("_last_updated", sa.DateTime, onupdate=func.now()),\n')
buffer.write(f'{indent*2}sa.Column("_batch", sa.Integer),\n')
for key, types in typeMap.items():
buffer.write(f'{indent*2}sa.Column("{key}", {extractSATypeString(types)}),\n')
buffer.write(f"{indent})\n")
buffer.write(f"{indent}_id: int = field(init=False)\n")
buffer.write(f"{indent}_last_updated: datetime.datetime = field(init=False)\n")
buffer.write(f"{indent}_batch: int = None\n{indent}")
for key, types in typeMap.items():
buffer.write(f"{key}: ")
buffer.write(f"{' | '.join(extractTypeString(t) for t in types)} = None")
buffer.write(f"\n{indent}")
print(buffer.getvalue())
buffer.close()
if __name__ == "__main__":
typemap = typeMapFromFlatDict()
flatTypeMapToPythonClassString(typemap, "Doc", table="preview")
|
import math
from PyQt5.QtCore import QPointF
from PyQt5.QtGui import QPainterPath
EDGE_CP_ROUNDNESS = 100  #: Bezier control point distance on the line
class GraphicsEdgePathBase:
    """Base class for calculating the graphics path to draw for a graphics Edge"""
def __init__(self, owner: 'QDMGraphicsEdge'):
# keep the reference to owner GraphicsEdge class
self.owner = owner
    def calcPath(self):
        """Calculate the graphics path to draw; meant to be overridden in subclasses
        :returns: ``QPainterPath`` of the graphics path to draw
        :rtype: ``QPainterPath`` or ``None``
        """
        return None
class GraphicsEdgePathDirect(GraphicsEdgePathBase):
"""Direct line connection Graphics Edge"""
def calcPath(self) -> QPainterPath:
"""Calculate the Direct line connection
:returns: ``QPainterPath`` of the direct line
:rtype: ``QPainterPath``
"""
path = QPainterPath(QPointF(self.owner.posSource[0], self.owner.posSource[1]))
path.lineTo(self.owner.posDestination[0], self.owner.posDestination[1])
return path
class GraphicsEdgePathBezier(GraphicsEdgePathBase):
"""Cubic line connection Graphics Edge"""
def calcPath(self) -> QPainterPath:
"""Calculate the cubic Bezier line connection with 2 control points
:returns: ``QPainterPath`` of the cubic Bezier line
:rtype: ``QPainterPath``
"""
s = self.owner.posSource
d = self.owner.posDestination
dist = (d[0] - s[0]) * 0.5
cpx_s = +dist
cpx_d = -dist
cpy_s = 0
cpy_d = 0
if self.owner.edge.start_socket is not None:
ssin = self.owner.edge.start_socket.is_input
ssout = self.owner.edge.start_socket.is_output
if (s[0] > d[0] and ssout) or (s[0] < d[0] and ssin):
cpx_d *= -1
cpx_s *= -1
cpy_d = (
(s[1] - d[1]) / math.fabs(
(s[1] - d[1]) if (s[1] - d[1]) != 0 else 0.00001
)
) * EDGE_CP_ROUNDNESS
cpy_s = (
(d[1] - s[1]) / math.fabs(
(d[1] - s[1]) if (d[1] - s[1]) != 0 else 0.00001
)
) * EDGE_CP_ROUNDNESS
path = QPainterPath(QPointF(self.owner.posSource[0], self.owner.posSource[1]))
path.cubicTo( s[0] + cpx_s, s[1] + cpy_s, d[0] + cpx_d, d[1] + cpy_d, self.owner.posDestination[0], self.owner.posDestination[1])
return path
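# A minimal worked example (hypothetical coordinates, start_socket assumed to be None):
# with posSource s = (0, 0) and posDestination d = (100, 50), dist = (100 - 0) * 0.5 = 50,
# so the two Bezier control points become (s[0] + 50, s[1]) = (50, 0) and
# (d[0] - 50, d[1]) = (50, 50), giving a smooth horizontal S-curve between the sockets.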
|
# -*- coding: utf-8 -*-
# import numpy as np
import copy
from ast import literal_eval
from PyQt5.QtWidgets import (QMainWindow, QComboBox, QWidget, QGridLayout,
QDesktopWidget, QLabel, QVBoxLayout, QLineEdit,
QPushButton, QHBoxLayout, QTextEdit, QFileDialog)
class raman_preprocessing_window(QMainWindow):
def __init__(self, raman_data):
self.update_data(raman_data)
super().__init__()
self.init_window()
self.define_widgets()
self.init_baseline_parameters()
self.set_option_values()
self.position_widgets()
self.connect_event_handlers()
def init_window(self):
        self.setGeometry(500, 500, 600, 900)  # xPos, yPos, width, height
self.center() #center function is defined below
self.setWindowTitle('Spectrum preprocessing')
self.container0 = QWidget(self)
self.setCentralWidget(self.container0)
self.grid_container = QGridLayout()
self.container0.setLayout(self.grid_container)
def define_widgets(self):
self.processing_label = QLabel('<b>Spectrum processing string</b>')
self.edit_string_textedit = QTextEdit()
self.button_process_spectra = QPushButton('Apply to processed spectra')
self.button_reset_processing = QPushButton('Reset processing steps')
self.preprocessing_label = QLabel('<b><h1>Baseline options</h1></b>')
self.ALSS_label = QLabel('<b>ALSS baseline options</b>')
self.ALSS_lam_label = QLabel('lam',self.container0)
self.ALSS_lam_lineedit = QLineEdit(self.container0)
self.ALSS_p_label = QLabel('p',self.container0)
self.ALSS_p_lineedit = QLineEdit(self.container0)
self.ALSS_niter_label = QLabel('n_iter', self.container0)
self.ALSS_niter_lineedit = QLineEdit(self.container0)
self.iALSS_label = QLabel('<b>iALSS baseline options</b>')
self.iALSS_lam_label = QLabel('lam',self.container0)
self.iALSS_lam_lineedit = QLineEdit(self.container0)
self.iALSS_lam_1_label = QLabel('lam_1',self.container0)
self.iALSS_lam_1_lineedit = QLineEdit(self.container0)
self.iALSS_p_label = QLabel('p',self.container0)
self.iALSS_p_lineedit = QLineEdit(self.container0)
self.iALSS_niter_label = QLabel('n_iter', self.container0)
self.iALSS_niter_lineedit = QLineEdit(self.container0)
self.drPLS_label = QLabel('<b>drPLS baseline options</b>')
self.drPLS_lam_label = QLabel('lam',self.container0)
self.drPLS_lam_lineedit = QLineEdit(self.container0)
self.drPLS_eta_label = QLabel('eta',self.container0)
self.drPLS_eta_lineedit = QLineEdit(self.container0)
self.drPLS_niter_label = QLabel('n_iter', self.container0)
self.drPLS_niter_lineedit = QLineEdit(self.container0)
self.SNIP_label = QLabel('<b>SNIP baseline options</b>')
self.SNIP_niter_label = QLabel('n_iter',self.container0)
self.SNIP_niter_lineedit = QLineEdit(self.container0)
self.ModPoly_label = QLabel('<b>ModPoly and IModPoly baseline options</b>')
self.ModPoly_niter_label = QLabel('n_iter',self.container0)
self.ModPoly_niter_lineedit = QLineEdit(self.container0)
self.ModPoly_polyorder_label = QLabel('poly_order', self.container0)
self.ModPoly_polyorder_lineedit = QLineEdit(self.container0)
self.PPF_label = QLabel('<b>Piecewise polynomial baseline options</b>')
self.PPF_niter_label = QLabel('n_iter', self.container0)
self.PPF_niter_lineedit = QLineEdit(self.container0)
self.PPF_polyorders_label = QLabel('poly_orders', self.container0)
self.PPF_polyorders_lineedit = QLineEdit(self.container0)
self.PPF_segment_borders_label = QLabel('segment_borders', self.container0)
self.PPF_segment_borders_lineedit = QLineEdit(self.container0)
self.PPF_fit_method_label = QLabel('fit method', self.container0)
self.PPF_fit_method_lineedit = QLineEdit(self.container0)
self.sav_gol_label = QLabel('<b>Savitzky-Golay options</b>')
self.derivative_order_label = QLabel('Differentiation order', self.container0)
self.derivative_order_combo = QComboBox(self.container0)
self.derivative_order_combo.addItems(['0','1','2'])
self.savgol_points_label = QLabel('Data points for Savitzky-Golay (one side)', self.container0)
self.savgol_points_combo = QComboBox(self.container0)
self.savgol_points_combo.addItems(['0','1','2','3','4','5','6','7','8','9'])
self.median_filter_label = QLabel('<b>Median filter options</b>')
self.median_filter_points_label = QLabel('Window size',self.container0)
self.median_filter_points_combo = QComboBox(self.container0)
self.median_filter_points_combo.addItems(['3','5','7','9'])
self.pca_smoothing_label = QLabel('<b>PCA smoothing options</b>')
self.pca_smoothing_components_label = QLabel('Number of PCs',self.container0)
self.pca_smoothing_components_combo = QComboBox(self.container0)
self.pca_smoothing_components_combo.addItems(['1','2','3','4','5','6','7','8','9'])
self.clip_wn_label = QLabel('<b>Clip wavenumbers options</b>')
self.clip_wn_lower_limit_label = QLabel('Wavenumber limit string',self.container0)
self.clip_wn_lower_limit_lineedit = QLineEdit(self.container0)
self.reset_button = QPushButton('Reset defaults')
def position_widgets(self):
self.processing_layout = QVBoxLayout()
self.processing_layout.addWidget(self.edit_string_textedit)
self.processing_layout.addWidget(self.button_process_spectra)
self.processing_layout.addWidget(self.button_reset_processing)
self.ALSS_selection_layout = QHBoxLayout()
self.ALSS_selection_layout.addWidget(self.ALSS_lam_label)
self.ALSS_selection_layout.addWidget(self.ALSS_lam_lineedit)
self.ALSS_selection_layout.addWidget(self.ALSS_p_label)
self.ALSS_selection_layout.addWidget(self.ALSS_p_lineedit)
self.ALSS_selection_layout.addWidget(self.ALSS_niter_label)
self.ALSS_selection_layout.addWidget(self.ALSS_niter_lineedit)
self.iALSS_selection_layout = QHBoxLayout()
self.iALSS_selection_layout.addWidget(self.iALSS_lam_label)
self.iALSS_selection_layout.addWidget(self.iALSS_lam_lineedit)
self.iALSS_selection_layout.addWidget(self.iALSS_lam_1_label)
self.iALSS_selection_layout.addWidget(self.iALSS_lam_1_lineedit)
self.iALSS_selection_layout.addWidget(self.iALSS_p_label)
self.iALSS_selection_layout.addWidget(self.iALSS_p_lineedit)
self.iALSS_selection_layout.addWidget(self.iALSS_niter_label)
self.iALSS_selection_layout.addWidget(self.iALSS_niter_lineedit)
self.drPLS_selection_layout = QHBoxLayout()
self.drPLS_selection_layout.addWidget(self.drPLS_lam_label)
self.drPLS_selection_layout.addWidget(self.drPLS_lam_lineedit)
self.drPLS_selection_layout.addWidget(self.drPLS_eta_label)
self.drPLS_selection_layout.addWidget(self.drPLS_eta_lineedit)
self.drPLS_selection_layout.addWidget(self.drPLS_niter_label)
self.drPLS_selection_layout.addWidget(self.drPLS_niter_lineedit)
self.SNIP_selection_layout = QHBoxLayout()
self.SNIP_selection_layout.addWidget(self.SNIP_niter_label)
self.SNIP_selection_layout.addWidget(self.SNIP_niter_lineedit)
self.ModPoly_selection_layout = QHBoxLayout()
self.ModPoly_selection_layout.addWidget(self.ModPoly_niter_label)
self.ModPoly_selection_layout.addWidget(self.ModPoly_niter_lineedit)
self.ModPoly_selection_layout.addWidget(self.ModPoly_polyorder_label)
self.ModPoly_selection_layout.addWidget(self.ModPoly_polyorder_lineedit)
self.PPF_selection_layout = QHBoxLayout()
self.PPF_selection_layout.addWidget(self.PPF_niter_label)
self.PPF_selection_layout.addWidget(self.PPF_niter_lineedit)
self.PPF_selection_layout.addWidget(self.PPF_polyorders_label)
self.PPF_selection_layout.addWidget(self.PPF_polyorders_lineedit)
self.PPF_selection_layout.addWidget(self.PPF_segment_borders_label)
self.PPF_selection_layout.addWidget(self.PPF_segment_borders_lineedit)
self.PPF_selection_layout.addWidget(self.PPF_fit_method_label)
self.PPF_selection_layout.addWidget(self.PPF_fit_method_lineedit)
self.sav_gol_selection_layout = QHBoxLayout()
self.sav_gol_selection_layout.addWidget(self.savgol_points_label)
self.sav_gol_selection_layout.addWidget(self.savgol_points_combo)
self.sav_gol_selection_layout.addWidget(self.derivative_order_label)
self.sav_gol_selection_layout.addWidget(self.derivative_order_combo)
self.median_filter_layout = QHBoxLayout()
self.median_filter_layout.addWidget(self.median_filter_points_label)
self.median_filter_layout.addWidget(self.median_filter_points_combo)
self.pca_smoothing_layout = QHBoxLayout()
self.pca_smoothing_layout.addWidget(self.pca_smoothing_components_label)
self.pca_smoothing_layout.addWidget(self.pca_smoothing_components_combo)
self.clip_wn_lower_limit_selection_layout = QHBoxLayout()
self.clip_wn_lower_limit_selection_layout.addWidget(self.clip_wn_lower_limit_label)
self.clip_wn_lower_limit_selection_layout.addWidget(self.clip_wn_lower_limit_lineedit)
self.preprocessing_layout = QVBoxLayout()
self.preprocessing_layout.addWidget(self.processing_label)
self.preprocessing_layout.addLayout(self.processing_layout)
self.preprocessing_layout.addWidget(self.preprocessing_label)
self.preprocessing_layout.addWidget(self.ALSS_label)
self.preprocessing_layout.addLayout(self.ALSS_selection_layout)
self.preprocessing_layout.addWidget(self.iALSS_label)
self.preprocessing_layout.addLayout(self.iALSS_selection_layout)
self.preprocessing_layout.addWidget(self.drPLS_label)
self.preprocessing_layout.addLayout(self.drPLS_selection_layout)
self.preprocessing_layout.addWidget(self.SNIP_label)
self.preprocessing_layout.addLayout(self.SNIP_selection_layout)
self.preprocessing_layout.addWidget(self.ModPoly_label)
self.preprocessing_layout.addLayout(self.ModPoly_selection_layout)
self.preprocessing_layout.addWidget(self.PPF_label)
self.preprocessing_layout.addLayout(self.PPF_selection_layout)
self.preprocessing_layout.addWidget(self.preprocessing_label)
self.preprocessing_layout.addWidget(self.sav_gol_label)
self.preprocessing_layout.addLayout(self.sav_gol_selection_layout)
self.preprocessing_layout.addWidget(self.median_filter_label)
self.preprocessing_layout.addLayout(self.median_filter_layout)
self.preprocessing_layout.addWidget(self.pca_smoothing_label)
self.preprocessing_layout.addLayout(self.pca_smoothing_layout)
self.preprocessing_layout.addWidget(self.clip_wn_label)
self.preprocessing_layout.addLayout(self.clip_wn_lower_limit_selection_layout)
self.preprocessing_layout.addStretch(1)
self.preprocessing_layout.addWidget(self.reset_button)
self.grid_container.addLayout(self.preprocessing_layout, *(0,0),1,1)
def connect_event_handlers(self):
self.button_process_spectra.clicked.connect(self.process_spectra)
self.button_reset_processing.clicked.connect(self.raman_data.reset_processed_data)
self.ALSS_lam_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.ALSS_p_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.ALSS_niter_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.iALSS_lam_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.iALSS_lam_1_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.iALSS_p_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.iALSS_niter_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.drPLS_lam_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.drPLS_eta_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.drPLS_niter_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.SNIP_niter_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.ModPoly_niter_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.ModPoly_polyorder_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.PPF_niter_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.PPF_polyorders_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.PPF_segment_borders_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.PPF_fit_method_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.savgol_points_combo.currentIndexChanged.connect(
self.update_processing_parameters)
self.median_filter_points_combo.currentIndexChanged.connect(
self.update_processing_parameters)
self.derivative_order_combo.currentIndexChanged.connect(
self.update_processing_parameters)
self.pca_smoothing_components_combo.currentIndexChanged.connect(
self.update_processing_parameters)
self.clip_wn_lower_limit_lineedit.editingFinished.connect(
self.update_processing_parameters)
self.reset_button.clicked.connect(self.reset_defaults)
def update_data(self, raman_data):
self.raman_data = raman_data
def set_option_values(self, mode='initial'):
if mode == 'default':
current_options_dict = self.edit_args_dict_default
else:
current_options_dict = self.edit_args_dict
self.ALSS_lam_lineedit.setText(
str(current_options_dict['ALSS']['lam']))
self.ALSS_p_lineedit.setText(str(current_options_dict['ALSS']['p']))
self.ALSS_niter_lineedit.setText(
str(current_options_dict['ALSS']['n_iter']))
self.iALSS_lam_lineedit.setText(
str(current_options_dict['iALSS']['lam']))
self.iALSS_lam_1_lineedit.setText(
str(current_options_dict['iALSS']['lam_1']))
self.iALSS_p_lineedit.setText(str(current_options_dict['iALSS']['p']))
self.iALSS_niter_lineedit.setText(
str(current_options_dict['iALSS']['n_iter']))
self.drPLS_lam_lineedit.setText(
str(current_options_dict['drPLS']['lam']))
self.drPLS_eta_lineedit.setText(
str(current_options_dict['drPLS']['eta']))
self.drPLS_niter_lineedit.setText(
str(current_options_dict['drPLS']['n_iter']))
self.SNIP_niter_lineedit.setText(
str(current_options_dict['SNIP']['n_iter']))
self.ModPoly_niter_lineedit.setText(
str(current_options_dict['ModPoly']['n_iter']))
self.ModPoly_polyorder_lineedit.setText(
str(current_options_dict['IModPoly']['poly_order']))
self.PPF_niter_lineedit.setText(
str(current_options_dict['PPF']['n_iter']))
self.PPF_polyorders_lineedit.setText(
str(current_options_dict['PPF']['poly_orders']))
self.PPF_segment_borders_lineedit.setText(
str(current_options_dict['PPF']['segment_borders']))
self.PPF_fit_method_lineedit.setText(
str(current_options_dict['PPF']['fit_method']))
self.derivative_order_combo.setCurrentIndex(
self.derivative_order_combo.findText(
str(current_options_dict['sav_gol']['deriv'])))
self.savgol_points_combo.setCurrentIndex(
self.savgol_points_combo.findText(
str(current_options_dict['sav_gol']['savgol_points'])))
self.median_filter_points_combo.setCurrentIndex(
self.median_filter_points_combo.findText(
str(current_options_dict['median_filter']['window'])))
self.pca_smoothing_components_combo.setCurrentIndex(
self.pca_smoothing_components_combo.findText(
str(current_options_dict['pca_smoothing']['pca_components'])))
self.clip_wn_lower_limit_lineedit.setText(
str(current_options_dict['clip_wn']['wn_limits']))
# def save_options_to_file(self):
# save_file_name,__ = QFileDialog.getSaveFileName(self,'Save option settings to file','','*.npy')
# np.save(save_file_name,self.edit_args_dict)
# def read_options_from_file(self):
# open_file_name,__ = QFileDialog.getOpenFileName(self,'Read option settings from file','','*.npy')
# self.edit_args_dict = np.load(open_file_name).item()
# self.set_option_values()
def reset_defaults(self):
self.set_option_values(mode='default')
    def center(self):  # centers the window on the screen
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def init_baseline_parameters(self):
self.edit_args_dict = {
'SNIP': {
'n_iter': 100},
'ALSS': {
'lam': 10000, 'p': 0.001, 'n_iter': 10},
'iALSS': {
'lam': 2000, 'lam_1': 0.01, 'p': 0.01, 'n_iter': 10},
'drPLS': {
'lam': 1000000, 'eta': 0.5, 'n_iter': 100},
'ModPoly': {
'n_iter': 100, 'poly_order': 5},
'IModPoly': {
'n_iter': 100, 'poly_order': 5},
'PPF': {
'n_iter': 100, 'poly_orders': [3, 3],
'segment_borders': [1000], 'fit_method': 'ModPoly'},
'convex_hull': {},
'sav_gol': {
'deriv': 0, 'savgol_points': 9},
'median_filter': {
'window': 5},
'pca_smoothing': {
'pca_components': 3},
'SNV': {},
'clip_wn': {
'wn_limits': [[1100, 1500]]},
'mean_center': {},
'total_intensity': {}}
self.edit_args_dict_default = copy.deepcopy(self.edit_args_dict)
def update_processing_parameters(self):
self.edit_args_dict = {
'SNIP': {
'n_iter': int(self.SNIP_niter_lineedit.text())},
'ALSS': {
'lam': float(self.ALSS_lam_lineedit.text()),
'p': float(self.ALSS_p_lineedit.text()),
'n_iter': int(self.ALSS_niter_lineedit.text())},
'iALSS': {
'lam': float(self.iALSS_lam_lineedit.text()),
'lam_1': float(self.iALSS_lam_1_lineedit.text()),
'p': float(self.iALSS_p_lineedit.text()),
'n_iter': int(self.iALSS_niter_lineedit.text())},
'drPLS': {
'lam': float(self.drPLS_lam_lineedit.text()),
'eta': float(self.drPLS_eta_lineedit.text()),
'n_iter': int(self.drPLS_niter_lineedit.text())},
'ModPoly': {
                'n_iter': int(self.ModPoly_niter_lineedit.text()),
'poly_order': int(self.ModPoly_polyorder_lineedit.text())},
'IModPoly': {
                'n_iter': int(self.ModPoly_niter_lineedit.text()),
'poly_order': int(self.ModPoly_polyorder_lineedit.text())},
'PPF': {
'n_iter': int(self.PPF_niter_lineedit.text()),
'poly_orders': literal_eval(self.PPF_polyorders_lineedit.text()),
'segment_borders': literal_eval(self.PPF_segment_borders_lineedit.text()),
'fit_method': self.PPF_fit_method_lineedit.text()},
'convex_hull': {},
'sav_gol':{
'deriv':int(self.derivative_order_combo.currentText()),
'savgol_points':int(self.savgol_points_combo.currentText())},
'median_filter':{
'window':int(self.median_filter_points_combo.currentText())},
'pca_smoothing':{
'pca_components':int(
self.pca_smoothing_components_combo.currentText())},
'SNV':{},
'clip_wn':{
'wn_limits':literal_eval(
self.clip_wn_lower_limit_lineedit.text())},
'mean_center':{},
'total_intensity':{}}
def process_spectra(self):
edit_string = self.edit_string_textedit.toPlainText()
edit_mods = edit_string.split(',') if edit_string != '' else []
# self.raman_data.reset_processed_data()
for edit_arg in edit_mods:
if edit_arg in ['SNIP', 'ALSS', 'iALSS', 'drPLS', 'ModPoly',
'IModPoly', 'PPF', 'convex_hull']:
self.raman_data.baseline_correction(
mode=edit_arg, **self.edit_args_dict[edit_arg])
elif edit_arg == 'sav_gol':
self.raman_data.smoothing(
'sav_gol', **self.edit_args_dict[edit_arg])
elif edit_arg == 'median_filter':
self.raman_data.smoothing(
'rolling_median', **self.edit_args_dict[edit_arg])
elif edit_arg == 'pca_smoothing':
self.raman_data.smoothing(
'pca', **self.edit_args_dict[edit_arg])
elif edit_arg == 'SNV':
self.raman_data.standard_normal_variate(
**self.edit_args_dict[edit_arg])
elif edit_arg == 'clip_wn':
self.raman_data.clip_wavenumbers(
**self.edit_args_dict[edit_arg])
elif edit_arg == 'mean_center':
self.raman_data.mean_center(**self.edit_args_dict[edit_arg])
elif edit_arg == 'total_intensity':
self.raman_data.normalize('total_intensity',
**self.edit_args_dict[edit_arg])
        print('Preprocessing finished!')
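# A hypothetical processing-string example (not part of the original source): entering
# "SNIP,sav_gol,SNV" in the text box and clicking "Apply to processed spectra" runs SNIP
# baseline correction, Savitzky-Golay smoothing and standard normal variate scaling in that
# order, each with the parameters currently stored in edit_args_dict. Entries are
# comma-separated without spaces, matching the keys handled in process_spectra.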
|
# ========================================================================
# Copyright 2020 Emory University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
# -*- coding:utf-8 -*-
# Author: hankcs, Liyan Xu
from typing import List
from elit.components.tokenizer import EnglishTokenizer
tokenizer = EnglishTokenizer()
def eos(text: List[str]) -> List[List[str]]:
results = []
for doc in text:
tokens = tokenizer.tokenize(doc)
sents = tokenizer.segment(tokens)
results.append(['\0'.join(x) for x in sents])
return results
def tokenize(sents: List[str]) -> List[List[str]]:
    # Text from the user seldom contains \0, so we use \0 to mark a pre-tokenized sentence
return [x.split('\0') if '\0' in x else tokenizer.tokenize(x) for x in sents]
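# A minimal usage sketch (hypothetical input): eos() joins each detected sentence's tokens
# with '\0', which tokenize() then recognizes and splits back without re-tokenizing.
# sentences = eos(["Hello world. How are you?"])[0]
# tokens_per_sentence = tokenize(sentences)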
|
import torch
import torch.fx.experimental.fx_acc.acc_ops as acc_ops
from torch.testing._internal.common_fx2trt import AccTestCase
from torch.fx.experimental.fx2trt.passes.fuse_pass import (
fuse_permute_linear,
trt_transposed_linear,
)
from torch.testing._internal.common_utils import run_tests
class TestFusePermuteLinear(AccTestCase):
def test_fuse_permute_linear(self):
class TestModule(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear = torch.nn.Linear(in_features, out_features)
def forward(self, x):
return self.linear(x.permute(0, 2, 1))
inputs = [torch.randn(6, 10, 20)]
a = TestModule(10, 30)
self.run_test(
TestModule(10, 30),
inputs,
{trt_transposed_linear},
apply_passes=[fuse_permute_linear],
)
def test_fuse_permute_linear_keep_permute(self):
"""
        Fusion while keeping the permute node, since the permute has more than one consumer
"""
class TestModule(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear = torch.nn.Linear(in_features, out_features)
def forward(self, x):
y = x.permute(0, 2, 1)
return self.linear(y), y
inputs = [torch.randn(6, 10, 20)]
a = TestModule(10, 30)
self.run_test(
TestModule(10, 30),
inputs,
{acc_ops.permute, trt_transposed_linear},
apply_passes=[fuse_permute_linear],
)
def test_multi_fuse_permute_linear(self):
"""
Fusion when permute output is shared by multiple linears
"""
class TestModule(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear1 = torch.nn.Linear(in_features, out_features)
self.linear2 = torch.nn.Linear(in_features, out_features)
def forward(self, x):
y = x.permute(0, 2, 1)
return self.linear1(y) + self.linear2(y)
inputs = [torch.randn(8, 10, 20)]
a = TestModule(10, 30)
self.run_test(
TestModule(10, 30),
inputs,
{trt_transposed_linear},
apply_passes=[fuse_permute_linear],
)
if __name__ == '__main__':
run_tests()
|
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Ops to implement gradient clipping."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def huber_loss(input_tensor, quadratic_linear_boundary, name=None):
"""Calculates huber loss of `input_tensor`.
For each value x in `input_tensor`, the following is calculated:
```
0.5 * x^2 if |x| <= d
0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `quadratic_linear_boundary`.
When `input_tensor` is a loss this results in a form of gradient clipping.
This is, for instance, how gradients are clipped in DQN and its variants.
Args:
input_tensor: `Tensor`, input values to calculate the huber loss on.
quadratic_linear_boundary: `float`, the point where the huber loss function
changes from a quadratic to linear.
name: `string`, name for the operation (optional).
Returns:
`Tensor` of the same shape as `input_tensor`, containing values calculated
in the manner described above.
Raises:
ValueError: if quadratic_linear_boundary <= 0.
"""
  if quadratic_linear_boundary <= 0:
raise ValueError("quadratic_linear_boundary must be > 0.")
with tf.name_scope(
name, default_name="huber_loss",
values=[input_tensor, quadratic_linear_boundary]):
abs_x = tf.abs(input_tensor)
delta = quadratic_linear_boundary
quad = tf.minimum(abs_x, delta)
# The following expression is the same in value as
# tf.maximum(abs_x - delta, 0), but importantly the gradient for the
# expression when abs_x == delta is 0 (for tf.maximum it would be 1). This
# is necessary to avoid doubling the gradient, since there is already a
# non-zero contribution to the gradient from the quadratic term.
lin = (abs_x - quad)
return 0.5 * quad**2 + delta * lin
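# A minimal usage sketch (assuming eager execution or a session to evaluate the result):
# with quadratic_linear_boundary d = 1.0, inputs inside [-1, 1] follow 0.5 * x**2 while
# larger inputs grow linearly as 0.5 * d**2 + d * (|x| - d).
# x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
# huber_loss(x, 1.0)  # -> [1.5, 0.125, 0.0, 0.125, 1.5]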
|
#!/usr/bin/env python3
# Copyright (c) 2020 Sparkbase AG
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php.
"""
Test checking:
1) Masternodes setup/creation.
2) Proposal creation.
3) Vote creation.
4) Proposal and vote broadcast.
5) Proposal and vote sync.
"""
import time
from test_framework.messages import COutPoint
from test_framework.test_framework import SparkTier2TestFramework
from test_framework.util import (
assert_equal,
assert_true,
satoshi_round,
)
class MasternodeGovernanceBasicTest(SparkTier2TestFramework):
def check_mns_status_legacy(self, node, txhash):
status = node.getmasternodestatus()
assert_equal(status["txhash"], txhash)
assert_equal(status["message"], "Masternode successfully started")
def check_mns_status(self, node, txhash):
status = node.getmasternodestatus()
assert_equal(status["proTxHash"], txhash)
assert_equal(status["dmnstate"]["PoSePenalty"], 0)
assert_equal(status["status"], "Ready")
def check_mn_list(self, node, txHashSet):
# check masternode list from node
mnlist = node.listmasternodes()
assert_equal(len(mnlist), 3)
foundHashes = set([mn["txhash"] for mn in mnlist if mn["txhash"] in txHashSet])
assert_equal(len(foundHashes), len(txHashSet))
def check_budget_finalization_sync(self, votesCount, status):
for i in range(0, len(self.nodes)):
node = self.nodes[i]
budFin = node.mnfinalbudget("show")
            assert_true(len(budFin) == 1, "MN budget finalization not synced in node " + str(i))
budget = budFin[next(iter(budFin))]
assert_equal(budget["VoteCount"], votesCount)
assert_equal(budget["Status"], status)
def broadcastbudgetfinalization(self, node, with_ping_mns=[]):
self.log.info("suggesting the budget finalization..")
assert (node.mnfinalbudgetsuggest() is not None)
self.log.info("confirming the budget finalization..")
time.sleep(1)
self.stake(4, with_ping_mns)
self.log.info("broadcasting the budget finalization..")
return node.mnfinalbudgetsuggest()
def check_proposal_existence(self, proposalName, proposalHash):
for node in self.nodes:
proposals = node.getbudgetinfo(proposalName)
assert(len(proposals) > 0)
assert_equal(proposals[0]["Hash"], proposalHash)
def check_vote_existence(self, proposalName, mnCollateralHash, voteType, voteValid):
for i in range(0, len(self.nodes)):
node = self.nodes[i]
node.syncwithvalidationinterfacequeue()
votesInfo = node.getbudgetvotes(proposalName)
assert(len(votesInfo) > 0)
found = False
for voteInfo in votesInfo:
if (voteInfo["mnId"].split("-")[0] == mnCollateralHash) :
assert_equal(voteInfo["Vote"], voteType)
assert_equal(voteInfo["fValid"], voteValid)
found = True
assert_true(found, "Error checking vote existence in node " + str(i))
def get_proposal_obj(self, Name, URL, Hash, FeeHash, BlockStart, BlockEnd,
TotalPaymentCount, RemainingPaymentCount, PaymentAddress,
Ratio, Yeas, Nays, Abstains, TotalPayment, MonthlyPayment,
IsEstablished, IsValid, Allotted, TotalBudgetAllotted, IsInvalidReason = ""):
obj = {}
obj["Name"] = Name
obj["URL"] = URL
obj["Hash"] = Hash
obj["FeeHash"] = FeeHash
obj["BlockStart"] = BlockStart
obj["BlockEnd"] = BlockEnd
obj["TotalPaymentCount"] = TotalPaymentCount
obj["RemainingPaymentCount"] = RemainingPaymentCount
obj["PaymentAddress"] = PaymentAddress
obj["Ratio"] = Ratio
obj["Yeas"] = Yeas
obj["Nays"] = Nays
obj["Abstains"] = Abstains
obj["TotalPayment"] = TotalPayment
obj["MonthlyPayment"] = MonthlyPayment
obj["IsEstablished"] = IsEstablished
obj["IsValid"] = IsValid
if IsInvalidReason != "":
obj["IsInvalidReason"] = IsInvalidReason
obj["Allotted"] = Allotted
obj["TotalBudgetAllotted"] = TotalBudgetAllotted
return obj
def check_budgetprojection(self, expected):
for i in range(self.num_nodes):
assert_equal(self.nodes[i].getbudgetprojection(), expected)
self.log.info("Budget projection valid for node %d" % i)
def run_test(self):
self.enable_mocktime()
self.setup_3_masternodes_network()
txHashSet = set([self.mnOneCollateral.hash, self.mnTwoCollateral.hash, self.proRegTx1])
# check mn list from miner
self.check_mn_list(self.miner, txHashSet)
# check status of masternodes
self.check_mns_status_legacy(self.remoteOne, self.mnOneCollateral.hash)
self.log.info("MN1 active")
self.check_mns_status_legacy(self.remoteTwo, self.mnTwoCollateral.hash)
self.log.info("MN2 active")
self.check_mns_status(self.remoteDMN1, self.proRegTx1)
self.log.info("DMN1 active")
# Prepare the proposal
self.log.info("preparing budget proposal..")
firstProposalName = "super-cool"
firstProposalLink = "https://forum.spark.org/t/test-proposal"
firstProposalCycles = 2
firstProposalAddress = self.miner.getnewaddress()
firstProposalAmountPerCycle = 300
nextSuperBlockHeight = self.miner.getnextsuperblock()
proposalFeeTxId = self.miner.preparebudget(
firstProposalName,
firstProposalLink,
firstProposalCycles,
nextSuperBlockHeight,
firstProposalAddress,
firstProposalAmountPerCycle)
# generate 3 blocks to confirm the tx (and update the mnping)
self.stake(3, [self.remoteOne, self.remoteTwo])
# activate sporks
self.activate_spork(self.minerPos, "SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT")
self.activate_spork(self.minerPos, "SPORK_9_MASTERNODE_BUDGET_ENFORCEMENT")
self.activate_spork(self.minerPos, "SPORK_13_ENABLE_SUPERBLOCKS")
txinfo = self.miner.gettransaction(proposalFeeTxId)
assert_equal(txinfo['amount'], -50.00)
self.log.info("submitting the budget proposal..")
proposalHash = self.miner.submitbudget(
firstProposalName,
firstProposalLink,
firstProposalCycles,
nextSuperBlockHeight,
firstProposalAddress,
firstProposalAmountPerCycle,
proposalFeeTxId)
        # let's wait a little bit and see if all nodes are synced
time.sleep(1)
self.check_proposal_existence(firstProposalName, proposalHash)
self.log.info("proposal broadcast successful!")
# Proposal is established after 5 minutes. Mine 7 blocks
# Proposal needs to be on the chain > 5 min.
self.stake(7, [self.remoteOne, self.remoteTwo])
# now let's vote for the proposal with the first MN
self.log.info("Voting with MN1...")
voteResult = self.ownerOne.mnbudgetvote("alias", proposalHash, "yes", self.masternodeOneAlias, True)
assert_equal(voteResult["detail"][0]["result"], "success")
# check that the vote was accepted everywhere
self.stake(1, [self.remoteOne, self.remoteTwo])
self.check_vote_existence(firstProposalName, self.mnOneCollateral.hash, "YES", True)
self.log.info("all good, MN1 vote accepted everywhere!")
# now let's vote for the proposal with the second MN
self.log.info("Voting with MN2...")
voteResult = self.ownerTwo.mnbudgetvote("alias", proposalHash, "yes", self.masternodeTwoAlias, True)
assert_equal(voteResult["detail"][0]["result"], "success")
# check that the vote was accepted everywhere
self.stake(1, [self.remoteOne, self.remoteTwo])
self.check_vote_existence(firstProposalName, self.mnTwoCollateral.hash, "YES", True)
self.log.info("all good, MN2 vote accepted everywhere!")
# now let's vote for the proposal with the first DMN
self.log.info("Voting with DMN1...")
voteResult = self.ownerOne.mnbudgetvote("alias", proposalHash, "yes", self.proRegTx1)
assert_equal(voteResult["detail"][0]["result"], "success")
# check that the vote was accepted everywhere
self.stake(1, [self.remoteOne, self.remoteTwo])
self.check_vote_existence(firstProposalName, self.proRegTx1, "YES", True)
self.log.info("all good, DMN1 vote accepted everywhere!")
# Now check the budget
blockStart = nextSuperBlockHeight
blockEnd = blockStart + firstProposalCycles * 145
TotalPayment = firstProposalAmountPerCycle * firstProposalCycles
Allotted = firstProposalAmountPerCycle
RemainingPaymentCount = firstProposalCycles
expected_budget = [
self.get_proposal_obj(firstProposalName, firstProposalLink, proposalHash, proposalFeeTxId, blockStart,
blockEnd, firstProposalCycles, RemainingPaymentCount, firstProposalAddress, 1,
3, 0, 0, satoshi_round(TotalPayment), satoshi_round(firstProposalAmountPerCycle),
True, True, satoshi_round(Allotted), satoshi_round(Allotted))
]
self.check_budgetprojection(expected_budget)
# Quick block count check.
assert_equal(self.ownerOne.getblockcount(), 276)
self.log.info("starting budget finalization sync test..")
self.stake(5, [self.remoteOne, self.remoteTwo])
# assert that there is no budget finalization first.
assert_true(len(self.ownerOne.mnfinalbudget("show")) == 0)
# suggest the budget finalization and confirm the tx (+4 blocks).
budgetFinHash = self.broadcastbudgetfinalization(self.miner,
with_ping_mns=[self.remoteOne, self.remoteTwo])
assert (budgetFinHash != "")
time.sleep(1)
self.log.info("checking budget finalization sync..")
self.check_budget_finalization_sync(0, "OK")
self.log.info("budget finalization synced!, now voting for the budget finalization..")
voteResult = self.ownerOne.mnfinalbudget("vote-many", budgetFinHash, True)
assert_equal(voteResult["detail"][0]["result"], "success")
self.log.info("Remote One voted successfully.")
voteResult = self.ownerTwo.mnfinalbudget("vote-many", budgetFinHash, True)
assert_equal(voteResult["detail"][0]["result"], "success")
self.log.info("Remote Two voted successfully.")
voteResult = self.remoteDMN1.mnfinalbudget("vote", budgetFinHash)
assert_equal(voteResult["detail"][0]["result"], "success")
self.log.info("DMN voted successfully.")
self.stake(2, [self.remoteOne, self.remoteTwo])
self.log.info("checking finalization votes..")
self.check_budget_finalization_sync(3, "OK")
self.stake(8, [self.remoteOne, self.remoteTwo])
addrInfo = self.miner.listreceivedbyaddress(0, False, False, firstProposalAddress)
assert_equal(addrInfo[0]["amount"], firstProposalAmountPerCycle)
self.log.info("budget proposal paid!, all good")
# Check that the proposal info returns updated payment count
expected_budget[0]["RemainingPaymentCount"] -= 1
self.check_budgetprojection(expected_budget)
self.stake(1, [self.remoteOne, self.remoteTwo])
# now let's verify that votes expire properly.
# Drop one MN and one DMN
self.log.info("expiring MN1..")
self.spend_collateral(self.ownerOne, self.mnOneCollateral, self.miner)
self.wait_until_mn_vinspent(self.mnOneCollateral.hash, 30, [self.remoteTwo])
        self.stake(15, [self.remoteTwo]) # create blocks to remove stale votes
time.sleep(2) # wait a little bit
self.check_vote_existence(firstProposalName, self.mnOneCollateral.hash, "YES", False)
self.log.info("MN1 vote expired after collateral spend, all good")
self.log.info("expiring DMN1..")
lm = self.ownerOne.listmasternodes(self.proRegTx1)[0]
self.spend_collateral(self.ownerOne, COutPoint(lm["collateralHash"], lm["collateralIndex"]), self.miner)
self.wait_until_mn_vinspent(self.proRegTx1, 30, [self.remoteTwo])
        self.stake(15, [self.remoteTwo]) # create blocks to remove stale votes
time.sleep(2) # wait a little bit
self.check_vote_existence(firstProposalName, self.proRegTx1, "YES", False)
self.log.info("DMN vote expired after collateral spend, all good")
if __name__ == '__main__':
MasternodeGovernanceBasicTest().main()
|
from dataclasses import dataclass
from fibo.types.blockchain_format.coin import Coin
from fibo.types.blockchain_format.sized_bytes import bytes32
from fibo.util.ints import uint32
from fibo.wallet.util.wallet_types import WalletType
@dataclass(frozen=True)
class WalletCoinRecord:
"""
These are values that correspond to a CoinName that are used
in keeping track of the unspent database.
"""
coin: Coin
confirmed_block_height: uint32
spent_block_height: uint32
spent: bool
coinbase: bool
wallet_type: WalletType
wallet_id: int
def name(self) -> bytes32:
return self.coin.name()
|
#!/usr/bin/env python
import sklearn as sk
import numpy as np
np.random.seed(1337)
import et_cleartk_io as ctk_io
import nn_models
import sys
import os.path
import dataset_hybrid
import keras as k
from keras.utils.np_utils import to_categorical
from keras.optimizers import RMSprop
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Merge
from keras.layers.core import Dense, Activation
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
import pickle
def main(args):
if len(args) < 1:
sys.stderr.write("Error - one required argument: <data directory>\n")
sys.exit(-1)
working_dir = args[0]
data_file = os.path.join(working_dir, 'training-data.liblinear')
# learn alphabet from training data
provider = dataset_hybrid.DatasetProvider(data_file)
# now load training examples and labels
train_x1, train_x2, train_y = provider.load(data_file)
# turn x and y into numpy array among other things
maxlen = max([len(seq) for seq in train_x1])
classes = len(set(train_y))
train_x1 = pad_sequences(train_x1, maxlen=maxlen)
train_x2 = pad_sequences(train_x2, maxlen=maxlen)
train_y = to_categorical(np.array(train_y), classes)
pickle.dump(maxlen, open(os.path.join(working_dir, 'maxlen.p'),"wb"))
pickle.dump(provider.word2int, open(os.path.join(working_dir, 'word2int.p'),"wb"))
pickle.dump(provider.tag2int, open(os.path.join(working_dir, 'tag2int.p'),"wb"))
pickle.dump(provider.label2int, open(os.path.join(working_dir, 'label2int.p'),"wb"))
print 'train_x1 shape:', train_x1.shape
print 'train_x2 shape:', train_x2.shape
print 'train_y shape:', train_y.shape
branches = [] # models to be merged
train_xs = [] # train x for each branch
branch1 = Sequential()
branch1.add(Embedding(len(provider.word2int),
300,
input_length=maxlen,
dropout=0.25))
branch1.add(LSTM(128,
dropout_W = 0.20,
dropout_U = 0.20))
branches.append(branch1)
train_xs.append(train_x1)
branch2 = Sequential()
branch2.add(Embedding(len(provider.tag2int),
300,
input_length=maxlen,
dropout=0.25))
branch2.add(LSTM(128,
dropout_W = 0.20,
dropout_U = 0.20))
branches.append(branch2)
train_xs.append(train_x2)
model = Sequential()
model.add(Merge(branches, mode='concat'))
model.add(Dense(classes))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08)
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
model.fit(train_xs,
train_y,
nb_epoch=25,
batch_size=50,
verbose=0,
validation_split=0.1)
json_string = model.to_json()
open(os.path.join(working_dir, 'model_0.json'), 'w').write(json_string)
model.save_weights(os.path.join(working_dir, 'model_0.h5'), overwrite=True)
sys.exit(0)
if __name__ == "__main__":
main(sys.argv[1:])
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model(['BoxCox'], ['MovingAverage'], ['Seasonal_Hour'], ['SVR'])
|
# Vanishing Journey Damage Skin
success = sm.addDamageSkin(2435972)
if success:
sm.chat("The Vanishing Journey Damage Skin has been added to your account's damage skin collection.")
# sm.consumeItem(2435972)
|
from .serving import run_simple as run_simple
from .test import Client as Client
from .wrappers import Request as Request
from .wrappers import Response as Response
__version__ = "2.0.3"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Narrative) on 2019-05-07.
# 2019, SMART Health IT.
from . import element
class Narrative(element.Element):
""" Human-readable summary of the resource (essential clinical and business
information).
A human-readable summary of the resource conveying the essential clinical
and business information for the resource.
"""
resource_type = "Narrative"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.div = None
""" Limited xhtml content.
Type `str`. """
self.status = None
""" generated | extensions | additional | empty.
Type `str`. """
super(Narrative, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Narrative, self).elementProperties()
js.extend([
("div", "div", str, False, None, True),
("status", "status", str, False, None, True),
])
return js
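# A minimal usage sketch (hypothetical values): both properties listed in elementProperties()
# are required, so a valid narrative needs "status" and "div".
# summary = Narrative({"status": "generated",
#                      "div": "<div xmlns=\"http://www.w3.org/1999/xhtml\">Patient summary</div>"})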
|
# python3 Steven Otsu binary segmentation
import cv2
from ImageBase import loadImg, plotImg, grayImg, binaryImage
# from mainImageHist import plotImagAndHist, plotImgHist
import matplotlib.pyplot as plt
import numpy as np
def calculateOtsu(img):
hist = cv2.calcHist([img], [0], None, [256], [0, 256])
hist_norm = hist.ravel() / hist.max()
Q = hist_norm.cumsum()
bins = np.arange(256)
fn_min = np.inf
thresh = -1
fns = []
for i in range(1, 256):
p1, p2 = np.hsplit(hist_norm, [i]) # probabilities
q1, q2 = Q[i], Q[255] - Q[i] # cum sum of classes
b1, b2 = np.hsplit(bins, [i]) # weights
# finding means and variances
m1, m2 = np.sum(p1 * b1) / q1, np.sum(p2 * b2) / q2
v1, v2 = np.sum(((b1 - m1)**2) * p1) / q1, np.sum(((b2 - m2)**2) * p2) / q2
# calculates the minimization function
fn = v1 * q1 + v2 * q2
fns.append(fn)
if fn < fn_min:
fn_min = fn
thresh = i
return hist, fns, thresh
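# A minimal cross-check sketch: OpenCV's built-in Otsu implementation should select the
# same (or an adjacent) threshold as calculateOtsu above; with cv2.THRESH_OTSU the passed
# threshold value is ignored and the computed one is returned.
def otsuViaOpenCV(gray):
    # returns (computed_threshold, binarized_image)
    ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return ret, binary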
def plotHistAndOtsu(hist, otsu, thre):
ax = plt.subplot(1, 1, 1)
ax.set_title('Otsu')
ax.plot(range(len(hist)), hist, label='hist')
ax.plot(range(len(otsu)), otsu, label='otsu')
# print('hist=',hist)
# print('otsu=',otsu)
hist = hist[~np.isnan(hist)]
otsu = np.array(otsu)
otsu = otsu[~np.isnan(otsu)]
# print('max1=',np.max(hist))
# print('max2=',np.max(otsu))
# print('max=',max(np.max(hist),np.max(otsu)))
ax.vlines(thre, ymin=0, ymax=max(np.max(hist), np.max(otsu)), linestyles='dashdot', color='r', label='optimal thresh')
ax.set_xlabel('pixsel')
ax.set_ylabel('Hist&Otsu')
ax.legend()
plt.show()
def main():
img = loadImg(r'.\res\cap58.jpg') # otsus_algorithm.jpg Lenna.png
# plotImagAndHist(img)
print(np.arange(1, 10))
# plotImgHist(img)
# plt.show()
gray = grayImg(img)
hist, fns, thres = calculateOtsu(gray)
plotHistAndOtsu(hist, fns, thres)
print('Otsu thres=', thres)
plotImg(binaryImage(gray, thres), gray=True)
if __name__ == "__main__":
main()
|
"""This module contains the general information for AdaptorEthRdmaProfile ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class AdaptorEthRdmaProfileConsts:
pass
class AdaptorEthRdmaProfile(ManagedObject):
"""This is AdaptorEthRdmaProfile class."""
consts = AdaptorEthRdmaProfileConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("AdaptorEthRdmaProfile", "adaptorEthRdmaProfile", "rdmaprofile", VersionMeta.Version2013e, "InputOutput", 0x7f, [], ["admin", "read-only", "user"], [u'adaptorHostEthIf'], [], ["Get", "Set"]),
"modular": MoMeta("AdaptorEthRdmaProfile", "adaptorEthRdmaProfile", "rdmaprofile", VersionMeta.Version2013e, "InputOutput", 0x7f, [], ["admin", "read-only", "user"], [u'adaptorHostEthIf'], [], ["Get", "Set"])
}
prop_meta = {
"classic": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"memory_regions": MoPropertyMeta("memory_regions", "memoryRegions", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, [], ["0-524288"]),
"queue_pairs": MoPropertyMeta("queue_pairs", "queuePairs", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["0-8192"]),
"resource_groups": MoPropertyMeta("resource_groups", "resourceGroups", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, [], ["0-128"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
},
"modular": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"memory_regions": MoPropertyMeta("memory_regions", "memoryRegions", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, [], ["0-524288"]),
"queue_pairs": MoPropertyMeta("queue_pairs", "queuePairs", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["0-8192"]),
"resource_groups": MoPropertyMeta("resource_groups", "resourceGroups", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, [], ["0-128"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
},
}
prop_map = {
"classic": {
"childAction": "child_action",
"dn": "dn",
"memoryRegions": "memory_regions",
"queuePairs": "queue_pairs",
"resourceGroups": "resource_groups",
"rn": "rn",
"status": "status",
},
"modular": {
"childAction": "child_action",
"dn": "dn",
"memoryRegions": "memory_regions",
"queuePairs": "queue_pairs",
"resourceGroups": "resource_groups",
"rn": "rn",
"status": "status",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.memory_regions = None
self.queue_pairs = None
self.resource_groups = None
self.status = None
ManagedObject.__init__(self, "AdaptorEthRdmaProfile", parent_mo_or_dn, **kwargs)
|
from plotly.basedatatypes import BaseTraceType
import copy
class Sankey(BaseTraceType):
# arrangement
# -----------
@property
def arrangement(self):
"""
If value is `snap` (the default), the node arrangement is
assisted by automatic snapping of elements to preserve space
between nodes specified via `nodepad`. If value is
`perpendicular`, the nodes can only move along a line
perpendicular to the flow. If value is `freeform`, the nodes
can freely move on the plane. If value is `fixed`, the nodes
are stationary.
The 'arrangement' property is an enumeration that may be specified as:
- One of the following enumeration values:
['snap', 'perpendicular', 'freeform', 'fixed']
Returns
-------
Any
"""
return self['arrangement']
@arrangement.setter
def arrangement(self, val):
self['arrangement'] = val
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items in the markers'
        DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['customdata']
@customdata.setter
def customdata(self, val):
self['customdata'] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on plot.ly for customdata .
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['customdatasrc']
@customdatasrc.setter
def customdatasrc(self, val):
self['customdatasrc'] = val
# domain
# ------
@property
def domain(self):
"""
The 'domain' property is an instance of Domain
that may be specified as:
- An instance of plotly.graph_objs.sankey.Domain
- A dict of string/value properties that will be passed
to the Domain constructor
Supported dict properties:
column
If there is a layout grid, use the domain for
this column in the grid for this sankey trace .
row
If there is a layout grid, use the domain for
this row in the grid for this sankey trace .
x
Sets the horizontal domain of this sankey trace
(in plot fraction).
y
Sets the vertical domain of this sankey trace
(in plot fraction).
Returns
-------
plotly.graph_objs.sankey.Domain
"""
return self['domain']
@domain.setter
def domain(self, val):
self['domain'] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
        Note that this attribute is superseded by `node.hoverinfo` and
        `link.hoverinfo` for nodes and links respectively.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
Returns
-------
Any
"""
return self['hoverinfo']
@hoverinfo.setter
def hoverinfo(self, val):
self['hoverinfo'] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of plotly.graph_objs.sankey.Hoverlabel
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on plot.ly for
bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on plot.ly for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the length (in number of characters) of
the trace name in the hover labels for this
trace. -1 shows the whole name regardless of
length. 0-3 shows the first 0-3 characters, and
an integer >3 will show the whole name if it is
less than that many characters, but if it is
longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for
namelength .
Returns
-------
plotly.graph_objs.sankey.Hoverlabel
"""
return self['hoverlabel']
@hoverlabel.setter
def hoverlabel(self, val):
self['hoverlabel'] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids are used for object
        constancy of data points during animation. Should be an array
        of strings, not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['ids']
@ids.setter
def ids(self, val):
self['ids'] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on plot.ly for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['idssrc']
@idssrc.setter
def idssrc(self, val):
self['idssrc'] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['legendgroup']
@legendgroup.setter
def legendgroup(self, val):
self['legendgroup'] = val
# link
# ----
@property
def link(self):
"""
The links of the Sankey plot.
The 'link' property is an instance of Link
that may be specified as:
- An instance of plotly.graph_objs.sankey.Link
- A dict of string/value properties that will be passed
to the Link constructor
Supported dict properties:
color
Sets the `link` color. It can be a single
value, or an array for specifying color for
each `link`. If `link.color` is omitted, then
by default, a translucent grey link will be
used.
colorsrc
Sets the source reference on plot.ly for color
.
hoverinfo
Determines which trace information appear when
hovering links. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverlabel
plotly.graph_objs.sankey.link.Hoverlabel
instance or dict with compatible properties
label
The shown name of the link.
labelsrc
Sets the source reference on plot.ly for label
.
line
plotly.graph_objs.sankey.link.Line instance or
dict with compatible properties
source
An integer number `[0..nodes.length - 1]` that
represents the source node.
sourcesrc
Sets the source reference on plot.ly for
source .
target
An integer number `[0..nodes.length - 1]` that
represents the target node.
targetsrc
Sets the source reference on plot.ly for
target .
value
A numeric value representing the flow volume
value.
valuesrc
Sets the source reference on plot.ly for value
.
Returns
-------
plotly.graph_objs.sankey.Link
"""
return self['link']
@link.setter
def link(self, val):
self['link'] = val
# name
# ----
@property
def name(self):
"""
        Sets the trace name. The trace name appears as the legend item
        and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['name']
@name.setter
def name(self, val):
self['name'] = val
# node
# ----
@property
def node(self):
"""
The nodes of the Sankey plot.
The 'node' property is an instance of Node
that may be specified as:
- An instance of plotly.graph_objs.sankey.Node
- A dict of string/value properties that will be passed
to the Node constructor
Supported dict properties:
color
Sets the `node` color. It can be a single
value, or an array for specifying color for
each `node`. If `node.color` is omitted, then
the default `Plotly` color palette will be
cycled through to have a variety of colors.
These defaults are not fully opaque, to allow
some visibility of what is beneath the node.
colorsrc
Sets the source reference on plot.ly for color
.
hoverinfo
Determines which trace information appear when
hovering nodes. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverlabel
plotly.graph_objs.sankey.node.Hoverlabel
instance or dict with compatible properties
label
The shown name of the node.
labelsrc
Sets the source reference on plot.ly for label
.
line
plotly.graph_objs.sankey.node.Line instance or
dict with compatible properties
pad
Sets the padding (in px) between the `nodes`.
thickness
Sets the thickness (in px) of the `nodes`.
Returns
-------
plotly.graph_objs.sankey.Node
"""
return self['node']
@node.setter
def node(self, val):
self['node'] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['opacity']
@opacity.setter
def opacity(self, val):
self['opacity'] = val
# orientation
# -----------
@property
def orientation(self):
"""
Sets the orientation of the Sankey diagram.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['v', 'h']
Returns
-------
Any
"""
return self['orientation']
@orientation.setter
def orientation(self, val):
self['orientation'] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
        turned on for all points, whereas any other non-array value
        means no selection at all, in which case the `selected` and
        `unselected` styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self['selectedpoints']
@selectedpoints.setter
def selectedpoints(self, val):
self['selectedpoints'] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showlegend']
@showlegend.setter
def showlegend(self, val):
self['showlegend'] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of plotly.graph_objs.sankey.Stream
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See
https://plot.ly/settings for more details.
Returns
-------
plotly.graph_objs.sankey.Stream
"""
return self['stream']
@stream.setter
def stream(self, val):
self['stream'] = val
# textfont
# --------
@property
def textfont(self):
"""
Sets the font for node labels
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of plotly.graph_objs.sankey.Textfont
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
            installed and supported. These include "Arial",
            "Balto", "Courier New", "Droid Sans", "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.sankey.Textfont
"""
return self['textfont']
@textfont.setter
def textfont(self, val):
self['textfont'] = val
# uid
# ---
@property
def uid(self):
"""
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['uid']
@uid.setter
def uid(self, val):
self['uid'] = val
# valueformat
# -----------
@property
def valueformat(self):
"""
Sets the value formatting rule using d3 formatting mini-
        language, which is similar to Python's. See
        https://github.com/d3/d3-format/blob/master/README.md#locale_format
The 'valueformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['valueformat']
@valueformat.setter
def valueformat(self, val):
self['valueformat'] = val
# valuesuffix
# -----------
@property
def valuesuffix(self):
"""
Adds a unit to follow the value in the hover tooltip. Add a
space if a separation is necessary from the value.
The 'valuesuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['valuesuffix']
@valuesuffix.setter
def valuesuffix(self, val):
self['valuesuffix'] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self['visible']
@visible.setter
def visible(self, val):
self['visible'] = val
# type
# ----
@property
def type(self):
return self._props['type']
# property parent name
# --------------------
@property
def _parent_path_str(self):
return ''
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
arrangement
If value is `snap` (the default), the node arrangement
is assisted by automatic snapping of elements to
preserve space between nodes specified via `nodepad`.
If value is `perpendicular`, the nodes can only move
along a line perpendicular to the flow. If value is
`freeform`, the nodes can freely move on the plane. If
value is `fixed`, the nodes are stationary.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items in
            the markers' DOM elements.
customdatasrc
Sets the source reference on plot.ly for customdata .
domain
plotly.graph_objs.sankey.Domain instance or dict with
compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired. Note that this attribute is
            superseded by `node.hoverinfo` and `link.hoverinfo` for
            nodes and links respectively.
hoverlabel
plotly.graph_objs.sankey.Hoverlabel instance or dict
with compatible properties
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should
            be an array of strings, not numbers or any other type.
idssrc
Sets the source reference on plot.ly for ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
link
The links of the Sankey plot.
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
node
The nodes of the Sankey plot.
opacity
Sets the opacity of the trace.
orientation
Sets the orientation of the Sankey diagram.
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas
            any other non-array value means no selection at all, in
            which case the `selected` and `unselected` styles have no
            effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
plotly.graph_objs.sankey.Stream instance or dict with
compatible properties
textfont
Sets the font for node labels
uid
valueformat
Sets the value formatting rule using d3 formatting
mini-language which is similar to those of Python. See
            https://github.com/d3/d3-format/blob/master/README.md#locale_format
valuesuffix
Adds a unit to follow the value in the hover tooltip.
Add a space if a separation is necessary from the
value.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
"""
def __init__(
self,
arg=None,
arrangement=None,
customdata=None,
customdatasrc=None,
domain=None,
hoverinfo=None,
hoverlabel=None,
ids=None,
idssrc=None,
legendgroup=None,
link=None,
name=None,
node=None,
opacity=None,
orientation=None,
selectedpoints=None,
showlegend=None,
stream=None,
textfont=None,
uid=None,
valueformat=None,
valuesuffix=None,
visible=None,
**kwargs
):
"""
Construct a new Sankey object
Sankey plots for network flow data analysis. The nodes are
specified in `nodes` and the links between sources and targets
in `links`. The colors are set in `nodes[i].color` and
`links[i].color`; otherwise defaults are used.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.Sankey
arrangement
If value is `snap` (the default), the node arrangement
is assisted by automatic snapping of elements to
preserve space between nodes specified via `nodepad`.
If value is `perpendicular`, the nodes can only move
along a line perpendicular to the flow. If value is
`freeform`, the nodes can freely move on the plane. If
value is `fixed`, the nodes are stationary.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items in
            the markers' DOM elements.
customdatasrc
Sets the source reference on plot.ly for customdata .
domain
plotly.graph_objs.sankey.Domain instance or dict with
compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired. Note that this attribute is
            superseded by `node.hoverinfo` and `link.hoverinfo` for
nodes and links respectively.
hoverlabel
plotly.graph_objs.sankey.Hoverlabel instance or dict
with compatible properties
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on plot.ly for ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
link
The links of the Sankey plot.
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
node
The nodes of the Sankey plot.
opacity
Sets the opacity of the trace.
orientation
Sets the orientation of the Sankey diagram.
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas
            any other non-array value means no selection at all, in
            which case the `selected` and `unselected` styles have no
            effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
plotly.graph_objs.sankey.Stream instance or dict with
compatible properties
textfont
Sets the font for node labels
uid
valueformat
Sets the value formatting rule using d3 formatting
mini-language which is similar to those of Python. See
            https://github.com/d3/d3-format/blob/master/README.md#locale_format
valuesuffix
Adds a unit to follow the value in the hover tooltip.
Add a space if a separation is necessary from the
value.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Sankey
"""
super(Sankey, self).__init__('sankey')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Sankey
constructor must be a dict or
an instance of plotly.graph_objs.Sankey"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators import (sankey as v_sankey)
# Initialize validators
# ---------------------
self._validators['arrangement'] = v_sankey.ArrangementValidator()
self._validators['customdata'] = v_sankey.CustomdataValidator()
self._validators['customdatasrc'] = v_sankey.CustomdatasrcValidator()
self._validators['domain'] = v_sankey.DomainValidator()
self._validators['hoverinfo'] = v_sankey.HoverinfoValidator()
self._validators['hoverlabel'] = v_sankey.HoverlabelValidator()
self._validators['ids'] = v_sankey.IdsValidator()
self._validators['idssrc'] = v_sankey.IdssrcValidator()
self._validators['legendgroup'] = v_sankey.LegendgroupValidator()
self._validators['link'] = v_sankey.LinkValidator()
self._validators['name'] = v_sankey.NameValidator()
self._validators['node'] = v_sankey.NodeValidator()
self._validators['opacity'] = v_sankey.OpacityValidator()
self._validators['orientation'] = v_sankey.OrientationValidator()
self._validators['selectedpoints'] = v_sankey.SelectedpointsValidator()
self._validators['showlegend'] = v_sankey.ShowlegendValidator()
self._validators['stream'] = v_sankey.StreamValidator()
self._validators['textfont'] = v_sankey.TextfontValidator()
self._validators['uid'] = v_sankey.UidValidator()
self._validators['valueformat'] = v_sankey.ValueformatValidator()
self._validators['valuesuffix'] = v_sankey.ValuesuffixValidator()
self._validators['visible'] = v_sankey.VisibleValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('arrangement', None)
self['arrangement'] = arrangement if arrangement is not None else _v
_v = arg.pop('customdata', None)
self['customdata'] = customdata if customdata is not None else _v
_v = arg.pop('customdatasrc', None)
self['customdatasrc'
] = customdatasrc if customdatasrc is not None else _v
_v = arg.pop('domain', None)
self['domain'] = domain if domain is not None else _v
_v = arg.pop('hoverinfo', None)
self['hoverinfo'] = hoverinfo if hoverinfo is not None else _v
_v = arg.pop('hoverlabel', None)
self['hoverlabel'] = hoverlabel if hoverlabel is not None else _v
_v = arg.pop('ids', None)
self['ids'] = ids if ids is not None else _v
_v = arg.pop('idssrc', None)
self['idssrc'] = idssrc if idssrc is not None else _v
_v = arg.pop('legendgroup', None)
self['legendgroup'] = legendgroup if legendgroup is not None else _v
_v = arg.pop('link', None)
self['link'] = link if link is not None else _v
_v = arg.pop('name', None)
self['name'] = name if name is not None else _v
_v = arg.pop('node', None)
self['node'] = node if node is not None else _v
_v = arg.pop('opacity', None)
self['opacity'] = opacity if opacity is not None else _v
_v = arg.pop('orientation', None)
self['orientation'] = orientation if orientation is not None else _v
_v = arg.pop('selectedpoints', None)
self['selectedpoints'
] = selectedpoints if selectedpoints is not None else _v
_v = arg.pop('showlegend', None)
self['showlegend'] = showlegend if showlegend is not None else _v
_v = arg.pop('stream', None)
self['stream'] = stream if stream is not None else _v
_v = arg.pop('textfont', None)
self['textfont'] = textfont if textfont is not None else _v
_v = arg.pop('uid', None)
self['uid'] = uid if uid is not None else _v
_v = arg.pop('valueformat', None)
self['valueformat'] = valueformat if valueformat is not None else _v
_v = arg.pop('valuesuffix', None)
self['valuesuffix'] = valuesuffix if valuesuffix is not None else _v
_v = arg.pop('visible', None)
self['visible'] = visible if visible is not None else _v
# Read-only literals
# ------------------
from _plotly_utils.basevalidators import LiteralValidator
self._props['type'] = 'sankey'
self._validators['type'] = LiteralValidator(
plotly_name='type', parent_name='sankey', val='sankey'
)
arg.pop('type', None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
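# Illustrative usage sketch (editor's addition, not part of the generated
# module): it builds a small Sankey trace directly from this class, passing
# the nodes and links as plain dicts as described in the `node` and `link`
# property docstrings above. Assembling a complete figure (e.g. via
# plotly.graph_objs.Figure) and rendering it are intentionally left out.
def _sankey_usage_example():
    trace = Sankey(
        node=dict(label=['A', 'B', 'C'], pad=15, thickness=20),
        link=dict(source=[0, 1], target=[2, 2], value=[8.0, 4.0]),
        orientation='h',
        valueformat='.0f',
    )
    # to_plotly_json() is inherited from the base plotly type and returns the
    # plain dict representation of the trace.
    return trace.to_plotly_json()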
|
import unittest
from retropq.zero_prefix_bst import ZeroPrefixBST
class ZeroPrefixBSTTest(unittest.TestCase):
def test(self):
bst = ZeroPrefixBST()
bst[6] = -1
bst[3] = 0
bst[0] = 1
self.assertEqual(0, bst.zero_prefix_before(5))
self.assertEqual(6, bst.zero_prefix_after(5))
self.assertEqual(7, bst.zero_prefix_before(7))
self.assertEqual(7, bst.zero_prefix_after(7))
self.assertEqual(0, bst.zero_prefix_before(6))
self.assertEqual(6, bst.zero_prefix_after(6))
self.assertEqual(-20, bst.zero_prefix_before(-20))
self.assertEqual(-20, bst.zero_prefix_after(-20))
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The OFIChain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the invalidateblock RPC."""
from test_framework.test_framework import OFIChainTestFramework
from test_framework.util import *
import time  # used by the explicit sleep below
class InvalidateTest(OFIChainTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
self.log.info("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
self.log.info("Mine 4 blocks on Node 0")
self.nodes[0].generate(4)
assert(self.nodes[0].getblockcount() == 4)
besthash = self.nodes[0].getbestblockhash()
self.log.info("Mine competing 6 blocks on Node 1")
self.nodes[1].generate(6)
assert(self.nodes[1].getblockcount() == 6)
self.log.info("Connect nodes to force a reorg")
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes[0:2])
assert(self.nodes[0].getblockcount() == 6)
badhash = self.nodes[1].getblockhash(2)
self.log.info("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
self.nodes[0].invalidateblock(badhash)
newheight = self.nodes[0].getblockcount()
newhash = self.nodes[0].getbestblockhash()
if (newheight != 4 or newhash != besthash):
raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
self.log.info("Make sure we won't reorg to a lower work chain:")
connect_nodes_bi(self.nodes,1,2)
self.log.info("Sync node 2 to node 1 so both have 6 blocks")
sync_blocks(self.nodes[1:3])
assert(self.nodes[2].getblockcount() == 6)
self.log.info("Invalidate block 5 on node 1 so its tip is now at 4")
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert(self.nodes[1].getblockcount() == 4)
self.log.info("Invalidate block 3 on node 2, so its tip is now 2")
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert(self.nodes[2].getblockcount() == 2)
self.log.info("..and then mine a block")
self.nodes[2].generate(1)
self.log.info("Verify all nodes are at the right height")
time.sleep(5)
assert_equal(self.nodes[2].getblockcount(), 3)
assert_equal(self.nodes[0].getblockcount(), 4)
node1height = self.nodes[1].getblockcount()
if node1height < 4:
raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
if __name__ == '__main__':
InvalidateTest().main()
|
#!c:\users\batle\pycharmprojects\rentomatic\venv\scripts\python.exe
# $Id: rst2odt.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
A front end to the Docutils Publisher, producing OpenOffice documents.
"""
import sys
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline_to_binary, default_description
from docutils.writers.odf_odt import Writer, Reader
description = ('Generates OpenDocument/OpenOffice/ODF documents from '
'standalone reStructuredText sources. ' + default_description)
writer = Writer()
reader = Reader()
output = publish_cmdline_to_binary(reader=reader, writer=writer,
description=description)
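# Illustrative usage note (editor's addition): like the other Docutils front
# ends, this script is driven entirely by publish_cmdline_to_binary(), so it
# is meant to be invoked from the command line, for example:
#   rst2odt.py document.rst document.odt
# Running it with --help prints the standard Docutils Publisher options.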
|
from __future__ import absolute_import
from .element import set_modsym_print_mode
from .modsym import ModularSymbols, ModularSymbols_clear_cache
from .heilbronn import HeilbronnCremona, HeilbronnMerel
from .p1list import P1List, lift_to_sl2z
from .p1list_nf import P1NFList, MSymbol
from .ghlist import GHlist
from .g1list import G1list
|
from unittest import TestCase
from unittest.mock import (
Mock,
patch,
)
from pypika import Field
from fireant.database import Database
from fireant.middleware.decorators import connection_middleware
@connection_middleware
def test_fetch(database, query, **kwargs):
return kwargs.get('connection')
def test_connect():
mock_connection = Mock()
mock_connection.__enter__ = Mock()
mock_connection.__exit__ = Mock()
return mock_connection
class TestBaseDatabase(TestCase):
def test_database_api(self):
db = Database()
with self.assertRaises(NotImplementedError):
db.connect()
with self.assertRaises(NotImplementedError):
db.trunc_date(Field('abc'), 'day')
def test_to_char(self):
db = Database()
to_char = db.to_char(Field('field'))
self.assertEqual(str(to_char), 'CAST("field" AS VARCHAR)')
def test_no_custom_middlewares_specified_still_gives_connection_middleware(self):
db = Database()
self.assertEqual(1, len(db.middlewares))
self.assertIs(db.middlewares[0], connection_middleware)
@patch.object(Database, 'fetch')
@patch.object(Database, 'connect')
def test_database_reuse_passed_connection(self, mock_connect, mock_fetch):
db = Database()
mock_connect.side_effect = test_connect
mock_fetch.side_effect = test_fetch
with db.connect() as connection:
connection_1 = db.fetch(db, 'SELECT a from abc', connection=connection)
connection_2 = db.fetch(db, 'SELECT b from def', connection=connection)
self.assertEqual(1, mock_connect.call_count)
self.assertEqual(connection_1, connection_2)
@patch.object(Database, 'fetch')
@patch.object(Database, 'connect')
def test_database_opens_new_connection(self, mock_connect, mock_fetch):
db = Database()
mock_connect.side_effect = test_connect
mock_fetch.side_effect = test_fetch
connection_1 = db.fetch(db, 'SELECT a from abc')
connection_2 = db.fetch(db, 'SELECT b from def')
self.assertEqual(2, mock_connect.call_count)
self.assertNotEqual(connection_1, connection_2)
|
from entities.ships.allies.ally import Ally
from utils.ids.player_id import PlayerID
from utils.ids.projectile_id import ProjectileID
class Aegis(Ally):
    """A friendly Aegis ship."""
    def __init__(self, hp, shield, x, y, speed, fire_rate, *args, **kwargs):
        """Constructs the Aegis."""
super().__init__(hp, shield, x, y, speed, fire_rate)
self.projectile_type = ProjectileID.FRIENDLY_MISSILE
self.projectile_damage = 20
self.entity_id = PlayerID.AEGIS
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running zenxd with the -rpcbind and -rpcallowip options."""
import socket
import sys
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import *
from test_framework.netutil import *
class RPCBindTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
self.nodes[0].rpchost = None
self.start_nodes([base_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
# find the first non-loopback interface for testing
non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
non_loopback_ip = ip
break
if non_loopback_ip is None:
raise SkipTest("This test requires at least one non-loopback IPv4 interface.")
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(("::1",1))
            s.close()
except OSError:
raise SkipTest("This test requires IPv6 support.")
self.log.info("Using interface %s for testing" % non_loopback_ip)
defaultport = rpc_port(0)
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check default with rpcallowip (IPv6 any)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('::0', defaultport)])
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check only non-loopback interface
self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip],
[(non_loopback_ip, defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport)
if __name__ == '__main__':
RPCBindTest().main()
|
import numpy as np
from sklearn import manifold, decomposition
from matplotlib import pyplot as plt
class vis:
colors = ['black', 'blue', 'green', 'yellow', 'red']
def __init__(self, X, y):
self.X = X.values[0:1000,0:28]
self.y = y.values[0:1000]
def pre(self):
label_list = ['normal', 'scan', 'dos', 'u2r', 'r2l']
for i in range(len(self.y)):
self.y[i] = label_list.index(self.y[i])
def PCA(self):
pca = decomposition.PCA(n_components=2, svd_solver='randomized')
X_pca = pca.fit_transform(self.X)
        for i in range(len(vis.colors)):
px = X_pca[:, 0][self.y == i]
py = X_pca[:, 1][self.y == i]
plt.scatter(px, py, c=vis.colors[i])
plt.legend(np.arange(len(vis.colors)).astype(str))
plt.xlabel('First Principal Component')
plt.ylabel('Second Principal Component')
plt.savefig('PCA.png')
plt.show()
def t_SNE(self):
tsne = manifold.TSNE(n_components=2, init='pca', random_state=501)
X_tsne = tsne.fit_transform(self.X)
x_min, x_max = X_tsne.min(0), X_tsne.max(0)
X_norm = (X_tsne - x_min) / (x_max - x_min)
for i in range(X_norm.shape[0]):
plt.text(X_norm[i, 0], X_norm[i, 1], str(self.y[i]), color=vis.colors[self.y[i]],
fontdict={'weight': 'bold', 'size': 9})
plt.legend(np.arange(len(vis.colors)).astype(str))
        plt.xlabel('t-SNE dimension 1')
        plt.ylabel('t-SNE dimension 2')
plt.savefig('t-SNE.png')
plt.show()
def solve(self):
        self.pre()
        # self.PCA()
        self.t_SNE()
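# Illustrative usage sketch (editor's addition): `vis` expects objects that
# expose `.values` (e.g. a pandas DataFrame and Series) with at least 28
# feature columns and labels drawn from the five categories used in pre().
# The synthetic data below is an assumption made purely for demonstration.
if __name__ == '__main__':
    import pandas as pd
    rng = np.random.RandomState(0)
    X_demo = pd.DataFrame(rng.rand(200, 28))
    y_demo = pd.Series(rng.choice(['normal', 'scan', 'dos', 'u2r', 'r2l'], size=200))
    vis(X_demo, y_demo).solve()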
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Collection of utilities for parsing CLI clients output."""
import logging
import re
from tempest.lib import exceptions
LOG = logging.getLogger(__name__)
delimiter_line = re.compile(r'^\+\-[\+\-]+\-\+$')
def details_multiple(output_lines, with_label=False):
"""Return list of dicts with item details from cli output tables.
If with_label is True, key '__label' is added to each items dict.
For more about 'label' see OutputParser.tables().
"""
items = []
tables_ = tables(output_lines)
for table_ in tables_:
if ('Property' not in table_['headers']
or 'Value' not in table_['headers']):
raise exceptions.InvalidStructure()
item = {}
for value in table_['values']:
item[value[0]] = value[1]
if with_label:
item['__label'] = table_['label']
items.append(item)
return items
def details(output_lines, with_label=False):
"""Return dict with details of first item (table) found in output."""
items = details_multiple(output_lines, with_label)
return items[0]
def listing(output_lines):
"""Return list of dicts with basic item info parsed from cli output."""
items = []
table_ = table(output_lines)
for row in table_['values']:
item = {}
for col_idx, col_key in enumerate(table_['headers']):
item[col_key] = row[col_idx]
items.append(item)
return items
def tables(output_lines):
"""Find all ascii-tables in output and parse them.
Return list of tables parsed from cli output as dicts.
(see OutputParser.table())
And, if found, label key (separated line preceding the table)
is added to each tables dict.
"""
tables_ = []
table_ = []
label = None
start = False
header = False
if not isinstance(output_lines, list):
output_lines = output_lines.split('\n')
for line in output_lines:
if delimiter_line.match(line):
if not start:
start = True
elif not header:
# we are after head area
header = True
else:
# table ends here
start = header = None
table_.append(line)
parsed = table(table_)
parsed['label'] = label
tables_.append(parsed)
table_ = []
label = None
continue
if start:
table_.append(line)
else:
if label is None:
label = line
else:
LOG.warning('Invalid line between tables: %s' % line)
if len(table_) > 0:
LOG.warning('Missing end of table')
return tables_
def table(output_lines):
"""Parse single table from cli output.
Return dict with list of column names in 'headers' key and
rows in 'values' key.
"""
table_ = {'headers': [], 'values': []}
columns = None
if not isinstance(output_lines, list):
output_lines = output_lines.split('\n')
if not output_lines[-1]:
# skip last line if empty (just newline at the end)
output_lines = output_lines[:-1]
for line in output_lines:
if delimiter_line.match(line):
columns = _table_columns(line)
continue
if '|' not in line:
LOG.warning('skipping invalid table line: %s' % line)
continue
row = []
for col in columns:
row.append(line[col[0]:col[1]].strip())
if table_['headers']:
table_['values'].append(row)
else:
table_['headers'] = row
return table_
def _table_columns(first_table_row):
"""Find column ranges in output line.
Return list of tuples (start,end) for each column
detected by plus (+) characters in delimiter line.
"""
positions = []
start = 1 # there is '+' at 0
while start < len(first_table_row):
end = first_table_row.find('+', start)
if end == -1:
break
positions.append((start, end))
start = end + 1
return positions
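# Illustrative usage sketch (editor's addition, not part of the tempest
# module): it feeds a minimal two-column ascii table through listing() to show
# the headers/values structure produced by table(). The expected result noted
# below is what these helpers return for that input.
if __name__ == '__main__':
    _sample = (
        "+----+-------+\n"
        "| ID | Name  |\n"
        "+----+-------+\n"
        "| 1  | alpha |\n"
        "| 2  | beta  |\n"
        "+----+-------+\n"
    )
    # expected: [{'ID': '1', 'Name': 'alpha'}, {'ID': '2', 'Name': 'beta'}]
    print(listing(_sample))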
|
# encoding: utf-8
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, absolute_import, print_function, unicode_literals
from datetime import datetime
from docutils import examples
from fkie_multimaster_msgs.msg import MasterState
from python_qt_binding import loadUi, QT_BINDING_VERSION
from python_qt_binding.QtCore import QFile, QPoint, QSize, Qt, QTimer, QUrl, Signal
from python_qt_binding.QtGui import QDesktopServices, QIcon, QKeySequence, QPixmap
from python_qt_binding.QtGui import QPalette, QColor
import getpass
import grpc
import os
import rospy
import socket
import time
import uuid
try:
    import xmlrpclib
except ImportError:  # Python 3
    import xmlrpc.client as xmlrpclib
import ruamel.yaml
from fkie_master_discovery.common import resolve_url, subdomain, masteruri_from_master, masteruri_from_ros
from fkie_node_manager_daemon.common import utf8, get_pkg_path
from fkie_node_manager_daemon.host import get_hostname
from fkie_node_manager_daemon import screen
from fkie_node_manager_daemon import url as nmdurl
import fkie_node_manager as nm
from .capability_table import CapabilityTable
from .detailed_msg_box import MessageBox
from .discovery_listener import MasterListService, MasterStateTopic, MasterStatisticTopic, OwnMasterMonitoring
from .editor.editor import Editor
from .launch_enhanced_line_edit import EnhancedLineEdit
from .launch_files_widget import LaunchFilesWidget
from .log_widget import LogWidget
from .master_list_model import MasterModel, MasterIconsDelegate
from .master_view_proxy import MasterViewProxy
from .menu_rqt import MenuRqt
from .network_discovery_dialog import NetworkDiscoveryDialog
from .parameter_dialog import ParameterDialog
from .profile_widget import ProfileWidget
from .progress_queue import ProgressQueue
from .select_dialog import SelectDialog
from .sync_dialog import SyncDialog
from .update_handler import UpdateHandler
try:
from python_qt_binding.QtGui import QApplication, QFileDialog, QMainWindow, QStackedLayout, QWidget, QStyle
from python_qt_binding.QtGui import QShortcut, QVBoxLayout, QColorDialog, QDialog, QRadioButton
except Exception:
from python_qt_binding.QtWidgets import QApplication, QFileDialog, QMainWindow, QStackedLayout, QWidget, QStyle
from python_qt_binding.QtWidgets import QShortcut, QVBoxLayout, QColorDialog, QDialog, QRadioButton
from fkie_node_manager import gui_resources
try:
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
DIAGNOSTICS_AVAILABLE = True
except Exception:
import sys
    sys.stderr.write("Cannot import 'diagnostic_msgs', feature disabled.\n")
DIAGNOSTICS_AVAILABLE = False
class MainWindow(QMainWindow):
'''
The class to create the main window of the application.
'''
close_signal = Signal()
DELAYED_NEXT_REQ_ON_ERR = 5.0
if DIAGNOSTICS_AVAILABLE:
diagnostics_signal = Signal(DiagnosticStatus)
'''@ivar: the signal is emitted if a message on topic nm_notifier was
        received (DiagnosticStatus)'''
def __init__(self, files=[], restricted_to_one_master=False, monitor_port=22622, parent=None):
'''
Creates the window, connects the signals and init the class.
'''
QMainWindow.__init__(self)
self.close_event_count = 0
self.default_load_launch = os.path.abspath(resolve_url(files[0])) if files else ''
self.default_profile_load = os.path.isfile(self.default_load_launch) and self.default_load_launch.endswith('.nmprofile')
restricted_to_one_master = False
self._finished = False
self._history_selected_robot = ''
self.__icons = {'empty': (QIcon(), ''),
'default_pc': (QIcon(':/icons/crystal_clear_miscellaneous.png'), ':/icons/crystal_clear_miscellaneous.png'),
'log_warning': (QIcon(':/icons/crystal_clear_no_io.png'), ':/icons/crystal_clear_no_io.png'),
'show_io': (QIcon(':/icons/crystal_clear_show_io.png'), ':/icons/crystal_clear_show_io.png')
                        } # (master name : (QIcon, path))
self.__current_icon = None
self.__current_master_label_name = None
self._syncs_to_start = [] # hostnames
self._accept_next_update = False
self._last_window_state = False
self._description_history = []
self._description_accept = ''
# self.setAttribute(Qt.WA_AlwaysShowToolTips, True)
# setup main window frame
self.setObjectName('MainWindow')
# self = mainWindow = QMainWindow()
# self = mainWindow = loader.load(":/forms/MainWindow.ui")
ui_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'MainWindow.ui')
loadUi(ui_file, self)
self.setObjectName('MainUI')
self.setDockOptions(QMainWindow.AllowNestedDocks | QMainWindow.AllowTabbedDocks | QMainWindow.AnimatedDocks | QMainWindow.VerticalTabs)
self.close_signal.connect(self.close)
self.close_without_ask = False
self.user_frame.setVisible(False)
self._add_user_to_combo(getpass.getuser())
self.userComboBox.editTextChanged.connect(self.on_user_changed)
self.masterInfoFrame.setEnabled(False)
self.infoButton.clicked.connect(self.on_info_clicked)
self.setTimeButton.clicked.connect(self.on_set_time_clicked)
self.refreshHostButton.clicked.connect(self.on_refresh_master_clicked)
self.masterLogButton.clicked.connect(self.on_master_log_clicked)
self.runButton.clicked.connect(self.on_run_node_clicked)
self.syncButton.released.connect(self.on_sync_dialog_released)
menu_rqt = MenuRqt(self.rqtButton)
menu_rqt.start_rqt_plugin_signal.connect(self.on_rqt_plugin_start)
pal = self.expert_tab.palette()
self._default_color = pal.color(QPalette.Window)
self._set_custom_colors()
# setup settings widget
self.profiler = ProfileWidget(self, self)
self.addDockWidget(Qt.LeftDockWidgetArea, self.profiler)
# setup logger widget
self.log_dock = LogWidget()
self.log_dock.added_signal.connect(self._on_log_added)
self.log_dock.cleared_signal.connect(self._on_log_cleared)
self.log_dock.setVisible(False)
self.addDockWidget(Qt.BottomDockWidgetArea, self.log_dock)
self.logButton.clicked.connect(self._on_log_button_clicked)
self.settingsButton.clicked.connect(self._on_settings_button_clicked)
# setup the launch files view
self.launch_dock = LaunchFilesWidget()
self.launch_dock.load_signal.connect(self.on_load_launch_file)
self.launch_dock.load_profile_signal.connect(self.profiler.on_load_profile_file)
self.launch_dock.edit_signal.connect(self.on_launch_edit)
self.launch_dock.transfer_signal.connect(self.on_launch_transfer)
self.launch_dock.save_profile_signal.connect(self.profiler.on_save_profile)
self.addDockWidget(Qt.LeftDockWidgetArea, self.launch_dock)
self.mIcon = QIcon(":/icons/crystal_clear_prop_run.png")
# self.style().standardIcon(QStyle.SP_FileIcon)
self.setWindowTitle("Node Manager")
self.setWindowIcon(self.mIcon)
# self.setCentralWidget(mainWindow)
        # init the stack layout which contains the information about the different ROS masters
self.stackedLayout = QStackedLayout()
self.stackedLayout.setObjectName('stackedLayout')
emptyWidget = QWidget()
emptyWidget.setObjectName('emptyWidget')
self.stackedLayout.addWidget(emptyWidget)
self.tabWidget.currentChanged.connect(self.on_currentChanged_tab)
self.tabLayout = QVBoxLayout(self.tabPlace)
self.tabLayout.setObjectName("tabLayout")
self.tabLayout.setContentsMargins(0, 0, 0, 0)
self.tabLayout.addLayout(self.stackedLayout)
# initialize the progress queue
self._progress_queue = ProgressQueue(self.progressFrame, self.progressBar, self.progressCancelButton, 'Network')
self._progress_queue_sync = ProgressQueue(self.progressFrame_sync, self.progressBar_sync, self.progressCancelButton_sync, 'Sync')
# initialize the view for the discovered ROS master
self.master_model = MasterModel(self.getMasteruri())
self.master_model.sync_start.connect(self.on_sync_start)
self.master_model.sync_stop.connect(self.on_sync_stop)
self.master_delegate = MasterIconsDelegate()
self.masterTableView.setItemDelegateForColumn(1, self.master_delegate)
self.masterTableView.setModel(self.master_model)
self.master_model.parent_view = self.masterTableView
# self.masterTableView.setAlternatingRowColors(True)
# self.masterTableView.clicked.connect(self.on_master_table_clicked)
# self.masterTableView.pressed.connect(self.on_master_table_pressed)
self.masterTableView.activated.connect(self.on_master_table_activated)
sm = self.masterTableView.selectionModel()
sm.currentRowChanged.connect(self.on_masterTableView_selection_changed)
for i, (_, width) in enumerate(MasterModel.header): # _:=name
self.masterTableView.setColumnWidth(i, width)
self.refreshAllButton.clicked.connect(self.on_all_master_refresh_clicked)
self.discoveryButton.clicked.connect(self.on_discover_network_clicked)
self.startRobotButton.clicked.connect(self.on_start_robot_clicked)
        # stores the master view widget for each discovered ROS master
self.masters = dict() # masteruri : MasterViewProxy
self.currentMaster = None # MasterViewProxy
self._close_on_exit = True
############################################################################
self.capabilitiesTable = CapabilityTable(self.capabilities_tab)
self.capabilitiesTable.setObjectName("capabilitiesTable")
self.capabilitiesTable.start_nodes_signal.connect(self.on_start_nodes)
self.capabilitiesTable.stop_nodes_signal.connect(self.on_stop_nodes)
self.capabilitiesTable.description_requested_signal.connect(self.on_description_update_cap)
self.capabilities_tab.layout().addWidget(self.capabilitiesTable)
self.descriptionTextEdit.setOpenLinks(False)
self.descriptionTextEdit.anchorClicked.connect(self.on_description_anchorClicked)
self._shortcut_copy = QShortcut(QKeySequence(self.tr("Ctrl+Shift+C", "copy selected description")), self.descriptionTextEdit)
self._shortcut_copy.activated.connect(self.descriptionTextEdit.copy)
self.tabifyDockWidget(self.launch_dock, self.descriptionDock)
self.launch_dock.raise_()
flags = self.windowFlags()
self.setWindowFlags(flags | Qt.WindowContextHelpButtonHint)
self._discover_dialog = None
self.restricted_to_one_master = restricted_to_one_master
if restricted_to_one_master:
self.syncButton.setEnabled(False)
self.refreshAllButton.setEnabled(False)
self.discoveryButton.setEnabled(False)
self.startRobotButton.setEnabled(False)
self._sync_dialog = SyncDialog()
self._shortcut_focus = QShortcut(QKeySequence(self.tr("Ctrl+Shift+F", "switch to next focus area")), self)
self._shortcut_focus.activated.connect(self._show_section_menu)
self.editor_dialogs = dict() # [file] = Editor
'''@ivar: stores the open Editor '''
self.simTimeLabel.setVisible(False)
self.launchServerLabel.setVisible(False)
# since the is_local method is threaded for host names, call it to cache the localhost
nm.is_local("localhost")
# add help page
self.ui_help_web_view.page().setLinkDelegationPolicy(self.ui_help_web_view.page().DelegateAllLinks)
self.ui_help_web_view.linkClicked.connect(self._on_help_link_clicked)
self._help_history = []
self._help_history_idx = -1
self._help_root_url = QUrl('file://%s' % nm.settings().HELP_FILE)
self._on_help_go_home()
self.ui_help_home.clicked.connect(self._on_help_go_home)
self.ui_help_back.clicked.connect(self._on_help_go_back)
self.ui_help_forward.clicked.connect(self._on_help_go_forward)
if self.ui_help_home.icon().isNull():
self.ui_help_home.setText("Home")
if self.ui_help_back.icon().isNull():
self.ui_help_back.setText("Back")
if self.ui_help_forward.icon().isNull():
self.ui_help_forward.setText("Forward")
try:
screen.test_screen()
except Exception as e:
rospy.logerr("No SCREEN available! You can't launch nodes.")
# MessageBox.warning(self, "No SCREEN",
# "No SCREEN available! You can't launch nodes.",
# '%s'%utf8(e))
self.imageLabel.mouseDoubleClickEvent = self.image_mouseDoubleClickEvent
self.masternameLabel.mouseDoubleClickEvent = self.mastername_mouseDoubleClickEvent
try:
self.readSettings()
self.launch_dock.raise_()
except Exception as e:
rospy.logwarn("Error while read settings: %s" % e)
# setup the hide button, which hides the docks on left side
docks = self._dock_widget_in(Qt.LeftDockWidgetArea, only_visible=True)
if not docks:
self.hideDocksButton.toggle()
self.on_hide_docks_toggled(True)
self.hideDocksButton.clicked.connect(self.on_hide_docks_toggled)
if not nm.settings().movable_dock_widgets:
self.networkDock.setFeatures(self.networkDock.NoDockWidgetFeatures)
self.launch_dock.setFeatures(self.launch_dock.NoDockWidgetFeatures)
self.descriptionDock.setFeatures(self.descriptionDock.NoDockWidgetFeatures)
self.log_dock.setFeatures(self.log_dock.NoDockWidgetFeatures)
# =============================
# Initialize the update handler
# =============================
# initialize the class to get the state of discovering of other ROS master
self._update_handler = UpdateHandler()
self._update_handler.master_info_signal.connect(self.on_master_info_retrieved)
self._update_handler.master_errors_signal.connect(self.on_master_errors_retrieved)
self._update_handler.timediff_signal.connect(self.on_master_timediff_retrieved)
self._update_handler.error_signal.connect(self.on_master_info_error)
# this monitor class is used, if no master_discovery node is running to get the state of the local ROS master
self.own_master_monitor = OwnMasterMonitoring()
self.own_master_monitor.init(monitor_port)
self.own_master_monitor.state_signal.connect(self.on_master_state_changed)
self.own_master_monitor.err_signal.connect(self.on_master_monitor_err)
        # get the names of the service and topic of the discovery node. The names are determined by the message types of those topics
self.masterlist_service = masterlist_service = MasterListService()
masterlist_service.masterlist_signal.connect(self.on_master_list_retrieved)
masterlist_service.masterlist_err_signal.connect(self.on_master_list_err_retrieved)
self.state_topic = MasterStateTopic()
self.state_topic.state_signal.connect(self.on_master_state_changed)
self.stats_topic = MasterStatisticTopic()
self.stats_topic.stats_signal.connect(self.on_conn_stats_updated)
# timer to update the showed update time of the ros state
self.master_timecheck_timer = QTimer()
self.master_timecheck_timer.timeout.connect(self.on_master_timecheck)
self.master_timecheck_timer.start(1000)
self._refresh_time = time.time()
self._last_time_view_update = time.time()
self._con_tries = dict()
self._subscribe()
nm.nmd().monitor.system_diagnostics_signal.connect(self._callback_system_diagnostics)
nm.nmd().monitor.remote_diagnostics_signal.connect(self._callback_diagnostics)
self._select_index = 0
self._shortcut_restart_nodes = QShortcut(QKeySequence(self.tr("Ctrl+Shift+R", "restart selected nodes")), self)
self._shortcut_restart_nodes.activated.connect(self._restart_nodes)
self._shortcut_restart_nodes_g = QShortcut(QKeySequence(self.tr("Ctrl+Shift+Alt+R", "restart selected nodes and reload global parameter")), self)
self._shortcut_restart_nodes_g.activated.connect(self._restart_nodes_g)
nm.nmd().error.connect(self.on_nmd_err)
nm.nmd().settings.yaml_config_signal.connect(self._nmd_yaml_cfg)
def _dock_widget_in(self, area=Qt.LeftDockWidgetArea, only_visible=False):
result = []
docks = [self.launch_dock, self.descriptionDock, self.networkDock]
for dock in docks:
if self.dockWidgetArea(dock) == area:
if not only_visible or (only_visible and dock.isVisibleTo(self)):
result.append(dock)
return result
def _on_log_button_clicked(self):
self.log_dock.setVisible(not self.log_dock.isVisible())
def _on_settings_button_clicked(self):
params = nm.settings().yaml()
dia = ParameterDialog(params, store_geometry="settings_dialog")
dia.setWindowTitle('Node Manager Configuration')
if dia.exec_():
try:
params = dia.getKeywords(only_changed=True, with_tags=True)
nm.settings().set_yaml(params)
except Exception as err:
import traceback
print(traceback.format_exc())
MessageBox.warning(self, "Configuration error",
'Error while set parameter',
'%s' % utf8(err))
def _on_log_added(self, info, warn, err, fatal):
self.logButton.setEnabled(True)
def _on_log_cleared(self):
self.logButton.setIcon(self.__icons['show_io'][0])
self.logButton.setText('')
# self.logButton.setEnabled(False)
def on_hide_docks_toggled(self, checked):
if self.dockWidgetArea(self.launch_dock) == Qt.LeftDockWidgetArea:
self.launch_dock.setVisible(not checked)
if self.dockWidgetArea(self.descriptionDock) == Qt.LeftDockWidgetArea:
self.descriptionDock.setVisible(not checked)
if self.dockWidgetArea(self.networkDock) == Qt.LeftDockWidgetArea:
self.networkDock.setVisible(not checked)
self.hideDocksButton.setArrowType(Qt.RightArrow if checked else Qt.LeftArrow)
def on_currentChanged_tab(self, index):
pass
# if index == self.tabWidget.widget(0):
# self.networkDock.show()
# self.launch_dock.show()
# else:
# self.networkDock.hide()
# self.launch_dock.hide()
def readSettings(self):
if nm.settings().store_geometry:
settings = nm.settings().qsettings(nm.settings().CFG_GUI_FILE)
self._history_selected_robot = settings.value("selected_robot", '')
settings.beginGroup("mainwindow")
maximized = settings.value("maximized", 'false') == 'true'
if maximized:
self.showMaximized()
else:
self.resize(settings.value("size", QSize(1024, 720)))
self.move(settings.value("pos", QPoint(0, 0)))
try:
self.restoreState(settings.value("window_state"))
except Exception:
pass
settings.endGroup()
def storeSetting(self):
if nm.settings().store_geometry:
settings = nm.settings().qsettings(nm.settings().CFG_GUI_FILE)
settings.beginGroup("mainwindow")
settings.setValue("size", self.size())
settings.setValue("pos", self.pos())
settings.setValue("maximized", self.isMaximized())
settings.setValue("window_state", self.saveState())
settings.endGroup()
def closeEvent(self, event):
if self.close_event_count > 0:
# we handle force first
self.finish()
QMainWindow.closeEvent(self, event)
return
self.close_event_count += 1
# ask to close nodes on exit
# self.close_without_ask is changed in the on_shutdown method in __init__.py
if self._close_on_exit and nm.settings().confirm_exit_when_closing and not self.close_without_ask:
masters = [uri for uri, m in self.masters.items() if m.online]
res = SelectDialog.getValue('Stop nodes?', "Select masters where to stop:",
masters, False, False, '', parent=self,
select_if_single=False,
checkitem1="don't show this dialog again",
closein=nm.settings().timeout_close_dialog,
store_geometry='stop_nodes')
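# res presumably contains: (selected masters, dialog accepted, state of the "don't show this dialog again" checkbox)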
masters2stop, self._close_on_exit = res[0], res[1]
nm.settings().confirm_exit_when_closing = not res[2]
if self._close_on_exit or rospy.is_shutdown():
self.on_finish = True
self._stop_local_master = None
for uri in masters2stop:
try:
m = self.masters[uri]
if m is not None:
if m.is_local:
self._stop_updating()
self._stop_local_master = m
m.stop_nodes_by_name(m.get_nodes_runningIfLocal(), True, [rospy.get_name(), '/rosout'])
if not m.is_local:
m.killall_roscore()
except Exception as e:
rospy.logwarn("Error while stop nodes on %s: %s" % (uri, utf8(e)))
QTimer.singleShot(200, self._test_for_finish)
if masters2stop:
event.ignore()
else:
event.accept()
else:
self._close_on_exit = True
self.close_event_count = 0
event.ignore()
elif self._are_master_in_process():
QTimer.singleShot(200, self._test_for_finish)
self.masternameLabel.setText('<span style=" font-size:14pt; font-weight:600;">%s ...closing...</span>' % self.masternameLabel.text())
rospy.loginfo("Wait for running processes are finished...")
event.ignore()
if event.isAccepted():
self.on_finish = True
self.master_timecheck_timer.stop()
try:
self.storeSetting()
except Exception as e:
rospy.logwarn("Error while store settings: %s" % e)
self.finish()
QMainWindow.closeEvent(self, event)
def _are_master_in_process(self):
for _uri, m in self.masters.items():
m.stop_echo_dialogs()
if m.in_process():
return True
return False
def _test_for_finish(self):
# on exit, this method tests for running process queues with stopping jobs
if self._are_master_in_process():
QTimer.singleShot(200, self._test_for_finish)
return
if hasattr(self, '_stop_local_master') and self._stop_local_master is not None:
self.finish()
self._stop_local_master.killall_roscore()
del self._stop_local_master
self._close_on_exit = False
self.close()
def _stop_updating(self):
if hasattr(self, "_discover_dialog") and self._discover_dialog is not None:
self._discover_dialog.stop()
self.masterlist_service.stop()
self._progress_queue.stop()
self._progress_queue_sync.stop()
self._update_handler.stop()
self.state_topic.stop()
self.stats_topic.stop()
self.own_master_monitor.stop()
self.launch_dock.stop()
self.log_dock.stop()
def finish(self):
if not self._finished:
self._finished = True
print("Mainwindow finish...")
self._stop_updating()
try:
editors = [e for e in self.editor_dialogs.values()]
for editor in editors:
editor.close()
except Exception as _err:
import traceback
print(traceback.format_exc())
for _, master in self.masters.items():
try:
master.close()
except Exception as _err:
import traceback
print(traceback.format_exc())
print("Mainwindow finished!")
def getMasteruri(self):
'''
Requests the ROS master URI from the ROS master through the RPC interface and
returns it. The 'materuri' attribute will be set to the requested value.
@return: ROS master URI
@rtype: C{str} or C{None}
'''
if not hasattr(self, 'materuri') or self.materuri is None:
masteruri = masteruri_from_ros()
master = xmlrpclib.ServerProxy(masteruri)
_, _, self.materuri = master.getUri(rospy.get_name()) # _:=code, message
nm.is_local(get_hostname(self.materuri))
return self.materuri
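# Marks the master with the given URI as online/offline in the internal master dict.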
def setMasterOnline(self, masteruri, online=True):
if masteruri in self.masters:
self.masters[masteruri].online = online
def removeMaster(self, masteruri):
'''
Removes the master with the given master URI from the list.
@param masteruri: the URI of the ROS master
@type masteruri: C{str}
'''
if masteruri in self.masters:
if self.currentMaster is not None and self.currentMaster.masteruri == masteruri:
self.setCurrentMaster(None)
self.masters[masteruri].stop()
self.masters[masteruri].updateHostRequest.disconnect()
self.masters[masteruri].host_description_updated.disconnect()
self.masters[masteruri].capabilities_update_signal.disconnect()
self.masters[masteruri].remove_config_signal.disconnect()
self.masters[masteruri].description_signal.disconnect()
self.masters[masteruri].request_xml_editor.disconnect()
self.masters[masteruri].stop_nodes_signal.disconnect()
self.masters[masteruri].robot_icon_updated.disconnect()
if DIAGNOSTICS_AVAILABLE:
self.diagnostics_signal.disconnect(self.masters[masteruri].append_diagnostic)
self.stackedLayout.removeWidget(self.masters[masteruri])
self.tabPlace.layout().removeWidget(self.masters[masteruri])
for cfg in self.masters[masteruri].default_cfgs:
self.capabilitiesTable.removeConfig(cfg)
self.masters[masteruri].setParent(None)
del self.masters[masteruri]
def getMaster(self, masteruri, create_new=True):
'''
@return: the widget which represents the master of the given ROS master URI. If no
widget for the given URI is available, a new one will be created.
@rtype: L{MasterViewProxy}
'''
if masteruri not in self.masters:
if not create_new:
return None
self.masters[masteruri] = MasterViewProxy(masteruri, self)
self.masters[masteruri].updateHostRequest.connect(self.on_host_update_request)
self.masters[masteruri].host_description_updated.connect(self.on_host_description_updated)
self.masters[masteruri].capabilities_update_signal.connect(self.on_capabilities_update)
self.masters[masteruri].remove_config_signal.connect(self.on_remove_config)
self.masters[masteruri].description_signal.connect(self.on_description_update)
self.masters[masteruri].request_xml_editor.connect(self.on_launch_edit)
self.masters[masteruri].stop_nodes_signal.connect(self.on_stop_nodes)
self.masters[masteruri].robot_icon_updated.connect(self._on_robot_icon_changed)
if DIAGNOSTICS_AVAILABLE:
self.diagnostics_signal.connect(self.masters[masteruri].append_diagnostic)
self.stackedLayout.addWidget(self.masters[masteruri])
if masteruri == self.getMasteruri():
self.masters[masteruri].default_load_launch = self.default_load_launch
return self.masters[masteruri]
def on_host_update_request(self, host):
for key, value in self.masters.items():
if get_hostname(key) == host and value.master_state is not None:
self._update_handler.requestMasterInfo(value.master_state.uri, value.master_state.monitoruri)
def on_host_description_updated(self, masteruri, host, descr):
# self.master_model.update_description(nm.nameres().mastername(masteruri, host), descr)
pass
def on_capabilities_update(self, masteruri, address, config_node, descriptions):
for d in descriptions:
self.capabilitiesTable.updateCapabilities(masteruri, config_node, d)
if masteruri is not None:
master = self.getMaster(masteruri)
self.capabilitiesTable.updateState(masteruri, master.master_info)
def on_remove_config(self, cfg):
self.capabilitiesTable.removeConfig(cfg)
# ======================================================================================================================
# Handling of local monitoring
# (Backup, if no master_discovery node is running)
# ======================================================================================================================
def _subscribe(self):
'''
Try to subscribe to the topics of the master_discovery node. If it fails, the
local monitoring of the ROS master state will be enabled.
'''
if not self.restricted_to_one_master:
try:
self.masterlist_service.retrieveMasterList(self.getMasteruri(), False)
except Exception:
pass
else:
self._setLocalMonitoring(True)
def _setLocalMonitoring(self, on, discoverer=''):
'''
Enables the local monitoring of the ROS master state and disables the view of
the discovered ROS masters.
@param on: enable / disable the local monitoring
@type on: C{boolean}
'''
if self.own_master_monitor.is_running() != on:
self.master_delegate.set_enabled(not on)
self.masterTableView.setEnabled(not on)
self.refreshAllButton.setEnabled(not on)
self.own_master_monitor.pause(not on)
if on:
self.masterTableView.setToolTip("use 'Start' button to enable the master discovering")
self.networkDock.setWindowTitle("ROS Network [disabled]")
else:
self.masterTableView.setToolTip('')
if on:
# remove discovered ROS master and set the local master to selected
for uri in self.masters.keys():
master = self.masters[uri]
if nm.is_local(get_hostname(uri)) or uri == self.getMasteruri():
if not self._history_selected_robot or master.mastername == self._history_selected_robot:
self.setCurrentMaster(master)
else:
if master.master_state is not None:
self.master_model.removeMaster(master.master_state.name)
else:
try:
# determine the ROS network ID
mcast_port = rospy.get_param(rospy.names.ns_join(discoverer, 'mcast_port'))
self.networkDock.setWindowTitle("ROS Network [id: %d]" % (mcast_port - 11511))
self._subscribe()
except Exception:
# try to get the multicast port of master discovery from log
port = 0
network_id = -1
import re
with open(screen.get_ros_logfile(node=discoverer.rstrip('/')), 'r') as mdfile:
for line in mdfile:
if line.find("Listen for multicast at") > -1:
port = map(int, re.findall(r'\d+', line))[-1]
elif line.find("Network ID") > -1:
network_id = map(int, re.findall(r'\d+', line))[-1]
port = 11511 + network_id
if port > 0:
self.networkDock.setWindowTitle("ROS Network [id: %d]" % (port - 11511))
else:
self.networkDock.setWindowTitle("ROS Network")
def on_master_list_err_retrieved(self, masteruri, error):
'''
The callback method connected to the signal, which is emitted on an error
while calling the service to determine the discovered ROS masters. On error,
the local monitoring will be enabled.
'''
if 'no service' not in error:
rospy.logwarn(error)
self._setLocalMonitoring(True)
def hasDiscoveryService(self, minfo):
'''
Tests whether the newly retrieved MasterInfo contains the master_discovery node.
This is identified by the name of a contained 'list_masters' service.
@param minfo: the ROS master Info
@type minfo: U{fkie_master_discovery.MasterInfo<http://docs.ros.org/api/fkie_master_discovery/html/modules.html#module-fkie_master_discovery.master_info>}
'''
# do not use discovery services if the roscore is running on a remote host
if self.restricted_to_one_master:
return False
for service in minfo.services.keys():
if service.endswith('list_masters'):
return True
return False
# ======================================================================================================================
# Handling of received ROS master state messages
# ======================================================================================================================
def on_master_list_retrieved(self, masteruri, service_name, master_list):
'''
Handles the retrieved list of ROS masters.
1. update the ROS Network view
@param master_list: a list with ROS masters
@type master_list: C{[U{fkie_master_discovery.msg.MasterState<http://docs.ros.org/api/fkie_multimaster_msgs/html/msg/MasterState.html>}]}
'''
result_1 = self.state_topic.registerByROS(self.getMasteruri(), False)
result_2 = self.stats_topic.registerByROS(self.getMasteruri(), False)
local_mon = not result_1 or not result_2
self._setLocalMonitoring(local_mon, rospy.names.namespace(result_1))
self._con_tries[masteruri] = 0
# remove ROS masters which are not in the new list
new_uris = [m.uri for m in master_list if m.uri is not None]
for uri in self.masters.keys():
if uri not in new_uris:
master = self.masters[uri]
if not (nm.is_local(get_hostname(uri)) or uri == self.getMasteruri()):
if master.master_state is not None:
self.master_model.removeMaster(master.master_state.name)
self.setMasterOnline(uri, False)
# self.removeMaster(uri)
# add or update master
for m in master_list:
if m.uri is not None:
host = get_hostname(m.uri)
nm.nameres().add_master_entry(m.uri, m.name, host)
m.name = nm.nameres().mastername(m.uri)
master = self.getMaster(m.uri)
master.master_state = m
master.online = True
master.force_next_update()
self._assigne_icon(m.name)
self.master_model.updateMaster(m)
self._update_handler.requestMasterInfo(m.uri, m.monitoruri)
def on_master_state_changed(self, msg):
'''
Handle the received master state message.
1. update the ROS Network view
2. enable local master monitoring, if all masters are removed (the local master too)
@param msg: the ROS message with new master state
@type msg: U{fkie_master_discovery.msg.MasterState<http://docs.ros.org/api/fkie_multimaster_msgs/html/msg/MasterState.html>}
'''
# do not update while closing
if hasattr(self, "on_finish"):
rospy.logdebug("ignore changes on %s, because currently on closing...", msg.master.uri)
return
host = get_hostname(msg.master.uri)
if msg.state == MasterState.STATE_CHANGED:
nm.nameres().add_master_entry(msg.master.uri, msg.master.name, host)
msg.master.name = nm.nameres().mastername(msg.master.uri)
self.getMaster(msg.master.uri).master_state = msg.master
self._assigne_icon(msg.master.name)
self.master_model.updateMaster(msg.master)
if nm.settings().autoupdate:
self._update_handler.requestMasterInfo(msg.master.uri, msg.master.monitoruri)
else:
rospy.loginfo("Autoupdate disabled, the data will not be updated for %s" % msg.master.uri)
if not msg.master.online:
host = get_hostname(msg.master.uri)
rospy.loginfo("remove SSH connection for '%s' because the master is now offline" % host)
nm.ssh().remove(host)
if msg.state == MasterState.STATE_NEW:
# if new master with uri of the local master is received update the master list
if msg.master.uri == self.getMasteruri():
self.masterlist_service.retrieveMasterList(msg.master.uri, False)
nm.nameres().add_master_entry(msg.master.uri, msg.master.name, host)
msg.master.name = nm.nameres().mastername(msg.master.uri)
self.getMaster(msg.master.uri).master_state = msg.master
self._assigne_icon(msg.master.name)
self.master_model.updateMaster(msg.master)
if nm.settings().autoupdate:
self._update_handler.requestMasterInfo(msg.master.uri, msg.master.monitoruri)
else:
rospy.loginfo("Autoupdate disabled, the data will not be updated for %s" % msg.master.uri)
if msg.state == MasterState.STATE_REMOVED:
if msg.master.uri == self.getMasteruri():
# switch to local monitoring, if the local master discovery was removed
self._setLocalMonitoring(True)
else:
nm.nameres().remove_master_entry(msg.master.uri)
self.master_model.removeMaster(msg.master.name)
self.setMasterOnline(msg.master.uri, False)
# self.removeMaster(msg.master.uri)
# start master_sync, if it was selected in the start dialog to start with master_discovery
if self._syncs_to_start:
if msg.state in [MasterState.STATE_NEW, MasterState.STATE_CHANGED]:
# we don't know which host name was used to start master discovery
if host in self._syncs_to_start:
self._syncs_to_start.remove(host)
self.on_sync_start(msg.master.uri)
elif msg.master.name in self._syncs_to_start:
self._syncs_to_start.remove(msg.master.name)
self.on_sync_start(msg.master.uri)
else:
addresses = nm.nameres().addresses(msg.master.uri)
for address in addresses:
if address in self._syncs_to_start:
self._syncs_to_start.remove(address)
self.on_sync_start(msg.master.uri)
# if len(self.masters) == 0:
# self._setLocalMonitoring(True)
def _assigne_icon(self, name, path=None):
'''
Sets the new icon for the given robot. If the path is `None`, search for a
.png file with the robot name.
:param name: robot name
:type name: str
:param path: path of the icon (Default: None)
:type path: str
'''
icon_path = path if path else nm.settings().robot_image_file(name)
if name not in self.__icons or self.__icons[name][1] != path:
if QFile.exists(icon_path):
self.__icons[name] = (QIcon(icon_path), icon_path)
elif name in self.__icons:
del self.__icons[name]
def on_master_monitor_err(self, msg):
self._con_tries[self.getMasteruri()] += 1
def on_master_info_retrieved(self, minfo):
'''
Integrate the received master info.
@param minfo: the ROS master Info
@type minfo: U{fkie_master_discovery.MasterInfo<http://docs.ros.org/api/fkie_master_discovery/html/modules.html#module-fkie_master_discovery.master_info>}
'''
if hasattr(self, "on_finish"):
rospy.logdebug("ignore changes on %s, because currently on closing...", minfo.masteruri)
return
rospy.logdebug("MASTERINFO from %s (%s) received", minfo.mastername, minfo.masteruri)
self._con_tries[minfo.masteruri] = 0
# cputimes_m = os.times()
# cputime_init_m = cputimes_m[0] + cputimes_m[1]
if minfo.masteruri in self.masters:
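# propagate the received master info to all known master views; each view
# (MasterViewProxy) presumably decides itself which parts of the info apply to it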
for _, master in self.masters.items(): # _:=uri
try:
if not master.online and master.masteruri != minfo.masteruri:
continue
# check for running discovery service
new_info = master.master_info is None or master.master_info.timestamp < minfo.timestamp
# cputimes = os.times()
# cputime_init = cputimes[0] + cputimes[1]
master.master_info = minfo
# cputimes = os.times()
# cputime = cputimes[0] + cputimes[1] - cputime_init
# print master.master_state.name, cputime
if master.master_info is not None:
if self._history_selected_robot == minfo.mastername and self._history_selected_robot == master.mastername and self.currentMaster != master:
if self.currentMaster is not None and not self.currentMaster.is_local:
self.setCurrentMaster(master)
# elif nm.is_local(get_hostname(master.master_info.masteruri)) or self.restricted_to_one_master:
elif master.master_info.masteruri == masteruri_from_master() or self.restricted_to_one_master:
if new_info:
has_discovery_service = self.hasDiscoveryService(minfo)
if (not self.own_master_monitor.isPaused() or not self.masterTableView.isEnabled()) and has_discovery_service:
self._subscribe()
if self.currentMaster is None and (not self._history_selected_robot or self._history_selected_robot == minfo.mastername):
self.setCurrentMaster(master)
if not hasattr(self, "_sub_extended_log"):
agg_suffix = '_agg' if nm.settings().use_diagnostics_agg else ''
self._sub_extended_log = rospy.Subscriber('/diagnostics%s' % agg_suffix, DiagnosticArray, self._callback_diagnostics)
# update the list view, whether master is synchronized or not
if master.master_info.masteruri == minfo.masteruri:
self.master_model.setChecked(master.master_state.name, not minfo.getNodeEndsWith('master_sync') is None)
if self.default_profile_load:
self.default_profile_load = False
QTimer.singleShot(2000, self._load_default_profile_slot)
self.capabilitiesTable.updateState(minfo.masteruri, minfo)
except Exception as e:
rospy.logwarn("Error while process received master info from %s: %s", minfo.masteruri, utf8(e))
# update the duplicate nodes
self.updateDuplicateNodes()
# update the buttons, whether master is synchronized or not
if self.currentMaster is not None and self.currentMaster.master_info is not None and not self.restricted_to_one_master:
self.syncButton.setEnabled(True)
self.syncButton.setChecked(not self.currentMaster.master_info.getNodeEndsWith('master_sync') is None)
else:
self.masterlist_service.retrieveMasterList(minfo.masteruri, False)
self.profiler.update_progress()
# cputimes_m = os.times()
# cputime_m = cputimes_m[0] + cputimes_m[1] - cputime_init_m
# print "ALL:", cputime_m
def _load_default_profile_slot(self):
if not hasattr(self, "on_finish"):
self.profiler.on_load_profile_file(self.default_load_launch)
def on_master_errors_retrieved(self, masteruri, error_list):
self.master_model.updateMasterErrors(nm.nameres().mastername(masteruri), error_list)
def on_master_timediff_retrieved(self, masteruri, timediff):
self.master_model.updateTimeDiff(nm.nameres().mastername(masteruri), timediff)
def on_master_info_error(self, masteruri, error):
if masteruri not in self._con_tries:
self._con_tries[masteruri] = 0
self._con_tries[masteruri] += 1
if masteruri == self.getMasteruri():
rospy.logwarn("Error while connect to local master_discovery %s: %s", masteruri, error)
# switch to local monitoring after 3 timeouts
if self._con_tries[masteruri] > 2:
self._setLocalMonitoring(True)
master = self.getMaster(masteruri, False)
if master and master.master_state is not None:
self._update_handler.requestMasterInfo(master.master_state.uri, master.master_state.monitoruri, self.DELAYED_NEXT_REQ_ON_ERR)
def on_conn_stats_updated(self, stats):
'''
Handle the retrieved connection statistics.
1. update the ROS Network view
@param stats: a list with connection statistics
@type stats: C{[U{fkie_master_discovery.msg.LinkState<http://docs.ros.org/api/fkie_multimaster_msgs/html/msg/LinkState.html>}]}
'''
for stat in stats.links:
self.master_model.updateMasterStat(stat.destination, stat.quality)
# ======================================================================================================================
# Handling of master info frame
# ======================================================================================================================
def on_info_clicked(self):
text = '<dl>'
text = '%s<dt><b>Maintainer</b>: Alexander Tiderko <font color=gray>alexander.tiderko@gmail.com</font></dt>' % text
text = '%s<dt><b>Author</b>: Alexander Tiderko, Timo Roehling</dt>' % text
text = '%s<dt><b>License</b>: BSD, some icons are licensed under the GNU Lesser General Public License (LGPL) or Creative Commons Attribution-Noncommercial 3.0 License</dt>' % text
text = '%s</dl>' % text
if nm.__date__ == 'unknown':
text = '%s<dt><b>Version</b>: %s</dt>' % (text, nm.__version__)
else:
text = '%s<dt><b>Version</b>: %s (%s)</dt>' % (text, nm.__version__, nm.__date__)
text = '%s<dt><b>URL</b>: <a href="https://github.com/fkie/multimaster_fkie">https://github.com/fkie/multimaster_fkie</a></dt>' % (text)
MessageBox.about(self, 'About Node Manager', text)
def on_master_log_clicked(self):
'''
Tries to get the log of the master_discovery node on the machine requested by a dialog.
'''
# get the history list
user_list = [self.userComboBox.itemText(i) for i in reversed(range(self.userComboBox.count()))]
user_list.insert(0, 'last used')
params = {'Host': {':type': 'string', ':value': 'localhost'},
'Show master discovery log': {':type': 'bool', ':value': True},
'Show master sync log': {':type': 'bool', ':value': False},
'Show daemon log': {':type': 'bool', ':value': False},
'Username': {':type': 'string', ':value': user_list},
'Only screen log': {':type': 'bool', ':value': True, ':hint': 'There are two logs: ROS-Log and SCREEN-Log'},
# 'Optional Parameter': ('list', params_optional)
}
dia = ParameterDialog(params, sidebar_var='Host', store_geometry="master_log_dialog")
dia.setFilterVisible(False)
dia.setWindowTitle('Show log')
dia.setFocusField('Host')
if dia.exec_():
try:
params = dia.getKeywords(only_changed=False, with_tags=False)
print("params", params)
hostnames = params['Host'] if isinstance(params['Host'], list) else [params['Host']]
log_master_discovery = params['Show master discovery log']
log_master_sync = params['Show master sync log']
log_nm_daemon = params['Show daemon log']
username = params['Username']
screen_only = params['Only screen log']
for hostname in hostnames:
try:
usr = username
if username == 'last used':
usr = nm.settings().host_user(hostname)
else:
nm.settings().set_host_user(hostname, usr)
if log_master_discovery:
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'%s: show log of master discovery' % hostname,
nm.starter().openLog,
('/master_discovery', hostname, usr, screen_only))
if log_master_sync:
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'%s: show log of master sync' % hostname,
nm.starter().openLog,
('/master_sync', hostname, usr, screen_only))
if log_nm_daemon:
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'%s: show log of nm daemon' % hostname,
nm.starter().openLog,
('/node_manager_daemon', hostname, usr, screen_only))
except (Exception, nm.StartException) as err:
import traceback
print(traceback.format_exc(1))
rospy.logwarn("Error while show LOG for master_discovery %s: %s" % (utf8(hostname), utf8(err)))
MessageBox.warning(self, "Show log error",
'Error while show log of master_discovery',
'%s' % utf8(err))
self._progress_queue.start()
except Exception as err:
MessageBox.warning(self, "Show log error",
'Error while parse parameter',
'%s' % utf8(err))
def on_set_time_clicked(self):
if self.currentMaster is not None: # and not self.currentMaster.is_local:
time_dialog = QDialog()
ui_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'TimeInput.ui')
loadUi(ui_file, time_dialog)
host = get_hostname(self.currentMaster.master_state.uri)
time_dialog.setWindowTitle('Set time on %s' % host)
time_dialog.hostsComboBox.addItems(nm.history().cachedParamValues('/ntp'))
if self.currentMaster.is_local:
time_dialog.dateFrame.setVisible(False)
if time_dialog.exec_():
running_nodes = self.currentMaster.get_nodes_runningIfLocal(remove_system_nodes=True)
if running_nodes:
ret = MessageBox.question(self, 'Set Time', 'There are running nodes. Stop them?', buttons=MessageBox.Yes | MessageBox.No)
if ret == MessageBox.Yes:
self.currentMaster.stop_nodes_by_name(running_nodes)
if time_dialog.dateRadioButton.isChecked():
try:
rospy.loginfo("Set remote host time to local time: %s" % self.currentMaster.master_state.uri)
socket.setdefaulttimeout(10)
p = xmlrpclib.ServerProxy(self.currentMaster.master_state.monitoruri)
uri, success, newtime, errormsg = p.setTime(time.time())
if not success:
if errormsg.find('password') > -1:
errormsg += "\nPlease modify /etc/sudoers with sudoedit and add user privilege, e.g:"
errormsg += "\n%s ALL=NOPASSWD: /bin/date" % self.currentMaster.current_user
errormsg += "\n!!!needed to be at the very end of file, don't forget a new line at the end!!!"
errormsg += "\n\nBe aware, it does not replace the time synchronization!"
errormsg += "\nIt sets approximate time without undue delays on communication layer."
MessageBox.warning(self, "Time set error",
'Error while set time on %s' % uri, '%s' % utf8(errormsg))
else:
timediff = time.time() - newtime
rospy.loginfo(" New time difference to %s is approx.: %.3fs" % (self.currentMaster.master_state.uri, timediff))
self.on_master_timediff_retrieved(self.currentMaster.master_state.uri, timediff)
except Exception as e:
errormsg = '%s' % e
if errormsg.find('setTime') > -1:
errormsg += "\nUpdate remote fkie_multimaster!"
rospy.logwarn("Error while set time on %s: %s" % (self.currentMaster.master_state.uri, utf8(errormsg)))
MessageBox.warning(self, "Time sync error",
'Error while set time on %s' % self.currentMaster.master_state.uri,
'%s' % utf8(errormsg))
finally:
socket.setdefaulttimeout(None)
elif time_dialog.ntpdateRadioButton.isChecked():
ntp_host = time_dialog.hostsComboBox.currentText()
nm.history().addParamCache('/ntp', ntp_host)
cmd = "%s %s" % ('sudo ntpdate -v -u -t 1', ntp_host)
nm.starter().ntpdate(host, cmd)
def on_refresh_master_clicked(self):
if self.currentMaster is not None:
rospy.loginfo("Request an update from %s", utf8(self.currentMaster.master_state.monitoruri))
if self.currentMaster.master_info is not None:
check_ts = self.currentMaster.master_info.check_ts
self.currentMaster.master_info.timestamp = self.currentMaster.master_info.timestamp - 1.0
self.currentMaster.master_info.check_ts = check_ts
self.currentMaster.perform_master_checks()
if self.currentMaster.master_state is not None:
self._update_handler.requestMasterInfo(self.currentMaster.master_state.uri, self.currentMaster.master_state.monitoruri)
self.currentMaster.force_next_update()
# self.currentMaster.remove_all_def_configs()
def on_run_node_clicked(self):
'''
Open a dialog to run a ROS node without a configuration
'''
from .run_dialog import RunDialog
if self.currentMaster is not None:
dia = RunDialog(get_hostname(self.currentMaster.masteruri), self.currentMaster.masteruri)
if dia.exec_():
params = dia.run_params()
if params:
params = params + (True, False, self.currentMaster.current_user,) # autorequest must be false
try:
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'run `%s` on %s' % (params[2], params[0]),
nm.starter().runNodeWithoutConfig,
params)
self._progress_queue.start()
except (Exception, nm.StartException) as e:
rospy.logwarn("Error while run `%s` on %s: %s", params[2], params[0], utf8(e))
MessageBox.warning(self, "Run error",
'Error while run node %s [%s]' % (params[2], params[1]),
utf8(e))
else:
MessageBox.critical(self, "Run error",
'No binary specified')
def on_rqt_plugin_start(self, name, plugin):
if self.currentMaster is not None:
try:
if name == 'Terminal':
host = get_hostname(self.currentMaster.master_state.uri)
nm.starter().open_terminal(host)
return
args = []
package = 'rqt_gui'
binary = 'rqt_gui'
prefix = 'rqt_'
suffix = ''
if name == 'RViz':
prefix = 'rviz_'
package = 'rviz'
binary = 'rviz'
if plugin:
args = ['-s', plugin]
if name == 'rosbag record':
package = 'rosbag'
binary = 'record'
prefix = ''
topic_names = []
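# collect topic names from the current selection: published topics of selected
# nodes on the 'Nodes' tab, or the selected topics otherwise; record all ('-a')
# if nothing is selected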
current_tab = self.currentMaster.masterTab.tabWidget.tabText(self.currentMaster.masterTab.tabWidget.currentIndex())
if (current_tab == 'Nodes'):
nodes = self.currentMaster.nodesFromIndexes(self.currentMaster.masterTab.nodeTreeView.selectionModel().selectedIndexes())
if nodes:
for n in nodes:
topic_names.extend(n.published)
else:
topics = self.currentMaster.topicsFromIndexes(self.currentMaster.masterTab.topicsView.selectionModel().selectedIndexes())
if topics:
topic_names.extend([t.name for t in topics])
count_topics = 'ALL'
if topic_names:
args = [' '.join(topic_names)]
count_topics = '%d selected' % len(topic_names)
else:
args = ['-a']
ret = MessageBox.question(self, 'Start rosbag', 'Start rosbag record with %s topics to %s/record_TIMESTAMP?' % (count_topics, nm.settings().LOG_PATH), buttons=MessageBox.Yes | MessageBox.No)
if ret == MessageBox.No:
return
args.append("-o %s/record" % nm.settings().LOG_PATH)
suffix = "_%d" % int(time.time())
node_name = '%s%s_%s%s' % (prefix, name.lower().replace(' ', '_'),
self.currentMaster.master_state.name, suffix)
self.currentMaster._progress_queue.add2queue(utf8(uuid.uuid4()),
'start %s' % name,
nm.starter().runNodeWithoutConfig,
('localhost', package, binary,
nm.nameres().normalize_name(node_name), args,
'%s' % self.currentMaster.master_state.uri,
True, False))
except (Exception, nm.StartException) as e:
import traceback
print(utf8(traceback.format_exc(1)))
rospy.logwarn("Error while start %s: %s" % (name, utf8(e)))
MessageBox.warning(self, "Start error",
'Error while start %s' % name,
'%s' % utf8(e))
self.currentMaster._progress_queue.start()
def on_sync_dialog_released(self, released=False, masteruri=None, external_call=False):
self.syncButton.setEnabled(False)
master = self.currentMaster
sync_node = None
if masteruri is not None:
master = self.getMaster(masteruri, False)
if master is not None and master.master_info is not None:
sync_node = master.master_info.getNodeEndsWith('master_sync')
if master is not None and (sync_node is None or external_call):
self._sync_dialog.resize(350, 190)
if self._sync_dialog.exec_():
try:
host = get_hostname(master.masteruri)
if self._sync_dialog.interface_filename is not None and not nm.is_local(host):
nmd_uri = nmdurl.nmduri(master.masteruri)
sync_file = nmdurl.join(nmdurl.nmduri(), self._sync_dialog.interface_filename)
# copy the interface file to remote machine
self._progress_queue_sync.add2queue(utf8(uuid.uuid4()),
'Transfer sync interface to %s' % nmd_uri,
nm.starter().transfer_file_nmd,
("%s" % nmd_uri, sync_file, False, master.current_user))
self._progress_queue_sync.add2queue(utf8(uuid.uuid4()),
'Start sync on %s' % host,
nm.starter().runNodeWithoutConfig,
("%s" % host, 'fkie_master_sync', 'master_sync', 'master_sync', self._sync_dialog.sync_args, "%s" % master.masteruri, False, False, master.current_user))
self._progress_queue_sync.start()
except Exception:
import traceback
MessageBox.warning(self, "Start sync error",
"Error while start sync node",
utf8(traceback.format_exc(1)))
else:
self.syncButton.setChecked(False)
elif sync_node is not None:
master.stop_nodes([sync_node])
self.syncButton.setEnabled(True)
def on_sync_start(self, masteruri=None):
'''
Enable or disable the synchronization of the master cores
'''
key_mod = QApplication.keyboardModifiers()
if (key_mod & Qt.ShiftModifier or key_mod & Qt.ControlModifier):
self.on_sync_dialog_released(masteruri=masteruri, external_call=True)
# if not master.master_info is None:
# node = master.master_info.getNodeEndsWith('master_sync')
# self.syncButton.setChecked(not node is None)
else:
self.syncButton.setEnabled(False)
master = self.currentMaster
if masteruri is not None:
master = self.getMaster(masteruri, False)
if master is not None:
# ask the user to start the master_sync with loaded launch file
if master.master_info is not None:
node = master.getNode('/master_sync')
if node and node[0].has_configs():
def_cfg_info = '\nNote: default_cfg parameter will be changed!' if node[0].has_default_cfgs(node[0].cfgs) else ''
ret = MessageBox.question(self, 'Start synchronization', 'Start the synchronization using loaded configuration?\n\n `No` starts the master_sync with default parameter.%s' % def_cfg_info, buttons=MessageBox.Yes | MessageBox.No)
if ret == MessageBox.Yes:
master.start_nodes([node[0]])
return
# start the master sync with default settings
default_sync_args = ["_interface_url:='.'",
'_sync_topics_on_demand:=False',
'_ignore_hosts:=[]', '_sync_hosts:=[]',
'_ignore_nodes:=[]', '_sync_nodes:=[]',
'_ignore_topics:=[]', '_sync_topics:=[]',
'_ignore_services:=[]', '_sync_services:=[]',
'_sync_remote_nodes:=False']
try:
host = get_hostname(master.masteruri)
self._progress_queue_sync.add2queue(utf8(uuid.uuid4()),
'start sync on ' + utf8(host),
nm.starter().runNodeWithoutConfig,
(utf8(host), 'fkie_master_sync', 'master_sync', 'master_sync', default_sync_args, utf8(master.masteruri), False, False, master.current_user))
self._progress_queue_sync.start()
except Exception:
pass
self.syncButton.setEnabled(True)
def on_sync_stop(self, masteruri=None):
master = self.currentMaster
if masteruri is not None:
master = self.getMaster(masteruri, False)
if master is not None and master.master_info is not None:
node = master.master_info.getNodeEndsWith('master_sync')
if node is not None:
master.stop_nodes([node])
def on_master_timecheck(self):
# HACK: sometimes the local monitoring will not be activated. Detect that case here.
if len(self.masters) < 2 and self.currentMaster is None:
self._subscribe()
return
# update the info panel of the robot; if the node manager window is not active, update less frequently.
current_time = time.time()
if self.isActiveWindow() or current_time - self._last_time_view_update > 15:
self._last_time_view_update = current_time
if self.currentMaster is not None and self.currentMaster.master_state is not None:
master = self.getMaster(self.currentMaster.master_state.uri)
name = master.master_state.name
masteruri = master.master_state.uri
if self.restricted_to_one_master:
name = ''.join([name, ' <span style=" color:red;">(restricted)</span>'])
if not self.masternameLabel.toolTip():
self.masternameLabel.setToolTip('The multicore options are disabled, because the roscore is running on remote host!')
if master.master_info is not None:
self.showMasterName(masteruri, name, self.timestampStr(master.master_info.check_ts), master.master_state.online)
pass
elif master.master_state is not None:
text = 'Try to get info!!!'
if not nm.settings().autoupdate:
text = 'Press F5 or click on reload to get info'
self.showMasterName(masteruri, name, text, master.master_state.online)
else:
self.showMasterName('', 'No robot selected', None, False)
if (current_time - self._refresh_time > 30.0):
masteruri = self.getMasteruri()
if masteruri is not None:
master = self.getMaster(masteruri)
if master is not None and master.master_state is not None and nm.settings().autoupdate:
self._update_handler.requestMasterInfo(master.master_state.uri, master.master_state.monitoruri)
self._refresh_time = current_time
def showMasterName(self, masteruri, name, timestamp, online=True):
'''
Update the view of the info frame.
'''
con_err = ''
try:
tries = self._con_tries[masteruri]
if tries > 1:
con_err = '<span style=" color:red;">connection problems (%s tries)! </span>' % utf8(tries)
except Exception:
pass
if self.__current_master_label_name != name:
self.__current_master_label_name = name
show_name = name if nm.settings().show_domain_suffix else subdomain(name)
self.masternameLabel.setText('<span style=" font-size:14pt; font-weight:600; color:black">%s</span>' % show_name)
color = QColor.fromRgb(nm.settings().host_color(self.__current_master_label_name, self._default_color.rgb()))
self._new_color(color)
ts = 'updated: %s' % utf8(timestamp) if timestamp is not None else ''
if not nm.settings().autoupdate:
ts = '%s<span style=" color:orange;"> AU off</span>' % ts
self.masterInfoLabel.setText('<span style=" font-size:8pt; color:black">%s%s</span>' % (con_err, ts))
# load the robot image, if one exists
if self.masternameLabel.isEnabled():
if name in self.__icons:
if self.__icons[name][0] != self.__current_icon:
icon = self.__icons[name][0]
self.__current_icon = icon
self.imageLabel.setPixmap(icon.pixmap(self.nameFrame.size()))
self.imageLabel.setToolTip(''.join(['<html><head></head><body><img src="', self.__icons[name][1], '" alt="', name, '"></body></html>']))
elif self.__icons['default_pc'][0] != self.__current_icon:
icon = self.__icons['default_pc'][0]
self.__current_icon = icon
self.imageLabel.setPixmap(icon.pixmap(self.nameFrame.size()))
self.imageLabel.setToolTip('')
# set sim_time info
master = self.getMaster(masteruri, False)
sim_time_enabled = self.masternameLabel.isEnabled() and master is not None and master.use_sim_time
self.simTimeLabel.setVisible(bool(sim_time_enabled))
launch_server_enabled = self.masternameLabel.isEnabled() and (master is not None) and master.has_launch_server()
self.launchServerLabel.setVisible(launch_server_enabled)
self.masternameLabel.setEnabled(online)
self.masterInfoFrame.setEnabled((timestamp is not None))
# update warning symbol / text
if not self.log_dock.isVisible() and self.log_dock.count_warn():
if self.logButton.text():
self.logButton.setIcon(self.__icons['log_warning'][0])
self.logButton.setText('')
else:
self.logButton.setText('%d' % self.log_dock.count_warn())
self.logButton.setIcon(self.__icons['empty'][0])
def timestampStr(self, timestamp):
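# formats the timestamp as 'HH:MM:SS (age)', where age is the elapsed time
# since the timestamp in a short human readable form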
dt = datetime.fromtimestamp(timestamp)
diff = time.time() - timestamp
diff_dt = datetime.fromtimestamp(diff)
before = '0 sec'
if (diff < 60):
before = diff_dt.strftime('%S sec')
elif (diff < 3600):
before = diff_dt.strftime('%M:%S min')
elif (diff < 86400):
before = diff_dt.strftime('%H:%M:%S std')
else:
before = diff_dt.strftime('%d Day(s) %H:%M:%S')
return '%s (%s)' % (dt.strftime('%H:%M:%S'), before)
def updateDuplicateNodes(self):
# update the duplicate nodes
running_nodes = dict()
for _, m in self.masters.items():
if m.online and m.master_state is not None and m.master_state.online:
running_nodes.update(m.get_nodes_runningIfLocal())
for _, m in self.masters.items():
if m.master_state is not None:
m.set_duplicate_nodes(running_nodes)
# ======================================================================================================================
# Handling of master list view
# ======================================================================================================================
def on_master_table_pressed(self, selected):
pass
def on_master_table_clicked(self, selected):
'''
On click on the sync item, the master_sync node will be started or stopped,
depending on run state.
'''
pass
# item = self.master_model.itemFromIndex(selected)
# if isinstance(item, MasterSyncItem):
# pass
def on_master_table_activated(self, selected):
item = self.master_model.itemFromIndex(selected)
MessageBox.information(self, item.name, item.toolTip())
def on_master_selection_changed(self, selected):
'''
If a master was selected, sets the corresponding widget of the stacked layout
as the current widget and shows the state of the selected master.
'''
# si = self.masterTableView.selectedIndexes()
# for index in si:
# if index.row() == selected.row():
item = self.master_model.itemFromIndex(selected)
if item is not None:
self._history_selected_robot = item.master.name
self.setCurrentMaster(item.master.uri)
if not nm.nmd().file.get_packages(item.master.uri):
nm.nmd().file.list_packages_threaded(nmdurl.nmduri(item.master.uri))
if self.currentMaster.master_info is not None and not self.restricted_to_one_master:
node = self.currentMaster.master_info.getNodeEndsWith('master_sync')
self.syncButton.setEnabled(True)
self.syncButton.setChecked(node is not None)
else:
self.syncButton.setEnabled(False)
return
self.launch_dock.raise_()
def setCurrentMaster(self, master):
'''
Changes the view of the master.
:param master: the MasterViewProxy object or masteruri
:type master: MasterViewProxy or str
'''
show_user_field = False
if isinstance(master, MasterViewProxy):
self.currentMaster = master
self.stackedLayout.setCurrentWidget(master)
show_user_field = not master.is_local
self._add_user_to_combo(self.currentMaster.current_user)
self.userComboBox.setEditText(self.currentMaster.current_user)
elif master is None:
self.currentMaster = None
self.stackedLayout.setCurrentIndex(0)
else: # it's masteruri
self.currentMaster = self.getMaster(master)
if self.currentMaster is not None:
self.stackedLayout.setCurrentWidget(self.currentMaster)
show_user_field = not self.currentMaster.is_local
self._add_user_to_combo(self.currentMaster.current_user)
self.userComboBox.setEditText(self.currentMaster.current_user)
else:
self.stackedLayout.setCurrentIndex(0)
if self.currentMaster is not None:
self.launch_dock.set_current_master(self.currentMaster.masteruri, self.currentMaster.master_state.name)
self.user_frame.setVisible(show_user_field)
self.on_master_timecheck()
def _add_user_to_combo(self, user):
for i in range(self.userComboBox.count()):
if user.lower() == self.userComboBox.itemText(i).lower():
return
self.userComboBox.addItem(user)
def on_user_changed(self, user):
if self.currentMaster is not None:
self.currentMaster.current_user = user
def on_masterTableView_selection_changed(self, selected, deselected):
'''
On selection of a master list.
'''
if selected.isValid():
self.on_master_selection_changed(selected)
def on_all_master_refresh_clicked(self):
'''
Retrieves the list of all discovered ROS masters from the master_discovery
node and gets their current state.
'''
# set the timestamp of the current master info back
for _, m in self.masters.items():
if m.master_info is not None:
check_ts = m.master_info.check_ts
m.master_info.timestamp = m.master_info.timestamp - 1.0
m.master_info.check_ts = check_ts
self.masterlist_service.refresh(self.getMasteruri(), False)
def on_discover_network_clicked(self):
try:
self._discover_dialog.raise_()
except Exception:
mcast_group = rospy.get_param('/master_discovery/mcast_group', '226.0.0.0')
self._discover_dialog = NetworkDiscoveryDialog(mcast_group, 11511, 100, self)
self._discover_dialog.network_join_request.connect(self._join_network)
self._discover_dialog.show()
def on_start_robot_clicked(self):
'''
Tries to start the master_discovery node on the machine requested by a dialog.
'''
# get the history list
user_list = [self.userComboBox.itemText(i) for i in reversed(range(self.userComboBox.count()))]
user_list.insert(0, 'last used')
params_optional = {'Discovery type': {':type': 'string', ':value': ['master_discovery', 'zeroconf']},
'ROS Master Name': {':type': 'string', ':value': 'autodetect'},
'ROS Master URI': {':type': 'string', ':value': 'ROS_MASTER_URI'},
'Robot hosts': {':type': 'string', ':value': ''},
'Username': {':type': 'string', ':value': user_list},
'MCast Group': {':type': 'string', ':value': '226.0.0.0'},
'Heartbeat [Hz]': {':type': 'float', ':value': 0.5}
}
params = {'Host': {':type': 'string', ':value': 'localhost'},
'Network(0..99)': {':type': 'int', ':value': '0'},
'Start sync': {':type': 'bool', ':value': nm.settings().start_sync_with_discovery},
'Optional Parameter': params_optional
}
dia = ParameterDialog(params, sidebar_var='Host', store_geometry="start_robot_dialog")
dia.setFilterVisible(False)
dia.setWindowTitle('Start discovery')
dia.setFocusField('Host')
if dia.exec_():
try:
params = dia.getKeywords(only_changed=False)
hostnames = params['Host'] if isinstance(params['Host'], list) else [params['Host']]
port = params['Network(0..99)']
start_sync = params['Start sync']
discovery_type = params['Optional Parameter']['Discovery type']
mastername = 'autodetect'
masteruri = 'ROS_MASTER_URI'
if len(hostnames) < 2:
mastername = params['Optional Parameter']['ROS Master Name']
masteruri = params['Optional Parameter']['ROS Master URI']
robot_hosts = params['Optional Parameter']['Robot hosts']
username = params['Optional Parameter']['Username']
mcast_group = params['Optional Parameter']['MCast Group']
heartbeat_hz = params['Optional Parameter']['Heartbeat [Hz]']
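# normalize the robot hosts input to a comma separated list without spaces or brackets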
if robot_hosts:
robot_hosts = robot_hosts.replace(' ', ',')
robot_hosts = robot_hosts.replace(',,', ',')
robot_hosts = robot_hosts.replace('[', '')
robot_hosts = robot_hosts.replace(']', '')
for hostname in hostnames:
try:
args = []
if port is not None and port and int(port) < 100 and int(port) >= 0:
args.append('_mcast_port:=%s' % (11511 + int(port)))
else:
args.append('_mcast_port:=%s' % (11511))
if not mastername == 'autodetect':
args.append('_name:=%s' % (mastername))
args.append('_mcast_group:=%s' % mcast_group)
args.append('_robot_hosts:=[%s]' % robot_hosts)
args.append('_heartbeat_hz:=%s' % heartbeat_hz)
# TODO: remove the name parameter from the ROS parameter server
usr = username
if username == 'last used':
usr = nm.settings().host_user(hostname)
else:
nm.settings().set_host_user(hostname, usr)
muri = None if masteruri == 'ROS_MASTER_URI' else utf8(masteruri)
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'start discovering on %s' % hostname,
nm.starter().runNodeWithoutConfig,
(utf8(hostname), 'fkie_master_discovery', utf8(discovery_type), utf8(discovery_type), args, muri, False, False, usr))
# start the master sync with default settings
if start_sync:
if nm.is_local(hostname):
default_sync_args = ["_interface_url:='.'",
'_sync_topics_on_demand:=False',
'_ignore_hosts:=[]', '_sync_hosts:=[]',
'_ignore_nodes:=[]', '_sync_nodes:=[]',
'_ignore_topics:=[]', '_sync_topics:=[]',
'_ignore_services:=[]', '_sync_services:=[]',
'_sync_remote_nodes:=False']
self._progress_queue_sync.add2queue(utf8(uuid.uuid4()),
'start sync on %s' % hostname,
nm.starter().runNodeWithoutConfig,
(utf8(hostname), 'fkie_master_sync', 'master_sync', 'master_sync', default_sync_args, muri, False, False, usr))
self._progress_queue_sync.start()
else:
if hostname not in self._syncs_to_start:
self._syncs_to_start.append(hostname)
except (Exception, nm.StartException) as e:
import traceback
print(traceback.format_exc(1))
rospy.logwarn("Error while start master_discovery for %s: %s" % (utf8(hostname), utf8(e)))
MessageBox.warning(self, "Start error",
'Error while start master_discovery',
utf8(e))
self._progress_queue.start()
except Exception as e:
MessageBox.warning(self, "Start error",
'Error while parse parameter',
utf8(e))
def _join_network(self, network):
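# starts a local master_discovery on the multicast port that corresponds to the
# selected network id (11511 + network)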
try:
hostname = 'localhost'
args = []
if network < 100 and network >= 0:
args.append(''.join(['_mcast_port:=', utf8(11511 + int(network))]))
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'start discovering on ' + utf8(hostname),
nm.starter().runNodeWithoutConfig,
(utf8(hostname), 'fkie_master_discovery', 'master_discovery', 'master_discovery', args, None, False, False))
self._progress_queue.start()
except (Exception, nm.StartException) as e:
rospy.logwarn("Error while start master_discovery for %s: %s", utf8(hostname), utf8(e))
MessageBox.warning(self, "Start error",
'Error while start master_discovery',
utf8(e))
def poweroff_host(self, host):
try:
if nm.is_local(utf8(host)):
ret = MessageBox.warning(self, "ROS Node Manager",
"Do you really want to shutdown localhost?",
buttons=MessageBox.Ok | MessageBox.Cancel)
if ret == MessageBox.Cancel:
return
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'poweroff `%s`' % host,
nm.starter().poweroff,
('%s' % host,))
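# additionally stop the master_discovery nodes of all masters known on this host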
masteruris = nm.nameres().masterurisbyaddr(host)
for masteruri in masteruris:
master = self.getMaster(masteruri)
master.stop_nodes_by_name(['/master_discovery'])
self._progress_queue.start()
self.on_description_update('Description', '')
self.launch_dock.raise_()
except (Exception, nm.StartException) as e:
rospy.logwarn("Error while poweroff %s: %s", host, utf8(e))
MessageBox.warning(self, "Run error",
'Error while poweroff %s' % host,
'%s' % utf8(e))
def rosclean(self, masteruri):
try:
host = get_hostname(masteruri)
nuri = nmdurl.nmduri(masteruri)
ret = MessageBox.warning(self, "ROS Node Manager",
"Do you really want delete all logs on `%s`?" % host,
buttons=MessageBox.Ok | MessageBox.Cancel)
if ret == MessageBox.Cancel:
return
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'rosclean `%s`' % nuri,
nm.starter().rosclean,
('%s' % nuri,))
master = self.getMaster(masteruri, create_new=False)
if master is not None:
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'update `%s`' % nuri,
master.perform_nmd_requests)
self._progress_queue.start()
self.launch_dock.raise_()
except (Exception, nm.StartException) as e:
rospy.logwarn("Error while rosclean %s: %s", masteruri, utf8(e))
MessageBox.warning(self, "Run error",
'Error while rosclean %s' % masteruri,
'%s' % utf8(e))
# ======================================================================================================================
# Handling of the launch view signals
# ======================================================================================================================
def on_load_launch_file(self, path, args={}, masteruri=None):
'''
Load the launch file. A ROS master must be selected first.
:param str path: the path of the launch file.
'''
master_proxy = None
if masteruri is not None:
master_proxy = self.getMaster(masteruri, False)
if master_proxy is None:
master_proxy = self.stackedLayout.currentWidget()
if isinstance(master_proxy, MasterViewProxy):
try:
master_proxy.launchfiles = (path, args)
except Exception as e:
import traceback
print(utf8(traceback.format_exc(1)))
MessageBox.warning(self, "Loading launch file", path, '%s' % utf8(e))
# self.setCursor(cursor)
else:
MessageBox.information(self, "Load of launch file", "Select a master first!",)
def on_launch_edit(self, grpc_path, search_text='', trynr=1):
'''
Opens the given path in an editor. If the file is already open, select that editor.
If search text is given, search for the text in the files and go to the line.
:param str grpc_path: path with grpc prefix
:param str search_text: A string to search in file
'''
if grpc_path:
if grpc_path in self.editor_dialogs:
try:
self.editor_dialogs[grpc_path].on_load_request(grpc_path, search_text, only_launch=True)
# self.editor_dialogs[grpc_path].restore()
self.editor_dialogs[grpc_path].activateWindow()
except Exception:
if trynr > 1:
raise
import traceback
print(traceback.format_exc())
del self.editor_dialogs[grpc_path]
self.on_launch_edit(grpc_path, search_text, 2)
else:
editor = Editor([grpc_path], search_text)
if editor.tabWidget.count() > 0:
self.editor_dialogs[grpc_path] = editor
editor.finished_signal.connect(self._editor_dialog_closed)
editor.show()
def _editor_dialog_closed(self, files):
'''
If an editor dialog is closed, remove it from the list.
'''
if files[0] in self.editor_dialogs:
del self.editor_dialogs[files[0]]
def on_launch_transfer(self, files):
'''
Copies the selected files to a remote host.
:param files: A list with paths
:type files: [str]
'''
# use node manager daemon
if files:
nmd_url = nmdurl.nmduri()
if self.currentMaster is not None:
nmd_url = get_hostname(self.currentMaster.masteruri)
params = {'master': {':type': 'string', ':value': self.currentMaster.masteruri},
'recursive': {':type': 'bool', ':value': False}
}
dia = ParameterDialog(params, store_geometry="launch_transfer_dialog")
dia.setFilterVisible(False)
dia.setWindowTitle('Transfer file')
dia.setFocusField('master')
if dia.exec_():
try:
params = dia.getKeywords()
nmd_url = params['master']
recursive = params['recursive']
for path in files:
nmd_url = nmdurl.nmduri(nmd_url)
rospy.loginfo("TRANSFER to %s: %s" % (nmd_url, path))
self.launch_dock.progress_queue.add2queue('%s' % uuid.uuid4(),
'transfer files to %s' % nmd_url,
nm.starter().transfer_file_nmd,
('%s' % nmd_url, path, False))
if recursive:
self.launch_dock.progress_queue.add2queue('%s' % uuid.uuid4(),
"transfer recursive '%s' to %s" % (path, nmd_url),
self._recursive_transfer,
(path, nmd_url))
self.launch_dock.progress_queue.start()
except Exception as e:
MessageBox.warning(self, "Transfer error",
'Error while transfer files', '%s' % utf8(e))
def _recursive_transfer(self, path, nmd_url):
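# collects all files included by the given launch file and queues them for
# transfer to the daemon at nmd_url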
includes = nm.nmd().launch.get_included_files_set(path, True, search_in_ext=nm.settings().SEARCH_IN_EXT)
copy_set = set()
for inc_file in includes:
copy_set.add(inc_file)
for cppath in copy_set:
self.launch_dock.progress_queue.add2queue(utf8(uuid.uuid4()),
'transfer file %s to %s' % (cppath, nmd_url),
nm.starter().transfer_file_nmd,
('%s' % nmd_url, cppath))
self.launch_dock.progress_queue.start()
def _reload_globals_at_next_start(self, launch_file):
if self.currentMaster is not None:
self.currentMaster.reload_global_parameter_at_next_start(launch_file)
# ======================================================================================================================
# Change file detection
# ======================================================================================================================
def changeEvent(self, event):
'''
'''
if self.isActiveWindow() and self.isActiveWindow() != self._last_window_state:
if hasattr(self, 'currentMaster') and self.currentMaster is not None:
# perform delayed checks for changed files or multiple screens
QTimer.singleShot(250, self.currentMaster.perform_master_checks)
self._last_window_state = self.isActiveWindow()
QMainWindow.changeEvent(self, event)
def enterEvent(self, event):
'''
Check for changed files, if the main gui was entered.
'''
QMainWindow.enterEvent(self, event)
# ======================================================================================================================
# Capabilities handling
# ======================================================================================================================
def on_start_nodes(self, masteruri, cfg, nodes):
if masteruri is not None:
master = self.getMaster(masteruri)
master.start_nodes_by_name(nodes, cfg)
def on_stop_nodes(self, masteruri, nodes):
if masteruri is not None:
master = self.getMaster(masteruri)
master.stop_nodes_by_name(nodes)
def on_description_update(self, title, text, force=False):
        # ignore updates if we are currently browsing in the text dialog
if self._description_accept:
if self._description_accept != title:
if not force:
return
                elif not title.endswith(' diagnostic'):  # do not reset for ' diagnostic' titles, so a 'back' link can be added
self._description_accept = ''
wtitle = self.descriptionDock.windowTitle().replace('&', '')
same_title = wtitle == title
valid_sender = self.sender() == self.currentMaster or not isinstance(self.sender(), MasterViewProxy)
no_focus = not self.descriptionTextEdit.hasFocus()
if (valid_sender) and (same_title or no_focus or self._accept_next_update):
self._accept_next_update = False
            # _description_accept is set on click on a link of {node, topic, service}
if not same_title:
if self._description_accept:
self._description_history.append((wtitle, self.descriptionTextEdit.toHtml()))
else:
del self._description_history[:]
            # prepend a 'back' link to the text
if self._description_history:
if len(self._description_history) > 15:
self._description_history.pop(0)
# text = '<a href="back://" title="back"><img src=":icons/back.png" alt="back"></a><br>%s' % text
text = '<a href="back://" title="back">back</a>%s' % text
self.descriptionDock.setWindowTitle(title)
vbar = self.descriptionTextEdit.verticalScrollBar()
stored_vpos = vbar.value()
self.descriptionTextEdit.setText(text)
vbar.setValue(stored_vpos)
if text and force:
self.descriptionDock.raise_()
def on_description_update_cap(self, title, text):
self.descriptionDock.setWindowTitle(title)
self.descriptionTextEdit.setText(text)
def on_description_anchorClicked(self, url):
self._description_accept = self.descriptionDock.windowTitle().replace('&', '')
self._accept_next_update = True
if url.toString().startswith('open-sync-dialog://'):
self.on_sync_dialog_released(False, url.toString().replace('open-sync-dialog', 'http'), True)
elif url.toString().startswith('show-all-screens://'):
master = self.getMaster(url.toString().replace('show-all-screens', 'http'), False)
if master is not None:
master.on_show_all_screens()
elif url.toString().startswith('remove-all-launch-server://'):
master = self.getMaster(url.toString().replace('remove-all-launch-server', 'http'), False)
if master is not None:
master.on_remove_all_launch_server()
elif url.toString().startswith('node://'):
if self.currentMaster is not None:
self._description_accept = self._url_path(url)
self.currentMaster.on_node_selection_changed(None, None, True, self._description_accept)
elif url.toString().startswith('topic://'):
if self.currentMaster is not None:
self._description_accept = self._url_path(url)
self.currentMaster.on_topic_selection_changed(None, None, True, self._description_accept)
elif url.toString().startswith('topicecho://'):
if self.currentMaster is not None:
self.currentMaster.show_topic_output(self._url_path(url), False)
elif url.toString().startswith('topichz://'):
if self.currentMaster is not None:
self.currentMaster.show_topic_output(self._url_path(url), True)
elif url.toString().startswith('topichzssh://'):
if self.currentMaster is not None:
self.currentMaster.show_topic_output(self._url_path(url), True, use_ssh=True)
elif url.toString().startswith('topicpub://'):
if self.currentMaster is not None:
self.currentMaster.start_publisher(self._url_path(url))
elif url.toString().startswith('topicrepub://'):
if self.currentMaster is not None:
self.currentMaster.start_publisher(self._url_path(url), True)
elif url.toString().startswith('topicstop://'):
if self.currentMaster is not None:
self.currentMaster.on_topic_pub_stop_clicked(self._url_path(url))
elif url.toString().startswith('service://'):
if self.currentMaster is not None:
self._description_accept = self._url_path(url)
self.currentMaster.on_service_selection_changed(None, None, True, self._description_accept)
elif url.toString().startswith('servicecall://'):
if self.currentMaster is not None:
self.currentMaster.service_call(self._url_path(url))
elif url.toString().startswith('unregister-node://'):
if self.currentMaster is not None:
self.currentMaster.on_unregister_nodes()
elif url.toString().startswith('start-node://'):
if self.currentMaster is not None:
self.currentMaster.on_start_clicked()
elif url.toString().startswith('restart-node://'):
if self.currentMaster is not None:
self.currentMaster.on_force_start_nodes()
elif url.toString().startswith('restart-node-g://'):
if self.currentMaster is not None:
self.currentMaster.on_force_start_nodes(True)
elif url.toString().startswith('start-node-at-host://'):
if self.currentMaster is not None:
self.currentMaster.on_start_nodes_at_host()
elif url.toString().startswith('start-node-adv://'):
if self.currentMaster is not None:
self.currentMaster.on_start_alt_clicked()
elif url.toString().startswith('kill-node://'):
if self.currentMaster is not None:
self.currentMaster.on_kill_nodes()
elif url.toString().startswith('kill-pid://pid'):
if self.currentMaster is not None:
self.currentMaster.on_kill_pid(int(url.toString().replace('kill-pid://pid', '')))
elif url.toString().startswith('kill-screen://'):
if self.currentMaster is not None:
self.currentMaster.on_kill_screens()
elif url.toString().startswith('copy-log-path://'):
if self.currentMaster is not None:
self.currentMaster.on_log_path_copy()
elif url.toString().startswith('launch://'):
self.on_launch_edit(self._url_path(url), '')
elif url.toString().startswith('reload-globals://'):
self._reload_globals_at_next_start(url.toString().replace('reload-globals://', 'grpc://'))
elif url.toString().startswith('poweroff://'):
self.poweroff_host(self._url_host(url))
elif url.toString().startswith('rosclean://'):
self.rosclean(url.toString().replace('rosclean', 'http'))
elif url.toString().startswith('sysmon-switch://'):
self.sysmon_active_update(url.toString().replace('sysmon-switch', 'http'))
elif url.toString().startswith('nmd-cfg://'):
self.nmd_cfg(url.toString().replace('nmd-cfg', 'http'))
elif url.toString().startswith('nm-cfg://'):
self._on_settings_button_clicked()
elif url.toString().startswith('show-all-diagnostics://'):
if self.currentMaster is not None:
self.currentMaster.show_diagnostic_messages(self._url_path(url))
elif url.toString().startswith('back://'):
if self._description_history:
                # show the last description when 'back' is clicked
title, text = self._description_history[-1]
self._description_accept = title
del self._description_history[-1]
self.descriptionDock.setWindowTitle(title)
self.descriptionTextEdit.setText(text)
else:
try:
from python_qt_binding.QtGui import QDesktopServices
QDesktopServices.openUrl(url)
except Exception as err:
rospy.logwarn("can't open url %s: %s" % (url, err))
self._accept_next_update = False
def _url_path(self, url):
        '''Helper method for Qt5 compatibility'''
if hasattr(url, 'encodedPath'):
return utf8(url.encodedPath())
else:
return utf8(url.path())
def _url_host(self, url):
        '''Helper method for Qt5 compatibility'''
if hasattr(url, 'encodedHost'):
return utf8(url.encodedHost())
else:
return utf8(url.host())
def _restart_nodes(self):
if self.currentMaster is not None:
self.currentMaster.on_force_start_nodes()
def _restart_nodes_g(self):
if self.currentMaster is not None:
self.currentMaster.on_force_start_nodes(True)
def keyPressEvent(self, event):
        '''
        Sets the focus to the filter line of the current master on the Find shortcut.
        '''
QMainWindow.keyPressEvent(self, event)
if event == QKeySequence.Find:
focus_widget = QApplication.focusWidget()
if not isinstance(focus_widget, EnhancedLineEdit):
# set focus to filter line
if self.currentMaster is not None:
self.currentMaster.focus_filter_line()
def _show_section_menu(self, event=None):
# self._timer_alt = None
if self._select_index == 0:
if self.currentMaster is not None:
if self.currentMaster._is_current_tab_name('tabNodes'):
self.currentMaster.masterTab.nodeTreeView.setFocus(Qt.TabFocusReason)
elif self.currentMaster._is_current_tab_name('tabTopics'):
self.currentMaster.masterTab.topicsView.setFocus(Qt.TabFocusReason)
elif self.currentMaster._is_current_tab_name('tabServices'):
self.currentMaster.masterTab.servicesView.setFocus(Qt.TabFocusReason)
elif self.currentMaster._is_current_tab_name('tabParameter'):
self.currentMaster.masterTab.parameterView.setFocus(Qt.TabFocusReason)
elif self._select_index == 1:
self.launch_dock.raise_()
self.launch_dock.ui_file_view.setFocus(Qt.TabFocusReason)
elif self._select_index == 2:
self.descriptionDock.raise_()
self.descriptionTextEdit.setFocus(Qt.TabFocusReason)
elif self._select_index == 3:
self.startRobotButton.setFocus(Qt.TabFocusReason)
elif self._select_index == 4:
self.hideDocksButton.setFocus(Qt.TabFocusReason)
else:
self._select_index = -1
self._select_index += 1
def keyReleaseEvent(self, event):
'''
        Defines some shortcuts for navigation/management in the launch
        list view or topics view.
'''
key_mod = QApplication.keyboardModifiers()
if self.currentMaster is not None and self.currentMaster.masterTab.nodeTreeView.hasFocus():
if event.key() == Qt.Key_F4 and not key_mod:
if self.currentMaster.masterTab.editConfigButton.isEnabled():
self.currentMaster.on_edit_config_clicked()
elif self.currentMaster.masterTab.editRosParamButton.isEnabled():
self.currentMaster.on_edit_rosparam_clicked()
elif event.key() == Qt.Key_F3 and not key_mod and self.currentMaster.masterTab.ioButton.isEnabled():
self.currentMaster.on_io_clicked()
QMainWindow.keyReleaseEvent(self, event)
def image_mouseDoubleClickEvent(self, event):
'''
Set the robot image
'''
if self.currentMaster:
try:
if not os.path.isdir(nm.settings().robots_path):
os.makedirs(nm.settings().robots_path)
(fileName, _) = QFileDialog.getOpenFileName(self,
"Set robot image",
nm.settings().robots_path,
"Image files (*.bmp *.gif *.jpg *.jpeg *.png *.pbm *.xbm);;All files (*)")
if fileName and self.__current_master_label_name:
p = QPixmap(fileName)
p.save(nm.settings().robot_image_file(self.__current_master_label_name))
if self.__current_master_label_name in self.__icons:
del self.__icons[self.__current_master_label_name]
self._assigne_icon(self.__current_master_label_name)
except Exception as e:
MessageBox.warning(self, "Error",
'Set robot image for %s failed!' % utf8(self.__current_master_label_name),
'%s' % utf8(e))
rospy.logwarn("Error while set robot image for %s: %s", utf8(self.__current_master_label_name), utf8(e))
def _set_custom_colors(self):
colors = [self._default_color, QColor(87, 93, 94), QColor(60, 116, 96)]
# QT4 compatibility hack (expected type by QT4 is QRgb, Qt5 is QColor)
if QT_BINDING_VERSION.startswith("4"):
colors = [c.rgb() for c in colors]
QColorDialog.setStandardColor(0, colors[0])
QColorDialog.setStandardColor(1, colors[1])
QColorDialog.setStandardColor(2, colors[2])
def _new_color(self, color):
bg_style = "QWidget#expert_tab { background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 %s, stop: 0.7 %s);}" % (color.name(), self._default_color.name())
self.expert_tab.setStyleSheet("%s" % (bg_style))
def mastername_mouseDoubleClickEvent(self, event):
'''
Set the robot color
'''
if self.currentMaster:
try:
prev_color = QColor.fromRgb(nm.settings().host_color(self.__current_master_label_name, self._default_color.rgb()))
cdiag = QColorDialog(prev_color)
cdiag.currentColorChanged.connect(self._new_color)
if cdiag.exec_():
nm.settings().set_host_color(self.__current_master_label_name, cdiag.selectedColor().rgb())
else:
self._new_color(prev_color)
except Exception as e:
MessageBox.warning(self, "Error",
'Set robot color for %s failed!' % utf8(self.__current_master_label_name),
'%s' % utf8(e))
rospy.logwarn("Error while set robot color for %s: %s", utf8(self.__current_master_label_name), utf8(e))
def _on_robot_icon_changed(self, masteruri, path):
'''
One of the robot icons was changed. Update the icon.
'''
master = self.getMaster(masteruri, False)
if master:
self._assigne_icon(master.mastername, resolve_url(path))
def _callback_system_diagnostics(self, data, grpc_url=''):
try:
muri = nmdurl.masteruri(grpc_url)
master = self.getMaster(muri, create_new=False)
if master:
master.update_system_diagnostics(data)
self.master_model.update_master_diagnostic(nm.nameres().mastername(muri), data)
except Exception as err:
            rospy.logwarn('Error while processing diagnostic messages: %s' % utf8(err))
def _callback_diagnostics(self, data, grpc_url=''):
try:
for diagnostic in data.status:
self.diagnostics_signal.emit(diagnostic)
except Exception as err:
            rospy.logwarn('Error while processing diagnostic messages: %s' % utf8(err))
def sysmon_active_update(self, masteruri):
master = self.getMaster(masteruri, create_new=False)
if master is not None:
master.sysmon_active_update()
def nmd_cfg(self, masteruri):
nmd_uri = nmdurl.nmduri(masteruri)
nm.nmd().settings.get_config_threaded(nmd_uri)
def _nmd_yaml_cfg(self, data, nmdurl):
params = {}
try:
params = ruamel.yaml.load(data, Loader=ruamel.yaml.Loader)
except Exception as err:
rospy.logwarn("Error while parse daemon configuration: %s" % utf8(err))
dia = ParameterDialog(params, store_geometry="nmd_cfg_dialog")
dia.setWindowTitle('Daemon Configuration')
dia.setFocusField('load_warn_level')
if dia.exec_():
try:
params = dia.getKeywords(with_tags=True)
buf = ruamel.yaml.compat.StringIO()
ruamel.yaml.dump(params, buf, Dumper=ruamel.yaml.RoundTripDumper)
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'%s: set configuration for daemon' % nmdurl,
nm.nmd().settings.set_config,
(nmdurl, buf.getvalue()))
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'%s: get system diagnostics' % nmdurl,
nm.nmd().monitor.get_system_diagnostics_threaded,
(nmdurl,))
self._progress_queue.start()
except Exception as err:
import traceback
print(traceback.format_exc())
MessageBox.warning(self, "Daemon configuration error",
                                   'Error while parsing parameters',
'%s' % utf8(err))
def on_nmd_err(self, method, url, path, error):
'''
Handles the error messages from node_manager_daemon.
        :param str method: name of the method that caused this error.
        :param str url: the URI of the node manager daemon.
        :param str path: the affected path.
        :param Exception error: the occurred exception.
'''
muri = nmdurl.masteruri(url)
master = self.getMaster(muri, False)
if master is not None and not master._has_nmd:
# no daemon for this master available, ignore errors
return
reason = method
if method == '_get_nodes':
reason = 'get launch configuration'
rospy.logwarn("Error while %s from %s: %s" % (reason, url, utf8(error)))
if hasattr(error, 'code'):
if error.code() == grpc.StatusCode.UNIMPLEMENTED:
muri = nmdurl.masteruri(url)
master = self.getMaster(muri, create_new=False)
if master:
self.master_model.add_master_error(nm.nameres().mastername(muri), 'node_manager_daemon has unimplemented methods! Please update!')
master.set_diagnostic_warn('/node_manager_daemon', 'unimplemented methods detected! Please update!')
# ======================================================================================================================
# Help site handling
# ======================================================================================================================
def _on_help_go_back(self):
self._on_help_link_clicked(QUrl(''), history_idx=-1)
def _on_help_go_home(self):
self._on_help_link_clicked(self._help_root_url)
def _on_help_go_forward(self):
self._on_help_link_clicked(QUrl(''), history_idx=1)
def _on_help_link_clicked(self, link, history_idx=0):
if link.isEmpty():
# read from history if given link is empty
try:
link = self._help_history[self._help_history_idx + history_idx]
self._help_history_idx += history_idx
except Exception:
pass
if not link.isEmpty():
if history_idx == 0:
# it was not a history request -> add link to history
current_link = self.ui_help_web_view.url()
if current_link != link:
                    # if we previously navigated in the history, remove the forward items
if len(self._help_history) - 1 > self._help_history_idx:
self._help_history = self._help_history[:self._help_history_idx + 1]
self._help_history_idx += 1
self._help_history.append(link)
if link.scheme() == 'file':
try:
fpath = link.toLocalFile()
if fpath.endswith('.rst'):
# render .rst files
                        with open(fpath) as f:
self.ui_help_web_view.setHtml(examples.html_body(utf8(f.read())), link)
else:
self.ui_help_web_view.setUrl(link)
except Exception:
import traceback
msg = "Error while generate help: %s" % traceback.format_exc(2)
rospy.logwarn(msg)
else:
QDesktopServices.openUrl(link)
# update navigation buttons
self.ui_help_back.setEnabled(self._help_history_idx > 0)
self.ui_help_forward.setEnabled(self._help_history_idx < len(self._help_history) - 1)
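# A minimal, standalone sketch of the back/forward handling used by _on_help_link_clicked
# above (assumption: illustrative only, not part of node_manager; plain Python, no Qt).
# Visiting a new link truncates any "forward" entries before appending, while back/forward
# requests only move the history index.
class _HelpHistorySketch(object):
    '''Illustrative only: mirrors the help-history logic of the main window.'''

    def __init__(self):
        self._history = []
        self._idx = -1

    def visit(self, link):
        # a fresh navigation drops the forward part of the history before appending
        if len(self._history) - 1 > self._idx:
            self._history = self._history[:self._idx + 1]
        self._idx += 1
        self._history.append(link)
        return link

    def go(self, offset):
        # offset -1 == back, +1 == forward; out-of-range requests keep the current entry
        new_idx = self._idx + offset
        if 0 <= new_idx < len(self._history):
            self._idx = new_idx
        return self._history[self._idx] if self._history else None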
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import inspect
import numpy as np
import collections
import paddle
from paddle.fluid import core
from paddle.fluid.dygraph import layers
from paddle.fluid.layers.utils import flatten
from paddle.fluid.layers.utils import pack_sequence_as
from paddle.fluid.dygraph.base import switch_to_static_graph
from paddle.fluid.dygraph.dygraph_to_static import logging_utils
from paddle.fluid.dygraph.dygraph_to_static.utils import parse_arg_and_kwargs
from paddle.fluid.dygraph.dygraph_to_static.utils import parse_varargs_name
from paddle.fluid.dygraph.dygraph_to_static.utils import type_name
from paddle.fluid.dygraph.dygraph_to_static.utils import func_to_source_code
from paddle.fluid.dygraph.io import TranslatedLayer
class FunctionSpec(object):
"""
    Wrapper class for a function or class method.
"""
def __init__(self, function, input_spec=None):
self._dygraph_function = function
if input_spec is None:
self._input_spec = None
self._flat_input_spec = None
else:
self._input_spec = self._verify_input_spec(input_spec)
self._flat_input_spec = flatten(self._input_spec)
# parse full argument names list.
self._arg_names, self._default_kwargs = parse_arg_and_kwargs(function)
# parse *args
self.varargs_name = parse_varargs_name(function)
if self.varargs_name is not None and isinstance(function.__self__,
TranslatedLayer):
self._arg_names += function.__self__._input_args_names
def unified_args_and_kwargs(self, args, kwargs):
"""
        Moves kwargs with default values into the arguments list so that `args` contains
        the same number of values as the function definition.
        For example:
            Given the function definition `def foo(x, a=1, b=2)`,
            when calling it as `foo(23)`, `args` is `(23,)` and the default kwargs are `{'a': 1, 'b': 2}`.
            This function returns `args` as `(23, 1, 2)` and `kwargs` as `{}`.
Args:
args(tuple): tuple of input arguments value of decorated function.
kwargs(dict): dict of input keyword arguments value of decorated function.
Return:
New arguments tuple containing default kwargs value.
"""
if len(self._arg_names) < len(args):
error_msg = "The decorated function `{}` requires {} arguments: {}, but received {} with {}.".format(
self._dygraph_function.__name__,
len(self._arg_names), self._arg_names, len(args), args)
if args and inspect.isclass(args[0]):
error_msg += "\n\tMaybe the function has more than one decorator, we don't support this for now."
raise NotImplementedError(error_msg)
else:
raise ValueError(error_msg)
args = list(args)
for i in six.moves.range(len(args), len(self._arg_names)):
arg_name = self._arg_names[i]
if arg_name in kwargs:
args.append(kwargs[arg_name])
del kwargs[arg_name]
else:
if arg_name not in self._default_kwargs:
raise ValueError(
"`{}()` requires `{}` arguments, but not found in input `args`: {} and `kwargs`: {}.".
format(self._dygraph_function.__name__, arg_name, args,
kwargs))
args.append(self._default_kwargs[arg_name])
return tuple(args), kwargs
def _replace_value_with_input_spec(self, args):
args_with_spec = []
for idx, input_var in enumerate(flatten(args)):
if isinstance(input_var, np.ndarray):
input_var = paddle.static.InputSpec.from_numpy(input_var)
elif isinstance(input_var, core.VarBase):
input_var = paddle.static.InputSpec.from_tensor(input_var)
args_with_spec.append(input_var)
args_with_spec = pack_sequence_as(args, args_with_spec)
return args_with_spec
def args_to_input_spec(self, args, kwargs):
"""
Converts input arguments into InputSpec.
        1. If input_spec is specified, use it to construct the feed layers.
        2. If input_spec is None, consider all Tensors and numpy.ndarrays as feed layers.
Args:
args(tuple): tuple of input arguments value of function containing default kwargs value.
kwargs(dict): kwargs arguments received by **kwargs.
Return:
Same nest structure with args and kwargs by replacing value with InputSpec.
"""
args_with_spec = []
kwargs_with_spec = []
if self._input_spec is not None:
            # Note: Because the value type and length of `kwargs` are uncertain,
            # we currently don't support this case while `input_spec` is specified.
if kwargs:
raise ValueError(
"{} got unexpected keyword arguments: {}. Cannot trace the function when `input_spec` is specificed.".
format(self._dygraph_function.__name__, kwargs))
            # Note: The length of `args` can be greater than that of `input_spec`,
            # because `args` may contain non-tensor values merged from `kwargs`
            # after `unified_args_and_kwargs`.
if len(args) < len(self._input_spec):
raise ValueError(
"Requires len(arguments) >= len(input_spec), but received len(args):{} < len(InputSpec): {}".
format(len(args), len(self._input_spec)))
# replace argument with corresponding InputSpec.
args_with_spec = convert_to_input_spec(args, self._input_spec)
else:
args_with_spec = self._replace_value_with_input_spec(args)
kwargs_with_spec = self._replace_value_with_input_spec(kwargs)
            # If no name is specified in input_spec, add a default name
            # according to the argument name from the decorated function.
args_with_spec = replace_spec_empty_name(self._arg_names,
args_with_spec)
return args_with_spec, kwargs_with_spec
@switch_to_static_graph
def to_static_inputs_with_spec(self, input_with_spec, main_program):
"""
Constructs feed layer by inputs with InputSpec information for main program.
Args:
input_with_spec(tuple): input arguments by replacing argument with InputSpec.
main_program(Program): main program for inserting feed layer.
"""
flat_input_spec = flatten(input_with_spec)
inputs = []
block = main_program.global_block()
for i, var_spec in enumerate(flat_input_spec):
if isinstance(var_spec, paddle.static.InputSpec):
feed_layer = block.create_var(
# TODO(Aurelius84): consider a more elegant way to name this
name=var_spec.name or "feed_%s" % i,
shape=var_spec.shape,
dtype=var_spec.dtype,
is_data=True,
need_check_feed=False)
else:
feed_layer = var_spec
inputs.append(feed_layer)
return pack_sequence_as(input_with_spec, inputs)
def _verify_input_spec(self, input_spec):
"""
Verifies the `input_spec` and its element type is valid.
"""
if not isinstance(input_spec, (tuple, list)):
raise TypeError(
"The type(input_spec) should be one of (tuple, list), but received {}.".
format(type_name(input_spec)))
return tuple(input_spec)
def __repr__(self):
return "function: {}({}), input_spec: {}".format(
self._dygraph_function.__name__, ','.join(self._arg_names),
self._input_spec)
@property
def dygraph_function(self):
return self._dygraph_function
@property
def args_name(self):
return self._arg_names
@property
def input_spec(self):
return self._input_spec
@property
def flat_input_spec(self):
return self._flat_input_spec
@property
def code(self):
return func_to_source_code(self._dygraph_function)
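# Illustrative sketch of how FunctionSpec unifies arguments (the function `foo` and the
# values below are hypothetical, not taken from the original code):
#
#     def foo(x, a=1, b=2):
#         return x + a + b
#
#     spec = FunctionSpec(foo)
#     args, kwargs = spec.unified_args_and_kwargs((23,), {})
#     # args == (23, 1, 2), kwargs == {}  -- default kwargs are folded into the positional args
#
#     # Without an explicit input_spec, args_to_input_spec replaces every Tensor/ndarray in
#     # the (nested) arguments with an InputSpec placeholder and names it after the
#     # corresponding argument.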
def get_parameters(layer_instance, include_sublayer=True):
"""
    Returns the parameters of decorated layers. If `include_sublayer` is True,
    the parameters created in sub-layers will be added as well.
"""
params = collections.OrderedDict()
if layer_instance is not None:
if isinstance(layer_instance, layers.Layer):
if include_sublayer:
params = layer_instance.parameters()
names = [p.name for p in params]
params = collections.OrderedDict(zip(names, params))
else:
params = layer_instance._parameters
else:
raise TypeError(
"Type of `layer_instance` should be nn.Layer, but received {}".
format(type_name(layer_instance)))
return params
def get_buffers(layer_instance, include_sublayer=True):
"""
    Returns the Variable buffers of decorated layers. If `include_sublayer` is True,
    the Variable buffers created in sub-layers will be added as well.
"""
buffers = collections.OrderedDict()
if layer_instance is not None:
if isinstance(layer_instance, layers.Layer):
if include_sublayer:
buffers = layer_instance.buffers()
names = [buffer.name for buffer in buffers]
buffers = collections.OrderedDict(zip(names, buffers))
else:
buffers = layer_instance._buffers
else:
raise TypeError(
"Type of `layer_instance` should be nn.Layer, but received {}".
format(type_name(layer_instance)))
return buffers
def convert_to_input_spec(inputs, input_spec):
"""
    Replaces tensors in the structured `inputs` with the InputSpec in `input_spec`.
Args:
inputs(list|dict): nested structure list or dict.
input_spec(list|dict): same nested structure list or dict as inputs.
Return:
        Same structure as inputs, with elements replaced by the specified InputSpec.
"""
def check_type_and_len(input, spec, check_length=False):
if type(input) is not type(spec):
raise TypeError('type(input) should be {}, but received {}.'.format(
type(spec), type(input)))
if check_length and len(input) < len(spec):
raise ValueError(
'Requires len(inputs) >= len(input_spec), but received len(inputs):{} < len(input_spec):{}'.
format(len(inputs), len(input_spec)))
if isinstance(input_spec, (tuple, list)):
input_with_spec = []
check_type_and_len(inputs, input_spec, True)
for i, spec in enumerate(input_spec):
out_spec = convert_to_input_spec(inputs[i], spec)
input_with_spec.append(out_spec)
        # Note: If the remaining inputs contain a tensor or numpy.ndarray
        # without a specified InputSpec, raise a warning.
if len(inputs) > len(input_spec):
for rest_input in inputs[len(input_spec):]:
if isinstance(rest_input, (core.VarBase, np.ndarray)):
logging_utils.warn(
"The inputs constain `{}` without specificing InputSpec, its shape and dtype will be treated immutable. "
"Please specific InputSpec information in `@declarative` if you expect them as mutable inputs.".
format(type_name(rest_input)))
input_with_spec.extend(inputs[len(input_spec):])
return input_with_spec
elif isinstance(input_spec, dict):
input_with_spec = {}
check_type_and_len(inputs, input_spec, True)
for name, input in six.iteritems(inputs):
if name in input_spec:
input_with_spec[name] = convert_to_input_spec(input,
input_spec[name])
else:
input_with_spec[name] = input
return input_with_spec
elif isinstance(input_spec, paddle.static.InputSpec):
return input_spec
else:
# NOTE(Aurelius84): Support non-Tensor type as input spec info
return input_spec
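# Illustrative sketch (hypothetical values; assumes `tensor_x` is a Tensor and
# `img_array` a numpy.ndarray -- neither appears in the original code):
#
#     inputs = [img_array, {'x': tensor_x, 'y': 0.5}]
#     specs = [paddle.static.InputSpec([None, 10], 'float32', 'img'),
#              {'x': paddle.static.InputSpec([None], 'int64', 'x')}]
#     convert_to_input_spec(inputs, specs)
#     # -> [InputSpec('img'), {'x': InputSpec('x'), 'y': 0.5}]
#     # Trailing inputs without a spec are kept as-is; tensors/ndarrays among them only
#     # trigger the immutability warning above.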
def replace_spec_empty_name(args_name, input_with_spec):
"""
    Adds a default name according to the argument name from the decorated function
    if InputSpec.name is not specified.
    The naming rules are as follows:
1. If InputSpec.name is not None, do nothing.
    2. If an argument `x` corresponds to an InputSpec, use the argument name `x`.
    3. If an argument `inputs` corresponds to a list(InputSpec), use names like `inputs_0`, `inputs_1`.
    4. If an argument `input_dic` corresponds to a dict(InputSpec), use the key as the name.
For example:
# case 1: foo(x, y)
foo = to_static(foo, input_spec=[InputSpec([None, 10]), InputSpec([None])])
print([in_var.name for in_var in foo.inputs]) # [x, y]
# case 2: foo(inputs) where inputs is a list
foo = to_static(foo, input_spec=[[InputSpec([None, 10]), InputSpec([None])]])
print([in_var.name for in_var in foo.inputs]) # [inputs_0, inputs_1]
# case 3: foo(inputs) where inputs is a dict
foo = to_static(foo, input_spec=[{'x': InputSpec([None, 10]), 'y': InputSpec([None])}])
print([in_var.name for in_var in foo.inputs]) # [x, y]
"""
input_with_spec = list(input_with_spec)
candidate_arg_names = args_name[:len(input_with_spec)]
for i, arg_name in enumerate(candidate_arg_names):
input_spec = input_with_spec[i]
input_with_spec[i] = _replace_spec_name(arg_name, input_spec)
return input_with_spec
def _replace_spec_name(name, input_spec):
"""
    Replaces InputSpec.name with the given `name` if it is not specified.
"""
if isinstance(input_spec, paddle.static.InputSpec):
if input_spec.name is None:
input_spec.name = name
return input_spec
elif isinstance(input_spec, (list, tuple)):
processed_specs = []
for i, spec in enumerate(input_spec):
new_name = "{}_{}".format(name, i)
processed_specs.append(_replace_spec_name(new_name, spec))
return processed_specs
elif isinstance(input_spec, dict):
processed_specs = {}
for key, spec in six.iteritems(input_spec):
processed_specs[key] = _replace_spec_name(key, spec)
return processed_specs
else:
return input_spec
|
import rdkit
import rdkit.Chem as Chem
import copy
from fast_jtnn.chemutils import get_clique_mol, tree_decomp, get_mol, get_smiles, set_atommap, enum_assemble, decode_stereo
def get_slots(smiles):
mol = Chem.MolFromSmiles(smiles)
return [(atom.GetSymbol(), atom.GetFormalCharge(), atom.GetTotalNumHs()) for atom in mol.GetAtoms()]
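# For example (illustrative, not part of the original file): get_slots("CCO") returns
# [('C', 0, 3), ('C', 0, 2), ('O', 0, 1)], i.e. (symbol, formal charge, total H count) per atom.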
class Vocab(object):
def __init__(self, smiles_list):
self.vocab = smiles_list
self.vmap = {x:i for i,x in enumerate(self.vocab)}
self.slots = [get_slots(smiles) for smiles in self.vocab]
def get_index(self, smiles):
return self.vmap[smiles]
def get_smiles(self, idx):
return self.vocab[idx]
def get_slots(self, idx):
return copy.deepcopy(self.slots[idx])
def size(self):
return len(self.vocab)
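# Illustrative usage (hypothetical SMILES vocabulary, not part of the original file):
#     vocab = Vocab(["C", "CC", "C1CCCCC1"])
#     vocab.get_index("CC")    # -> 1
#     vocab.get_smiles(2)      # -> "C1CCCCC1"
#     vocab.size()             # -> 3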
class MolTreeNode(object):
def __init__(self, smiles, clique=[]):
self.smiles = smiles
self.mol = get_mol(self.smiles)
self.clique = [x for x in clique] #copy
self.neighbors = []
def add_neighbor(self, nei_node):
self.neighbors.append(nei_node)
def recover(self, original_mol):
clique = []
clique.extend(self.clique)
if not self.is_leaf:
for cidx in self.clique:
original_mol.GetAtomWithIdx(cidx).SetAtomMapNum(self.nid)
for nei_node in self.neighbors:
clique.extend(nei_node.clique)
if nei_node.is_leaf: #Leaf node, no need to mark
continue
for cidx in nei_node.clique:
                # allow singleton nodes to override the atom mapping
if cidx not in self.clique or len(nei_node.clique) == 1:
atom = original_mol.GetAtomWithIdx(cidx)
atom.SetAtomMapNum(nei_node.nid)
clique = list(set(clique))
label_mol = get_clique_mol(original_mol, clique)
self.label = Chem.MolToSmiles(Chem.MolFromSmiles(get_smiles(label_mol)))
for cidx in clique:
original_mol.GetAtomWithIdx(cidx).SetAtomMapNum(0)
return self.label
def assemble(self):
neighbors = [nei for nei in self.neighbors if nei.mol.GetNumAtoms() > 1]
neighbors = sorted(neighbors, key=lambda x:x.mol.GetNumAtoms(), reverse=True)
singletons = [nei for nei in self.neighbors if nei.mol.GetNumAtoms() == 1]
neighbors = singletons + neighbors
cands = enum_assemble(self, neighbors)
if len(cands) > 0:
self.cands, _ = zip(*cands)
self.cands = list(self.cands)
else:
self.cands = []
class MolTree(object):
def __init__(self, smiles):
self.smiles = smiles
self.mol = get_mol(smiles)
#Stereo Generation (currently disabled)
#mol = Chem.MolFromSmiles(smiles)
#self.smiles3D = Chem.MolToSmiles(mol, isomericSmiles=True)
#self.smiles2D = Chem.MolToSmiles(mol)
#self.stereo_cands = decode_stereo(self.smiles2D)
cliques, edges = tree_decomp(self.mol)
self.nodes = []
root = 0
for i,c in enumerate(cliques):
cmol = get_clique_mol(self.mol, c)
node = MolTreeNode(get_smiles(cmol), c)
self.nodes.append(node)
if min(c) == 0: root = i
for x,y in edges:
self.nodes[x].add_neighbor(self.nodes[y])
self.nodes[y].add_neighbor(self.nodes[x])
if root > 0:
self.nodes[0],self.nodes[root] = self.nodes[root],self.nodes[0]
for i,node in enumerate(self.nodes):
node.nid = i + 1
if len(node.neighbors) > 1: #Leaf node mol is not marked
set_atommap(node.mol, node.nid)
node.is_leaf = (len(node.neighbors) == 1)
def size(self):
return len(self.nodes)
def recover(self):
for node in self.nodes:
node.recover(self.mol)
def assemble(self):
for node in self.nodes:
node.assemble()
if __name__ == "__main__":
import sys
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
cset = set()
for line in sys.stdin:
smiles = line.split()[0]
mol = MolTree(smiles)
for c in mol.nodes:
cset.add(c.smiles)
for x in cset:
print(x)
|
'''Test the use of TorchScript to export and import models with graph structure'''
from collections import namedtuple
import gym
from gym.spaces import Discrete, Box
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
from typing import Union, Tuple, Optional
Sample = namedtuple("Step", ["obs", "action"])
class MemoryGame(gym.Env):
'''The n-step memory game with noisy observations'''
def __init__(self, length=5, num_cues=2, noise=0.1):
self.observation_space = Box(0, 2, shape=(num_cues + 2,))
self.action_space = Discrete(num_cues)
self._length = length
self._num_cues = num_cues
self._noise = noise
self._current_step = 0
self._current_cue = 0
def _obs(self):
obs = np.random.uniform(0, self._noise, self.observation_space.shape)
if 0 == self._current_step:
obs[-2] += 1
obs[self._current_cue] += 1
elif self._length == self._current_step:
obs[-1] += 1
return obs
def reset(self):
self._current_step = 0
self._current_cue = np.random.randint(self._num_cues)
return self._obs()
def step(self, action):
if self._current_step < self._length:
self._current_step += 1
return self._obs(), 0, False, {}
else:
reward = (1 if action == self._current_cue else 0)
return self._obs(), reward, True, {}
def expert(self):
if self._current_step < self._length:
return self.action_space.sample()
else:
return self._current_cue
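# Observation layout (num_cues + 2 entries, each with uniform noise in [0, noise)):
#   obs[0:num_cues] -> one-hot cue, shown only at step 0
#   obs[-2]         -> "start" flag, set at step 0
#   obs[-1]         -> "query" flag, set at the final step when the cue must be recalled
# Illustrative rollout (hypothetical, not part of the original file):
#     env = MemoryGame(length=5, num_cues=2)
#     obs = env.reset()
#     done = False
#     while not done:
#         obs, reward, done, _ = env.step(env.expert())  # the expert recalls the cue at the end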
def generate_data(env, episodes):
data = []
for _ in range(episodes):
current_seq = []
obs = env.reset()
done = False
while not done:
action = env.expert()
current_seq.append(Sample(obs, action))
obs, _, done, _ = env.step(action)
data.append(current_seq)
return data
def evaluate(env, model, episodes):
total_reward = 0
total_successes = 0
for _ in range(episodes):
obs = env.reset()
episode_reward = 0
done = False
hidden = model.get_h0()
while not done:
# TODO: Switching to ONNX may change how the policy needs to be evaluated
logits, hidden = model(torch.as_tensor(obs, dtype=torch.float32).reshape(1,1, -1), hidden)
action = np.argmax(logits.detach().numpy()[0,0])
obs, reward, done, _ = env.step(action)
episode_reward += reward
total_reward += episode_reward
if episode_reward > 0:
total_successes += 1
return (total_reward / episodes), (total_successes / episodes)
class ReplayBuffer:
def __init__(self, num_actions, capacity=128):
self._num_actions = num_actions
self._capacity = capacity
self._index = 0
self._obs = []
self._actions = []
def add(self, episode):
obs = []
actions = []
for step in episode:
obs.append(step.obs)
actions.append(step.action)
obs = torch.tensor(obs, dtype=torch.float32)
actions = torch.tensor(actions, dtype=torch.int64)
actions = nn.functional.one_hot(actions, self._num_actions)
        if len(self._obs) < self._capacity:
self._obs.append(obs)
self._actions.append(actions)
else:
self._obs[self._index] = obs
self._actions[self._index] = actions
self._index = (self._index + 1) % self._capacity
def sample(self, batch_size):
indices = np.random.randint(len(self._obs), size=batch_size)
obs_batch = [self._obs[idx] for idx in indices]
action_batch = [self._actions[idx] for idx in indices]
seq_mask = [torch.ones(len(seq), dtype=torch.float32) for seq in obs_batch]
seq_mask = nn.utils.rnn.pad_sequence(seq_mask)
obs_batch = nn.utils.rnn.pad_sequence(obs_batch)
action_batch = nn.utils.rnn.pad_sequence(action_batch)
return obs_batch, action_batch, seq_mask
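# Shapes returned by sample() (pad_sequence defaults to time-major, batch_first=False):
#   obs_batch:    (max_len, batch_size, obs_dim)
#   action_batch: (max_len, batch_size, num_actions)  -- one-hot encoded actions
#   seq_mask:     (max_len, batch_size)               -- 1.0 for valid steps, 0.0 for padding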
class LSTMNet(nn.Module):
'''Simple LSTM network class'''
def __init__(self, input_size, output_size, lstm_size):
super(LSTMNet, self).__init__()
self._lstm = nn.LSTM(input_size, lstm_size)
self._linear = nn.Linear(lstm_size, output_size)
self._lstm_size = lstm_size
def forward(self, obs, hidden: Optional[Tuple[torch.Tensor, torch.Tensor]]):
out, hidden = self._lstm(obs, hidden)
out = self._linear(out)
return out, hidden
@torch.jit.export
def get_h0(self, batch_size: int=1):
hidden = torch.zeros((1, batch_size, self._lstm_size), dtype=torch.float32)
cell = torch.zeros((1, batch_size, self._lstm_size), dtype=torch.float32)
return hidden, cell
if __name__ == "__main__":
'''
seq = torch.tensor([[[1,2,3,4],[5,6,7,8]]], dtype=torch.float32)
h0 = (torch.zeros((1,2,32)), torch.zeros((1,2,32)))
class RNN(nn.Module):
def __init__(self):
super(RNN, self).__init__()
self.rnn = nn.LSTM(4,32)
# self.rnn = torch.jit.script(nn.LSTM(4,32)) # Doesn't fix anything
def forward(self, input, hidden: Optional[Tuple[torch.Tensor, torch.Tensor]]): # Note: Cannot pass two arguments to LSTM, seems to throw off torchscript
return self.rnn(input, hidden)
lstm = torch.jit.script(RNN())
# lstm = nn.LSTM(4,32)
# lstm = torch.jit.script(lstm)
print(lstm(seq, h0))
exit()
'''
# Configuration
env = MemoryGame(10, 4)
num_demonstrations = 1024
batch_size = 32
hidden_size = 10
training_epochs = 3000
eval_interval = 100
eval_episodes = 128
# Generate Data
data = generate_data(env, num_demonstrations)
buffer = ReplayBuffer(env.action_space.n, capacity=num_demonstrations)
for episode in data:
buffer.add(episode)
# Initialize model
model = LSTMNet(env.observation_space.shape[0], env.action_space.n, hidden_size)
model = torch.jit.script(model)
    # Train PyTorch model
print("\n===== Training Model =====")
optimizer = Adam(model.parameters(), lr=0.001)
initial_hidden = model.get_h0(batch_size)
for epoch in range(training_epochs):
obs_batch, action_batch, seq_mask = buffer.sample(batch_size)
optimizer.zero_grad()
logits, _ = model(obs_batch, initial_hidden)
# likelihoods = nn.functional.softmax(logits, -1)
likelihoods = nn.functional.log_softmax(logits, -1)
likelihoods = torch.sum(action_batch * likelihoods, -1)
loss = -torch.mean(seq_mask * likelihoods)
loss.backward()
optimizer.step()
if 0 == (epoch + 1) % eval_interval:
mean_reward, success_rate = evaluate(env, model, eval_episodes)
print(f"\n----- Epoch {epoch + 1} -----")
print(f" mean return: {mean_reward}")
print(f" success rate: {success_rate * 100}%")
# Export model to .pt file
torch.jit.save(model, "torch_lstm.pt")
# Import model from .pt file
model = torch.jit.load("torch_lstm.pt")
mean_reward, success_rate = evaluate(env, model, eval_episodes)
print(f"\n----- Serialized Model -----")
print(f" mean return: {mean_reward}")
print(f" success rate: {success_rate * 100}%")
    # Freeze model for inference (preserving get_h0)
model.eval()
model = torch.jit.freeze(model, ["get_h0"])
mean_reward, success_rate = evaluate(env, model, eval_episodes)
print(f"\n----- Frozen Model -----")
print(f" mean return: {mean_reward}")
print(f" success rate: {success_rate * 100}%")
|